diff --git a/.agents/docs/v0.1.0/README.md b/.agents/docs/v0.1.0/README.md new file mode 100644 index 0000000..a0e0743 --- /dev/null +++ b/.agents/docs/v0.1.0/README.md @@ -0,0 +1,1437 @@ +# llmapi v0.1.0 设计文档 + +> 日期:2026-03-10 +> 状态:Draft +> 作者:Sunrisepeak / Claude Code + +--- + +## 1. 概述 + +### 1.1 目标 + +将 `mcpplibs.llmapi` 从当前的 OpenAI-only 单文件客户端,升级为生产级多 Provider LLM API 库: + +- **多 Provider 抽象** — 统一接口适配 OpenAI / Anthropic(及未来 Provider) +- **完整功能** — Tool Calling、多模态输入、结构化输出、Embeddings +- **自研 tinyhttps** — 替换 libcurl,仅依赖 mbedtls,支持 musl-gcc 全静态链接 +- **C++20 协程** — 同步 + `co_await` 异步双模式 +- **跨平台** — Linux / macOS / Windows + +### 1.2 设计原则 + +| 原则 | 说明 | +|------|------| +| 分层抽象 | 核心统一接口 + Provider 特有扩展 + raw JSON escape hatch | +| 编译期多态 | C++20 concepts 约束 Provider,零虚表开销 | +| 最小依赖 | 仅 mbedtls 3.6.1 + nlohmann/json,无 libcurl | +| 静态链接友好 | musl-gcc 全静态可行,无 .so 运行时依赖 | +| 独立可拆分 | tinyhttps 模块完全独立,未来可作为单独库发布 | +| mcpp 风格 | 遵循 mcpp-style-ref 命名与模块规范 | + +### 1.3 决策记录 + +| 决策点 | 选择 | 理由 | +|--------|------|------| +| 抽象哲学 | 分层抽象 | 兼顾统一性和 Provider 特有能力 | +| 多态机制 | C++20 concepts | 零开销,符合项目 C++23 风格 | +| 异步策略 | C++20 协程 | 现代异步范式,自研最小封装 | +| Event Loop | `poll()` / `WSAPoll()` | 跨平台,无平台特定依赖 | +| TLS | mbedtls 3.6.1 | 静态链接友好,Apache 2.0 许可 | +| HTTP | 自研 tinyhttps | 仅实现 LLM API 所需子集,接口通用 | +| CA 证书 | 内嵌 + 系统 fallback | 独立部署自带证书,有系统证书时自动更新 | + +--- + +## 2. 整体架构 + +### 2.1 分层总览 + +``` +┌─────────────────────────────────────────────────────┐ +│ 用户代码 (Application) │ +│ client.chat("hello") | co_await client.chat(...) │ +└──────────────────────┬──────────────────────────────┘ + │ +┌──────────────────────▼──────────────────────────────┐ +│ mcpplibs.llmapi (核心层) │ +│ │ +│ Client
<P>
统一泛型客户端 │ +│ concept Provider Provider 约束 │ +│ ChatParams 通用请求参数 │ +│ Message/Content 统一消息模型 │ +│ ToolDef/ToolCall 工具调用抽象 │ +│ Task 协程 return type │ +└──────────┬─────────────────────┬────────────────────┘ + │ │ +┌──────────▼──────┐ ┌─────────▼─────────┐ +│ :openai │ │ :anthropic │ +│ Provider 实现 │ │ Provider 实现 │ +│ - 请求构建 │ │ - 请求构建 │ +│ - 响应解析 │ │ - 响应解析 │ +│ - SSE 解析 │ │ - SSE 解析 │ +│ - Auth header │ │ - Auth header │ +└──────────┬──────┘ └─────────┬─────────┘ + │ │ +┌──────────▼─────────────────────▼────────────────────┐ +│ mcpplibs.tinyhttps (独立通用模块) │ +│ │ +│ HttpClient 连接管理 + keep-alive │ +│ HttpRequest 请求构建 │ +│ HttpResponse 响应解析 + chunked decoding │ +│ SseParser SSE 事件流解析 │ +│ TlsSocket mbedtls TLS 封装 │ +│ Socket 平台抽象 (BSD/Winsock) │ +│ CaBundle 内嵌 CA + 系统 fallback │ +└─────────────────────────────────────────────────────┘ + │ + ┌─────▼─────┐ + │ mbedtls │ add_requires("mbedtls 3.6.1") + │ 3.6.1 │ + └───────────┘ +``` + +### 2.2 文件结构 + +``` +src/ +├── llmapi.cppm # export module mcpplibs.llmapi +├── types.cppm # :types — Message, Content, ToolDef, ChatParams... +├── provider.cppm # :provider — concept Provider +├── client.cppm # :client — Client
<P>
泛型客户端 +├── coro.cppm # :coro — Task, EventLoop +├── openai.cppm # :openai — OpenAI Provider 实现 +├── anthropic.cppm # :anthropic — Anthropic Provider 实现 +├── json/ +│ ├── json.cppm +│ └── json.hpp +└── tinyhttps/ + ├── tinyhttps.cppm # export module mcpplibs.tinyhttps + ├── socket.cppm # :socket — 平台 socket 抽象 + ├── tls.cppm # :tls — mbedtls TLS 封装 + ├── http.cppm # :http — HTTP/1.1 client + ├── sse.cppm # :sse — SSE parser + ├── proxy.cppm # :proxy — HTTP CONNECT 代理 + └── ca_bundle.cppm # :ca_bundle — 内嵌 CA 证书 +``` + +### 2.3 模块依赖关系 + +``` +mcpplibs.llmapi + ├── :client → :provider, :types, :coro + ├── :openai → :types, mcpplibs.tinyhttps, nlohmann.json + ├── :anthropic → :types, mcpplibs.tinyhttps, nlohmann.json + ├── :types → (独立) + ├── :provider → :types, :coro + └── :coro → (独立) + +mcpplibs.tinyhttps (完全独立,不依赖 llmapi) + ├── :http → :tls, :sse, :proxy + ├── :tls → :socket, :ca_bundle, mbedtls + ├── :socket → (平台 API) + ├── :sse → (独立) + ├── :proxy → :socket, :tls + └── :ca_bundle → mbedtls +``` + +### 2.4 xmake 构建 + +```lua +-- tinyhttps (独立 target,未来可拆出去) +target("tinyhttps") + set_kind("static") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_requires("mbedtls 3.6.1") + add_files("src/tinyhttps/*.cppm") + add_packages("mbedtls") + +-- llmapi (依赖 tinyhttps) +target("llmapi") + set_kind("static") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_deps("tinyhttps") + add_files("src/*.cppm", "src/json/json.cppm") +``` + +--- + +## 3. 
核心类型系统 + +### 3.1 消息模型 + +统一两家 API 的消息表示差异。关键矛盾: +- OpenAI: system 是 messages 数组中的一个 role +- Anthropic: system 是顶层独立字段,messages 只有 user/assistant + +设计:统一用 `Role::System` 存入消息列表,由 Provider 在序列化时提取。 + +```cpp +// src/types.cppm +export module mcpplibs.llmapi:types; + +import std; + +export namespace mcpplibs::llmapi { + +// ─── 角色 ─── +enum class Role { + System, + User, + Assistant, + Tool // OpenAI 的 tool 角色, Anthropic 用 tool_result content block +}; + +// ─── 内容块(统一多模态) ─── +struct TextContent { + std::string text; +}; + +struct ImageContent { + std::string data; // base64 编码或 URL + std::string mediaType; // "image/png", "image/jpeg", ... + bool isUrl { false }; // true=URL, false=base64 +}; + +struct AudioContent { + std::string data; // base64 编码 + std::string format; // "wav", "mp3" +}; + +struct ToolUseContent { + std::string id; + std::string name; + std::string inputJson; // JSON string +}; + +struct ToolResultContent { + std::string toolUseId; + std::string content; // 结果文本 + bool isError { false }; +}; + +using ContentPart = std::variant< + TextContent, + ImageContent, + AudioContent, + ToolUseContent, + ToolResultContent +>; + +// 内容:纯文本(string) 或 多部分(vector) +using Content = std::variant< + std::string, + std::vector +>; + +// ─── 消息 ─── +struct Message { + Role role; + Content content; + std::string name; // 可选,参与者名称 + + // 便捷构造 + static Message system(std::string_view text) { + return { Role::System, std::string(text), {} }; + } + static Message user(std::string_view text) { + return { Role::User, std::string(text), {} }; + } + static Message assistant(std::string_view text) { + return { Role::Assistant, std::string(text), {} }; + } +}; + +// ─── 工具定义 ─── +struct ToolDef { + std::string name; // 工具名称 + std::string description; // 描述 + std::string inputSchema; // JSON Schema string +}; + +// ─── 工具调用(响应中) ─── +struct ToolCall { + std::string id; // 调用 ID + std::string name; // 工具名称 + std::string arguments; // JSON string +}; + +// ─── 工具选择策略 ─── +enum 
class ToolChoice { + Auto, // 模型决定 + None, // 禁止调用 + Required, // 必须调用(OpenAI: "required", Anthropic: "any") +}; + +struct ToolChoiceForced { + std::string name; // 指定工具名 +}; + +using ToolChoicePolicy = std::variant; + +// ─── 结构化输出 ─── +enum class ResponseFormatType { + Text, // 默认纯文本 + JsonObject, // JSON 模式 + JsonSchema, // JSON Schema 约束 (OpenAI only, Anthropic 降级为提示词) +}; + +struct ResponseFormat { + ResponseFormatType type { ResponseFormatType::Text }; + std::string schemaName; // json_schema 模式下的 schema name + std::string schema; // JSON Schema string +}; + +// ─── 请求参数 ─── +struct ChatParams { + // 通用参数(两家都支持) + std::optional temperature; + std::optional topP; + std::optional maxTokens; + std::optional> stop; + + // 工具 + std::optional> tools; + std::optional toolChoice; + + // 结构化输出 + std::optional responseFormat; + + // Provider 特有参数 (escape hatch) + std::optional extraJson; // 合并到请求 JSON 中 +}; + +// ─── 停止原因 ─── +enum class StopReason { + EndOfTurn, // 正常结束 + MaxTokens, // 达到 token 上限 + ToolUse, // 需要调用工具 + ContentFilter, // 内容过滤 (OpenAI only) + StopSequence, // 命中停止序列 (Anthropic only) +}; + +// ─── Token 用量 ─── +struct Usage { + int inputTokens { 0 }; + int outputTokens { 0 }; + int totalTokens { 0 }; // inputTokens + outputTokens +}; + +// ─── 聊天响应 ─── +struct ChatResponse { + std::string id; + std::string model; + std::vector content; // 响应内容块 + StopReason stopReason; + Usage usage; + + // 便捷方法:提取纯文本 + std::string text() const { + std::string result; + for (const auto& part : content) { + if (auto* t = std::get_if(&part)) { + result += t->text; + } + } + return result; + } + + // 便捷方法:提取工具调用 + std::vector tool_calls() const { + std::vector calls; + for (const auto& part : content) { + if (auto* t = std::get_if(&part)) { + calls.push_back({ t->id, t->name, t->inputJson }); + } + } + return calls; + } +}; + +// ─── Embedding 响应 ─── +struct EmbeddingResponse { + std::vector> embeddings; + std::string model; + Usage usage; +}; + +// ─── 对话序列化 ─── +struct 
Conversation { + std::vector messages; + + void save(std::string_view filePath) const; + static Conversation load(std::string_view filePath); + + void push(Message msg) { messages.push_back(std::move(msg)); } + void clear() { messages.clear(); } + int size() const { return static_cast(messages.size()); } +}; + +} // namespace mcpplibs::llmapi +``` + +### 3.2 Provider Concept + +```cpp +// src/provider.cppm +export module mcpplibs.llmapi:provider; + +import :types; +import :coro; + +import std; + +export namespace mcpplibs::llmapi { + +// ─── 流式回调 concept ─── +template +concept StreamCallback = std::invocable && + std::same_as, void>; + +// ─── Provider concept ─── +template +concept Provider = requires(P p, + const std::vector& messages, + const ChatParams& params) { + // 基本信息 + { p.name() } -> std::convertible_to; + + // 同步请求 + { p.chat(messages, params) } -> std::same_as; + + // 异步请求(协程) + { p.chat_async(messages, params) } -> std::same_as>; +}; + +// ─── 可选能力 concepts(Provider 可选实现) ─── + +// 支持流式 +template +concept StreamableProvider = Provider
<P>
&& requires(P p, + const std::vector& messages, + const ChatParams& params, + std::function cb) { + { p.chat_stream(messages, params, cb) } -> std::same_as; + { p.chat_stream_async(messages, params, cb) } -> std::same_as>; +}; + +// 支持 Embeddings +template +concept EmbeddableProvider = Provider
<P>
&& requires(P p, + const std::vector& inputs, + std::string_view model) { + { p.embed(inputs, model) } -> std::same_as; +}; + +} // namespace mcpplibs::llmapi +``` + +### 3.3 Client 泛型客户端 + +```cpp +// src/client.cppm +export module mcpplibs.llmapi:client; + +import :types; +import :provider; +import :coro; + +import std; + +export namespace mcpplibs::llmapi { + +template +class Client { +private: + P provider_; + Conversation conversation_; + ChatParams defaultParams_; + +public: + // ─── 构造 ─── + explicit Client(P provider) + : provider_(std::move(provider)) {} + + // ─── 配置(链式) ─── + Client& default_params(ChatParams params) { + defaultParams_ = std::move(params); + return *this; + } + + // ─── 消息管理 ─── + Client& system(std::string_view content) { + conversation_.push(Message::system(content)); + return *this; + } + + Client& user(std::string_view content) { + conversation_.push(Message::user(content)); + return *this; + } + + Client& add_message(Message msg) { + conversation_.push(std::move(msg)); + return *this; + } + + Client& clear() { + conversation_.clear(); + return *this; + } + + // ─── 同步聊天 ─── + ChatResponse chat(std::string_view userMessage) { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat(conversation_.messages, defaultParams_); + conversation_.push(Message::assistant(response.text())); + return response; + } + + ChatResponse chat(std::string_view userMessage, ChatParams params) { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat(conversation_.messages, params); + conversation_.push(Message::assistant(response.text())); + return response; + } + + // ─── 异步聊天 ─── + Task chat_async(std::string_view userMessage) { + conversation_.push(Message::user(userMessage)); + auto response = co_await provider_.chat_async( + conversation_.messages, defaultParams_); + conversation_.push(Message::assistant(response.text())); + co_return response; + } + + // ─── 流式聊天(需要 StreamableProvider) ─── + 
ChatResponse chat_stream(std::string_view userMessage, + StreamCallback auto&& callback) + requires StreamableProvider
<P>
+ { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat_stream( + conversation_.messages, defaultParams_, + std::forward(callback)); + conversation_.push(Message::assistant(response.text())); + return response; + } + + Task chat_stream_async(std::string_view userMessage, + StreamCallback auto&& callback) + requires StreamableProvider
<P>
+ { + conversation_.push(Message::user(userMessage)); + auto response = co_await provider_.chat_stream_async( + conversation_.messages, defaultParams_, + std::forward(callback)); + conversation_.push(Message::assistant(response.text())); + co_return response; + } + + // ─── Embeddings(需要 EmbeddableProvider) ─── + EmbeddingResponse embed(const std::vector& inputs, + std::string_view model) + requires EmbeddableProvider
<P>
+ { + return provider_.embed(inputs, model); + } + + // ─── 对话管理 ─── + const Conversation& conversation() const { return conversation_; } + Conversation& conversation() { return conversation_; } + + void save_conversation(std::string_view filePath) const { + conversation_.save(filePath); + } + + void load_conversation(std::string_view filePath) { + conversation_ = Conversation::load(filePath); + } + + // ─── Provider 访问 ─── + const P& provider() const { return provider_; } + P& provider() { return provider_; } +}; + +} // namespace mcpplibs::llmapi +``` + +--- + +## 4. Provider 实现 + +### 4.1 OpenAI Provider + +```cpp +// src/openai.cppm +export module mcpplibs.llmapi:openai; + +import :types; +import :coro; + +import mcpplibs.tinyhttps; +import mcpplibs.llmapi.nlohmann.json; +import std; + +export namespace mcpplibs::llmapi::openai { + +using Json = nlohmann::json; + +struct Config { + std::string apiKey; + std::string baseUrl { "https://api.openai.com/v1" }; + std::string model; + std::string organization; // 可选 OpenAI-Organization header + + // HTTP 配置 + std::optional proxy; + std::map customHeaders; +}; + +class OpenAI { +private: + Config config_; + tinyhttps::HttpClient http_; + +public: + explicit OpenAI(Config config) + : config_(std::move(config)) + , http_(tinyhttps::HttpClientConfig { + .proxy = config_.proxy, + }) + {} + + // ─── Provider concept 实现 ─── + + std::string_view name() const { return "openai"; } + + ChatResponse chat(const std::vector& messages, + const ChatParams& params); + + Task chat_async(const std::vector& messages, + const ChatParams& params); + + // ─── StreamableProvider ─── + + ChatResponse chat_stream(const std::vector& messages, + const ChatParams& params, + std::function callback); + + Task chat_stream_async(const std::vector& messages, + const ChatParams& params, + std::function callback); + + // ─── EmbeddableProvider ─── + + EmbeddingResponse embed(const std::vector& inputs, + std::string_view model); + +private: + // ─── 序列化 
─── + Json serialize_messages_(const std::vector& messages) const; + Json serialize_params_(const ChatParams& params, bool stream) const; + Json serialize_tools_(const std::vector& tools) const; + + // ─── 反序列化 ─── + ChatResponse parse_response_(const Json& json) const; + void parse_sse_chunk_(const std::string& data, + std::string& fullResponse, + std::function& callback) const; + + // ─── HTTP ─── + tinyhttps::HttpRequest build_request_(const std::string& endpoint, + const Json& payload) const; +}; + +} // namespace mcpplibs::llmapi::openai +``` + +**OpenAI 序列化要点:** + +| 字段 | 处理方式 | +|------|----------| +| system message | 保留在 messages 数组中,`role: "system"` | +| content | 纯文本 → `string`;多模态 → `ContentPart[]` | +| tool_calls | 响应中 `message.tool_calls[]`,独立字段 | +| tool result | `role: "tool"` + `tool_call_id` | +| auth | `Authorization: Bearer {key}` | +| response_format | 直接映射 `json_object` / `json_schema` | +| max_tokens | 映射到 `max_completion_tokens` | + +### 4.2 Anthropic Provider + +```cpp +// src/anthropic.cppm +export module mcpplibs.llmapi:anthropic; + +import :types; +import :coro; + +import mcpplibs.tinyhttps; +import mcpplibs.llmapi.nlohmann.json; +import std; + +export namespace mcpplibs::llmapi::anthropic { + +using Json = nlohmann::json; + +struct Config { + std::string apiKey; + std::string baseUrl { "https://api.anthropic.com/v1" }; + std::string model; + std::string version { "2023-06-01" }; // anthropic-version header + + int defaultMaxTokens { 4096 }; // Anthropic 要求 max_tokens 必填 + + // HTTP 配置 + std::optional proxy; + std::map customHeaders; +}; + +class Anthropic { +private: + Config config_; + tinyhttps::HttpClient http_; + +public: + explicit Anthropic(Config config) + : config_(std::move(config)) + , http_(tinyhttps::HttpClientConfig { + .proxy = config_.proxy, + }) + {} + + // ─── Provider concept 实现 ─── + + std::string_view name() const { return "anthropic"; } + + ChatResponse chat(const std::vector& messages, + const ChatParams& params); + 
+ Task chat_async(const std::vector& messages, + const ChatParams& params); + + // ─── StreamableProvider ─── + + ChatResponse chat_stream(const std::vector& messages, + const ChatParams& params, + std::function callback); + + Task chat_stream_async(const std::vector& messages, + const ChatParams& params, + std::function callback); + + // 注意:Anthropic 没有 Embeddings API,不实现 EmbeddableProvider + +private: + // ─── 序列化(关键差异点) ─── + + // 从消息列表中提取 system 消息,放入顶层 system 字段 + std::pair, std::vector> + extract_system_(const std::vector& messages) const; + + Json serialize_messages_(const std::vector& messages) const; + Json serialize_params_(const ChatParams& params, bool stream) const; + Json serialize_tools_(const std::vector& tools) const; + + // ─── 反序列化 ─── + ChatResponse parse_response_(const Json& json) const; + + // Anthropic SSE 有 event type 行 (event: xxx),需单独解析 + void parse_sse_event_(const std::string& eventType, + const std::string& data, + std::string& fullResponse, + std::function& callback) const; + + // ─── HTTP ─── + tinyhttps::HttpRequest build_request_(const std::string& endpoint, + const Json& payload) const; +}; + +} // namespace mcpplibs::llmapi::anthropic +``` + +**Anthropic 序列化要点:** + +| 字段 | 处理方式 | +|------|----------| +| system message | 从 messages 提取,放入顶层 `"system"` 字段 | +| content | 纯文本 → `string`;多模态 → `ContentBlock[]`(type: text/image) | +| image | `source.type: "base64"` + `media_type` 或 `source.type: "url"` | +| tool_use | 响应 content 中的 `type: "tool_use"` block | +| tool_result | user 消息 content 中的 `type: "tool_result"` block | +| auth | `x-api-key: {key}` + `anthropic-version: {version}` | +| max_tokens | **必填**,用 `defaultMaxTokens` 兜底 | +| response_format | 不支持 json_schema,降级为工具调用或提示词引导 | +| SSE | `event: content_block_delta` + `data: {...}` 格式,非 OpenAI 的 `data:` only | + +### 4.3 两家 API 差异映射表 + +| 统一类型 | OpenAI 映射 | Anthropic 映射 | +|----------|-------------|----------------| +| `Role::System` | `messages[]: role="system"` | 顶层 `system` 字段 
| +| `Role::Tool` | `role="tool"` + `tool_call_id` | user 消息中 `tool_result` block | +| `StopReason::EndOfTurn` | `finish_reason="stop"` | `stop_reason="end_turn"` | +| `StopReason::MaxTokens` | `finish_reason="length"` | `stop_reason="max_tokens"` | +| `StopReason::ToolUse` | `finish_reason="tool_calls"` | `stop_reason="tool_use"` | +| `Usage.inputTokens` | `usage.prompt_tokens` | `usage.input_tokens` | +| `Usage.outputTokens` | `usage.completion_tokens` | `usage.output_tokens` | +| `ImageContent` | `image_url.url` (URL/data URI) | `source.type` + `media_type` + `data` | +| `ToolDef.inputSchema` | `function.parameters` (JSON Schema) | `input_schema` (JSON Schema) | +| `ToolCall` | `tool_calls[].function` | content block `type="tool_use"` | +| `StreamCallback` | `data: {json}` → `choices[0].delta.content` | `event: content_block_delta` → `delta.text` | +| Stream 结束 | `data: [DONE]` | `event: message_stop` | + +--- + +## 5. tinyhttps 模块设计 + +### 5.1 目标 + +通用 HTTPS 客户端模块,完全独立于 llmapi,未来可作为 `mcpplibs.tinyhttps` 单独发布。 + +### 5.2 核心接口 + +```cpp +// src/tinyhttps/tinyhttps.cppm +export module mcpplibs.tinyhttps; + +export import :socket; +export import :tls; +export import :http; +export import :sse; +export import :proxy; +export import :ca_bundle; + +import std; + +export namespace mcpplibs::tinyhttps { + +// ─── HTTP 方法 ─── +enum class Method { + GET, POST, PUT, DELETE_, PATCH, HEAD +}; + +// ─── HTTP 请求 ─── +struct HttpRequest { + Method method { Method::GET }; + std::string url; // 完整 URL: https://host/path + std::map headers; + std::string body; + + // 便捷构造 + static HttpRequest post(std::string_view url, std::string_view body) { + return { Method::POST, std::string(url), + {{"Content-Type", "application/json"}}, + std::string(body) }; + } +}; + +// ─── HTTP 响应 ─── +struct HttpResponse { + int statusCode { 0 }; + std::string statusText; + std::map headers; + std::string body; + + bool ok() const { return statusCode >= 200 && statusCode < 300; } +}; + +// ─── SSE 事件 
─── +struct SseEvent { + std::string event; // event type (默认 "message") + std::string data; // event data + std::string id; // event id (可选) +}; + +// ─── SSE 回调 ─── +template +concept SseCallback = std::invocable && + std::same_as, bool>; + // 返回 false 停止接收 + +// ─── 客户端配置 ─── +struct HttpClientConfig { + std::optional proxy; // HTTP 代理 URL + int connectTimeoutMs { 10000 }; // 连接超时 + int readTimeoutMs { 60000 }; // 读超时 + bool verifySsl { true }; // TLS 证书验证 + bool keepAlive { true }; // 连接复用 +}; + +// ─── HTTP 客户端 ─── +class HttpClient { +private: + HttpClientConfig config_; + // 连接池(host:port → TlsSocket) + std::map pool_; + +public: + explicit HttpClient(HttpClientConfig config = {}); + ~HttpClient(); + + // 同步请求 + HttpResponse send(const HttpRequest& request); + + // 流式请求(SSE) + HttpResponse send_stream(const HttpRequest& request, + SseCallback auto&& callback); + + // 配置 + HttpClientConfig& config() { return config_; } + +private: + TlsSocket& get_connection_(std::string_view host, int port); + void return_connection_(std::string_view host, int port, TlsSocket socket); +}; + +} // namespace mcpplibs::tinyhttps +``` + +### 5.3 平台 Socket 抽象 + +```cpp +// src/tinyhttps/socket.cppm +export module mcpplibs.tinyhttps:socket; + +import std; + +export namespace mcpplibs::tinyhttps { + +class Socket { +private: +#ifdef _WIN32 + using SocketHandle = unsigned long long; // SOCKET + static constexpr SocketHandle INVALID { ~0ULL }; +#else + using SocketHandle = int; + static constexpr SocketHandle INVALID { -1 }; +#endif + SocketHandle handle_ { INVALID }; + +public: + Socket() = default; + ~Socket(); + + // Move only (RAII) + Socket(Socket&& other) noexcept; + Socket& operator=(Socket&& other) noexcept; + Socket(const Socket&) = delete; + Socket& operator=(const Socket&) = delete; + + // 连接 + bool connect(std::string_view host, int port, int timeoutMs); + void close(); + bool is_valid() const { return handle_ != INVALID; } + + // 读写 + int read(void* buf, int len); + int 
write(const void* buf, int len); + + // 等待可读/可写 (poll-based) + bool wait_readable(int timeoutMs); + bool wait_writable(int timeoutMs); + + // 底层 handle(供 TLS 层使用) + SocketHandle handle() const { return handle_; } + + // 平台初始化 (Windows: WSAStartup) + static void platform_init(); + static void platform_cleanup(); +}; + +} // namespace mcpplibs::tinyhttps +``` + +### 5.4 TLS 封装 + +```cpp +// src/tinyhttps/tls.cppm +export module mcpplibs.tinyhttps:tls; + +import :socket; +import :ca_bundle; +import std; + +export namespace mcpplibs::tinyhttps { + +class TlsSocket { +private: + Socket socket_; + // mbedtls 内部状态 (pimpl 或直接持有) + struct TlsState; + std::unique_ptr state_; + +public: + TlsSocket() = default; + ~TlsSocket(); + + // Move only + TlsSocket(TlsSocket&&) noexcept; + TlsSocket& operator=(TlsSocket&&) noexcept; + + // 连接 + TLS 握手 + bool connect(std::string_view host, int port, + int timeoutMs, bool verifySsl); + void close(); + bool is_valid() const; + + // 加密读写 + int read(void* buf, int len); + int write(const void* buf, int len); + + // poll (包装底层 socket) + bool wait_readable(int timeoutMs); +}; + +} // namespace mcpplibs::tinyhttps +``` + +### 5.5 SSE Parser + +```cpp +// src/tinyhttps/sse.cppm +export module mcpplibs.tinyhttps:sse; + +import std; + +export namespace mcpplibs::tinyhttps { + +struct SseEvent; // forward, 定义在 tinyhttps.cppm + +class SseParser { +private: + std::string buffer_; + std::string currentEvent_; + std::string currentData_; + std::string currentId_; + +public: + // 喂入原始数据,解析出完整事件 + // 返回解析出的事件列表 + std::vector feed(std::string_view chunk); + + void reset(); + +private: + void process_line_(std::string_view line); + void dispatch_event_(std::vector& events); +}; + +} // namespace mcpplibs::tinyhttps +``` + +--- + +## 6. 
协程支持 + +### 6.1 Task + +最小 coroutine return type,不依赖任何平台 API。 + +```cpp +// src/coro.cppm +export module mcpplibs.llmapi:coro; + +import std; + +export namespace mcpplibs::llmapi { + +template +class Task { +public: + struct promise_type { + std::optional value; + std::exception_ptr exception; + + Task get_return_object() { + return Task { + std::coroutine_handle::from_promise(*this) + }; + } + + std::suspend_always initial_suspend() noexcept { return {}; } + std::suspend_always final_suspend() noexcept { return {}; } + + void return_value(T val) { + value = std::move(val); + } + + void unhandled_exception() { + exception = std::current_exception(); + } + }; + +private: + std::coroutine_handle handle_; + +public: + explicit Task(std::coroutine_handle h) : handle_(h) {} + ~Task() { if (handle_) handle_.destroy(); } + + // Move only + Task(Task&& other) noexcept : handle_(std::exchange(other.handle_, {})) {} + Task& operator=(Task&& other) noexcept { + if (this != &other) { + if (handle_) handle_.destroy(); + handle_ = std::exchange(other.handle_, {}); + } + return *this; + } + Task(const Task&) = delete; + Task& operator=(const Task&) = delete; + + // Awaitable interface + bool await_ready() const noexcept { return handle_.done(); } + void await_suspend(std::coroutine_handle<> awaiter) noexcept { + handle_.resume(); + // 简单实现:同步恢复后返回 awaiter + // 生产环境可扩展为事件循环调度 + awaiter.resume(); + } + T await_resume() { + if (handle_.promise().exception) { + std::rethrow_exception(handle_.promise().exception); + } + return std::move(*handle_.promise().value); + } + + // 同步等待(阻塞) + T get() { + if (!handle_.done()) { + handle_.resume(); + } + if (handle_.promise().exception) { + std::rethrow_exception(handle_.promise().exception); + } + return std::move(*handle_.promise().value); + } +}; + +// Task 特化 +template<> +class Task { +public: + struct promise_type { + std::exception_ptr exception; + + Task get_return_object() { + return Task { + std::coroutine_handle::from_promise(*this) + 
}; + } + + std::suspend_always initial_suspend() noexcept { return {}; } + std::suspend_always final_suspend() noexcept { return {}; } + void return_void() noexcept {} + void unhandled_exception() { exception = std::current_exception(); } + }; + +private: + std::coroutine_handle handle_; + +public: + explicit Task(std::coroutine_handle h) : handle_(h) {} + ~Task() { if (handle_) handle_.destroy(); } + + Task(Task&& other) noexcept : handle_(std::exchange(other.handle_, {})) {} + Task& operator=(Task&&) noexcept; + Task(const Task&) = delete; + Task& operator=(const Task&) = delete; + + bool await_ready() const noexcept { return handle_.done(); } + void await_suspend(std::coroutine_handle<> awaiter) noexcept { + handle_.resume(); + awaiter.resume(); + } + void await_resume() { + if (handle_.promise().exception) { + std::rethrow_exception(handle_.promise().exception); + } + } + + void get() { + if (!handle_.done()) handle_.resume(); + if (handle_.promise().exception) { + std::rethrow_exception(handle_.promise().exception); + } + } +}; + +} // namespace mcpplibs::llmapi +``` + +### 6.2 设计说明 + +当前 `Task` 是**同步恢复**语义(`await_suspend` 中直接 resume)。这足以支持: +- `co_await` 语法糖 +- `.get()` 阻塞获取结果 +- 异常传播 + +未来可扩展为真正的异步调度(基于 `poll()` 的 event loop),但初始版本不引入此复杂度。 + +--- + +## 7. 
用户 API 示例 + +### 7.1 基本聊天 + +```cpp +import mcpplibs.llmapi; + +using namespace mcpplibs::llmapi; + +int main() { + // OpenAI + auto client = Client(openai::OpenAI({ + .apiKey = std::getenv("OPENAI_API_KEY"), + .model = "gpt-4o", + })); + + auto response = client.chat("Hello!"); + std::println("{}", response.text()); +} +``` + +### 7.2 切换 Provider + +```cpp +// Anthropic — 只改 Provider,其余代码不变 +auto client = Client(anthropic::Anthropic({ + .apiKey = std::getenv("ANTHROPIC_API_KEY"), + .model = "claude-sonnet-4-20250514", +})); + +auto response = client.chat("Hello!"); +std::println("{}", response.text()); +``` + +### 7.3 流式输出 + +```cpp +auto response = client.chat_stream("讲个笑话", [](std::string_view chunk) { + std::print("{}", chunk); +}); +std::println("\n[tokens: {}]", response.usage.totalTokens); +``` + +### 7.4 Tool Calling + +```cpp +auto params = ChatParams { + .tools = std::vector{{ + .name = "get_weather", + .description = "获取天气信息", + .inputSchema = R"({"type":"object","properties":{"city":{"type":"string"}},"required":["city"]})", + }}, + .toolChoice = ToolChoice::Auto, +}; + +auto response = client.chat("北京今天天气怎么样?", params); + +for (auto& call : response.tool_calls()) { + std::println("调用工具: {} 参数: {}", call.name, call.arguments); + + // 返回工具结果 + client.add_message({ + .role = Role::Tool, + .content = std::vector{ + ToolResultContent { .toolUseId = call.id, .content = R"({"temp":"22°C"})" } + }, + }); +} + +// 继续对话,模型根据工具结果生成最终回答 +auto finalResponse = client.chat(""); +``` + +### 7.5 多模态(图片) + +```cpp +client.add_message({ + .role = Role::User, + .content = std::vector{ + TextContent { "这张图片里有什么?" 
}, + ImageContent { .data = "https://example.com/photo.jpg", .isUrl = true }, + }, +}); + +auto response = client.chat(""); // 消息已手动添加,传空触发请求 +``` + +### 7.6 结构化输出 + +```cpp +auto params = ChatParams { + .responseFormat = ResponseFormat { + .type = ResponseFormatType::JsonSchema, + .schemaName = "person", + .schema = R"({"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]})", + }, +}; + +auto response = client.chat("生成一个虚构人物的信息", params); +auto json = nlohmann::json::parse(response.text()); +``` + +### 7.7 对话序列化 + +```cpp +// 保存 +client.save_conversation("chat_history.json"); + +// 加载并继续 +auto client2 = Client(openai::OpenAI({ ... })); +client2.load_conversation("chat_history.json"); +auto response = client2.chat("继续我们之前的对话"); +``` + +### 7.8 协程异步 + +```cpp +Task async_main() { + auto client = Client(openai::OpenAI({ ... })); + auto response = co_await client.chat_async("Hello!"); + std::println("{}", response.text()); +} + +int main() { + async_main().get(); +} +``` + +### 7.9 自定义 Headers + 代理 + +```cpp +auto client = Client(openai::OpenAI({ + .apiKey = "sk-xxx", + .model = "gpt-4o", + .proxy = "http://proxy.corp.com:8080", + .customHeaders = { + {"X-Custom-Header", "value"}, + }, +})); +``` + +--- + +## 8. 错误处理 + +### 8.1 策略 + +使用 C++ 异常 + `std::expected`(可选未来扩展)。 + +```cpp +export namespace mcpplibs::llmapi { + +// HTTP 层错误 +struct HttpError : std::runtime_error { + int statusCode; + std::string responseBody; + + HttpError(int code, std::string body) + : std::runtime_error("HTTP " + std::to_string(code)) + , statusCode(code), responseBody(std::move(body)) {} +}; + +// API 层错误(解析自 Provider 错误响应) +struct ApiError : std::runtime_error { + std::string type; // "invalid_request_error", "authentication_error", ... 
+ std::string param; // 哪个参数出错(可选) + int statusCode; + + ApiError(int code, std::string type, std::string message) + : std::runtime_error(std::move(message)) + , type(std::move(type)), statusCode(code) {} +}; + +// 连接错误 +struct ConnectionError : std::runtime_error { + using std::runtime_error::runtime_error; +}; + +// TLS 错误 +struct TlsError : std::runtime_error { + using std::runtime_error::runtime_error; +}; + +} // namespace mcpplibs::llmapi +``` + +### 8.2 错误映射 + +| HTTP 状态码 | 异常类型 | 说明 | +|-------------|----------|------| +| 400 | `ApiError` | 请求参数错误 | +| 401 | `ApiError` | 认证失败 | +| 403 | `ApiError` | 权限不足 | +| 429 | `ApiError` | 限流 | +| 500+ | `ApiError` | 服务端错误 | +| 连接失败 | `ConnectionError` | DNS/TCP 连接失败 | +| TLS 握手失败 | `TlsError` | 证书/握手错误 | +| 响应解析失败 | `std::runtime_error` | JSON 解析等 | + +--- + +## 9. 未来扩展点 + +| 方向 | 说明 | 优先级 | +|------|------|--------| +| 重试 + 退避 | 指数退避重试策略(可在 Client 层或 HttpClient 层实现) | P1 | +| 真正异步 Event Loop | 基于 poll() 的非阻塞事件循环,替代同步恢复 | P1 | +| 更多 Provider | Gemini, Mistral, 本地 Ollama 等 | P2 | +| 连接池优化 | LRU 淘汰、最大连接数限制 | P2 | +| 日志系统 | 可插拔日志回调 | P2 | +| HTTP/2 | 多路复用(复杂度高,按需) | P3 | + +--- + +## 10. 
依赖与编译要求 + +| 依赖 | 版本 | 用途 | 许可 | +|------|------|------|------| +| mbedtls | 3.6.1 | TLS | Apache 2.0 | +| nlohmann/json | 3.12.0 | JSON 序列化 | MIT | + +| 编译器 | 最低版本 | 说明 | +|--------|----------|------| +| GCC | 14+ | C++23 modules + coroutines | +| Clang | 18+ | C++23 modules + coroutines | +| MSVC | 2022 17.5+ | C++23 modules + coroutines | + +| 构建系统 | 版本 | +|----------|------| +| xmake | 3.0.0+ | + +### 静态链接命令示例 + +```bash +# musl-gcc 全静态 +xmake f -p linux --sdk=/usr/local/musl --links="mbedtls mbedx509 mbedcrypto" +xmake build -m release +``` + +--- + +## 附录 A:OpenAI vs Anthropic API 完整对照 + +| 维度 | OpenAI `/v1/chat/completions` | Anthropic `/v1/messages` | +|------|------|------| +| Auth | `Authorization: Bearer {key}` | `x-api-key: {key}` + `anthropic-version` | +| System | messages 中 `role: "system"` | 顶层 `system` 字段 | +| Roles | system, user, assistant, tool | user, assistant (仅两种) | +| max_tokens | 可选 (`max_completion_tokens`) | **必填** | +| Content | `string` (纯文本) 或 `ContentPart[]` | `string` 或 `ContentBlock[]` | +| Image | `image_url.url` (URL/data URI) | `source: {type, media_type, data}` | +| Tool 定义 | `{type:"function", function:{...}}` | `{name, description, input_schema}` | +| Tool 调用 | `message.tool_calls[]` | content block `type:"tool_use"` | +| Tool 结果 | `role:"tool"` + `tool_call_id` | user msg 中 `type:"tool_result"` block | +| Streaming | `data: {json}` + `data: [DONE]` | `event: {type}\ndata: {json}` | +| 流式文本 | `choices[0].delta.content` | `delta.type:"text_delta"` → `delta.text` | +| finish_reason | stop, length, tool_calls, content_filter | end_turn, max_tokens, tool_use, stop_sequence | +| Usage | prompt_tokens, completion_tokens, total_tokens | input_tokens, output_tokens | +| Embeddings | `/v1/embeddings` | 无 | +| JSON 模式 | `response_format: {type:"json_object"}` | 无原生支持 | +| JSON Schema | `response_format: {type:"json_schema",...}` | 无原生支持 | diff --git a/.agents/plans/2026-03-10-llmapi-v0.1.0.md b/.agents/plans/2026-03-10-llmapi-v0.1.0.md 
new file mode 100644 index 0000000..791d98d --- /dev/null +++ b/.agents/plans/2026-03-10-llmapi-v0.1.0.md @@ -0,0 +1,2103 @@ +# llmapi v0.1.0 Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Upgrade llmapi from OpenAI-only client to production-grade multi-Provider LLM API library with self-contained HTTPS, C++20 coroutines, and full feature set. + +**Architecture:** Layered abstraction — `mcpplibs.tinyhttps` (independent HTTPS module using mbedtls) → `mcpplibs.llmapi` core types + concept-based Provider system → OpenAI/Anthropic Provider implementations. C++20 concepts for compile-time polymorphism, C++20 coroutines for async. + +**Tech Stack:** C++23 modules, xmake, mbedtls 3.6.1, nlohmann/json 3.12.0, C++20 concepts/coroutines + +**Design Doc:** `.agents/docs/v0.1.0/README.md` + +**Style Guide:** `.agents/skills/mcpp-style-ref/reference.md` — PascalCase types, camelCase members, snake_case functions, `_` suffix for private, `{}` init, `import std`, `.cppm` modules + +--- + +## Phase 1: Foundation — tinyhttps Module + +### Task 1: Platform Socket Abstraction + +**Files:** +- Create: `src/tinyhttps/socket.cppm` +- Create: `tests/tinyhttps/xmake.lua` +- Create: `tests/tinyhttps/test_socket.cpp` +- Create: `tests/xmake.lua` + +**Step 1: Create test infrastructure** + +`tests/xmake.lua`: +```lua +includes("tinyhttps") +``` + +`tests/tinyhttps/xmake.lua`: +```lua +target("test_socket") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_socket.cpp") + add_deps("tinyhttps") +``` + +**Step 2: Write the validation test** + +`tests/tinyhttps/test_socket.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + + // Test 1: platform init/cleanup + Socket::platform_init(); + + // Test 2: construct and validate + Socket s; + assert(!s.is_valid()); + + // Test 3: connect to known 
host (httpbin.org:80) + bool connected = s.connect("httpbin.org", 80, 5000); + assert(connected); + assert(s.is_valid()); + + // Test 4: write HTTP request + std::string req = "GET /get HTTP/1.1\r\nHost: httpbin.org\r\nConnection: close\r\n\r\n"; + int written = s.write(req.data(), static_cast<int>(req.size())); + assert(written > 0); + + // Test 5: read response + char buf[4096]; + int n = s.read(buf, sizeof(buf)); + assert(n > 0); + std::string_view resp(buf, n); + assert(resp.starts_with("HTTP/1.1 200")); + + // Test 6: close and validate + s.close(); + assert(!s.is_valid()); + + // Test 7: move semantics + Socket s1; + s1.connect("httpbin.org", 80, 5000); + Socket s2 = std::move(s1); + assert(!s1.is_valid()); + assert(s2.is_valid()); + s2.close(); + + Socket::platform_cleanup(); + + std::println("test_socket: ALL PASSED"); + return 0; +} +``` + +**Step 3: Run test to verify it fails** + +Run: `xmake build test_socket` +Expected: FAIL — module `mcpplibs.tinyhttps` not found + +**Step 4: Implement Socket** + +`src/tinyhttps/socket.cppm` — implement: +- `Socket` class with RAII (destructor calls close) +- `#ifdef _WIN32` for Winsock2, else BSD sockets +- `connect()` with `getaddrinfo` + non-blocking connect + `poll()`/`WSAPoll()` timeout +- `read()`/`write()` wrapping `recv()`/`send()` +- `wait_readable()`/`wait_writable()` using `poll()` +- `platform_init()` — Windows: `WSAStartup`; others: no-op +- `platform_cleanup()` — Windows: `WSACleanup`; others: no-op +- Move constructor/assignment (transfer handle, invalidate source) + +**Step 5: Create tinyhttps xmake target stub** + +Add to top-level or create `src/tinyhttps/xmake.lua` and include from root `xmake.lua`. 
+ +Minimal `src/tinyhttps/tinyhttps.cppm`: +```cpp +export module mcpplibs.tinyhttps; +export import :socket; +``` + +**Step 6: Run test to verify it passes** + +Run: `xmake build test_socket && xmake run test_socket` +Expected: `test_socket: ALL PASSED` + +**Step 7: Commit** + +```bash +git add src/tinyhttps/ tests/ +git commit -m "feat(tinyhttps): add cross-platform Socket abstraction" +``` + +**Acceptance Criteria:** +- [x] Socket connects to remote TCP host with timeout +- [x] Read/write work over plain TCP +- [x] RAII — destructor closes socket +- [x] Move semantics work correctly +- [x] Compiles on Linux (GCC 14+) +- [x] platform_init/cleanup for Windows compat + +--- + +### Task 2: TLS Socket (mbedtls) + +**Files:** +- Create: `src/tinyhttps/tls.cppm` +- Create: `src/tinyhttps/ca_bundle.cppm` +- Create: `tests/tinyhttps/test_tls.cpp` +- Modify: `src/tinyhttps/tinyhttps.cppm` — add `export import :tls;` +- Modify: `xmake.lua` — add `add_requires("mbedtls 3.6.1")` + +**Step 1: Write the validation test** + +`tests/tinyhttps/test_tls.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + // Test 1: TLS connect to HTTPS host + TlsSocket tls; + bool connected = tls.connect("httpbin.org", 443, 5000, true); + assert(connected); + assert(tls.is_valid()); + + // Test 2: HTTPS request over TLS + std::string req = "GET /get HTTP/1.1\r\nHost: httpbin.org\r\nConnection: close\r\n\r\n"; + int written = tls.write(req.data(), static_cast<int>(req.size())); + assert(written > 0); + + // Test 3: Read HTTPS response + std::string response; + char buf[4096]; + int n; + while ((n = tls.read(buf, sizeof(buf))) > 0) { + response.append(buf, n); + } + assert(response.starts_with("HTTP/1.1 200")); + assert(response.find("\"url\"") != std::string::npos); // httpbin JSON body + + // Test 4: close + tls.close(); + assert(!tls.is_valid()); + + // Test 5: move semantics + TlsSocket t1; + 
t1.connect("httpbin.org", 443, 5000, true); + TlsSocket t2 = std::move(t1); + assert(!t1.is_valid()); + assert(t2.is_valid()); + t2.close(); + + Socket::platform_cleanup(); + std::println("test_tls: ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test to verify it fails** + +Run: `xmake build test_tls` +Expected: FAIL — `:tls` partition not found + +**Step 3: Implement CaBundle** + +`src/tinyhttps/ca_bundle.cppm`: +- Embed Mozilla CA bundle as `constexpr` string or load from system paths +- `load_system_certs()` — try system paths first, fallback to embedded +- System paths: `/etc/ssl/certs/ca-certificates.crt` (Debian/Ubuntu), `/etc/pki/tls/certs/ca-bundle.crt` (RHEL), `/etc/ssl/cert.pem` (macOS) + +**Step 4: Implement TlsSocket** + +`src/tinyhttps/tls.cppm`: +- Uses pimpl pattern (`std::unique_ptr<TlsState>`) to hide mbedtls headers +- `TlsState` holds: `mbedtls_ssl_context`, `mbedtls_ssl_config`, `mbedtls_ctr_drbg_context`, `mbedtls_entropy_context`, `mbedtls_x509_crt` +- `connect()`: create Socket → TCP connect → init mbedtls → set hostname (SNI) → TLS handshake +- `read()`/`write()`: `mbedtls_ssl_read`/`mbedtls_ssl_write` +- `close()`: `mbedtls_ssl_close_notify` → socket close +- RAII + move semantics + +**Step 5: Update tinyhttps module and xmake** + +`src/tinyhttps/tinyhttps.cppm` add: `export import :tls; export import :ca_bundle;` + +xmake: add `add_requires("mbedtls 3.6.1")`, add packages to tinyhttps target. 
+ +**Step 6: Run test to verify it passes** + +Run: `xmake build test_tls && xmake run test_tls` +Expected: `test_tls: ALL PASSED` + +**Step 7: Commit** + +```bash +git add src/tinyhttps/tls.cppm src/tinyhttps/ca_bundle.cppm tests/tinyhttps/test_tls.cpp +git commit -m "feat(tinyhttps): add TLS socket with mbedtls + CA bundle" +``` + +**Acceptance Criteria:** +- [x] TLS handshake to public HTTPS host succeeds +- [x] Certificate verification works (rejects invalid certs when verifySsl=true) +- [x] Read/write encrypted data +- [x] CA bundle loads (embedded or system) +- [x] RAII + move semantics +- [x] No mbedtls headers leak into public module interface + +--- + +### Task 3: HTTP/1.1 Client + +**Files:** +- Create: `src/tinyhttps/http.cppm` +- Create: `tests/tinyhttps/test_http.cpp` +- Modify: `src/tinyhttps/tinyhttps.cppm` — add `export import :http;` + +**Step 1: Write the validation test** + +`tests/tinyhttps/test_http.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client; + + // Test 1: simple GET + auto resp = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/get", + .headers = {{"Accept", "application/json"}}, + }); + assert(resp.ok()); + assert(resp.statusCode == 200); + assert(resp.body.find("\"url\"") != std::string::npos); + + // Test 2: POST with JSON body + auto resp2 = client.send(HttpRequest::post( + "https://httpbin.org/post", + R"({"key":"value"})" + )); + assert(resp2.ok()); + assert(resp2.body.find("\"key\"") != std::string::npos); + + // Test 3: custom headers + auto resp3 = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/headers", + .headers = {{"X-Custom", "test123"}}, + }); + assert(resp3.ok()); + assert(resp3.body.find("test123") != std::string::npos); + + // Test 4: 404 handling + auto resp4 = client.send(HttpRequest { + .method = Method::GET, + .url = 
"https://httpbin.org/status/404", + }); + assert(!resp4.ok()); + assert(resp4.statusCode == 404); + + // Test 5: connection timeout + auto clientFast = HttpClient(HttpClientConfig { .connectTimeoutMs = 1 }); + try { + clientFast.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/delay/10", + }); + assert(false); // should have thrown + } catch (const std::exception&) { + // expected + } + + Socket::platform_cleanup(); + std::println("test_http: ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test to verify it fails** + +Run: `xmake build test_http` +Expected: FAIL — `HttpClient` not defined + +**Step 3: Implement HTTP client** + +`src/tinyhttps/http.cppm`: +- URL parsing: extract scheme, host, port, path from URL string +- `send()`: + 1. Parse URL + 2. Get or create TlsSocket connection (connection pool by host:port) + 3. Build HTTP/1.1 request string (method, path, headers, Content-Length, body) + 4. Write request + 5. Read response: parse status line, headers (handle chunked Transfer-Encoding), body + 6. Return `HttpResponse` +- Chunked decoding: read chunk-size line, read chunk data, repeat until `0\r\n` +- Content-Length mode: read exactly N bytes +- Connection pool: `std::map` keyed by `host:port` +- Keep-alive: reuse connections when `Connection: keep-alive` + +**Step 4: Run test to verify it passes** + +Run: `xmake build test_http && xmake run test_http` +Expected: `test_http: ALL PASSED` + +**Step 5: Commit** + +```bash +git add src/tinyhttps/http.cppm tests/tinyhttps/test_http.cpp +git commit -m "feat(tinyhttps): add HTTP/1.1 client with connection pooling" +``` + +**Acceptance Criteria:** +- [x] GET and POST requests work over HTTPS +- [x] Custom headers sent correctly +- [x] HTTP status codes parsed (200, 404, etc.) 
+- [x] Chunked transfer-encoding decoded +- [x] Content-Length body reading works +- [x] Connection timeout throws exception +- [x] Connection pooling reuses sockets + +--- + +### Task 4: SSE Parser + +**Files:** +- Create: `src/tinyhttps/sse.cppm` +- Create: `tests/tinyhttps/test_sse.cpp` +- Modify: `src/tinyhttps/tinyhttps.cppm` — add `export import :sse;` + +**Step 1: Write the validation test** + +`tests/tinyhttps/test_sse.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + + SseParser parser; + + // Test 1: simple data event + auto events = parser.feed("data: hello\n\n"); + assert(events.size() == 1); + assert(events[0].data == "hello"); + assert(events[0].event == "message"); // default event type + + // Test 2: named event + events = parser.feed("event: ping\ndata: {}\n\n"); + assert(events.size() == 1); + assert(events[0].event == "ping"); + assert(events[0].data == "{}"); + + // Test 3: multi-line data + events = parser.feed("data: line1\ndata: line2\n\n"); + assert(events.size() == 1); + assert(events[0].data == "line1\nline2"); + + // Test 4: chunked feed (data arrives in pieces) + parser.reset(); + events = parser.feed("data: hel"); + assert(events.empty()); // incomplete + events = parser.feed("lo\n\n"); + assert(events.size() == 1); + assert(events[0].data == "hello"); + + // Test 5: multiple events in one chunk + events = parser.feed("data: first\n\ndata: second\n\n"); + assert(events.size() == 2); + assert(events[0].data == "first"); + assert(events[1].data == "second"); + + // Test 6: OpenAI format + parser.reset(); + events = parser.feed("data: {\"choices\":[{\"delta\":{\"content\":\"Hi\"}}]}\n\n"); + assert(events.size() == 1); + assert(events[0].data.find("Hi") != std::string::npos); + + // Test 7: Anthropic format (event type line) + parser.reset(); + events = parser.feed("event: content_block_delta\ndata: {\"type\":\"content_block_delta\"}\n\n"); + assert(events.size() == 1); + 
assert(events[0].event == "content_block_delta"); + + // Test 8: [DONE] sentinel + parser.reset(); + events = parser.feed("data: [DONE]\n\n"); + assert(events.size() == 1); + assert(events[0].data == "[DONE]"); + + // Test 9: comment lines (start with :) ignored + parser.reset(); + events = parser.feed(": this is a comment\ndata: actual\n\n"); + assert(events.size() == 1); + assert(events[0].data == "actual"); + + // Test 10: id field + parser.reset(); + events = parser.feed("id: 123\ndata: msg\n\n"); + assert(events.size() == 1); + assert(events[0].id == "123"); + assert(events[0].data == "msg"); + + std::println("test_sse: ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test to verify it fails** + +Run: `xmake build test_sse` +Expected: FAIL + +**Step 3: Implement SseParser** + +`src/tinyhttps/sse.cppm`: +- `feed(string_view chunk)` → appends to internal buffer, scans for `\n\n` delimiters +- For each complete event block, parse lines: + - `data: xxx` → append to currentData (multi-line: join with `\n`) + - `event: xxx` → set currentEvent + - `id: xxx` → set currentId + - `: xxx` → comment, ignore + - empty line → dispatch event, reset accumulators +- Default event type is `"message"` per SSE spec + +**Step 4: Run test to verify it passes** + +Run: `xmake build test_sse && xmake run test_sse` +Expected: `test_sse: ALL PASSED` + +**Step 5: Commit** + +```bash +git add src/tinyhttps/sse.cppm tests/tinyhttps/test_sse.cpp +git commit -m "feat(tinyhttps): add SSE parser with chunked feed support" +``` + +**Acceptance Criteria:** +- [x] Parses standard SSE format (data, event, id fields) +- [x] Handles chunked data arrival (partial lines buffered) +- [x] Multi-line data fields concatenated with `\n` +- [x] Comment lines ignored +- [x] Works for both OpenAI (`data:` only) and Anthropic (`event:` + `data:`) formats +- [x] Default event type is "message" + +--- + +### Task 5: HTTP Streaming (SSE) Integration + +**Files:** +- Modify: `src/tinyhttps/http.cppm` — add 
`send_stream()` method +- Create: `tests/tinyhttps/test_http_stream.cpp` + +**Step 1: Write the validation test** + +`tests/tinyhttps/test_http_stream.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client; + + // Test 1: SSE stream from httpbin (simulated with /stream/3) + int eventCount { 0 }; + auto resp = client.send_stream( + HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/stream/3", + }, + [&eventCount](const SseEvent& event) -> bool { + eventCount++; + std::println("SSE event: {}", event.data.substr(0, 50)); + return true; // continue + } + ); + assert(resp.statusCode == 200); + // httpbin /stream/N returns N JSON lines, not strict SSE, + // but validates our streaming read path + + // Test 2: early stop (return false from callback) + int stopCount { 0 }; + auto resp2 = client.send_stream( + HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/stream/10", + }, + [&stopCount](const SseEvent&) -> bool { + stopCount++; + return stopCount < 2; // stop after 2 + } + ); + assert(stopCount <= 3); // may get 1-2 extra from buffer + + Socket::platform_cleanup(); + std::println("test_http_stream: ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test to verify it fails** + +Run: `xmake build test_http_stream` +Expected: FAIL — `send_stream` not defined + +**Step 3: Implement send_stream** + +In `http.cppm`, add `send_stream()`: +- Send HTTP request as normal +- Parse response status + headers +- Instead of reading full body, read chunks incrementally +- Feed each chunk to `SseParser` +- For each parsed event, call user callback +- If callback returns `false`, stop reading and return +- Return `HttpResponse` with status (body may be partial) + +**Step 4: Run test to verify it passes** + +Run: `xmake build test_http_stream && xmake run test_http_stream` +Expected: `test_http_stream: ALL PASSED` + +**Step 5: Commit** + +```bash 
+git add src/tinyhttps/http.cppm tests/tinyhttps/test_http_stream.cpp +git commit -m "feat(tinyhttps): add SSE streaming support to HttpClient" +``` + +**Acceptance Criteria:** +- [x] Streaming read with incremental SSE parsing +- [x] Callback receives events as they arrive +- [x] Early stop via callback returning false +- [x] Response status/headers still available + +--- + +### Task 6: HTTP CONNECT Proxy + +**Files:** +- Create: `src/tinyhttps/proxy.cppm` +- Create: `tests/tinyhttps/test_proxy.cpp` +- Modify: `src/tinyhttps/tinyhttps.cppm` — add `export import :proxy;` +- Modify: `src/tinyhttps/http.cppm` — use proxy when configured + +**Step 1: Write the validation test** + +`tests/tinyhttps/test_proxy.cpp`: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + // Test 1: proxy URL parsing + // Internal: verify parse_proxy_url("http://proxy:8080") -> host="proxy", port=8080 + // This tests the URL parsing path used by proxy + + // Test 2: HttpClient with proxy config (integration test, requires proxy) + // Skip if no proxy available — compile-time validation only + auto client = HttpClient(HttpClientConfig { + .proxy = "http://127.0.0.1:8080", // won't connect, but tests config path + }); + + // Test 3: verify proxy config is stored + assert(client.config().proxy.has_value()); + assert(client.config().proxy.value() == "http://127.0.0.1:8080"); + + Socket::platform_cleanup(); + std::println("test_proxy: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement proxy** + +`src/tinyhttps/proxy.cppm`: +- `proxy_connect()`: connect to proxy host:port via plain Socket, send `CONNECT target:443 HTTP/1.1\r\n`, read `200 Connection established`, then layer TLS on top +- URL parsing helper for proxy URL + +Update `http.cppm` `get_connection_()`: +- If proxy configured, use `proxy_connect()` instead of direct `TlsSocket::connect()` + +**Step 3: Run test to verify it passes** + +Run: `xmake 
build test_proxy && xmake run test_proxy` +Expected: `test_proxy: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/tinyhttps/proxy.cppm tests/tinyhttps/test_proxy.cpp +git commit -m "feat(tinyhttps): add HTTP CONNECT proxy support" +``` + +**Acceptance Criteria:** +- [x] Proxy config stored and accessible +- [x] CONNECT tunnel implementation (code review — integration test needs real proxy) +- [x] Falls back to direct connect when no proxy configured +- [x] Proxy URL parsing handles http://host:port format + +--- + +### Task 7: tinyhttps Module Integration Test + +**Files:** +- Create: `tests/tinyhttps/test_integration.cpp` +- Modify: `src/tinyhttps/tinyhttps.cppm` — ensure all exports complete + +**Step 1: Write integration test** + +`tests/tinyhttps/test_integration.cpp` — full end-to-end: +```cpp +import mcpplibs.tinyhttps; +import std; + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client(HttpClientConfig { + .connectTimeoutMs = 10000, + .readTimeoutMs = 30000, + .verifySsl = true, + .keepAlive = true, + }); + + // Test: full HTTPS POST (simulates LLM API call pattern) + auto resp = client.send(HttpRequest { + .method = Method::POST, + .url = "https://httpbin.org/post", + .headers = { + {"Content-Type", "application/json"}, + {"Authorization", "Bearer test-key"}, + {"X-Custom-Header", "custom-value"}, + }, + .body = R"({"model":"test","messages":[{"role":"user","content":"hi"}]})", + }); + + assert(resp.ok()); + assert(resp.statusCode == 200); + // httpbin echoes back the request + assert(resp.body.find("test-key") != std::string::npos); + assert(resp.body.find("custom-value") != std::string::npos); + assert(resp.body.find("\"model\"") != std::string::npos); + + // Test: connection reuse (second request to same host) + auto resp2 = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/get", + }); + assert(resp2.ok()); + + Socket::platform_cleanup(); + 
std::println("test_integration (tinyhttps): ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test** + +Run: `xmake build test_integration && xmake run test_integration` +Expected: `test_integration (tinyhttps): ALL PASSED` + +**Step 3: Commit** + +```bash +git add tests/tinyhttps/test_integration.cpp +git commit -m "test(tinyhttps): add integration test for full HTTPS flow" +``` + +**Acceptance Criteria:** +- [x] Full HTTPS POST with custom headers works end-to-end +- [x] Connection reuse works +- [x] All tinyhttps sub-modules exported correctly from main module + +--- + +## Phase 2: llmapi Core Types & Provider Abstraction + +### Task 8: Core Type System + +**Files:** +- Create: `src/types.cppm` +- Create: `tests/llmapi/xmake.lua` +- Create: `tests/llmapi/test_types.cpp` +- Modify: `tests/xmake.lua` — add `includes("llmapi")` + +**Step 1: Write the validation test** + +`tests/llmapi/test_types.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +int main() { + using namespace mcpplibs::llmapi; + + // Test 1: Message construction + auto msg = Message::user("hello"); + assert(msg.role == Role::User); + assert(std::holds_alternative<std::string>(msg.content)); + assert(std::get<std::string>(msg.content) == "hello"); + + // Test 2: System/Assistant messages + auto sys = Message::system("you are helpful"); + assert(sys.role == Role::System); + auto asst = Message::assistant("hi there"); + assert(asst.role == Role::Assistant); + + // Test 3: Multimodal content + auto multiMsg = Message { + .role = Role::User, + .content = std::vector<ContentPart>{ + TextContent { "describe this" }, + ImageContent { .data = "https://example.com/img.jpg", .isUrl = true }, + }, + }; + auto& parts = std::get<std::vector<ContentPart>>(multiMsg.content); + assert(parts.size() == 2); + assert(std::holds_alternative<TextContent>(parts[0])); + assert(std::holds_alternative<ImageContent>(parts[1])); + + // Test 4: ToolDef + ToolDef tool { + .name = "get_weather", + .description = "Get weather", + .inputSchema = R"({"type":"object"})", + }; + assert(tool.name == "get_weather"); + + 
// Test 5: ChatParams with optionals + ChatParams params { + .temperature = 0.7, + .maxTokens = 1024, + }; + assert(params.temperature.has_value()); + assert(!params.topP.has_value()); + + // Test 6: ChatResponse text extraction + ChatResponse resp { + .content = { TextContent{"hello"}, TextContent{" world"} }, + .stopReason = StopReason::EndOfTurn, + }; + assert(resp.text() == "hello world"); + + // Test 7: ChatResponse tool_calls extraction + ChatResponse toolResp { + .content = { + TextContent{"Let me check"}, + ToolUseContent{ .id = "call_1", .name = "weather", .inputJson = "{}" }, + }, + }; + auto calls = toolResp.tool_calls(); + assert(calls.size() == 1); + assert(calls[0].name == "weather"); + + // Test 8: Conversation + Conversation conv; + conv.push(Message::user("hi")); + conv.push(Message::assistant("hello")); + assert(conv.size() == 2); + conv.clear(); + assert(conv.size() == 0); + + // Test 9: Usage + Usage usage { .inputTokens = 10, .outputTokens = 20, .totalTokens = 30 }; + assert(usage.totalTokens == 30); + + // Test 10: StopReason enum + assert(StopReason::EndOfTurn != StopReason::ToolUse); + + std::println("test_types: ALL PASSED"); + return 0; +} +``` + +**Step 2: Run test to verify it fails** + +Run: `xmake build test_types` +Expected: FAIL + +**Step 3: Implement types.cppm** + +Create `src/types.cppm` as specified in design doc section 3.1. 
Key points: +- All types in `namespace mcpplibs::llmapi` +- Use `std::variant` for `Content` and `ContentPart` +- `Message` with static factory methods +- `Conversation` with `save()`/`load()` (use nlohmann::json for serialization) + +**Step 4: Update llmapi module** + +Update `src/llmapi.cppm` to add `export import :types;` + +**Step 5: Run test to verify it passes** + +Run: `xmake build test_types && xmake run test_types` +Expected: `test_types: ALL PASSED` + +**Step 6: Commit** + +```bash +git add src/types.cppm tests/llmapi/ +git commit -m "feat(llmapi): add core type system — Message, Content, ToolDef, ChatParams" +``` + +**Acceptance Criteria:** +- [x] All types compile and construct correctly +- [x] std::variant content works for text and multimodal +- [x] Convenience factories (Message::user, etc.) +- [x] ChatResponse helper methods (text(), tool_calls()) +- [x] Conversation container works + +--- + +### Task 9: Conversation Serialization + +**Files:** +- Modify: `src/types.cppm` — implement `Conversation::save()` and `Conversation::load()` +- Create: `tests/llmapi/test_serialization.cpp` + +**Step 1: Write the validation test** + +`tests/llmapi/test_serialization.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +int main() { + using namespace mcpplibs::llmapi; + + // Build conversation with various content types + Conversation conv; + conv.push(Message::system("you are helpful")); + conv.push(Message::user("hello")); + conv.push(Message::assistant("hi there")); + conv.push(Message { + .role = Role::User, + .content = std::vector{ + TextContent { "look at this" }, + ImageContent { .data = "base64data", .mediaType = "image/png" }, + }, + }); + + // Save + std::string path = "/tmp/test_conv.json"; + conv.save(path); + + // Load + auto loaded = Conversation::load(path); + assert(loaded.size() == conv.size()); + + // Verify content preserved + assert(std::get(loaded.messages[0].content) == "you are helpful"); + assert(loaded.messages[0].role == 
Role::System); + assert(std::get(loaded.messages[1].content) == "hello"); + + // Verify multimodal preserved + auto& parts = std::get>(loaded.messages[3].content); + assert(parts.size() == 2); + auto& img = std::get(parts[1]); + assert(img.mediaType == "image/png"); + + // Cleanup + std::filesystem::remove(path); + + std::println("test_serialization: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement save/load** + +Use nlohmann::json for serialization: +- `save()`: serialize messages to JSON array, write to file +- `load()`: read file, parse JSON, reconstruct messages +- Handle all ContentPart variants with type discriminator + +**Step 3: Run test** + +Run: `xmake build test_serialization && xmake run test_serialization` +Expected: `test_serialization: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/types.cppm tests/llmapi/test_serialization.cpp +git commit -m "feat(llmapi): add conversation save/load serialization" +``` + +**Acceptance Criteria:** +- [x] Save conversation to JSON file +- [x] Load conversation from JSON file +- [x] All content types round-trip correctly (text, image, tool_use, tool_result) +- [x] Role information preserved + +--- + +### Task 10: Provider Concept + Coroutine Task + +**Files:** +- Create: `src/provider.cppm` +- Create: `src/coro.cppm` +- Create: `tests/llmapi/test_coro.cpp` + +**Step 1: Write the validation test** + +`tests/llmapi/test_coro.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +// Test coroutine +Task compute_async(int a, int b) { + co_return a + b; +} + +Task chain_async() { + auto result = co_await compute_async(2, 3); + co_return "result=" + std::to_string(result); +} + +Task void_task() { + co_return; +} + +Task throwing_task() { + throw std::runtime_error("test error"); + co_return 0; +} + +// Mock Provider for concept validation +struct MockProvider { + std::string_view name() const { return "mock"; } + + ChatResponse chat(const std::vector&, const 
ChatParams&) { + return ChatResponse { + .content = { TextContent { "mock response" } }, + .stopReason = StopReason::EndOfTurn, + }; + } + + Task chat_async(const std::vector&, const ChatParams&) { + co_return ChatResponse { + .content = { TextContent { "mock async" } }, + .stopReason = StopReason::EndOfTurn, + }; + } +}; + +// Compile-time concept check +static_assert(Provider); + +int main() { + // Test 1: Task sync get + auto t1 = compute_async(3, 4); + assert(t1.get() == 7); + + // Test 2: Task with co_await chain + auto t2 = chain_async(); + assert(t2.get() == "result=5"); + + // Test 3: Task + auto t3 = void_task(); + t3.get(); // should not throw + + // Test 4: exception propagation + auto t4 = throwing_task(); + try { + t4.get(); + assert(false); + } catch (const std::runtime_error& e) { + assert(std::string(e.what()) == "test error"); + } + + // Test 5: MockProvider satisfies concept + MockProvider mock; + auto resp = mock.chat({}, {}); + assert(resp.text() == "mock response"); + + auto asyncResp = mock.chat_async({}, {}); + assert(asyncResp.get().text() == "mock async"); + + std::println("test_coro: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement coro.cppm and provider.cppm** + +`src/coro.cppm` — as specified in design doc section 6.1: +- `Task` with promise_type, coroutine_handle, await interface +- `Task` specialization +- `.get()` for synchronous blocking +- Exception propagation + +`src/provider.cppm` — as specified in design doc section 3.2: +- `concept StreamCallback` +- `concept Provider` — requires `name()`, `chat()`, `chat_async()` +- `concept StreamableProvider` — additionally requires `chat_stream()`, `chat_stream_async()` +- `concept EmbeddableProvider` — requires `embed()` + +**Step 3: Update llmapi module** + +`src/llmapi.cppm`: add `export import :coro; export import :provider;` + +**Step 4: Run test** + +Run: `xmake build test_coro && xmake run test_coro` +Expected: `test_coro: ALL PASSED` + +**Step 5: Commit** + +```bash +git add 
src/coro.cppm src/provider.cppm tests/llmapi/test_coro.cpp +git commit -m "feat(llmapi): add Task coroutine + Provider concept" +``` + +**Acceptance Criteria:** +- [x] Task works with co_return and co_await +- [x] Task works +- [x] Exception propagation through coroutine chain +- [x] .get() blocks and returns result +- [x] Provider concept compiles with mock +- [x] static_assert(Provider) passes + +--- + +### Task 11: Client
<P>
Template + +**Files:** +- Create: `src/client.cppm` +- Create: `tests/llmapi/test_client.cpp` + +**Step 1: Write the validation test** + +`tests/llmapi/test_client.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +// Full mock provider with streaming +struct FullMockProvider { + std::string_view name() const { return "full_mock"; } + + ChatResponse chat(const std::vector& msgs, const ChatParams&) { + return ChatResponse { + .content = { TextContent { "reply to: " + std::get(msgs.back().content) } }, + .stopReason = StopReason::EndOfTurn, + .usage = { .inputTokens = 10, .outputTokens = 5, .totalTokens = 15 }, + }; + } + + Task chat_async(const std::vector& msgs, const ChatParams& p) { + co_return chat(msgs, p); + } + + ChatResponse chat_stream(const std::vector& msgs, + const ChatParams& params, + std::function callback) { + auto resp = chat(msgs, params); + auto text = resp.text(); + // Simulate streaming word by word + for (size_t i = 0; i < text.size(); i += 5) { + callback(std::string_view(text).substr(i, 5)); + } + return resp; + } + + Task chat_stream_async(const std::vector& msgs, + const ChatParams& params, + std::function callback) { + co_return chat_stream(msgs, params, std::move(callback)); + } +}; + +static_assert(Provider); +static_assert(StreamableProvider); + +int main() { + auto client = Client(FullMockProvider{}); + + // Test 1: basic chat + auto resp = client.chat("hello"); + assert(resp.text() == "reply to: hello"); + + // Test 2: conversation auto-saved + assert(client.conversation().size() == 2); // user + assistant + + // Test 3: system message + client.clear(); + client.system("be helpful"); + auto resp2 = client.chat("hi"); + assert(client.conversation().size() == 3); // system + user + assistant + + // Test 4: streaming + client.clear(); + std::string streamed; + auto resp3 = client.chat_stream("test", [&streamed](std::string_view chunk) { + streamed += chunk; + }); + assert(!streamed.empty()); + 
assert(resp3.text() == streamed); + + // Test 5: async chat + client.clear(); + auto asyncResp = client.chat_async("async hello"); + auto result = asyncResp.get(); + assert(result.text() == "reply to: async hello"); + + // Test 6: default params + client.clear(); + client.default_params(ChatParams { .temperature = 0.5 }); + auto resp4 = client.chat("with params"); + assert(resp4.text().find("with params") != std::string::npos); + + // Test 7: conversation save/load + client.clear(); + client.chat("save me"); + client.save_conversation("/tmp/test_client_conv.json"); + + auto client2 = Client(FullMockProvider{}); + client2.load_conversation("/tmp/test_client_conv.json"); + assert(client2.conversation().size() == 2); + std::filesystem::remove("/tmp/test_client_conv.json"); + + std::println("test_client: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement client.cppm** + +As specified in design doc section 3.3. Key: `Client` template class with: +- `chat()`, `chat_async()`, `chat_stream()`, `chat_stream_async()` +- Auto conversation history management +- `default_params()`, `system()`, `user()`, `clear()`, `add_message()` +- `save_conversation()`, `load_conversation()` + +**Step 3: Run test** + +Run: `xmake build test_client && xmake run test_client` +Expected: `test_client: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/client.cppm tests/llmapi/test_client.cpp +git commit -m "feat(llmapi): add Client
<P>
template with conversation management" +``` + +**Acceptance Criteria:** +- [x] Client
<P>
compiles with any Provider-satisfying type +- [x] chat() sends message and auto-saves response +- [x] chat_stream() streams and auto-saves +- [x] chat_async() works with co_await +- [x] Conversation history management (clear, system, save/load) +- [x] Default params applied + +--- + +## Phase 3: Provider Implementations + +### Task 12: OpenAI Provider — Serialization + +**Files:** +- Create: `src/openai.cppm` (replace existing) +- Create: `tests/llmapi/test_openai_serialize.cpp` + +**Step 1: Write the validation test** + +Test OpenAI request/response JSON serialization without making real API calls: + +`tests/llmapi/test_openai_serialize.cpp`: +```cpp +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + // Use OpenAI provider's internal serialization + // We test by constructing messages and verifying JSON output + + openai::OpenAI provider(openai::Config { + .apiKey = "test-key", + .model = "gpt-4o", + }); + + // Test 1: basic message serialization + // Provider should serialize to OpenAI format + // (we'll need a test helper or friend access to serialize_messages_) + + // Test via build_request_ or a public test helper + // For now, test the response parsing path + + // Test 2: parse OpenAI non-streaming response + Json openaiResp = Json::parse(R"({ + "id": "chatcmpl-123", + "object": "chat.completion", + "model": "gpt-4o", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "Hello!" 
+ }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 5, + "total_tokens": 15 + } + })"); + + // Test: parse_response_ produces correct ChatResponse + // (exposed via test helper or friend) + + // Test 3: parse tool_calls response + Json toolResp = Json::parse(R"({ + "id": "chatcmpl-456", + "model": "gpt-4o", + "choices": [{ + "message": { + "role": "assistant", + "content": null, + "tool_calls": [{ + "id": "call_abc", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"city\":\"Beijing\"}" + } + }] + }, + "finish_reason": "tool_calls" + }], + "usage": {"prompt_tokens": 20, "completion_tokens": 10, "total_tokens": 30} + })"); + + // Test 4: parse SSE chunk + std::string sseData = R"({"id":"chatcmpl-789","choices":[{"delta":{"content":"Hi"},"index":0}]})"; + // Verify delta content extraction + + std::println("test_openai_serialize: ALL PASSED"); + return 0; +} +``` + +> Note: This task focuses on serialization/deserialization logic. Actual API calls are in Task 14. + +**Step 2: Implement new openai.cppm** + +Replace `src/openai.cppm` with new implementation as designed in section 4.1: +- `Config` struct with apiKey, baseUrl, model, proxy, customHeaders +- `serialize_messages_()` — convert `vector` to OpenAI JSON (system role inline) +- `serialize_params_()` — ChatParams → OpenAI JSON fields +- `serialize_tools_()` — ToolDef[] → OpenAI tools format +- `parse_response_()` — OpenAI JSON → ChatResponse (handle text, tool_calls, finish_reason, usage) +- `parse_sse_chunk_()` — SSE data → extract delta content +- `build_request_()` — construct HttpRequest with auth headers + +Expose test helpers via `namespace detail` or make them testable. 
+ +**Step 3: Run test** + +Run: `xmake build test_openai_serialize && xmake run test_openai_serialize` +Expected: `test_openai_serialize: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/openai.cppm tests/llmapi/test_openai_serialize.cpp +git commit -m "feat(llmapi): add OpenAI provider serialization/deserialization" +``` + +**Acceptance Criteria:** +- [x] Messages serialize to OpenAI JSON format (system role inline) +- [x] Multimodal content serializes (image_url format) +- [x] Tool definitions serialize correctly +- [x] Non-streaming response parses (text, tool_calls, finish_reason, usage) +- [x] SSE delta chunks parse correctly +- [x] Auth header is `Authorization: Bearer {key}` + +--- + +### Task 13: Anthropic Provider — Serialization + +**Files:** +- Create: `src/anthropic.cppm` +- Create: `tests/llmapi/test_anthropic_serialize.cpp` + +**Step 1: Write the validation test** + +`tests/llmapi/test_anthropic_serialize.cpp`: +```cpp +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + anthropic::Anthropic provider(anthropic::Config { + .apiKey = "test-key", + .model = "claude-sonnet-4-20250514", + }); + + // Test 1: Anthropic response parsing + Json anthropicResp = Json::parse(R"({ + "id": "msg_01abc", + "type": "message", + "role": "assistant", + "model": "claude-sonnet-4-20250514", + "content": [ + {"type": "text", "text": "Hello!"} + ], + "stop_reason": "end_turn", + "usage": { + "input_tokens": 10, + "output_tokens": 5 + } + })"); + + // Test 2: tool_use response + Json toolResp = Json::parse(R"({ + "id": "msg_02def", + "type": "message", + "role": "assistant", + "model": "claude-sonnet-4-20250514", + "content": [ + {"type": "text", "text": "Let me check the weather."}, + {"type": "tool_use", "id": "toolu_01abc", "name": "get_weather", "input": {"city": "Beijing"}} + ], + "stop_reason": "tool_use", + "usage": {"input_tokens": 20, 
"output_tokens": 15} + })"); + + // Test 3: SSE event parsing + // Anthropic uses event: type + data: json format + std::string eventType = "content_block_delta"; + std::string eventData = R"({"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}})"; + + // Test 4: system message extraction + // system should be removed from messages and placed at top level + + std::println("test_anthropic_serialize: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement anthropic.cppm** + +As designed in section 4.2: +- `extract_system_()` — remove Role::System from messages, return as separate string +- `serialize_messages_()` — only user/assistant roles, content blocks format +- Image serialization: `source.type: "base64"` + `media_type` (differs from OpenAI) +- Tool result as content block in user message (not separate role) +- Auth: `x-api-key` + `anthropic-version` headers +- `max_tokens` always included (required by Anthropic) +- SSE parsing: handle `event:` type lines + content_block_delta events + +**Step 3: Run test** + +Run: `xmake build test_anthropic_serialize && xmake run test_anthropic_serialize` +Expected: `test_anthropic_serialize: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/anthropic.cppm tests/llmapi/test_anthropic_serialize.cpp +git commit -m "feat(llmapi): add Anthropic provider serialization/deserialization" +``` + +**Acceptance Criteria:** +- [x] System message extracted to top-level field +- [x] Only user/assistant roles in messages +- [x] Image content uses Anthropic format (base64 source) +- [x] Tool use/result as content blocks +- [x] Auth headers: x-api-key + anthropic-version +- [x] max_tokens always present +- [x] SSE event type parsing (content_block_delta, text_delta) + +--- + +### Task 14: OpenAI Provider — Live API Integration + +**Files:** +- Create: `tests/llmapi/test_openai_live.cpp` + +**Step 1: Write the live test** (requires `OPENAI_API_KEY` env var) + +`tests/llmapi/test_openai_live.cpp`: +```cpp 
+import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + std::println("OPENAI_API_KEY not set, skipping live test"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", // cheapest model for testing + })); + + // Test 1: basic chat + auto resp = client.chat("Say exactly: HELLO_TEST_OK"); + std::println("Response: {}", resp.text()); + assert(!resp.text().empty()); + assert(resp.usage.totalTokens > 0); + assert(resp.stopReason == StopReason::EndOfTurn); + + // Test 2: streaming + client.clear(); + std::string streamed; + auto resp2 = client.chat_stream("Say exactly: STREAM_OK", [&](std::string_view chunk) { + streamed += chunk; + std::print("{}", chunk); + }); + std::println(""); + assert(!streamed.empty()); + + // Test 3: conversation continuity + auto resp3 = client.chat("What did I just ask you to say?"); + assert(!resp3.text().empty()); + assert(client.conversation().size() == 4); // 2 turns + + std::println("test_openai_live: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement full chat/chat_stream in openai.cppm** + +Wire serialization to tinyhttps HttpClient: +- `chat()`: serialize → `http_.send()` → parse response +- `chat_stream()`: serialize with `stream:true` → `http_.send_stream()` → parse SSE → callback + +**Step 3: Run test** + +Run: `OPENAI_API_KEY=sk-xxx xmake run test_openai_live` +Expected: `test_openai_live: ALL PASSED` + +**Step 4: Commit** + +```bash +git add tests/llmapi/test_openai_live.cpp src/openai.cppm +git commit -m "feat(llmapi): OpenAI provider live API integration" +``` + +**Acceptance Criteria:** +- [x] Real API call works (non-streaming) +- [x] Streaming works with callback +- [x] Token usage reported +- [x] Conversation continuity maintained +- [x] Gracefully skips if no API key + +--- + +### Task 15: Anthropic Provider — Live API Integration + +**Files:** +- Create: 
`tests/llmapi/test_anthropic_live.cpp` + +**Step 1: Write the live test** (requires `ANTHROPIC_API_KEY`) + +`tests/llmapi/test_anthropic_live.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("ANTHROPIC_API_KEY"); + if (!apiKey) { + std::println("ANTHROPIC_API_KEY not set, skipping live test"); + return 0; + } + + auto client = Client(anthropic::Anthropic({ + .apiKey = apiKey, + .model = "claude-haiku-4-5-20251001", // cheapest for testing + })); + + // Test 1: basic chat + auto resp = client.chat("Say exactly: HELLO_TEST_OK"); + std::println("Response: {}", resp.text()); + assert(!resp.text().empty()); + assert(resp.usage.inputTokens > 0); + + // Test 2: system message + client.clear(); + client.system("Always respond with exactly one word."); + auto resp2 = client.chat("What color is the sky?"); + std::println("System test: {}", resp2.text()); + + // Test 3: streaming + client.clear(); + std::string streamed; + auto resp3 = client.chat_stream("Say exactly: STREAM_OK", [&](std::string_view chunk) { + streamed += chunk; + std::print("{}", chunk); + }); + std::println(""); + assert(!streamed.empty()); + + std::println("test_anthropic_live: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement full chat/chat_stream in anthropic.cppm** + +Wire serialization to tinyhttps: +- `chat()`: extract system → serialize → `http_.send()` → parse +- `chat_stream()`: serialize with `stream:true` → `http_.send_stream()` → parse Anthropic SSE events +- Handle Anthropic SSE event types: `message_start`, `content_block_delta` (text_delta), `message_stop` + +**Step 3: Run test** + +Run: `ANTHROPIC_API_KEY=sk-xxx xmake run test_anthropic_live` +Expected: `test_anthropic_live: ALL PASSED` + +**Step 4: Commit** + +```bash +git add tests/llmapi/test_anthropic_live.cpp src/anthropic.cppm +git commit -m "feat(llmapi): Anthropic provider live API integration" +``` + +**Acceptance Criteria:** +- [x] Real API 
call works with Anthropic auth (x-api-key) +- [x] System message sent as top-level field +- [x] Streaming with Anthropic SSE format +- [x] Token usage reported +- [x] Gracefully skips if no API key + +--- + +## Phase 4: Advanced Features + +### Task 16: Tool Calling End-to-End + +**Files:** +- Create: `tests/llmapi/test_tool_calling.cpp` + +**Step 1: Write the test** + +`tests/llmapi/test_tool_calling.cpp`: +```cpp +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + std::println("OPENAI_API_KEY not set, skipping"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + + auto params = ChatParams { + .tools = std::vector{{ + .name = "get_temperature", + .description = "Get the current temperature in a city", + .inputSchema = R"({"type":"object","properties":{"city":{"type":"string"}},"required":["city"]})", + }}, + .toolChoice = ToolChoice::Auto, + }; + + // Ask about weather — model should call the tool + auto resp = client.chat("What's the temperature in Tokyo?", params); + + if (resp.stopReason == StopReason::ToolUse) { + auto calls = resp.tool_calls(); + assert(!calls.empty()); + std::println("Tool called: {} with args: {}", calls[0].name, calls[0].arguments); + assert(calls[0].name == "get_temperature"); + + auto args = Json::parse(calls[0].arguments); + assert(args.contains("city")); + + // Send tool result back + client.add_message(Message { + .role = Role::Tool, + .content = std::vector{ + ToolResultContent { + .toolUseId = calls[0].id, + .content = R"({"temperature": "22°C", "condition": "sunny"})", + }, + }, + }); + + // Get final response + auto finalResp = client.chat("", params); + std::println("Final: {}", finalResp.text()); + assert(!finalResp.text().empty()); + } else { + std::println("Model didn't call tool 
(non-deterministic), response: {}", + resp.text()); + } + + std::println("test_tool_calling: ALL PASSED"); + return 0; +} +``` + +**Step 2: Verify tool calling works end-to-end** + +Run: `OPENAI_API_KEY=sk-xxx xmake run test_tool_calling` +Expected: `test_tool_calling: ALL PASSED` + +**Step 3: Commit** + +```bash +git add tests/llmapi/test_tool_calling.cpp +git commit -m "test(llmapi): add tool calling end-to-end test" +``` + +**Acceptance Criteria:** +- [x] Tools serialized in request +- [x] Tool calls parsed from response +- [x] Tool results sent back correctly +- [x] Multi-turn tool calling flow works + +--- + +### Task 17: Structured Output + Embeddings + +**Files:** +- Create: `tests/llmapi/test_structured_output.cpp` +- Create: `tests/llmapi/test_embeddings.cpp` + +**Step 1: Write structured output test** + +`tests/llmapi/test_structured_output.cpp`: +```cpp +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + std::println("OPENAI_API_KEY not set, skipping"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + + // Test: JSON object mode + auto resp = client.chat("Generate a JSON object with fields: name (string), age (number)", + ChatParams { + .responseFormat = ResponseFormat { + .type = ResponseFormatType::JsonObject, + }, + }); + + auto json = Json::parse(resp.text()); + assert(json.contains("name")); + assert(json.contains("age")); + std::println("JSON output: {}", resp.text()); + + std::println("test_structured_output: ALL PASSED"); + return 0; +} +``` + +**Step 2: Write embeddings test** + +`tests/llmapi/test_embeddings.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + std::println("OPENAI_API_KEY not 
set, skipping"); + return 0; + } + + auto provider = openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + }); + + auto resp = provider.embed( + {"Hello world", "How are you"}, + "text-embedding-3-small" + ); + + assert(resp.embeddings.size() == 2); + assert(!resp.embeddings[0].empty()); + assert(resp.usage.inputTokens > 0); + std::println("Embedding dim: {}", resp.embeddings[0].size()); + + std::println("test_embeddings: ALL PASSED"); + return 0; +} +``` + +**Step 3: Implement embeddings in openai.cppm** + +Add `embed()` method: +- POST to `/v1/embeddings` with `{model, input}` +- Parse response: extract embedding vectors + usage + +**Step 4: Run tests** + +Run: `OPENAI_API_KEY=sk-xxx xmake run test_structured_output && xmake run test_embeddings` +Expected: both PASSED + +**Step 5: Commit** + +```bash +git add tests/llmapi/test_structured_output.cpp tests/llmapi/test_embeddings.cpp src/openai.cppm +git commit -m "feat(llmapi): add structured output + embeddings support" +``` + +**Acceptance Criteria:** +- [x] response_format: json_object produces valid JSON +- [x] Embeddings endpoint returns vectors +- [x] Multiple inputs return multiple embeddings +- [x] Usage tokens tracked for embeddings + +--- + +## Phase 5: Build System & Integration + +### Task 18: xmake Refactor — Remove libcurl, Add mbedtls + +**Files:** +- Modify: `xmake.lua` — remove libcurl, add tinyhttps/llmapi targets +- Modify: `examples/xmake.lua` — update deps +- Modify: `examples/basic.cpp` — update to new API +- Modify: `examples/chat.cpp` — update to new API +- Modify: `examples/hello_mcpp.cpp` — update to new API + +**Step 1: Update root xmake.lua** + +```lua +set_languages("c++23") +set_policy("build.c++.modules", true) + +add_requires("mbedtls 3.6.1") + +target("tinyhttps") + set_kind("static") + add_files("src/tinyhttps/*.cppm", { public = true }) + add_packages("mbedtls", { public = true }) + +target("llmapi") + set_kind("static") + add_deps("tinyhttps") + add_files("src/*.cppm", 
{ public = true }) + add_includedirs("src/json") + add_headerfiles("src/json/json.hpp") + add_files("src/json/json.cppm", { public = true }) + +includes("examples") +includes("tests") +``` + +**Step 2: Update examples to new API** + +`examples/hello_mcpp.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + auto client = Client(openai::OpenAI({ + .apiKey = std::getenv("OPENAI_API_KEY"), + .model = "gpt-4o-mini", + })); + + auto resp = client.chat("Hello!"); + std::println("{}", resp.text()); +} +``` + +**Step 3: Build everything** + +Run: `xmake build` +Expected: all targets compile + +**Step 4: Run examples** (with API key) + +Run: `OPENAI_API_KEY=sk-xxx xmake run hello_mcpp` +Expected: prints a greeting + +**Step 5: Commit** + +```bash +git add xmake.lua examples/ src/llmapi.cppm +git commit -m "refactor: replace libcurl with tinyhttps, update build and examples" +``` + +**Acceptance Criteria:** +- [x] libcurl completely removed from build +- [x] mbedtls 3.6.1 linked via xmake package +- [x] All targets build (tinyhttps, llmapi, examples, tests) +- [x] Examples updated to new Client
<P>
API +- [x] `xmake build` succeeds clean + +--- + +### Task 19: Error Handling + +**Files:** +- Create: `src/errors.cppm` +- Create: `tests/llmapi/test_errors.cpp` +- Modify: `src/llmapi.cppm` — add `export import :errors;` + +**Step 1: Write the test** + +`tests/llmapi/test_errors.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + // Test 1: invalid API key + auto client = Client(openai::OpenAI({ + .apiKey = "invalid-key", + .model = "gpt-4o-mini", + })); + + try { + client.chat("hello"); + assert(false); + } catch (const ApiError& e) { + std::println("ApiError: status={} type={} msg={}", + e.statusCode, e.type, e.what()); + assert(e.statusCode == 401); + } + + // Test 2: connection error (bad host) + auto client2 = Client(openai::OpenAI({ + .apiKey = "key", + .baseUrl = "https://nonexistent.invalid", + .model = "test", + })); + + try { + client2.chat("hello"); + assert(false); + } catch (const ConnectionError& e) { + std::println("ConnectionError: {}", e.what()); + } + + // Test 3: HttpError hierarchy + try { + throw HttpError(500, "internal error"); + } catch (const std::runtime_error& e) { + // caught as base class + } + + std::println("test_errors: ALL PASSED"); + return 0; +} +``` + +**Step 2: Implement errors.cppm** + +As designed in section 8: `HttpError`, `ApiError`, `ConnectionError`, `TlsError`. + +Integrate into providers: parse error responses, throw appropriate exceptions. 
+ +**Step 3: Run test** + +Run: `xmake build test_errors && xmake run test_errors` +Expected: `test_errors: ALL PASSED` + +**Step 4: Commit** + +```bash +git add src/errors.cppm tests/llmapi/test_errors.cpp +git commit -m "feat(llmapi): add structured error handling with error hierarchy" +``` + +**Acceptance Criteria:** +- [x] ApiError thrown for 4xx/5xx API responses +- [x] ConnectionError thrown for network failures +- [x] Error hierarchy inherits from std::runtime_error +- [x] Error includes status code, type, and message + +--- + +### Task 20: Final Integration Test + Cleanup + +**Files:** +- Create: `tests/test_full_integration.cpp` +- Remove: old `src/openai.cppm` (if any remnants) +- Update: `src/llmapi.cppm` — final module exports + +**Step 1: Write full integration test** + +`tests/test_full_integration.cpp`: +```cpp +import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; + +int main() { + // Test 1: compile-time — both providers satisfy concepts + static_assert(Provider); + static_assert(StreamableProvider); + static_assert(EmbeddableProvider); + static_assert(Provider); + static_assert(StreamableProvider); + + // Test 2: type system + auto msg = Message::user("hello"); + ChatParams params { .temperature = 0.7, .maxTokens = 100 }; + Conversation conv; + conv.push(msg); + assert(conv.size() == 1); + + // Test 3: Client compiles with both providers + auto openaiClient = Client(openai::OpenAI({ + .apiKey = "test", + .model = "gpt-4o", + })); + auto anthropicClient = Client(anthropic::Anthropic({ + .apiKey = "test", + .model = "claude-sonnet-4-20250514", + })); + + // Test 4: provider name + assert(openaiClient.provider().name() == "openai"); + assert(anthropicClient.provider().name() == "anthropic"); + + std::println("test_full_integration: ALL PASSED"); + return 0; +} +``` + +**Step 2: Final llmapi.cppm** + +```cpp +export module mcpplibs.llmapi; + +export import :types; +export import :errors; +export import :coro; +export import 
:provider; +export import :client; +export import :openai; +export import :anthropic; + +import std; + +export namespace mcpplibs::llmapi { + // Convenience aliases + using OpenAI = openai::OpenAI; + using Anthropic = anthropic::Anthropic; + using Json = nlohmann::json; +} +``` + +**Step 3: Build and run all tests** + +Run: +```bash +xmake build +xmake run test_socket +xmake run test_tls +xmake run test_http +xmake run test_sse +xmake run test_http_stream +xmake run test_proxy +xmake run test_integration +xmake run test_types +xmake run test_serialization +xmake run test_coro +xmake run test_client +xmake run test_openai_serialize +xmake run test_anthropic_serialize +xmake run test_errors +xmake run test_full_integration +``` + +Expected: ALL PASSED for all tests + +**Step 4: Commit** + +```bash +git add -A +git commit -m "feat(llmapi): v0.1.0 complete — multi-provider LLM API with tinyhttps" +``` + +**Acceptance Criteria:** +- [x] All 16 test binaries build and pass +- [x] Both providers satisfy concepts at compile time +- [x] Client and Client both compile +- [x] No libcurl dependency remains +- [x] Clean build with `xmake build` +- [x] Examples work with real API keys (manual verification) + +--- + +## Summary + +| Phase | Tasks | Focus | +|-------|-------|-------| +| 1: Foundation | 1-7 | tinyhttps: Socket, TLS, HTTP, SSE, Proxy | +| 2: Core | 8-11 | Types, Serialization, Coroutine, Client

| +| 3: Providers | 12-15 | OpenAI + Anthropic serialization + live tests | +| 4: Features | 16-17 | Tool Calling, Structured Output, Embeddings | +| 5: Integration | 18-20 | Build refactor, Error handling, Final test | + +**Total: 20 tasks, ~16 test programs, estimated ~3000-4000 lines of new code** diff --git a/.agents/skills/README.md b/.agents/skills/README.md new file mode 100644 index 0000000..d21b545 --- /dev/null +++ b/.agents/skills/README.md @@ -0,0 +1,24 @@ +# mcpp-style-ref Agent Skills + +用于指导 Agent 在编写或审查 Modern/Module C++ 代码时遵循 mcpp-style-ref 规范的技能。 + +## 可用技能 + +| 技能 | 说明 | +|------|------| +| [mcpp-style-ref](mcpp-style-ref/SKILL.md) | 面向 mcpp 项目的 Modern/Module C++ (C++23) 命名、模块化与实践规则 | + +## 使用方式 + +要在 Cursor 中使用,请将技能软链接或复制到项目的 `.cursor/skills/`: + +```bash +mkdir -p .cursor/skills +ln -s ../../skills/mcpp-style-ref .cursor/skills/mcpp-style-ref +``` + +或安装为个人技能: + +```bash +ln -s /path/to/mcpp-style-ref/skills/mcpp-style-ref ~/.cursor/skills/mcpp-style-ref +``` diff --git a/.agents/skills/mcpp-style-ref/SKILL.md b/.agents/skills/mcpp-style-ref/SKILL.md new file mode 100644 index 0000000..52b9451 --- /dev/null +++ b/.agents/skills/mcpp-style-ref/SKILL.md @@ -0,0 +1,171 @@ +--- +name: mcpp-style-ref +description: 为 mcpp 项目应用 Modern/Module C++ (C++23) 编码风格。适用于编写或审查带模块的 C++ 代码、命名标识符、组织 .cppm/.cpp 文件,或用户提及 mcpp、module C++、现代 C++ 风格时。 +--- + +# mcpp-style-ref + +mcpp 项目的 Modern/Module C++ 风格参考。C++23,使用 `import std`。 + +## 快速参考 + +### 命名 + +| 种类 | 风格 | 示例 | +|------|------|------| +| 类型/类 | PascalCase(大驼峰) | `StyleRef`, `HttpServer` | +| 对象/成员 | camelCase(小驼峰) | `fileName`, `configText` | +| 函数 | snake_case(下划线) | `load_config_file()`, `parse_()` | +| 私有 | `_` 后缀 | `fileName_`, `parse_()` | +| 常量 | UPPER_SNAKE | `MAX_SIZE`, `DEFAULT_TIMEOUT` | +| 全局 | `g` 前缀 | `gStyleRef` | +| 命名空间 | 全小写 | `mcpplibs`, `mylib` | + +### 模块基础 + +- 使用 `import std` 替代 `#include ` 和 `#include ` +- 使用 `.cppm` 作为模块接口;分离实现时用 `.cpp` +- `export module module_name;` — 模块声明 +- 
`export import :partition;` — 导出分区 +- `import :partition;` — 内部分区(不导出) + +### 模块结构 + +``` +// .cppm +export module a; + +export import a.b; +export import :a2; // 可导出分区 + +import std; +import :a1; // 内部分区 +``` + +### 模块命名 + +- 模块:`topdir.subdir.filename`(如 `a.b`, `a.c`) +- 分区:`module_name:partition`(如 `a:a1`, `a.b:b1`) +- 用目录路径区分同名:`a/c.cppm` → `a.c`,`b/c.cppm` → `b.c` + +### 类布局 + +```cpp +class StyleRef { +private: + std::string fileName_; // 数据成员带 _ 后缀 + +public: // Big Five + StyleRef() = default; + StyleRef(const StyleRef&) = default; + // ... + +public: // 公有接口 + void load_config_file(std::string fileName); // 函数 snake_case,参数 camelCase + +private: + void parse_(std::string config); // 私有函数以 _ 结尾 +}; +``` + +### 实践规则 + +- **初始化**:用 `{}` — `int n { 42 }`,`std::vector v { 1, 2, 3 }` +- **字符串**:只读参数用 `std::string_view` +- **错误**:用 `std::optional` / `std::expected` 替代 int 错误码 +- **内存**:用 `std::unique_ptr`、`std::shared_ptr`;避免裸 `new`/`delete` +- **RAII**:将资源与对象生命周期绑定 +- **auto**:用于迭代器、lambda、复杂类型;需要明确表达意图时保留显式类型 +- **宏**:优先用 `constexpr`、`inline`、`concept` 替代宏 + +### 接口与实现 + +两种写法均支持。 + +**写法 A:合并** — 接口与实现同在一个 `.cppm` 中: + +```cpp +// mylib.cppm +export module mylib; + +export int add(int a, int b) { + return a + b; +} +``` + +**写法 B:分离** — 接口在 `.cppm`,实现在 `.cpp`(编译期隐藏实现): + +```cpp +// error.cppm(接口) +export module error; + +export struct Error { + void test(); +}; +``` + +```cpp +// error.cpp(实现) +module error; + +import std; + +void Error::test() { + std::println("Hello"); +} +``` + +简单模块用写法 A;需隐藏实现或减少编译依赖时用写法 B。 + +## 项目环境配置 + +安装 xlings 包管理器后,获取 GCC 15 工具链: + +#### Linux/MacOS + +```bash +curl -fsSL https://raw.githubusercontent.com/d2learn/xlings/refs/heads/main/tools/other/quick_install.sh | bash +``` + +#### Windows - PowerShell + +```bash +irm https://raw.githubusercontent.com/d2learn/xlings/refs/heads/main/tools/other/quick_install.ps1 | iex +``` + +然后安装工具链(仅linux, 其中windows默认用msvc): + +```bash +xlings install gcc@15 -y +``` + +> xlings详细信息可参考 
[xlings](https://github.com/d2learn/xlings) 文档。 + +## 示例项目创建 + +参考本仓库 `src/` 目录结构: + +- `xmake.lua`:配置 `set_languages("c++23")`、`set_policy("build.c++.modules", true)` +- `add_files("main.cpp")`、`add_files("**.cppm")` 添加源文件 +- 可执行目标与静态库目标分离(如 `mcpp-style-ref` 主程序、`error` 静态库) + +构建: + +```bash +xmake build +xmake run +``` + +## 适用场景 + +- 编写新的 C++ 模块代码(`.cppm`、`.cpp`) +- 审查或重构 mcpp 项目中的 C++ 代码 +- 用户询问「mcpp 风格」「module C++ 风格」或「现代 C++ 惯例」 + +## 更多资源 + +- 完整参考:[reference.md](reference.md) +- mcpp-style-ref 仓库:[github.com/mcpp-community/mcpp-style-ref](https://github.com/mcpp-community/mcpp-style-ref) + - 项目说明:[../../README.md](../../README.md) + - 示例项目:[src/](../../../src) +- xlings 包管理器:[github.com/d2learn/xlings](https://github.com/d2learn/xlings) diff --git a/.agents/skills/mcpp-style-ref/reference.md b/.agents/skills/mcpp-style-ref/reference.md new file mode 100644 index 0000000..1fd0007 --- /dev/null +++ b/.agents/skills/mcpp-style-ref/reference.md @@ -0,0 +1,187 @@ +# mcpp-style-ref 参考 + +来自 [mcpp-style-ref](https://github.com/mcpp-community/mcpp-style-ref) 的详细风格规则。 + +## 一、标识符命名 + +### 1.0 类型 — PascalCase(大驼峰) + +```cpp +struct StyleRef { + using FileNameType = std::string; +}; +``` + +### 1.1 对象/数据成员 — camelCase(小驼峰) + +```cpp +struct StyleRef { + std::string fileName; +}; +StyleRef mcppStyle; +``` + +### 1.2 函数 — snake_case(下划线) + +```cpp +void load_config_file(const std::string& fileName); +void parse_(); +int max_retry_count(); +``` + +### 1.3 私有 — `_` 后缀 + +私有的数据成员和函数使用 `_` 后缀: + +```cpp +private: + std::string fileName_; + void parse_(const std::string& config); +``` + +### 1.4 空格 + +运算符两侧加空格以增强可读性:`T x { ... 
}`、`int n { 42 }`。 + +### 1.5 其他 + +- 常量:`MAX_SIZE`、`DEFAULT_TIMEOUT` +- 全局:`gStyleRef`、`g_debug` +- 模板命名:遵循类/函数命名风格 + +--- + +## 二、模块化 + +### 模块文件结构 + +```cpp +module; // 可选的全局模块片段 +#include // 需要传统头文件时 + +export module module_name; +// export import :partition; +// import :partition; + +import std; +import xxx; + +export int add(int a, int b) { + return a + b; +} +``` + +### .cppm 与 .h/.hpp + +使用 `.cppm` 作为模块接口。用 `export` 关键字导出: + +```cpp +export module mcpplibs; + +export int add(int a, int b) { + return a + b; +} +``` + +### 接口与实现 + +合并(全部在 .cppm)与分离(.cppm + .cpp)均有效。 + +**合并于 .cppm** — 见上方「.cppm 与 .h/.hpp」:导出与实现在同一文件。 + +**方式一:命名空间隔离** + +```cpp +export module mcpplibs; + +namespace mcpplibs_impl { + int add(int a, int b) { return a + b; } +} + +export namespace mcpplibs { + using mcpplibs_impl::add; +}; +``` + +**方式二:分离(.cppm + .cpp)** + +- `.cppm`:仅接口 — `export module error;` + `export struct Error { void test(); };` +- `.cpp`:实现 — `module error;` + 函数体 + +简单模块用合并;需隐藏实现或减少编译依赖时用分离。 + +### 多文件模块 + +``` +a/ +├── a1.cppm # module a:a1(内部分区) +├── a2.cppm # export module a:a2 +├── b/ +│ ├── b1.cppm # export module a.b:b1 +│ └── b2.cppm # export module a.b:b2 +├── b.cppm # export module a.b +└── c.cppm # module a.c +a.cppm # export module a +``` + +- **可导出分区**:`export module a:a2;` — 可被重新导出 +- **内部分区**:`module a:a1;` — 不导出,仅模块内部使用 + +```cpp +// a.cppm +export module a; +export import :a2; +import :a1; +``` + +### 向前兼容 + +将传统 C/C++ 头文件封装到兼容模块中: + +```cpp +module; + +#include +// ... + +export module lua; + +export namespace lua { + using lua_State = ::lua_State; + // ... 
+} +``` + +### 其他 + +- 优先用 `constexpr` 替代宏 +- 模板的静态成员:使用 `inline static`(C++17)确保单一定义 + +--- + +## 三、实践参考 + +### auto + +用于迭代器、lambda、复杂类型。显式类型更清晰时避免使用。 + +### 花括号初始化 + +`int n { 42 }`、`std::vector v { 1, 2, 3 }`、`Point p { 10, 20 }`。 + +### 智能指针 + +`std::make_unique`、`std::make_shared`;避免裸 `new`/`delete`。 + +### string_view + +用于只读字符串参数。不拥有数据,调用方需保证底层数据有效。 + +### optional / expected + +- `std::optional`:可有可无的值 +- `std::expected`(C++23):成功返回值或错误 + +### RAII + +将资源与对象生命周期绑定。使用 `std::fstream`、`std::lock_guard` 等。 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..0e38a2a --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,160 @@ +name: CI + +on: + push: + branches: [main] + pull_request: + branches: [main] + +env: + XLINGS_VERSION: v0.4.0 + +jobs: + build-linux: + runs-on: ubuntu-24.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install system deps + run: | + sudo apt-get update -qq + sudo apt-get install -y curl git build-essential + + - name: Install Xlings + env: + XLINGS_NON_INTERACTIVE: 1 + run: | + VERSION_NUM="${XLINGS_VERSION#v}" + TARBALL="xlings-${VERSION_NUM}-linux-x86_64.tar.gz" + curl -fSL -o "$RUNNER_TEMP/$TARBALL" "https://github.com/d2learn/xlings/releases/download/${XLINGS_VERSION}/${TARBALL}" + tar -xzf "$RUNNER_TEMP/$TARBALL" -C "$RUNNER_TEMP" + EXTRACT_DIR=$(find "$RUNNER_TEMP" -maxdepth 1 -type d -name "xlings-*" | head -1) + chmod +x "$EXTRACT_DIR/bin/xlings" + "$EXTRACT_DIR/bin/xlings" self install + echo "PATH=$HOME/.xlings/subos/current/bin:$PATH" >> "$GITHUB_ENV" + + - name: Install project dependencies via Xlings + run: | + xlings install + xmake --version + gcc --version + + - name: Build with xmake + run: | + xmake f -m release -y -vvD + xmake -a -j"$(nproc)" + + - name: Run tests + run: | + xmake run test_socket -y + xmake run test_tls -y + xmake run test_http -y + xmake run test_sse -y + xmake run test_http_stream -y + xmake run test_proxy -y + xmake run 
test_integration -y + xmake run test_types -y + xmake run test_serialization -y + xmake run test_coro -y + xmake run test_client -y + xmake run test_openai_serialize -y + xmake run test_anthropic_serialize -y + xmake run test_tool_calling -y + xmake run test_structured_output -y + xmake run test_embeddings -y + xmake run test_llmapi_integration -y + + build-macos: + runs-on: macos-15 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install Xlings + env: + XLINGS_NON_INTERACTIVE: 1 + run: | + VERSION_NUM="${XLINGS_VERSION#v}" + TARBALL="xlings-${VERSION_NUM}-macosx-arm64.tar.gz" + curl -fSL -o "$RUNNER_TEMP/$TARBALL" "https://github.com/d2learn/xlings/releases/download/${XLINGS_VERSION}/${TARBALL}" + tar -xzf "$RUNNER_TEMP/$TARBALL" -C "$RUNNER_TEMP" + EXTRACT_DIR=$(find "$RUNNER_TEMP" -maxdepth 1 -type d -name "xlings-*" | head -1) + xattr -dr com.apple.quarantine "$EXTRACT_DIR" 2>/dev/null || true + chmod +x "$EXTRACT_DIR/bin/xlings" + "$EXTRACT_DIR/bin/xlings" self install + echo "PATH=$HOME/.xlings/subos/current/bin:$PATH" >> "$GITHUB_ENV" + + - name: Install project dependencies via Xlings + run: | + xlings install + clang --version + + - name: Configure xmake + run: | + LLVM_ROOT="$HOME/.xlings/data/xpkgs/xim-x-llvm" + LLVM_SDK=$(find "$LLVM_ROOT" -mindepth 1 -maxdepth 1 -type d | sort -V | tail -1) + test -d "$LLVM_SDK" + "$LLVM_SDK/bin/clang++" --version + xmake f -m release --toolchain=llvm --sdk="$LLVM_SDK" -y -vvD + + - name: Build with xmake + run: xmake -a -j"$(sysctl -n hw.logicalcpu)" + + - name: Run tests + run: | + xmake run test_socket -y + xmake run test_tls -y + xmake run test_http -y + xmake run test_sse -y + xmake run test_http_stream -y + xmake run test_proxy -y + xmake run test_integration -y + xmake run test_types -y + xmake run test_serialization -y + xmake run test_coro -y + xmake run test_client -y + xmake run test_openai_serialize -y + xmake run test_anthropic_serialize -y + xmake run test_tool_calling -y + xmake 
run test_structured_output -y + xmake run test_embeddings -y + xmake run test_llmapi_integration -y + + build-windows: + runs-on: windows-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup xmake + uses: xmake-io/github-action-setup-xmake@v1 + with: + xmake-version: latest + + - name: Build with xmake + shell: pwsh + run: | + xmake f -m release -y -vvD + xmake -a -j$env:NUMBER_OF_PROCESSORS + + - name: Run tests + shell: pwsh + run: | + xmake run test_socket -y + xmake run test_tls -y + xmake run test_http -y + xmake run test_sse -y + xmake run test_http_stream -y + xmake run test_proxy -y + xmake run test_integration -y + xmake run test_types -y + xmake run test_serialization -y + xmake run test_coro -y + xmake run test_client -y + xmake run test_openai_serialize -y + xmake run test_anthropic_serialize -y + xmake run test_tool_calling -y + xmake run test_structured_output -y + xmake run test_embeddings -y + xmake run test_llmapi_integration -y diff --git a/.gitignore b/.gitignore index effd0db..66c473c 100644 --- a/.gitignore +++ b/.gitignore @@ -58,3 +58,7 @@ Thumbs.db # Logs *.log + +# Xlings + +.xlings diff --git a/.xlings.json b/.xlings.json new file mode 100644 index 0000000..380bca6 --- /dev/null +++ b/.xlings.json @@ -0,0 +1,9 @@ +{ + "workspace": { + "cmake": "4.0.2", + "ninja": "1.12.1", + "xmake": "3.0.7", + "gcc": { "linux": "15.1.0" }, + "llvm": { "macosx": "20.1.7" } + } +} \ No newline at end of file diff --git a/examples/basic.cpp b/examples/basic.cpp index ce51416..2e9282a 100644 --- a/examples/basic.cpp +++ b/examples/basic.cpp @@ -1,18 +1,21 @@ // Basic usage example - demonstrates both streaming and non-streaming modes -import std; import mcpplibs.llmapi; +import std; -using namespace mcpplibs; +using namespace mcpplibs::llmapi; int main() { - auto api_key = std::getenv("OPENAI_API_KEY"); - if (!api_key) { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { std::println("Error: OPENAI_API_KEY not 
set"); return 1; } - llmapi::Client client(api_key, llmapi::URL::Poe); - client.model("gpt-5").system("You are a helpful assistant."); + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + client.system("You are a helpful assistant."); std::println("=== llmapi Basic Usage Demo ===\n"); @@ -20,44 +23,24 @@ int main() { // Example 1: Non-streaming request std::println("[Example 1] Non-streaming mode:"); std::println("Question: What is the capital of China?\n"); - - client.user("What is the capital of China?"); - client.request(); - - std::println("Answer: {}\n", client.getAnswer()); + + auto resp = client.chat("What is the capital of China?"); + std::println("Answer: {}\n", resp.text()); // Example 2: Streaming request std::println("[Example 2] Streaming mode:"); std::println("Question: Convince me to use modern C++ (100 words)\n"); - client.user("Convince me to use modern C++ (100 words)"); + client.clear(); + client.system("You are a helpful assistant."); std::print("Answer: "); - - client.request([](std::string_view chunk) { - std::print("{}", chunk); - std::cout.flush(); - }); - + auto resp2 = client.chat_stream("Convince me to use modern C++ (100 words)", + [](std::string_view chunk) { + std::print("{}", chunk); + }); std::println("\n"); + std::println("[Verification] Answer length: {} chars\n", resp2.text().size()); - // Verify auto-save: get the last answer - auto last_answer = client.getAnswer(); - std::println("[Verification] Last answer length: {} chars\n", last_answer.size()); - - // Example 3: Translate the story to Chinese - std::println("[Example 3] Translation (streaming):"); - std::println("Question: 请把上个回答翻译成中文。\n"); - - client.user("请把上面的故事翻译成中文。"); - std::print("Answer: "); - - client.request([](std::string_view chunk) { - std::print("{}", chunk); - std::cout.flush(); - }); - - std::println("\n"); - } catch (const std::exception& e) { std::println("\nError: {}\n", e.what()); return 1; @@ -65,4 +48,4 @@ int 
main() { std::println("=== Demo Complete ==="); return 0; -} \ No newline at end of file +} diff --git a/examples/chat.cpp b/examples/chat.cpp index 59a4d47..9081f12 100644 --- a/examples/chat.cpp +++ b/examples/chat.cpp @@ -1,18 +1,21 @@ // Simple and elegant AI chat CLI tool using streaming -import std; import mcpplibs.llmapi; +import std; -using namespace mcpplibs; +using namespace mcpplibs::llmapi; int main() { - auto api_key = std::getenv("OPENAI_API_KEY"); - if (!api_key) { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { std::println("Error: OPENAI_API_KEY not set"); return 1; } - llmapi::Client client(api_key, llmapi::URL::Poe); - client.model("gpt-5").system("You are a helpful assistant."); + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + client.system("You are a helpful assistant."); std::println("AI Chat CLI - Type 'quit' to exit\n"); @@ -29,20 +32,16 @@ int main() { if (input.empty()) continue; try { - client.user(input); std::print("\nAI: "); - - client.request([](std::string_view chunk) { + client.chat_stream(input, [](std::string_view chunk) { std::print("{}", chunk); - std::cout.flush(); }); - std::println("\n"); - + } catch (const std::exception& e) { std::println("\nError: {}\n", e.what()); } } return 0; -} \ No newline at end of file +} diff --git a/examples/hello_mcpp.cpp b/examples/hello_mcpp.cpp index 291f2de..33462f2 100644 --- a/examples/hello_mcpp.cpp +++ b/examples/hello_mcpp.cpp @@ -1,19 +1,23 @@ // Minimal example - simplest way to use llmapi -import std; import mcpplibs.llmapi; +import std; + +using namespace mcpplibs::llmapi; int main() { - using namespace mcpplibs; - - llmapi::Client client(std::getenv("OPENAI_API_KEY"), llmapi::URL::Poe); + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + std::println("Error: OPENAI_API_KEY not set"); + return 1; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); - 
client.model("gpt-5") - .system("You are a helpful assistant.") - .user("In one sentence, introduce modern C++. 并给出中文翻译") - .request([](std::string_view chunk) { - std::print("{}", chunk); - std::cout.flush(); - }); + auto resp = client.chat("Hello! In one sentence, introduce modern C++."); + std::println("{}", resp.text()); return 0; -} \ No newline at end of file +} diff --git a/src/client.cppm b/src/client.cppm new file mode 100644 index 0000000..c46e4ed --- /dev/null +++ b/src/client.cppm @@ -0,0 +1,110 @@ +export module mcpplibs.llmapi:client; + +import :types; +import :provider; +import :coro; +import std; + +export namespace mcpplibs::llmapi { + +template +class Client { +private: + P provider_; + Conversation conversation_; + ChatParams defaultParams_; + +public: + explicit Client(P provider) : provider_(std::move(provider)) {} + + // Config (chainable) + Client& default_params(ChatParams params) { + defaultParams_ = std::move(params); + return *this; + } + + // Message management + Client& system(std::string_view content) { + conversation_.push(Message::system(content)); + return *this; + } + Client& user(std::string_view content) { + conversation_.push(Message::user(content)); + return *this; + } + Client& add_message(Message msg) { + conversation_.push(std::move(msg)); + return *this; + } + Client& clear() { + conversation_.clear(); + return *this; + } + + // Sync chat + ChatResponse chat(std::string_view userMessage) { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat(conversation_.messages, defaultParams_); + conversation_.push(Message::assistant(response.text())); + return response; + } + ChatResponse chat(std::string_view userMessage, ChatParams params) { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat(conversation_.messages, params); + conversation_.push(Message::assistant(response.text())); + return response; + } + + // Async chat (synchronous implementation wrapped as Task to 
avoid GCC coroutine-in-template issues) + Task chat_async(std::string_view userMessage) { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat(conversation_.messages, defaultParams_); + conversation_.push(Message::assistant(response.text())); + co_return response; + } + + // Streaming (requires StreamableProvider) + ChatResponse chat_stream(std::string_view userMessage, + std::function callback) + requires StreamableProvider

+ { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat_stream(conversation_.messages, defaultParams_, std::move(callback)); + conversation_.push(Message::assistant(response.text())); + return response; + } + + Task chat_stream_async(std::string_view userMessage, + std::function callback) + requires StreamableProvider

+ { + conversation_.push(Message::user(userMessage)); + auto response = provider_.chat_stream(conversation_.messages, defaultParams_, std::move(callback)); + conversation_.push(Message::assistant(response.text())); + co_return response; + } + + // Embeddings (requires EmbeddableProvider) + EmbeddingResponse embed(const std::vector& inputs, std::string_view model) + requires EmbeddableProvider

+ { + return provider_.embed(inputs, model); + } + + // Conversation access + const Conversation& conversation() const { return conversation_; } + Conversation& conversation() { return conversation_; } + + void save_conversation(std::string_view filePath) const { + conversation_.save(filePath); + } + void load_conversation(std::string_view filePath) { + conversation_ = Conversation::load(filePath); + } + + // Provider access + const P& provider() const { return provider_; } + P& provider() { return provider_; } +}; + +} // namespace mcpplibs::llmapi diff --git a/src/coro.cppm b/src/coro.cppm new file mode 100644 index 0000000..2a7ffcf --- /dev/null +++ b/src/coro.cppm @@ -0,0 +1,111 @@ +export module mcpplibs.llmapi:coro; + +import std; + +export namespace mcpplibs::llmapi { + +template +class Task { +public: + struct promise_type { + std::optional value; + std::exception_ptr exception; + + Task get_return_object() { + return Task{std::coroutine_handle::from_promise(*this)}; + } + std::suspend_always initial_suspend() noexcept { return {}; } + std::suspend_always final_suspend() noexcept { return {}; } + void return_value(T val) { value = std::move(val); } + void unhandled_exception() { exception = std::current_exception(); } + }; + +private: + std::coroutine_handle handle_; + +public: + explicit Task(std::coroutine_handle h) : handle_(h) {} + ~Task() { if (handle_) handle_.destroy(); } + + // Move only + Task(Task&& other) noexcept : handle_(std::exchange(other.handle_, {})) {} + Task& operator=(Task&& other) noexcept { + if (this != &other) { + if (handle_) handle_.destroy(); + handle_ = std::exchange(other.handle_, {}); + } + return *this; + } + Task(const Task&) = delete; + Task& operator=(const Task&) = delete; + + // Awaitable + bool await_ready() const noexcept { return handle_.done(); } + void await_suspend(std::coroutine_handle<> awaiter) noexcept { + handle_.resume(); + awaiter.resume(); + } + T await_resume() { + if (handle_.promise().exception) + 
std::rethrow_exception(handle_.promise().exception); + return std::move(*handle_.promise().value); + } + + // Sync get + T get() { + if (!handle_.done()) handle_.resume(); + if (handle_.promise().exception) + std::rethrow_exception(handle_.promise().exception); + return std::move(*handle_.promise().value); + } +}; + +// Task specialization +template<> +class Task { +public: + struct promise_type { + std::exception_ptr exception; + Task get_return_object() { + return Task{std::coroutine_handle::from_promise(*this)}; + } + std::suspend_always initial_suspend() noexcept { return {}; } + std::suspend_always final_suspend() noexcept { return {}; } + void return_void() noexcept {} + void unhandled_exception() { exception = std::current_exception(); } + }; + +private: + std::coroutine_handle handle_; + +public: + explicit Task(std::coroutine_handle h) : handle_(h) {} + ~Task() { if (handle_) handle_.destroy(); } + Task(Task&& other) noexcept : handle_(std::exchange(other.handle_, {})) {} + Task& operator=(Task&& other) noexcept { + if (this != &other) { + if (handle_) handle_.destroy(); + handle_ = std::exchange(other.handle_, {}); + } + return *this; + } + Task(const Task&) = delete; + Task& operator=(const Task&) = delete; + + bool await_ready() const noexcept { return handle_.done(); } + void await_suspend(std::coroutine_handle<> awaiter) noexcept { + handle_.resume(); + awaiter.resume(); + } + void await_resume() { + if (handle_.promise().exception) + std::rethrow_exception(handle_.promise().exception); + } + void get() { + if (!handle_.done()) handle_.resume(); + if (handle_.promise().exception) + std::rethrow_exception(handle_.promise().exception); + } +}; + +} // namespace mcpplibs::llmapi diff --git a/src/errors.cppm b/src/errors.cppm new file mode 100644 index 0000000..0c0b1ee --- /dev/null +++ b/src/errors.cppm @@ -0,0 +1,28 @@ +export module mcpplibs.llmapi:errors; + +import std; + +export namespace mcpplibs::llmapi { + +// Base API error with HTTP status and 
structured error info +class ApiError : public std::runtime_error { +public: + int statusCode; + std::string type; + std::string body; + + ApiError(int status, std::string errorType, std::string errorBody, const std::string& message) + : std::runtime_error(message) + , statusCode(status) + , type(std::move(errorType)) + , body(std::move(errorBody)) + {} +}; + +// Network/connection errors (DNS, TLS, timeout) +class ConnectionError : public std::runtime_error { +public: + using std::runtime_error::runtime_error; +}; + +} // namespace mcpplibs::llmapi diff --git a/src/llmapi.cppm b/src/llmapi.cppm index 9da4092..64d642b 100644 --- a/src/llmapi.cppm +++ b/src/llmapi.cppm @@ -1,7 +1,13 @@ export module mcpplibs.llmapi; +export import :types; export import :url; +export import :coro; +export import :provider; +export import :client; export import :openai; +export import :anthropic; +export import :errors; import std; @@ -9,7 +15,6 @@ import mcpplibs.llmapi.nlohmann.json; namespace mcpplibs::llmapi { export using OpenAI = openai::OpenAI; - export using Client = openai::OpenAI; export using URL = llmapi::URL; export using Json = nlohmann::json; } // namespace mcpplibs::llmapi \ No newline at end of file diff --git a/src/openai.cppm b/src/openai.cppm deleted file mode 100644 index 66240d5..0000000 --- a/src/openai.cppm +++ /dev/null @@ -1,323 +0,0 @@ -module; - -#include - -export module mcpplibs.llmapi:openai; - -export import :url; - -import std; - -import mcpplibs.llmapi.nlohmann.json; - -export namespace mcpplibs::llmapi::openai { - -using Json = nlohmann::json; - -// Concept to constrain callback type -template -concept StreamCallback = std::invocable && - std::same_as, void>; - -class OpenAI { - std::string mApiKey; - std::string mBaseUrl; - std::string mModel; - std::string mEndpoint; - Json mMessages; - -public: - OpenAI(std::string_view apiKey, std::string_view baseUrl = llmapi::URL::OpenAI) - : mApiKey(apiKey), - mBaseUrl(baseUrl), - mMessages(Json::array()) - { 
- if (mApiKey.empty()) { - throw std::runtime_error("API key cannot be empty"); - } - } - - // add safe check for const char* overload - example: std::getenv("KEY") - OpenAI(const char* apiKey, std::string_view baseUrl = llmapi::URL::OpenAI) - : OpenAI(std::string_view(apiKey ? apiKey : ""), baseUrl) { } - - // Rule of five - explicitly defaulted - OpenAI(const OpenAI&) = default; - OpenAI(OpenAI&&) = default; - OpenAI& operator=(const OpenAI&) = default; - OpenAI& operator=(OpenAI&&) = default; - ~OpenAI() = default; - -public: // config methods (chainable) - - OpenAI& model(std::string_view model) { - mEndpoint = mBaseUrl + "/chat/completions"; - mModel = model; - return *this; - } - -public: // Message methods - - // Add messages - OpenAI& add_message(std::string_view role, std::string_view content) { - mMessages.push_back({ - {"role", role}, - {"content", content} - }); - return *this; - } - - OpenAI& user(std::string_view content) { - return add_message("user", content); - } - - OpenAI& system(std::string_view content) { - return add_message("system", content); - } - - OpenAI& assistant(std::string_view content) { - return add_message("assistant", content); - } - - // Clear conversation history - OpenAI& clear() { - mMessages = Json::array(); - return *this; - } - -public: - - // Getters - std::string_view getApiKey() const { return mApiKey; } - std::string_view getBaseUrl() const { return mBaseUrl; } - std::string_view getModel() const { return mModel; } - - Json getMessages() const { return mMessages; } - int getMessageCount() const { return static_cast(mMessages.size()) / 2; } - - std::string getAnswer() const { - if (mMessages.empty()) return ""; - const auto& lastMessage = mMessages.back(); - if (lastMessage.contains("role") && lastMessage["role"] == "assistant" && - lastMessage.contains("content")) { - return lastMessage["content"].get(); - } - return ""; - } - -public: // Request methods - - // Execute request (non-streaming) - auto saves assistant 
reply - Json request() { - validate_request(); - auto response = send_request(build_payload(mMessages, false)); - - // Auto-save assistant reply to conversation history - if (response.contains("choices") && !response["choices"].empty()) { - auto& choice = response["choices"][0]; - if (choice.contains("message") && choice["message"].contains("content")) { - std::string content = choice["message"]["content"]; - assistant(content); - } - } - - return response; - } - - // One-shot request without building conversation (non-streaming) - Json request(const Json& messages) { - validate_request(); - return send_request(build_payload(messages, false)); - } - - // Execute request with callback (streaming) - auto saves assistant reply - template - void request(Callback&& callback) { - validate_request(); - - // Wrapper to collect full response - std::string full_response; - auto wrapper_callback = [&full_response, &callback](std::string_view chunk) { - full_response += chunk; - callback(chunk); - }; - - send_stream_request(build_payload(mMessages, true), wrapper_callback); - - // Auto-save assistant reply to conversation history - if (!full_response.empty()) { - assistant(full_response); - } - } - -private: - struct StreamContext { - std::function callback; - std::string buffer; - }; - - // Validate request preconditions - void validate_request() const { - if (mEndpoint.empty()) { - throw std::runtime_error("Endpoint not set. 
Call model() first."); - } - if (mModel.empty()) { - throw std::runtime_error("Model not set."); - } - } - - // Build request payload - Json build_payload(const Json& messages, bool stream) const { - Json payload; - payload["model"] = mModel; - payload["messages"] = messages; - if (stream) { - payload["stream"] = true; - } - return payload; - } - - // Setup common CURL headers - struct curl_slist* setup_headers() const { - struct curl_slist* headers = nullptr; - headers = curl_slist_append(headers, "Content-Type: application/json"); - std::string authHeader = "Authorization: Bearer " + mApiKey; - headers = curl_slist_append(headers, authHeader.c_str()); - return headers; - } - - Json send_request(const Json& payload) { - std::string payloadStr = payload.dump(); - std::string response; - - CURL* curl = curl_easy_init(); - if (!curl) { - throw std::runtime_error("Failed to initialize CURL"); - } - - // Set up headers - struct curl_slist* headers = setup_headers(); - - // Set CURL options - curl_easy_setopt(curl, CURLOPT_URL, mEndpoint.c_str()); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payloadStr.c_str()); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallback); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); - - // Perform the request - CURLcode res = curl_easy_perform(curl); - - // Cleanup - curl_slist_free_all(headers); - curl_easy_cleanup(curl); - - if (res != CURLE_OK) { - throw std::runtime_error(std::string("CURL error: ") + curl_easy_strerror(res)); - } - - return Json::parse(response); - } - - template - void send_stream_request(const Json& payload, Callback&& callback) { - std::string payloadStr = payload.dump(); - - CURL* curl = curl_easy_init(); - if (!curl) { - throw std::runtime_error("Failed to initialize CURL"); - } - - StreamContext context; - context.callback = std::forward(callback); - - // Set up headers - struct curl_slist* headers = setup_headers(); - - // Set CURL options 
- curl_easy_setopt(curl, CURLOPT_URL, mEndpoint.c_str()); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payloadStr.c_str()); - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, streamCallback); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &context); - - // Perform the request - CURLcode res = curl_easy_perform(curl); - - // Cleanup - curl_slist_free_all(headers); - curl_easy_cleanup(curl); - - if (res != CURLE_OK) { - throw std::runtime_error(std::string("CURL error: ") + curl_easy_strerror(res)); - } - } - - static size_t writeCallback(void* contents, size_t size, size_t nmemb, void* userp) { - size_t totalSize = size * nmemb; - std::string* response = static_cast(userp); - response->append(static_cast(contents), totalSize); - return totalSize; - } - - static size_t streamCallback(void* contents, size_t size, size_t nmemb, void* userp) { - size_t totalSize = size * nmemb; - StreamContext* context = static_cast(userp); - - std::string_view data(static_cast(contents), totalSize); - context->buffer.append(data); - - // Process SSE data line by line - size_t pos = 0; - while ((pos = context->buffer.find('\n')) != std::string::npos) { - std::string line = context->buffer.substr(0, pos); - context->buffer.erase(0, pos + 1); - - // Remove \r if present - if (!line.empty() && line.back() == '\r') { - line.pop_back(); - } - - // Skip empty lines - if (line.empty()) { - continue; - } - - // Check for data: prefix - if (line.starts_with("data: ")) { - std::string jsonStr = line.substr(6); - - // Check for [DONE] message - if (jsonStr == "[DONE]") { - continue; - } - - try { - auto chunk = Json::parse(jsonStr); - - // Extract content from the chunk - if (chunk.contains("choices") && !chunk["choices"].empty()) { - auto& choice = chunk["choices"][0]; - - // For chat completions streaming - if (choice.contains("delta") && choice["delta"].contains("content")) { - std::string content = choice["delta"]["content"]; - 
context->callback(content); - } - // For responses endpoint streaming - else if (choice.contains("message") && choice["message"].contains("content")) { - std::string content = choice["message"]["content"]; - context->callback(content); - } - } - } catch (const Json::exception& e) { - // Silently ignore JSON parsing errors in streaming - } - } - } - - return totalSize; - } -}; - -} // namespace mcpplibs::openai \ No newline at end of file diff --git a/src/providers/anthropic.cppm b/src/providers/anthropic.cppm new file mode 100644 index 0000000..cb64bae --- /dev/null +++ b/src/providers/anthropic.cppm @@ -0,0 +1,473 @@ +module; + +#include + +export module mcpplibs.llmapi:anthropic; + +export import :url; + +import :types; +import :coro; +import mcpplibs.tinyhttps; +import mcpplibs.llmapi.nlohmann.json; +import std; + +export namespace mcpplibs::llmapi::anthropic { + +using Json = nlohmann::json; + +struct Config { + std::string apiKey; + std::string baseUrl { "https://api.anthropic.com/v1" }; + std::string model; + std::string version { "2023-06-01" }; // anthropic-version header + int defaultMaxTokens { 4096 }; // REQUIRED by Anthropic + std::optional proxy; + std::map customHeaders; +}; + +class Anthropic { +private: + Config config_; + tinyhttps::HttpClient http_; + +public: + explicit Anthropic(Config config) + : config_(std::move(config)) + , http_(tinyhttps::HttpClientConfig { + .proxy = config_.proxy, + .keepAlive = true, + }) + { + } + + // Non-copyable (HttpClient owns TLS connections) + Anthropic(const Anthropic&) = delete; + Anthropic& operator=(const Anthropic&) = delete; + Anthropic(Anthropic&&) = default; + Anthropic& operator=(Anthropic&&) = default; + + // Provider concept + std::string_view name() const { return "anthropic"; } + + ChatResponse chat(const std::vector& messages, const ChatParams& params) { + auto payload = build_payload_(messages, params, false); + auto request = build_request_("/messages", payload); + auto response = 
http_.send(request); + if (!response.ok()) { + throw std::runtime_error("Anthropic API error: " + + std::to_string(response.statusCode) + " " + response.body); + } + return parse_response_(Json::parse(response.body)); + } + + Task chat_async(const std::vector& messages, const ChatParams& params) { + co_return chat(messages, params); + } + + // StreamableProvider + ChatResponse chat_stream(const std::vector& messages, const ChatParams& params, + std::function callback) { + auto payload = build_payload_(messages, params, true); + auto request = build_request_("/messages", payload); + + ChatResponse result; + std::string fullContent; + std::string currentToolId; + std::string currentToolName; + std::string currentToolArgs; + bool inToolCall = false; + + auto sseResponse = http_.send_stream(request, [&](const tinyhttps::SseEvent& event) -> bool { + // Anthropic uses named events + if (event.event == "message_stop") { + return false; + } + if (event.event == "ping") { + return true; + } + + try { + auto chunk = Json::parse(event.data); + + if (event.event == "message_start") { + if (chunk.contains("message")) { + const auto& msg = chunk["message"]; + if (msg.contains("id")) { + result.id = msg["id"].get(); + } + if (msg.contains("model")) { + result.model = msg["model"].get(); + } + if (msg.contains("usage")) { + result.usage.inputTokens = msg["usage"].value("input_tokens", 0); + } + } + } else if (event.event == "content_block_start") { + if (chunk.contains("content_block")) { + const auto& block = chunk["content_block"]; + auto type = block.value("type", ""); + if (type == "tool_use") { + // Flush previous tool call if any + if (inToolCall) { + result.content.push_back(ToolUseContent { + .id = currentToolId, + .name = currentToolName, + .inputJson = currentToolArgs, + }); + } + currentToolId = block.value("id", ""); + currentToolName = block.value("name", ""); + currentToolArgs = ""; + inToolCall = true; + } + } + } else if (event.event == "content_block_delta") { + 
if (chunk.contains("delta")) { + const auto& delta = chunk["delta"]; + auto type = delta.value("type", ""); + if (type == "text_delta") { + std::string text = delta.value("text", ""); + fullContent += text; + callback(text); + } else if (type == "input_json_delta") { + currentToolArgs += delta.value("partial_json", ""); + } + } + } else if (event.event == "content_block_stop") { + // Block complete — nothing special needed here + } else if (event.event == "message_delta") { + if (chunk.contains("delta")) { + const auto& delta = chunk["delta"]; + if (delta.contains("stop_reason") && !delta["stop_reason"].is_null()) { + result.stopReason = parse_stop_reason_(delta["stop_reason"].get()); + } + } + if (chunk.contains("usage")) { + result.usage.outputTokens = chunk["usage"].value("output_tokens", 0); + result.usage.totalTokens = result.usage.inputTokens + result.usage.outputTokens; + } + } + } catch (const Json::exception&) { + // Skip malformed chunks + } + return true; + }); + + // Flush last tool call if any + if (inToolCall) { + result.content.push_back(ToolUseContent { + .id = currentToolId, + .name = currentToolName, + .inputJson = currentToolArgs, + }); + } + + // Add text content if present + if (!fullContent.empty()) { + result.content.insert(result.content.begin(), TextContent { .text = fullContent }); + } + + if (!sseResponse.ok()) { + throw std::runtime_error("Anthropic API stream error: " + + std::to_string(sseResponse.statusCode) + " " + sseResponse.statusText); + } + + return result; + } + + Task chat_stream_async(const std::vector& messages, const ChatParams& params, + std::function callback) { + co_return chat_stream(messages, params, std::move(callback)); + } + + // NOTE: No embed() — Anthropic doesn't have an embeddings API + +private: + // Serialization — extract system message and serialize remaining messages + std::pair extract_system_and_messages_(const std::vector& messages) const { + std::string systemText; + Json arr = Json::array(); + + for 
(const auto& msg : messages) { + if (msg.role == Role::System) { + // Extract system content as top-level field + if (auto* text = std::get_if(&msg.content)) { + if (!systemText.empty()) systemText += "\n"; + systemText += *text; + } else if (auto* parts = std::get_if>(&msg.content)) { + for (const auto& part : *parts) { + if (auto* t = std::get_if(&part)) { + if (!systemText.empty()) systemText += "\n"; + systemText += t->text; + } + } + } + continue; + } + + if (msg.role == Role::Tool) { + // Convert Tool messages to user messages with tool_result content blocks + Json j; + j["role"] = "user"; + Json contentArr = Json::array(); + if (auto* parts = std::get_if>(&msg.content)) { + for (const auto& part : *parts) { + if (auto* tr = std::get_if(&part)) { + Json block; + block["type"] = "tool_result"; + block["tool_use_id"] = tr->toolUseId; + block["content"] = tr->content; + if (tr->isError) { + block["is_error"] = true; + } + contentArr.push_back(block); + } + } + } + j["content"] = contentArr; + arr.push_back(j); + continue; + } + + arr.push_back(serialize_message_(msg)); + } + + return {systemText, arr}; + } + + Json serialize_message_(const Message& msg) const { + Json j; + j["role"] = role_string_(msg.role); + + std::visit([&](const auto& c) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + j["content"] = c; + } else { + // vector + bool hasOnlyText = true; + std::string textOnly; + for (const auto& part : c) { + if (!std::holds_alternative(part)) { + hasOnlyText = false; + break; + } + } + + if (hasOnlyText && c.size() == 1) { + // Single text block — use simple string + j["content"] = std::get(c[0]).text; + } else { + // Multimodal or multi-block — use content array + Json parts = Json::array(); + for (const auto& part : c) { + std::visit([&](const auto& p) { + using P = std::decay_t; + if constexpr (std::is_same_v) { + parts.push_back(Json{{"type", "text"}, {"text", p.text}}); + } else if constexpr (std::is_same_v) { + if (p.isUrl) { + 
parts.push_back(Json{ + {"type", "image"}, + {"source", Json{{"type", "url"}, {"url", p.data}}}, + }); + } else { + parts.push_back(Json{ + {"type", "image"}, + {"source", Json{ + {"type", "base64"}, + {"media_type", p.mediaType}, + {"data", p.data}, + }}, + }); + } + } else if constexpr (std::is_same_v) { + // Tool use in assistant messages — inline content blocks + Json block; + block["type"] = "tool_use"; + block["id"] = p.id; + block["name"] = p.name; + if (!p.inputJson.empty()) { + block["input"] = Json::parse(p.inputJson); + } else { + block["input"] = Json::object(); + } + parts.push_back(block); + } else if constexpr (std::is_same_v) { + // Tool results in user messages + Json block; + block["type"] = "tool_result"; + block["tool_use_id"] = p.toolUseId; + block["content"] = p.content; + if (p.isError) { + block["is_error"] = true; + } + parts.push_back(block); + } + }, part); + } + if (!parts.empty()) { + j["content"] = parts; + } + } + } + }, msg.content); + + return j; + } + + Json build_payload_(const std::vector& messages, const ChatParams& params, bool stream) const { + Json payload; + payload["model"] = config_.model; + + auto [systemText, msgArray] = extract_system_and_messages_(messages); + + if (!systemText.empty()) { + payload["system"] = systemText; + } + payload["messages"] = msgArray; + + // max_tokens is REQUIRED by Anthropic + payload["max_tokens"] = params.maxTokens.value_or(config_.defaultMaxTokens); + + if (stream) { + payload["stream"] = true; + } + + if (params.temperature.has_value()) { + payload["temperature"] = *params.temperature; + } + if (params.topP.has_value()) { + payload["top_p"] = *params.topP; + } + if (params.stop.has_value()) { + payload["stop_sequences"] = *params.stop; + } + + // Tools — Anthropic format (no function wrapper) + if (params.tools.has_value() && !params.tools->empty()) { + Json tools = Json::array(); + for (const auto& tool : *params.tools) { + Json t; + t["name"] = tool.name; + t["description"] = 
tool.description; + if (!tool.inputSchema.empty()) { + t["input_schema"] = Json::parse(tool.inputSchema); + } else { + t["input_schema"] = Json{{"type", "object"}}; + } + tools.push_back(t); + } + payload["tools"] = tools; + } + + // Tool choice — Anthropic format + if (params.toolChoice.has_value()) { + std::visit([&](const auto& tc) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + switch (tc) { + case ToolChoice::Auto: payload["tool_choice"] = Json{{"type", "auto"}}; break; + case ToolChoice::None: payload["tool_choice"] = Json{{"type", "none"}}; break; + case ToolChoice::Required: payload["tool_choice"] = Json{{"type", "any"}}; break; + } + } else if constexpr (std::is_same_v) { + payload["tool_choice"] = Json{ + {"type", "tool"}, + {"name", tc.name}, + }; + } + }, *params.toolChoice); + } + + // Extra JSON merge + if (params.extraJson.has_value() && !params.extraJson->empty()) { + auto extra = Json::parse(*params.extraJson); + payload.merge_patch(extra); + } + + return payload; + } + + // Deserialization + ChatResponse parse_response_(const Json& json) const { + ChatResponse result; + + result.id = json.value("id", ""); + result.model = json.value("model", ""); + + // Anthropic returns content as array of blocks + if (json.contains("content") && json["content"].is_array()) { + for (const auto& block : json["content"]) { + auto type = block.value("type", ""); + if (type == "text") { + result.content.push_back(TextContent { + .text = block.value("text", ""), + }); + } else if (type == "tool_use") { + std::string inputJson; + if (block.contains("input")) { + inputJson = block["input"].dump(); + } + result.content.push_back(ToolUseContent { + .id = block.value("id", ""), + .name = block.value("name", ""), + .inputJson = inputJson, + }); + } + } + } + + if (json.contains("stop_reason") && !json["stop_reason"].is_null()) { + result.stopReason = parse_stop_reason_(json["stop_reason"].get()); + } + + if (json.contains("usage")) { + const auto& usage = 
json["usage"]; + result.usage.inputTokens = usage.value("input_tokens", 0); + result.usage.outputTokens = usage.value("output_tokens", 0); + result.usage.totalTokens = result.usage.inputTokens + result.usage.outputTokens; + } + + return result; + } + + static StopReason parse_stop_reason_(const std::string& reason) { + if (reason == "end_turn") return StopReason::EndOfTurn; + if (reason == "max_tokens") return StopReason::MaxTokens; + if (reason == "tool_use") return StopReason::ToolUse; + if (reason == "stop_sequence") return StopReason::StopSequence; + return StopReason::EndOfTurn; + } + + static std::string role_string_(Role role) { + switch (role) { + case Role::System: return "system"; + case Role::User: return "user"; + case Role::Assistant: return "assistant"; + case Role::Tool: return "user"; // Anthropic: tool results go in user messages + } + return "user"; + } + + // HTTP helpers + tinyhttps::HttpRequest build_request_(std::string_view endpoint, const Json& payload) const { + tinyhttps::HttpRequest req; + req.method = tinyhttps::Method::POST; + req.url = config_.baseUrl + std::string(endpoint); + req.body = payload.dump(); + + req.headers["Content-Type"] = "application/json"; + req.headers["x-api-key"] = config_.apiKey; + req.headers["anthropic-version"] = config_.version; + + for (const auto& [key, value] : config_.customHeaders) { + req.headers[key] = value; + } + + return req; + } +}; + +} // namespace mcpplibs::llmapi::anthropic diff --git a/src/providers/openai.cppm b/src/providers/openai.cppm new file mode 100644 index 0000000..8607c41 --- /dev/null +++ b/src/providers/openai.cppm @@ -0,0 +1,473 @@ +module; + +#include + +export module mcpplibs.llmapi:openai; + +export import :url; + +import :types; +import :coro; +import mcpplibs.tinyhttps; +import mcpplibs.llmapi.nlohmann.json; +import std; + +export namespace mcpplibs::llmapi::openai { + +using Json = nlohmann::json; + +struct Config { + std::string apiKey; + std::string baseUrl { 
"https://api.openai.com/v1" }; + std::string model; + std::string organization; + std::optional proxy; + std::map customHeaders; +}; + +class OpenAI { +private: + Config config_; + tinyhttps::HttpClient http_; + +public: + explicit OpenAI(Config config) + : config_(std::move(config)) + , http_(tinyhttps::HttpClientConfig { + .proxy = config_.proxy, + .keepAlive = true, + }) + { + } + + // Non-copyable (HttpClient owns TLS connections) + OpenAI(const OpenAI&) = delete; + OpenAI& operator=(const OpenAI&) = delete; + OpenAI(OpenAI&&) = default; + OpenAI& operator=(OpenAI&&) = default; + + // Provider concept + std::string_view name() const { return "openai"; } + + ChatResponse chat(const std::vector& messages, const ChatParams& params) { + auto payload = build_payload_(messages, params, false); + auto request = build_request_("/chat/completions", payload); + auto response = http_.send(request); + if (!response.ok()) { + throw std::runtime_error("OpenAI API error: " + + std::to_string(response.statusCode) + " " + response.body); + } + return parse_response_(Json::parse(response.body)); + } + + Task chat_async(const std::vector& messages, const ChatParams& params) { + co_return chat(messages, params); + } + + // StreamableProvider + ChatResponse chat_stream(const std::vector& messages, const ChatParams& params, + std::function callback) { + auto payload = build_payload_(messages, params, true); + auto request = build_request_("/chat/completions", payload); + + ChatResponse result; + std::string fullContent; + std::string currentToolId; + std::string currentToolName; + std::string currentToolArgs; + bool inToolCall = false; + + auto sseResponse = http_.send_stream(request, [&](const tinyhttps::SseEvent& event) -> bool { + if (event.data == "[DONE]") { + return false; + } + try { + auto chunk = Json::parse(event.data); + if (result.id.empty() && chunk.contains("id")) { + result.id = chunk["id"].get(); + } + if (result.model.empty() && chunk.contains("model")) { + 
result.model = chunk["model"].get(); + } + if (chunk.contains("choices") && !chunk["choices"].empty()) { + const auto& choice = chunk["choices"][0]; + if (choice.contains("delta")) { + const auto& delta = choice["delta"]; + if (delta.contains("content") && !delta["content"].is_null()) { + std::string content = delta["content"].get(); + fullContent += content; + callback(content); + } + if (delta.contains("tool_calls")) { + for (const auto& tc : delta["tool_calls"]) { + if (tc.contains("id")) { + // New tool call starting — flush previous if any + if (inToolCall) { + result.content.push_back(ToolUseContent { + .id = currentToolId, + .name = currentToolName, + .inputJson = currentToolArgs, + }); + } + currentToolId = tc["id"].get(); + currentToolName = tc.contains("function") && tc["function"].contains("name") + ? tc["function"]["name"].get() : ""; + currentToolArgs = tc.contains("function") && tc["function"].contains("arguments") + ? tc["function"]["arguments"].get() : ""; + inToolCall = true; + } else { + // Continuation of existing tool call + if (tc.contains("function") && tc["function"].contains("arguments")) { + currentToolArgs += tc["function"]["arguments"].get(); + } + } + } + } + } + if (choice.contains("finish_reason") && !choice["finish_reason"].is_null()) { + result.stopReason = parse_stop_reason_(choice["finish_reason"].get()); + } + } + if (chunk.contains("usage") && !chunk["usage"].is_null()) { + const auto& usage = chunk["usage"]; + result.usage.inputTokens = usage.value("prompt_tokens", 0); + result.usage.outputTokens = usage.value("completion_tokens", 0); + result.usage.totalTokens = result.usage.inputTokens + result.usage.outputTokens; + } + } catch (const Json::exception&) { + // Skip malformed chunks + } + return true; + }); + + // Flush last tool call if any + if (inToolCall) { + result.content.push_back(ToolUseContent { + .id = currentToolId, + .name = currentToolName, + .inputJson = currentToolArgs, + }); + } + + // Add text content if present 
+ if (!fullContent.empty()) { + result.content.insert(result.content.begin(), TextContent { .text = fullContent }); + } + + if (!sseResponse.ok()) { + throw std::runtime_error("OpenAI API stream error: " + + std::to_string(sseResponse.statusCode) + " " + sseResponse.statusText); + } + + return result; + } + + Task chat_stream_async(const std::vector& messages, const ChatParams& params, + std::function callback) { + co_return chat_stream(messages, params, std::move(callback)); + } + + // EmbeddableProvider + EmbeddingResponse embed(const std::vector& inputs, std::string_view model) { + Json payload; + payload["model"] = std::string(model); + payload["input"] = inputs; + + auto request = build_request_("/embeddings", payload); + auto response = http_.send(request); + if (!response.ok()) { + throw std::runtime_error("OpenAI embeddings error: " + + std::to_string(response.statusCode) + " " + response.body); + } + + auto json = Json::parse(response.body); + EmbeddingResponse result; + result.model = json.value("model", std::string(model)); + + for (const auto& item : json["data"]) { + std::vector vec; + for (const auto& val : item["embedding"]) { + vec.push_back(val.get()); + } + result.embeddings.push_back(std::move(vec)); + } + + if (json.contains("usage")) { + result.usage.inputTokens = json["usage"].value("prompt_tokens", 0); + result.usage.totalTokens = json["usage"].value("total_tokens", 0); + } + + return result; + } + +private: + // Serialization + Json serialize_messages_(const std::vector& messages) const { + Json arr = Json::array(); + for (const auto& msg : messages) { + arr.push_back(serialize_message_(msg)); + } + return arr; + } + + Json serialize_message_(const Message& msg) const { + Json j; + j["role"] = role_string_(msg.role); + + std::visit([&](const auto& c) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + j["content"] = c; + } else { + // vector + Json parts = Json::array(); + for (const auto& part : c) { + std::visit([&](const auto& 
p) { + using P = std::decay_t; + if constexpr (std::is_same_v) { + parts.push_back(Json{{"type", "text"}, {"text", p.text}}); + } else if constexpr (std::is_same_v) { + Json imgUrl; + if (p.isUrl) { + imgUrl["url"] = p.data; + } else { + imgUrl["url"] = "data:" + p.mediaType + ";base64," + p.data; + } + parts.push_back(Json{{"type", "image_url"}, {"image_url", imgUrl}}); + } else if constexpr (std::is_same_v) { + // Tool use in assistant messages — handled via tool_calls field + } else if constexpr (std::is_same_v) { + // Tool results go as separate role=tool messages + } + }, part); + } + if (!parts.empty()) { + j["content"] = parts; + } + } + }, msg.content); + + // Handle tool role: add tool_call_id + if (msg.role == Role::Tool) { + // Try to extract tool_call_id from ToolResultContent + if (auto* parts = std::get_if>(&msg.content)) { + for (const auto& part : *parts) { + if (auto* tr = std::get_if(&part)) { + j["tool_call_id"] = tr->toolUseId; + j["content"] = tr->content; + break; + } + } + } + } + + // Handle assistant messages with tool_calls + if (msg.role == Role::Assistant) { + if (auto* parts = std::get_if>(&msg.content)) { + Json toolCalls = Json::array(); + std::string textContent; + for (const auto& part : *parts) { + if (auto* tu = std::get_if(&part)) { + Json tc; + tc["id"] = tu->id; + tc["type"] = "function"; + tc["function"] = Json{ + {"name", tu->name}, + {"arguments", tu->inputJson}, + }; + toolCalls.push_back(tc); + } else if (auto* t = std::get_if(&part)) { + textContent += t->text; + } + } + if (!toolCalls.empty()) { + j["tool_calls"] = toolCalls; + } + if (!textContent.empty()) { + j["content"] = textContent; + } else if (toolCalls.empty()) { + // Keep array content if no special handling needed + } else { + j["content"] = nullptr; + } + } + } + + return j; + } + + Json build_payload_(const std::vector& messages, const ChatParams& params, bool stream) const { + Json payload; + payload["model"] = config_.model; + payload["messages"] = 
serialize_messages_(messages); + + if (stream) { + payload["stream"] = true; + } + + if (params.temperature.has_value()) { + payload["temperature"] = *params.temperature; + } + if (params.topP.has_value()) { + payload["top_p"] = *params.topP; + } + if (params.maxTokens.has_value()) { + payload["max_completion_tokens"] = *params.maxTokens; + } + if (params.stop.has_value()) { + payload["stop"] = *params.stop; + } + + // Tools + if (params.tools.has_value() && !params.tools->empty()) { + Json tools = Json::array(); + for (const auto& tool : *params.tools) { + Json t; + t["type"] = "function"; + t["function"] = Json{ + {"name", tool.name}, + {"description", tool.description}, + }; + if (!tool.inputSchema.empty()) { + t["function"]["parameters"] = Json::parse(tool.inputSchema); + } + tools.push_back(t); + } + payload["tools"] = tools; + } + + // Tool choice + if (params.toolChoice.has_value()) { + std::visit([&](const auto& tc) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + switch (tc) { + case ToolChoice::Auto: payload["tool_choice"] = "auto"; break; + case ToolChoice::None: payload["tool_choice"] = "none"; break; + case ToolChoice::Required: payload["tool_choice"] = "required"; break; + } + } else if constexpr (std::is_same_v) { + payload["tool_choice"] = Json{ + {"type", "function"}, + {"function", Json{{"name", tc.name}}}, + }; + } + }, *params.toolChoice); + } + + // Response format + if (params.responseFormat.has_value()) { + const auto& rf = *params.responseFormat; + switch (rf.type) { + case ResponseFormatType::Text: + payload["response_format"] = Json{{"type", "text"}}; + break; + case ResponseFormatType::JsonObject: + payload["response_format"] = Json{{"type", "json_object"}}; + break; + case ResponseFormatType::JsonSchema: { + Json schemaObj; + schemaObj["type"] = "json_schema"; + Json jsonSchema; + jsonSchema["name"] = rf.schemaName; + if (!rf.schema.empty()) { + jsonSchema["schema"] = Json::parse(rf.schema); + } + schemaObj["json_schema"] 
= jsonSchema; + payload["response_format"] = schemaObj; + break; + } + } + } + + // Extra JSON merge + if (params.extraJson.has_value() && !params.extraJson->empty()) { + auto extra = Json::parse(*params.extraJson); + payload.merge_patch(extra); + } + + return payload; + } + + // Deserialization + ChatResponse parse_response_(const Json& json) const { + ChatResponse result; + + result.id = json.value("id", ""); + result.model = json.value("model", ""); + + if (json.contains("choices") && !json["choices"].empty()) { + const auto& choice = json["choices"][0]; + if (choice.contains("message")) { + const auto& msg = choice["message"]; + if (msg.contains("content") && !msg["content"].is_null()) { + result.content.push_back(TextContent { + .text = msg["content"].get(), + }); + } + if (msg.contains("tool_calls")) { + for (const auto& tc : msg["tool_calls"]) { + result.content.push_back(ToolUseContent { + .id = tc.value("id", ""), + .name = tc["function"].value("name", ""), + .inputJson = tc["function"].value("arguments", ""), + }); + } + } + } + if (choice.contains("finish_reason") && !choice["finish_reason"].is_null()) { + result.stopReason = parse_stop_reason_(choice["finish_reason"].get()); + } + } + + if (json.contains("usage")) { + const auto& usage = json["usage"]; + result.usage.inputTokens = usage.value("prompt_tokens", 0); + result.usage.outputTokens = usage.value("completion_tokens", 0); + result.usage.totalTokens = result.usage.inputTokens + result.usage.outputTokens; + } + + return result; + } + + static StopReason parse_stop_reason_(const std::string& reason) { + if (reason == "stop") return StopReason::EndOfTurn; + if (reason == "length") return StopReason::MaxTokens; + if (reason == "tool_calls") return StopReason::ToolUse; + if (reason == "content_filter") return StopReason::ContentFilter; + return StopReason::EndOfTurn; + } + + static std::string role_string_(Role role) { + switch (role) { + case Role::System: return "system"; + case Role::User: return 
"user"; + case Role::Assistant: return "assistant"; + case Role::Tool: return "tool"; + } + return "user"; + } + + // HTTP helpers + tinyhttps::HttpRequest build_request_(std::string_view endpoint, const Json& payload) const { + tinyhttps::HttpRequest req; + req.method = tinyhttps::Method::POST; + req.url = config_.baseUrl + std::string(endpoint); + req.body = payload.dump(); + + req.headers["Content-Type"] = "application/json"; + req.headers["Authorization"] = "Bearer " + config_.apiKey; + + if (!config_.organization.empty()) { + req.headers["OpenAI-Organization"] = config_.organization; + } + + for (const auto& [key, value] : config_.customHeaders) { + req.headers[key] = value; + } + + return req; + } +}; + +} // namespace mcpplibs::llmapi::openai diff --git a/src/providers/provider.cppm b/src/providers/provider.cppm new file mode 100644 index 0000000..deb000c --- /dev/null +++ b/src/providers/provider.cppm @@ -0,0 +1,35 @@ +export module mcpplibs.llmapi:provider; + +import std; + +export import :types; +export import :coro; + +export namespace mcpplibs::llmapi { + +template +concept StreamCallback = std::invocable && + std::same_as, void>; + +template +concept Provider = requires(P p, const std::vector& messages, const ChatParams& params) { + { p.name() } -> std::convertible_to; + { p.chat(messages, params) } -> std::same_as; + { p.chat_async(messages, params) } -> std::same_as>; +}; + +template +concept StreamableProvider = Provider

&& requires(P p, + const std::vector& messages, const ChatParams& params, + std::function cb) { + { p.chat_stream(messages, params, cb) } -> std::same_as; + { p.chat_stream_async(messages, params, cb) } -> std::same_as>; +}; + +template +concept EmbeddableProvider = Provider

&& requires(P p, + const std::vector& inputs, std::string_view model) { + { p.embed(inputs, model) } -> std::same_as; +}; + +} // namespace mcpplibs::llmapi diff --git a/src/tinyhttps/ca_bundle.cppm b/src/tinyhttps/ca_bundle.cppm new file mode 100644 index 0000000..844074b --- /dev/null +++ b/src/tinyhttps/ca_bundle.cppm @@ -0,0 +1,49 @@ +module; + +#include + +export module mcpplibs.tinyhttps:ca_bundle; + +import std; + +namespace mcpplibs::tinyhttps { + +namespace { + +auto read_file(const char* path) -> std::string { + std::FILE* f = std::fopen(path, "rb"); + if (f == nullptr) { + return {}; + } + std::string result; + char buf[4096]; + while (auto n = std::fread(buf, 1, sizeof(buf), f)) { + result.append(buf, n); + } + std::fclose(f); + return result; +} + +} // anonymous namespace + +export auto load_ca_certs() -> std::string { + // Try known system CA paths + static constexpr const char* ca_paths[] = { + "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu + "/etc/pki/tls/certs/ca-bundle.crt", // RHEL/CentOS + "/etc/ssl/cert.pem", // macOS / general + }; + + for (auto* path : ca_paths) { + auto pem = read_file(path); + if (!pem.empty()) { + return pem; + } + } + + // No system certs found — return empty. + // A production build could embed a Mozilla CA root bundle here. 
+ return {}; +} + +} // namespace mcpplibs::tinyhttps diff --git a/src/tinyhttps/http.cppm b/src/tinyhttps/http.cppm new file mode 100644 index 0000000..d6d39c7 --- /dev/null +++ b/src/tinyhttps/http.cppm @@ -0,0 +1,722 @@ +export module mcpplibs.tinyhttps:http; + +import :tls; +import :socket; +import :sse; +import :proxy; +import std; + +namespace mcpplibs::tinyhttps { + +export enum class Method { GET, POST, PUT, DELETE_, PATCH, HEAD }; + +export struct HttpRequest { + Method method { Method::GET }; + std::string url; + std::map headers; + std::string body; + + static HttpRequest post(std::string_view url, std::string_view body) { + return { Method::POST, std::string(url), + {{"Content-Type", "application/json"}}, + std::string(body) }; + } +}; + +export struct HttpResponse { + int statusCode { 0 }; + std::string statusText; + std::map headers; + std::string body; + + bool ok() const { return statusCode >= 200 && statusCode < 300; } +}; + +export struct HttpClientConfig { + std::optional proxy; + int connectTimeoutMs { 10000 }; + int readTimeoutMs { 60000 }; + bool verifySsl { true }; + bool keepAlive { true }; +}; + +export template +concept SseCallback = std::invocable && + std::same_as, bool>; + // return false to stop receiving + +export using SseCallbackFn = std::function; + +struct ParsedUrl { + std::string scheme; + std::string host; + int port { 443 }; + std::string path; +}; + +static ParsedUrl parse_url(std::string_view url) { + ParsedUrl result; + + // Extract scheme + auto schemeEnd = url.find("://"); + if (schemeEnd == std::string_view::npos) { + return result; + } + result.scheme = std::string(url.substr(0, schemeEnd)); + url = url.substr(schemeEnd + 3); + + // Extract host (and optional port) + auto pathStart = url.find('/'); + std::string_view authority; + if (pathStart == std::string_view::npos) { + authority = url; + result.path = "/"; + } else { + authority = url.substr(0, pathStart); + result.path = std::string(url.substr(pathStart)); + } + + 
// Check for port + auto colonPos = authority.find(':'); + if (colonPos != std::string_view::npos) { + result.host = std::string(authority.substr(0, colonPos)); + auto portStr = authority.substr(colonPos + 1); + result.port = 0; + for (char c : portStr) { + if (c >= '0' && c <= '9') { + result.port = result.port * 10 + (c - '0'); + } + } + } else { + result.host = std::string(authority); + result.port = (result.scheme == "https") ? 443 : 80; + } + + if (result.path.empty()) { + result.path = "/"; + } + + return result; +} + +// Check if user headers contain a key (case-insensitive) +static bool has_header(const std::map& headers, std::string_view key) { + for (const auto& [k, v] : headers) { + if (k.size() == key.size()) { + bool match = true; + for (std::size_t i = 0; i < k.size(); ++i) { + if (std::tolower(static_cast(k[i])) != + std::tolower(static_cast(key[i]))) { + match = false; + break; + } + } + if (match) return true; + } + } + return false; +} + +static std::string_view method_to_string(Method m) { + switch (m) { + case Method::GET: return "GET"; + case Method::POST: return "POST"; + case Method::PUT: return "PUT"; + case Method::DELETE_: return "DELETE"; + case Method::PATCH: return "PATCH"; + case Method::HEAD: return "HEAD"; + } + return "GET"; +} + +// Read exactly n bytes from socket, using wait_readable for timeout +static bool read_exact(TlsSocket& sock, char* buf, int n, int timeoutMs) { + int total = 0; + while (total < n) { + if (!sock.wait_readable(timeoutMs)) { + return false; + } + int ret = sock.read(buf + total, n - total); + if (ret < 0) return false; + if (ret == 0) { + // Try again after wait + if (!sock.wait_readable(timeoutMs)) return false; + ret = sock.read(buf + total, n - total); + if (ret <= 0) return false; + } + total += ret; + } + return true; +} + +// Read a line (ending with \r\n) from socket +static std::string read_line(TlsSocket& sock, int timeoutMs) { + std::string line; + char c; + while (true) { + if 
(!sock.wait_readable(timeoutMs)) { + break; + } + int ret = sock.read(&c, 1); + if (ret < 0) break; + if (ret == 0) { + // Try once more + if (!sock.wait_readable(timeoutMs)) break; + ret = sock.read(&c, 1); + if (ret <= 0) break; + } + line += c; + if (line.size() >= 2 && line[line.size() - 2] == '\r' && line[line.size() - 1] == '\n') { + line.resize(line.size() - 2); + break; + } + } + return line; +} + +// Write all data to socket +static bool write_all(TlsSocket& sock, const std::string& data) { + int total = 0; + int len = static_cast(data.size()); + while (total < len) { + int ret = sock.write(data.c_str() + total, len - total); + if (ret < 0) return false; + if (ret == 0) { + // Try again + ret = sock.write(data.c_str() + total, len - total); + if (ret <= 0) return false; + } + total += ret; + } + return true; +} + +// Parse hex string to int +static int parse_hex(std::string_view s) { + int result = 0; + for (char c : s) { + result <<= 4; + if (c >= '0' && c <= '9') result |= (c - '0'); + else if (c >= 'a' && c <= 'f') result |= (c - 'a' + 10); + else if (c >= 'A' && c <= 'F') result |= (c - 'A' + 10); + else break; + } + return result; +} + +// Case-insensitive string comparison +static bool iequals(std::string_view a, std::string_view b) { + if (a.size() != b.size()) return false; + for (std::size_t i = 0; i < a.size(); ++i) { + char ca = a[i]; + char cb = b[i]; + if (ca >= 'A' && ca <= 'Z') ca += 32; + if (cb >= 'A' && cb <= 'Z') cb += 32; + if (ca != cb) return false; + } + return true; +} + +export class HttpClient { +public: + explicit HttpClient(HttpClientConfig config = {}) + : config_(std::move(config)) {} + + ~HttpClient() = default; + + // Non-copyable (connection pool owns TLS sockets) + HttpClient(const HttpClient&) = delete; + HttpClient& operator=(const HttpClient&) = delete; + HttpClient(HttpClient&&) = default; + HttpClient& operator=(HttpClient&&) = default; + + HttpResponse send(const HttpRequest& request) { + HttpResponse response; + + 
auto parsed = parse_url(request.url); + if (parsed.scheme != "https") { + response.statusCode = 0; + response.statusText = "Only HTTPS is supported"; + return response; + } + + std::string poolKey = parsed.host + ":" + std::to_string(parsed.port); + + // Get or create connection + TlsSocket* sock = nullptr; + auto it = pool_.find(poolKey); + if (it != pool_.end() && it->second.is_valid()) { + sock = &it->second; + } else { + // Remove stale entry if exists + if (it != pool_.end()) { + pool_.erase(it); + } + // Create new connection + auto [insertIt, ok] = pool_.emplace(poolKey, TlsSocket{}); + sock = &insertIt->second; + bool connected = false; + if (config_.proxy.has_value()) { + auto proxyConf = parse_proxy_url(config_.proxy.value()); + auto tunnel = proxy_connect(proxyConf.host, proxyConf.port, + parsed.host, parsed.port, + config_.connectTimeoutMs); + if (tunnel.is_valid()) { + connected = sock->connect_over(std::move(tunnel), + parsed.host.c_str(), + config_.verifySsl); + } + } else { + connected = sock->connect(parsed.host.c_str(), parsed.port, + config_.connectTimeoutMs, config_.verifySsl); + } + if (!connected) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Connection failed"; + return response; + } + } + + // Build request + std::string reqStr; + reqStr += method_to_string(request.method); + reqStr += " "; + reqStr += parsed.path; + reqStr += " HTTP/1.1\r\n"; + // Add Host header (skip if user provided) + if (!has_header(request.headers, "Host")) { + reqStr += "Host: "; + reqStr += parsed.host; + if (parsed.port != 443) { + reqStr += ":"; + reqStr += std::to_string(parsed.port); + } + reqStr += "\r\n"; + } + + // Add Content-Length if body present (skip if user provided) + if (!request.body.empty() && !has_header(request.headers, "Content-Length")) { + reqStr += "Content-Length: "; + reqStr += std::to_string(request.body.size()); + reqStr += "\r\n"; + } + + // Add user headers + for (const auto& [key, value] : 
request.headers) { + reqStr += key; + reqStr += ": "; + reqStr += value; + reqStr += "\r\n"; + } + + // Add connection header (skip if user provided) + if (!has_header(request.headers, "Connection")) { + if (config_.keepAlive) { + reqStr += "Connection: keep-alive\r\n"; + } else { + reqStr += "Connection: close\r\n"; + } + } + + reqStr += "\r\n"; + + // Append body + if (!request.body.empty()) { + reqStr += request.body; + } + + // Send request + if (!write_all(*sock, reqStr)) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Write failed"; + return response; + } + + // Read status line + std::string statusLine = read_line(*sock, config_.readTimeoutMs); + if (statusLine.empty()) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "No response"; + return response; + } + + // Parse status line: HTTP/1.1 200 OK + { + auto spacePos = statusLine.find(' '); + if (spacePos == std::string::npos) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Invalid status line"; + return response; + } + auto rest = std::string_view(statusLine).substr(spacePos + 1); + auto spacePos2 = rest.find(' '); + if (spacePos2 != std::string_view::npos) { + auto codeStr = rest.substr(0, spacePos2); + response.statusCode = 0; + for (char c : codeStr) { + if (c >= '0' && c <= '9') { + response.statusCode = response.statusCode * 10 + (c - '0'); + } + } + response.statusText = std::string(rest.substr(spacePos2 + 1)); + } else { + // No status text, just code + response.statusCode = 0; + for (char c : rest) { + if (c >= '0' && c <= '9') { + response.statusCode = response.statusCode * 10 + (c - '0'); + } + } + } + } + + // Read headers + bool chunked = false; + int contentLength = -1; + bool connectionClose = false; + + while (true) { + std::string headerLine = read_line(*sock, config_.readTimeoutMs); + if (headerLine.empty()) { + break; // End of headers (empty line after stripping \r\n) + } + + auto colonPos = 
headerLine.find(':'); + if (colonPos != std::string::npos) { + std::string key = headerLine.substr(0, colonPos); + std::string_view value = std::string_view(headerLine).substr(colonPos + 1); + // Trim leading whitespace from value + while (!value.empty() && value[0] == ' ') { + value = value.substr(1); + } + std::string valStr(value); + response.headers[key] = valStr; + + if (iequals(key, "Transfer-Encoding") && iequals(valStr, "chunked")) { + chunked = true; + } + if (iequals(key, "Content-Length")) { + contentLength = 0; + for (char c : valStr) { + if (c >= '0' && c <= '9') { + contentLength = contentLength * 10 + (c - '0'); + } + } + } + if (iequals(key, "Connection") && iequals(valStr, "close")) { + connectionClose = true; + } + } + } + + // Read body + if (request.method == Method::HEAD) { + // HEAD responses have no body + } else if (chunked) { + // Chunked transfer encoding + while (true) { + std::string sizeLine = read_line(*sock, config_.readTimeoutMs); + // Strip any chunk extensions (after semicolon) + auto semiPos = sizeLine.find(';'); + if (semiPos != std::string::npos) { + sizeLine = sizeLine.substr(0, semiPos); + } + // Trim whitespace + while (!sizeLine.empty() && (sizeLine.back() == ' ' || sizeLine.back() == '\t')) { + sizeLine.pop_back(); + } + + int chunkSize = parse_hex(sizeLine); + if (chunkSize == 0) { + // Read trailing \r\n after last chunk + read_line(*sock, config_.readTimeoutMs); + break; + } + + // Read chunk data + std::string chunkData(chunkSize, '\0'); + if (!read_exact(*sock, chunkData.data(), chunkSize, config_.readTimeoutMs)) { + break; + } + response.body += chunkData; + + // Read trailing \r\n after chunk + read_line(*sock, config_.readTimeoutMs); + } + } else if (contentLength >= 0) { + // Read exactly contentLength bytes + if (contentLength > 0) { + response.body.resize(contentLength); + if (!read_exact(*sock, response.body.data(), contentLength, config_.readTimeoutMs)) { + pool_.erase(poolKey); + return response; + } + } + } 
else { + // Read until connection closed + connectionClose = true; + char buf[4096]; + while (true) { + if (!sock->wait_readable(config_.readTimeoutMs)) { + break; + } + int ret = sock->read(buf, sizeof(buf)); + if (ret <= 0) break; + response.body.append(buf, ret); + } + } + + // Handle connection pooling + if (connectionClose) { + sock->close(); + pool_.erase(poolKey); + } + + return response; + } + + // Streaming SSE request — reads response body incrementally, feeding + // chunks through SseParser to the caller's callback. The callback + // receives each SseEvent and returns true to continue or false to stop. + HttpResponse send_stream(const HttpRequest& request, SseCallbackFn callback) { + HttpResponse response; + + auto parsed = parse_url(request.url); + if (parsed.scheme != "https") { + response.statusCode = 0; + response.statusText = "Only HTTPS is supported"; + return response; + } + + std::string poolKey = parsed.host + ":" + std::to_string(parsed.port); + + // Get or create connection + TlsSocket* sock = nullptr; + auto it = pool_.find(poolKey); + if (it != pool_.end() && it->second.is_valid()) { + sock = &it->second; + } else { + if (it != pool_.end()) { + pool_.erase(it); + } + auto [insertIt, ok] = pool_.emplace(poolKey, TlsSocket{}); + sock = &insertIt->second; + bool connected = false; + if (config_.proxy.has_value()) { + auto proxyConf = parse_proxy_url(config_.proxy.value()); + auto tunnel = proxy_connect(proxyConf.host, proxyConf.port, + parsed.host, parsed.port, + config_.connectTimeoutMs); + if (tunnel.is_valid()) { + connected = sock->connect_over(std::move(tunnel), + parsed.host.c_str(), + config_.verifySsl); + } + } else { + connected = sock->connect(parsed.host.c_str(), parsed.port, + config_.connectTimeoutMs, config_.verifySsl); + } + if (!connected) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Connection failed"; + return response; + } + } + + // Build request — same as send() + std::string reqStr; + 
reqStr += method_to_string(request.method); + reqStr += " "; + reqStr += parsed.path; + reqStr += " HTTP/1.1\r\n"; + if (!has_header(request.headers, "Host")) { + reqStr += "Host: "; + reqStr += parsed.host; + if (parsed.port != 443) { + reqStr += ":"; + reqStr += std::to_string(parsed.port); + } + reqStr += "\r\n"; + } + if (!request.body.empty() && !has_header(request.headers, "Content-Length")) { + reqStr += "Content-Length: "; + reqStr += std::to_string(request.body.size()); + reqStr += "\r\n"; + } + for (const auto& [key, value] : request.headers) { + reqStr += key; + reqStr += ": "; + reqStr += value; + reqStr += "\r\n"; + } + if (!has_header(request.headers, "Connection")) { + if (config_.keepAlive) { + reqStr += "Connection: keep-alive\r\n"; + } else { + reqStr += "Connection: close\r\n"; + } + } + reqStr += "\r\n"; + if (!request.body.empty()) { + reqStr += request.body; + } + + if (!write_all(*sock, reqStr)) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Write failed"; + return response; + } + + // Read status line + std::string statusLine = read_line(*sock, config_.readTimeoutMs); + if (statusLine.empty()) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "No response"; + return response; + } + + // Parse status line + { + auto spacePos = statusLine.find(' '); + if (spacePos == std::string::npos) { + pool_.erase(poolKey); + response.statusCode = 0; + response.statusText = "Invalid status line"; + return response; + } + auto rest = std::string_view(statusLine).substr(spacePos + 1); + auto spacePos2 = rest.find(' '); + if (spacePos2 != std::string_view::npos) { + auto codeStr = rest.substr(0, spacePos2); + response.statusCode = 0; + for (char c : codeStr) { + if (c >= '0' && c <= '9') { + response.statusCode = response.statusCode * 10 + (c - '0'); + } + } + response.statusText = std::string(rest.substr(spacePos2 + 1)); + } else { + response.statusCode = 0; + for (char c : rest) { + if (c >= '0' && c 
<= '9') { + response.statusCode = response.statusCode * 10 + (c - '0'); + } + } + } + } + + // Read headers + bool chunked = false; + bool connectionClose = false; + + while (true) { + std::string headerLine = read_line(*sock, config_.readTimeoutMs); + if (headerLine.empty()) { + break; + } + auto colonPos = headerLine.find(':'); + if (colonPos != std::string::npos) { + std::string key = headerLine.substr(0, colonPos); + std::string_view value = std::string_view(headerLine).substr(colonPos + 1); + while (!value.empty() && value[0] == ' ') { + value = value.substr(1); + } + std::string valStr(value); + response.headers[key] = valStr; + + if (iequals(key, "Transfer-Encoding") && iequals(valStr, "chunked")) { + chunked = true; + } + if (iequals(key, "Connection") && iequals(valStr, "close")) { + connectionClose = true; + } + } + } + + // Stream body incrementally, feeding chunks to SseParser + SseParser parser; + bool stopped = false; + + auto dispatch = [&](std::string_view data) -> bool { + auto events = parser.feed(data); + for (const auto& ev : events) { + if (!callback(ev)) { + stopped = true; + return false; + } + } + return true; + }; + + if (chunked) { + // Incrementally decode chunked transfer-encoding + while (!stopped) { + std::string sizeLine = read_line(*sock, config_.readTimeoutMs); + auto semiPos = sizeLine.find(';'); + if (semiPos != std::string::npos) { + sizeLine = sizeLine.substr(0, semiPos); + } + while (!sizeLine.empty() && (sizeLine.back() == ' ' || sizeLine.back() == '\t')) { + sizeLine.pop_back(); + } + + int chunkSize = parse_hex(sizeLine); + if (chunkSize == 0) { + // Terminal chunk — read trailing \r\n + read_line(*sock, config_.readTimeoutMs); + break; + } + + // Read chunk data + std::string chunkData(chunkSize, '\0'); + if (!read_exact(*sock, chunkData.data(), chunkSize, config_.readTimeoutMs)) { + break; + } + // Read trailing \r\n after chunk + read_line(*sock, config_.readTimeoutMs); + + if (!dispatch(chunkData)) { + break; + } + } + } 
else { + // Not chunked — read until connection closes + connectionClose = true; + char buf[4096]; + while (!stopped) { + if (!sock->wait_readable(config_.readTimeoutMs)) { + break; + } + int ret = sock->read(buf, sizeof(buf)); + if (ret <= 0) break; + if (!dispatch(std::string_view(buf, static_cast(ret)))) { + break; + } + } + } + + // Clean up connection + if (connectionClose || stopped) { + sock->close(); + pool_.erase(poolKey); + } + + return response; + } + + HttpClientConfig& config() { return config_; } + const HttpClientConfig& config() const { return config_; } + +private: + HttpClientConfig config_; + std::map pool_; +}; + +} // namespace mcpplibs::tinyhttps diff --git a/src/tinyhttps/proxy.cppm b/src/tinyhttps/proxy.cppm new file mode 100644 index 0000000..e102de6 --- /dev/null +++ b/src/tinyhttps/proxy.cppm @@ -0,0 +1,155 @@ +export module mcpplibs.tinyhttps:proxy; + +import :socket; +import std; + +namespace mcpplibs::tinyhttps { + +export struct ProxyConfig { + std::string host; + int port { 8080 }; +}; + +// Parse "http://host:port" proxy URL +export ProxyConfig parse_proxy_url(std::string_view url) { + ProxyConfig config; + + // Strip scheme if present + auto schemeEnd = url.find("://"); + if (schemeEnd != std::string_view::npos) { + url = url.substr(schemeEnd + 3); + } + + // Strip trailing path if present + auto pathStart = url.find('/'); + if (pathStart != std::string_view::npos) { + url = url.substr(0, pathStart); + } + + // Check for port + auto colonPos = url.find(':'); + if (colonPos != std::string_view::npos) { + config.host = std::string(url.substr(0, colonPos)); + auto portStr = url.substr(colonPos + 1); + config.port = 0; + for (char c : portStr) { + if (c >= '0' && c <= '9') { + config.port = config.port * 10 + (c - '0'); + } + } + } else { + config.host = std::string(url); + config.port = 8080; + } + + return config; +} + +// Read a line (ending with \r\n) from a plain Socket +static std::string read_line_plain(Socket& sock, int 
timeoutMs) { + std::string line; + char c; + while (true) { + if (!sock.wait_readable(timeoutMs)) { + break; + } + int ret = sock.read(&c, 1); + if (ret < 0) break; + if (ret == 0) { + if (!sock.wait_readable(timeoutMs)) break; + ret = sock.read(&c, 1); + if (ret <= 0) break; + } + line += c; + if (line.size() >= 2 && line[line.size() - 2] == '\r' && line[line.size() - 1] == '\n') { + line.resize(line.size() - 2); + break; + } + } + return line; +} + +// Write all data to a plain Socket +static bool write_all_plain(Socket& sock, const std::string& data) { + int total = 0; + int len = static_cast(data.size()); + while (total < len) { + int ret = sock.write(data.c_str() + total, len - total); + if (ret < 0) return false; + if (ret == 0) { + ret = sock.write(data.c_str() + total, len - total); + if (ret <= 0) return false; + } + total += ret; + } + return true; +} + +// Connect through HTTP CONNECT proxy, returning a Socket connected to target through tunnel. +// On failure, returns an invalid (closed) Socket. +export Socket proxy_connect(std::string_view proxyHost, int proxyPort, + std::string_view targetHost, int targetPort, + int timeoutMs) { + Socket sock; + + // Step 1: Connect to proxy + std::string proxyHostStr(proxyHost); + if (!sock.connect(proxyHostStr.c_str(), proxyPort, timeoutMs)) { + return sock; + } + + // Step 2: Send CONNECT request + std::string request = "CONNECT "; + request += targetHost; + request += ":"; + request += std::to_string(targetPort); + request += " HTTP/1.1\r\nHost: "; + request += targetHost; + request += ":"; + request += std::to_string(targetPort); + request += "\r\n\r\n"; + + if (!write_all_plain(sock, request)) { + sock.close(); + return sock; + } + + // Step 3: Read response status line + std::string statusLine = read_line_plain(sock, timeoutMs); + if (statusLine.empty()) { + sock.close(); + return sock; + } + + // Parse status code from "HTTP/1.x 200 ..." 
+ int statusCode = 0; + auto spacePos = statusLine.find(' '); + if (spacePos != std::string::npos) { + auto rest = std::string_view(statusLine).substr(spacePos + 1); + for (char c : rest) { + if (c >= '0' && c <= '9') { + statusCode = statusCode * 10 + (c - '0'); + } else { + break; + } + } + } + + if (statusCode != 200) { + sock.close(); + return sock; + } + + // Step 4: Read remaining response headers until empty line + while (true) { + std::string headerLine = read_line_plain(sock, timeoutMs); + if (headerLine.empty()) { + break; + } + } + + // Socket is now tunneled to the target + return sock; +} + +} // namespace mcpplibs::tinyhttps diff --git a/src/tinyhttps/socket.cppm b/src/tinyhttps/socket.cppm new file mode 100644 index 0000000..6f77114 --- /dev/null +++ b/src/tinyhttps/socket.cppm @@ -0,0 +1,220 @@ +module; + +#ifdef _WIN32 +#include +#include +#pragma comment(lib, "ws2_32.lib") +#else +#include +#include +#include +#include +#include +#include +#include +#endif + +export module mcpplibs.tinyhttps:socket; + +import std; + +namespace mcpplibs::tinyhttps { + +#ifdef _WIN32 +using SocketHandle = SOCKET; +constexpr SocketHandle INVALID_SOCKET_FD = INVALID_SOCKET; +#else +using SocketHandle = int; +constexpr SocketHandle INVALID_SOCKET_FD = -1; +#endif + +export class Socket { +public: + Socket() = default; + + ~Socket() { + close(); + } + + // Non-copyable + Socket(const Socket&) = delete; + Socket& operator=(const Socket&) = delete; + + // Move constructor + Socket(Socket&& other) noexcept + : fd_(other.fd_) { + other.fd_ = INVALID_SOCKET_FD; + } + + // Move assignment + Socket& operator=(Socket&& other) noexcept { + if (this != &other) { + close(); + fd_ = other.fd_; + other.fd_ = INVALID_SOCKET_FD; + } + return *this; + } + + [[nodiscard]] bool is_valid() const { + return fd_ != INVALID_SOCKET_FD; + } + + bool connect(const char* host, int port, int timeoutMs) { + // Close existing connection if any + if (is_valid()) { + close(); + } + + // Resolve 
// NOTE(review): continuation of Socket::connect — the paste split the
// "Resolve address" comment and stripped <...> casts; restored below.
        // address via getaddrinfo, then try each returned candidate
        struct addrinfo hints{};
        hints.ai_family = AF_UNSPEC;
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_protocol = IPPROTO_TCP;

        auto portStr = std::to_string(port);
        struct addrinfo* result = nullptr;
        int rc = ::getaddrinfo(host, portStr.c_str(), &hints, &result);
        if (rc != 0 || result == nullptr) {
            return false;
        }

        // Try each address
        for (auto* rp = result; rp != nullptr; rp = rp->ai_next) {
            SocketHandle fd = ::socket(rp->ai_family, rp->ai_socktype, rp->ai_protocol);
            if (fd == INVALID_SOCKET_FD) {
                continue;
            }

            // Set non-blocking so connect() can be timed out via poll
            if (!set_non_blocking(fd, true)) {
                close_handle(fd);
                continue;
            }

            rc = ::connect(fd, rp->ai_addr, static_cast<int>(rp->ai_addrlen));

            bool connected = false;
            if (rc == 0) {
                connected = true;
            } else {
#ifdef _WIN32
                if (WSAGetLastError() == WSAEWOULDBLOCK) {
#else
                if (errno == EINPROGRESS) {
#endif
                    // Wait for connection with timeout
                    if (poll_fd(fd, timeoutMs, false)) {
                        int err = 0;
                        socklen_t len = sizeof(err);
                        // SO_ERROR == 0 means the async connect succeeded
                        if (::getsockopt(fd, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&err), &len) == 0 && err == 0) {
                            connected = true;
                        }
                    }
                }
            }

            if (connected) {
                // Restore blocking mode
                set_non_blocking(fd, false);
                fd_ = fd;
                ::freeaddrinfo(result);
                return true;
            }

            close_handle(fd);
        }

        ::freeaddrinfo(result);
        return false;
    }

    int read(char* buf, int len) {
        if (!is_valid()) return -1;
        return static_cast<int>(::recv(fd_, buf, len, 0));
    }

    int write(const char* buf, int len) {
        if (!is_valid()) return -1;
        return static_cast<int>(::send(fd_, buf, len, 0));
    }

    bool wait_readable(int timeoutMs) {
        if (!is_valid()) return false;
        return poll_fd(fd_, timeoutMs, true);
    }

    bool wait_writable(int timeoutMs) {
        if (!is_valid()) return false;
        return poll_fd(fd_, timeoutMs, false);
    }

    [[nodiscard]] SocketHandle native_handle() const {
        return fd_;
    }

    void close() {
        if (is_valid()) {
            close_handle(fd_);
            fd_ = INVALID_SOCKET_FD;
        }
    }

    static void platform_init() {
#ifdef _WIN32
        WSADATA wsaData;
        WSAStartup(MAKEWORD(2, 2), &wsaData);
#endif
    }

    static void platform_cleanup() {
#ifdef _WIN32
        WSACleanup();
#endif
    }

private:
    SocketHandle fd_ = INVALID_SOCKET_FD;

    static bool set_non_blocking(SocketHandle fd, bool nonBlocking) {
#ifdef _WIN32
        u_long mode = nonBlocking ? 1 : 0;
        return ioctlsocket(fd, FIONBIO, &mode) == 0;
#else
        int flags = ::fcntl(fd, F_GETFL, 0);
        if (flags == -1) return false;
        if (nonBlocking) {
            flags |= O_NONBLOCK;
        } else {
            flags &= ~O_NONBLOCK;
        }
        return ::fcntl(fd, F_SETFL, flags) == 0;
#endif
    }

    static bool poll_fd(SocketHandle fd, int timeoutMs, bool forRead) {
#ifdef _WIN32
        WSAPOLLFD pfd{};
        pfd.fd = fd;
        pfd.events = forRead ? POLLIN : POLLOUT;
        int ret = WSAPoll(&pfd, 1, timeoutMs);
        return ret > 0 && (pfd.revents & (pfd.events | POLLERR | POLLHUP));
#else
        struct pollfd pfd{};
        pfd.fd = fd;
        pfd.events = forRead ? POLLIN : POLLOUT;
        int ret = ::poll(&pfd, 1, timeoutMs);
        return ret > 0 && (pfd.revents & (pfd.events | POLLERR | POLLHUP));
#endif
    }

    static void close_handle(SocketHandle fd) {
#ifdef _WIN32
        ::closesocket(fd);
#else
        ::close(fd);
#endif
    }
};

} // namespace mcpplibs::tinyhttps

diff --git a/src/tinyhttps/sse.cppm b/src/tinyhttps/sse.cppm
new file mode 100644
index 0000000..204b311
--- /dev/null
+++ b/src/tinyhttps/sse.cppm
@@ -0,0 +1,163 @@
export module mcpplibs.tinyhttps:sse;

import std;

namespace mcpplibs::tinyhttps {

// One parsed Server-Sent Event.
export struct SseEvent {
    std::string event; // event type (default "message")
    std::string data;  // event data
    std::string id;    // event id (optional)
};

// Incremental text/event-stream parser: feed() arbitrary byte chunks,
// get back completed events; partial input is buffered across calls.
export class SseParser {
private:
    std::string buffer_;       // unconsumed raw input
    std::string currentEvent_; // fields of the event being assembled
    std::string currentData_;
    std::string currentId_;

public:
    std::vector<SseEvent> feed(std::string_view chunk) {
        buffer_.append(chunk);
        std::vector<SseEvent> events;

        // Scan for event boundaries: \n\n or
\r\n\r\n + while (true) { + // Find double newline (event boundary) + auto pos = find_event_boundary_(); + if (pos == std::string::npos) { + break; + } + + // Extract the event block + std::string_view block(buffer_.data(), pos); + // Determine how many chars to skip past the boundary + std::size_t skip = 0; + if (pos + 1 < buffer_.size() && buffer_[pos] == '\n' && buffer_[pos + 1] == '\n') { + skip = pos + 2; + } else if (pos + 3 < buffer_.size() && + buffer_[pos] == '\r' && buffer_[pos + 1] == '\n' && + buffer_[pos + 2] == '\r' && buffer_[pos + 3] == '\n') { + skip = pos + 4; + } else { + skip = pos + 2; // fallback for \n\n + } + + // Process each line in the block + process_block_(block); + dispatch_event_(events); + + buffer_.erase(0, skip); + } + + return events; + } + + void reset() { + buffer_.clear(); + currentEvent_.clear(); + currentData_.clear(); + currentId_.clear(); + } + +private: + std::size_t find_event_boundary_() const { + for (std::size_t i = 0; i < buffer_.size(); ++i) { + if (buffer_[i] == '\n' && i + 1 < buffer_.size() && buffer_[i + 1] == '\n') { + return i; + } + if (buffer_[i] == '\r' && i + 3 < buffer_.size() && + buffer_[i + 1] == '\n' && buffer_[i + 2] == '\r' && buffer_[i + 3] == '\n') { + return i; + } + } + return std::string::npos; + } + + void process_block_(std::string_view block) { + while (!block.empty()) { + // Find end of line + std::size_t lineEnd = 0; + std::size_t skip = 0; + bool found = false; + for (std::size_t i = 0; i < block.size(); ++i) { + if (block[i] == '\r' && i + 1 < block.size() && block[i + 1] == '\n') { + lineEnd = i; + skip = i + 2; + found = true; + break; + } + if (block[i] == '\n') { + lineEnd = i; + skip = i + 1; + found = true; + break; + } + } + if (!found) { + lineEnd = block.size(); + skip = block.size(); + } + + process_line_(block.substr(0, lineEnd)); + block = block.substr(skip); + } + } + + void process_line_(std::string_view line) { + if (line.empty()) { + return; + } + + // Comment line + if 
(line[0] == ':') { + return; + } + + // Find colon + auto colonPos = line.find(':'); + if (colonPos == std::string_view::npos) { + // Field with no value — treat field name as the whole line, value as empty + return; + } + + auto field = line.substr(0, colonPos); + auto value = line.substr(colonPos + 1); + + // Strip single leading space from value if present + if (!value.empty() && value[0] == ' ') { + value = value.substr(1); + } + + if (field == "data") { + if (!currentData_.empty()) { + currentData_ += '\n'; + } + currentData_.append(value); + } else if (field == "event") { + currentEvent_ = std::string(value); + } else if (field == "id") { + currentId_ = std::string(value); + } + // Other fields ignored + } + + void dispatch_event_(std::vector& events) { + if (currentData_.empty() && currentEvent_.empty() && currentId_.empty()) { + return; + } + + SseEvent ev; + ev.event = currentEvent_.empty() ? "message" : std::move(currentEvent_); + ev.data = std::move(currentData_); + ev.id = std::move(currentId_); + events.push_back(std::move(ev)); + + currentEvent_.clear(); + currentData_.clear(); + currentId_.clear(); + } +}; + +} // namespace mcpplibs::tinyhttps diff --git a/src/tinyhttps/tinyhttps.cppm b/src/tinyhttps/tinyhttps.cppm new file mode 100644 index 0000000..82084a1 --- /dev/null +++ b/src/tinyhttps/tinyhttps.cppm @@ -0,0 +1,7 @@ +export module mcpplibs.tinyhttps; +export import :socket; +export import :ca_bundle; +export import :tls; +export import :proxy; +export import :http; +export import :sse; diff --git a/src/tinyhttps/tls.cppm b/src/tinyhttps/tls.cppm new file mode 100644 index 0000000..11e0417 --- /dev/null +++ b/src/tinyhttps/tls.cppm @@ -0,0 +1,248 @@ +module; + +#include +#include +#include +#include +#include +#include + +export module mcpplibs.tinyhttps:tls; + +import :socket; +import :ca_bundle; +import std; + +namespace mcpplibs::tinyhttps { + +struct TlsState { + mbedtls_ssl_context ssl; + mbedtls_ssl_config conf; + mbedtls_ctr_drbg_context 
ctr_drbg; + mbedtls_entropy_context entropy; + mbedtls_x509_crt ca_cert; + + TlsState() { + mbedtls_ssl_init(&ssl); + mbedtls_ssl_config_init(&conf); + mbedtls_ctr_drbg_init(&ctr_drbg); + mbedtls_entropy_init(&entropy); + mbedtls_x509_crt_init(&ca_cert); + } + + ~TlsState() { + mbedtls_ssl_free(&ssl); + mbedtls_ssl_config_free(&conf); + mbedtls_ctr_drbg_free(&ctr_drbg); + mbedtls_entropy_free(&entropy); + mbedtls_x509_crt_free(&ca_cert); + } + + TlsState(const TlsState&) = delete; + TlsState& operator=(const TlsState&) = delete; +}; + +// BIO callbacks for mbedtls — forward to Socket read/write +static int bio_send(void* ctx, const unsigned char* buf, size_t len) { + auto* sock = static_cast(ctx); + int ret = sock->write(reinterpret_cast(buf), static_cast(len)); + if (ret <= 0) { + return MBEDTLS_ERR_NET_SEND_FAILED; + } + return ret; +} + +static int bio_recv(void* ctx, unsigned char* buf, size_t len) { + auto* sock = static_cast(ctx); + int ret = sock->read(reinterpret_cast(buf), static_cast(len)); + if (ret < 0) { + return MBEDTLS_ERR_NET_RECV_FAILED; + } + if (ret == 0) { + return MBEDTLS_ERR_NET_CONN_RESET; + } + return ret; +} + +export class TlsSocket { +public: + TlsSocket() = default; + ~TlsSocket() { close(); } + + // Non-copyable + TlsSocket(const TlsSocket&) = delete; + TlsSocket& operator=(const TlsSocket&) = delete; + + // Move constructor + TlsSocket(TlsSocket&& other) noexcept + : socket_(std::move(other.socket_)) + , state_(std::move(other.state_)) { + // Re-bind BIO to point to our socket_ (not the moved-from one) + if (state_) { + mbedtls_ssl_set_bio(&state_->ssl, &socket_, bio_send, bio_recv, nullptr); + } + } + + // Move assignment + TlsSocket& operator=(TlsSocket&& other) noexcept { + if (this != &other) { + close(); + socket_ = std::move(other.socket_); + state_ = std::move(other.state_); + // Re-bind BIO to point to our socket_ + if (state_) { + mbedtls_ssl_set_bio(&state_->ssl, &socket_, bio_send, bio_recv, nullptr); + } + } + return *this; 
+ } + + [[nodiscard]] bool is_valid() const { + return state_ != nullptr && socket_.is_valid(); + } + + // Connect over an already-established Socket (e.g. a proxy tunnel). + // Takes ownership of the socket and performs TLS handshake on top of it. + bool connect_over(Socket&& socket, const char* host, bool verifySsl) { + socket_ = std::move(socket); + return setup_tls(host, verifySsl); + } + + bool connect(const char* host, int port, int timeoutMs, bool verifySsl) { + // Step 1: TCP connect via Socket + if (!socket_.connect(host, port, timeoutMs)) { + return false; + } + + return setup_tls(host, verifySsl); + } + + int read(char* buf, int len) { + if (!is_valid()) return -1; + int ret = mbedtls_ssl_read(&state_->ssl, + reinterpret_cast(buf), static_cast(len)); + if (ret == MBEDTLS_ERR_SSL_PEER_CLOSE_NOTIFY || ret == 0) { + return 0; // Connection closed + } + if (ret < 0) { + if (ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE) { + return 0; // Would block, treat as no data yet + } + return -1; + } + return ret; + } + + int write(const char* buf, int len) { + if (!is_valid()) return -1; + int ret = mbedtls_ssl_write(&state_->ssl, + reinterpret_cast(buf), static_cast(len)); + if (ret < 0) { + if (ret == MBEDTLS_ERR_SSL_WANT_READ || ret == MBEDTLS_ERR_SSL_WANT_WRITE) { + return 0; + } + return -1; + } + return ret; + } + + void close() { + if (state_) { + mbedtls_ssl_close_notify(&state_->ssl); + state_.reset(); + } + socket_.close(); + } + + bool wait_readable(int timeoutMs) { + // Check if mbedtls has already buffered decrypted data + if (state_ && mbedtls_ssl_get_bytes_avail(&state_->ssl) > 0) { + return true; + } + return socket_.wait_readable(timeoutMs); + } + +private: + Socket socket_; + std::unique_ptr state_; + + bool setup_tls(const char* host, bool verifySsl) { + state_ = std::make_unique(); + + int ret = mbedtls_ctr_drbg_seed( + &state_->ctr_drbg, mbedtls_entropy_func, &state_->entropy, + nullptr, 0); + if (ret != 0) { + 
state_.reset(); + socket_.close(); + return false; + } + + ret = mbedtls_ssl_config_defaults( + &state_->conf, + MBEDTLS_SSL_IS_CLIENT, + MBEDTLS_SSL_TRANSPORT_STREAM, + MBEDTLS_SSL_PRESET_DEFAULT); + if (ret != 0) { + state_.reset(); + socket_.close(); + return false; + } + + mbedtls_ssl_conf_rng(&state_->conf, mbedtls_ctr_drbg_random, &state_->ctr_drbg); + + // Load CA certs + auto ca_pem = load_ca_certs(); + if (!ca_pem.empty()) { + ret = mbedtls_x509_crt_parse( + &state_->ca_cert, + reinterpret_cast(ca_pem.c_str()), + ca_pem.size() + 1); // +1 for null terminator required by mbedtls + // ret > 0 means some certs failed to parse but others succeeded — acceptable + if (ret < 0) { + state_.reset(); + socket_.close(); + return false; + } + mbedtls_ssl_conf_ca_chain(&state_->conf, &state_->ca_cert, nullptr); + } + + // Certificate verification + if (verifySsl) { + mbedtls_ssl_conf_authmode(&state_->conf, MBEDTLS_SSL_VERIFY_REQUIRED); + } else { + mbedtls_ssl_conf_authmode(&state_->conf, MBEDTLS_SSL_VERIFY_NONE); + } + + ret = mbedtls_ssl_setup(&state_->ssl, &state_->conf); + if (ret != 0) { + state_.reset(); + socket_.close(); + return false; + } + + // Set hostname for SNI + ret = mbedtls_ssl_set_hostname(&state_->ssl, host); + if (ret != 0) { + state_.reset(); + socket_.close(); + return false; + } + + // Set BIO callbacks using our Socket + mbedtls_ssl_set_bio(&state_->ssl, &socket_, bio_send, bio_recv, nullptr); + + // Perform TLS handshake + while ((ret = mbedtls_ssl_handshake(&state_->ssl)) != 0) { + if (ret != MBEDTLS_ERR_SSL_WANT_READ && ret != MBEDTLS_ERR_SSL_WANT_WRITE) { + state_.reset(); + socket_.close(); + return false; + } + } + + return true; + } +}; + +} // namespace mcpplibs::tinyhttps diff --git a/src/types.cppm b/src/types.cppm new file mode 100644 index 0000000..9552436 --- /dev/null +++ b/src/types.cppm @@ -0,0 +1,294 @@ +export module mcpplibs.llmapi:types; + +import std; +import mcpplibs.llmapi.nlohmann.json; + +namespace mcpplibs::llmapi { + 
+// Roles +export enum class Role { System, User, Assistant, Tool }; + +// Content blocks (multimodal) +export struct TextContent { + std::string text; +}; + +export struct ImageContent { + std::string data; // base64 or URL + std::string mediaType; // "image/png", "image/jpeg" + bool isUrl{false}; +}; + +export struct AudioContent { + std::string data; // base64 + std::string format; // "wav", "mp3" +}; + +export struct ToolUseContent { + std::string id; + std::string name; + std::string inputJson; +}; + +export struct ToolResultContent { + std::string toolUseId; + std::string content; + bool isError{false}; +}; + +export using ContentPart = + std::variant; +export using Content = std::variant>; + +// Message +export struct Message { + Role role; + Content content; + std::string name; + + static Message system(std::string_view text) { + return Message{.role = Role::System, .content = std::string{text}}; + } + + static Message user(std::string_view text) { + return Message{.role = Role::User, .content = std::string{text}}; + } + + static Message assistant(std::string_view text) { + return Message{.role = Role::Assistant, .content = std::string{text}}; + } +}; + +// Tool definition +export struct ToolDef { + std::string name; + std::string description; + std::string inputSchema; // JSON Schema string +}; + +// Tool call (from response) +export struct ToolCall { + std::string id; + std::string name; + std::string arguments; +}; + +// Tool choice +export enum class ToolChoice { Auto, None, Required }; + +export struct ToolChoiceForced { + std::string name; +}; + +export using ToolChoicePolicy = std::variant; + +// Response format +export enum class ResponseFormatType { Text, JsonObject, JsonSchema }; + +export struct ResponseFormat { + ResponseFormatType type{ResponseFormatType::Text}; + std::string schemaName; + std::string schema; +}; + +// Chat params +export struct ChatParams { + std::optional temperature; + std::optional topP; + std::optional maxTokens; + 
std::optional> stop; + std::optional> tools; + std::optional toolChoice; + std::optional responseFormat; + std::optional extraJson; +}; + +// Stop reason +export enum class StopReason { EndOfTurn, MaxTokens, ToolUse, ContentFilter, StopSequence }; + +// Usage +export struct Usage { + int inputTokens{0}; + int outputTokens{0}; + int totalTokens{0}; +}; + +// Chat response +export struct ChatResponse { + std::string id; + std::string model; + std::vector content; + StopReason stopReason; + Usage usage; + + std::string text() const { + std::string result; + for (const auto& part : content) { + if (auto* t = std::get_if(&part)) { + result += t->text; + } + } + return result; + } + + std::vector tool_calls() const { + std::vector calls; + for (const auto& part : content) { + if (auto* t = std::get_if(&part)) { + calls.push_back(ToolCall{ + .id = t->id, + .name = t->name, + .arguments = t->inputJson, + }); + } + } + return calls; + } +}; + +// Embedding response +export struct EmbeddingResponse { + std::vector> embeddings; + std::string model; + Usage usage; +}; + +// Conversation container +export struct Conversation { + std::vector messages; + + void push(Message msg) { messages.push_back(std::move(msg)); } + + void clear() { messages.clear(); } + + int size() const { return static_cast(messages.size()); } + + void save(std::string_view filePath) const; + static Conversation load(std::string_view filePath); +}; + +// -- Serialization helpers (internal) -- + +using Json = nlohmann::json; + +inline std::string roleToString(Role r) { + switch (r) { + case Role::System: return "system"; + case Role::User: return "user"; + case Role::Assistant: return "assistant"; + case Role::Tool: return "tool"; + } + return "user"; +} + +inline Role roleFromString(const std::string& s) { + if (s == "system") return Role::System; + if (s == "assistant") return Role::Assistant; + if (s == "tool") return Role::Tool; + return Role::User; +} + +inline Json contentPartToJson(const ContentPart& 
part) { + return std::visit([](const auto& p) -> Json { + using T = std::decay_t; + if constexpr (std::is_same_v) { + return Json{{"type", "text"}, {"text", p.text}}; + } else if constexpr (std::is_same_v) { + return Json{{"type", "image"}, {"data", p.data}, {"mediaType", p.mediaType}, {"isUrl", p.isUrl}}; + } else if constexpr (std::is_same_v) { + return Json{{"type", "audio"}, {"data", p.data}, {"format", p.format}}; + } else if constexpr (std::is_same_v) { + return Json{{"type", "tool_use"}, {"id", p.id}, {"name", p.name}, {"inputJson", p.inputJson}}; + } else if constexpr (std::is_same_v) { + return Json{{"type", "tool_result"}, {"toolUseId", p.toolUseId}, {"content", p.content}, {"isError", p.isError}}; + } + }, part); +} + +inline ContentPart contentPartFromJson(const Json& j) { + auto type = j.at("type").get(); + if (type == "text") { + return TextContent{.text = j.at("text").get()}; + } else if (type == "image") { + return ImageContent{ + .data = j.at("data").get(), + .mediaType = j.at("mediaType").get(), + .isUrl = j.value("isUrl", false), + }; + } else if (type == "audio") { + return AudioContent{ + .data = j.at("data").get(), + .format = j.at("format").get(), + }; + } else if (type == "tool_use") { + return ToolUseContent{ + .id = j.at("id").get(), + .name = j.at("name").get(), + .inputJson = j.at("inputJson").get(), + }; + } else if (type == "tool_result") { + return ToolResultContent{ + .toolUseId = j.at("toolUseId").get(), + .content = j.at("content").get(), + .isError = j.value("isError", false), + }; + } + return TextContent{.text = ""}; +} + +inline Json messageToJson(const Message& msg) { + Json j; + j["role"] = roleToString(msg.role); + std::visit([&j](const auto& c) { + using T = std::decay_t; + if constexpr (std::is_same_v) { + j["content"] = c; + } else { + Json arr = Json::array(); + for (const auto& part : c) { + arr.push_back(contentPartToJson(part)); + } + j["content"] = arr; + } + }, msg.content); + return j; +} + +inline Message 
messageFromJson(const Json& j) { + Message msg; + msg.role = roleFromString(j.at("role").get()); + const auto& c = j.at("content"); + if (c.is_string()) { + msg.content = c.get(); + } else if (c.is_array()) { + std::vector parts; + for (const auto& elem : c) { + parts.push_back(contentPartFromJson(elem)); + } + msg.content = std::move(parts); + } + return msg; +} + +void Conversation::save(std::string_view filePath) const { + Json j; + j["messages"] = Json::array(); + for (const auto& msg : messages) { + j["messages"].push_back(messageToJson(msg)); + } + std::ofstream out{std::string{filePath}}; + out << j.dump(2); +} + +Conversation Conversation::load(std::string_view filePath) { + std::ifstream in{std::string{filePath}}; + std::string content{std::istreambuf_iterator{in}, std::istreambuf_iterator{}}; + auto j = Json::parse(content); + Conversation conv; + for (const auto& msgJson : j.at("messages")) { + conv.push(messageFromJson(msgJson)); + } + return conv; +} + +} // namespace mcpplibs::llmapi diff --git a/tests/llmapi/test_anthropic_live.cpp b/tests/llmapi/test_anthropic_live.cpp new file mode 100644 index 0000000..200abd3 --- /dev/null +++ b/tests/llmapi/test_anthropic_live.cpp @@ -0,0 +1,45 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("ANTHROPIC_API_KEY"); + if (!apiKey) { + println("ANTHROPIC_API_KEY not set, skipping live test"); + return 0; + } + + auto client = Client(anthropic::Anthropic({ + .apiKey = apiKey, + .model = "claude-haiku-4-5-20251001", + })); + + // Test 1: basic chat + auto resp = client.chat("Say exactly: HELLO_TEST_OK"); + println("Response: ", resp.text()); + assert(!resp.text().empty()); + assert(resp.usage.inputTokens > 0); + + // Test 2: system message + client.clear(); + client.system("Always respond with exactly one word."); + auto resp2 = client.chat("What color is the sky?"); + println("System test: ", 
resp2.text()); + + // Test 3: streaming + client.clear(); + std::string streamed; + auto resp3 = client.chat_stream("Say exactly: STREAM_OK", [&](std::string_view chunk) { + streamed += chunk; + print(chunk); + }); + println(); + assert(!streamed.empty()); + + println("test_anthropic_live: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_anthropic_serialize.cpp b/tests/llmapi/test_anthropic_serialize.cpp new file mode 100644 index 0000000..fb310b1 --- /dev/null +++ b/tests/llmapi/test_anthropic_serialize.cpp @@ -0,0 +1,32 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +int main() { + anthropic::Anthropic provider(anthropic::Config { + .apiKey = "test-key", + .model = "claude-sonnet-4-20250514", + }); + + // Test 1: concept satisfaction + static_assert(Provider); + static_assert(StreamableProvider); + // NOT EmbeddableProvider — Anthropic has no embeddings + + // Test 2: name + assert(provider.name() == "anthropic"); + + // Test 3: Client compiles + auto client = Client(anthropic::Anthropic({ + .apiKey = "test", + .model = "claude-sonnet-4-20250514", + })); + assert(client.provider().name() == "anthropic"); + + println("test_anthropic_serialize: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_client.cpp b/tests/llmapi/test_client.cpp new file mode 100644 index 0000000..e148252 --- /dev/null +++ b/tests/llmapi/test_client.cpp @@ -0,0 +1,101 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +struct FullMockProvider { + std::string_view name() const { return "full_mock"; } + + ChatResponse chat(const std::vector& msgs, const ChatParams&) { + std::string lastContent; + if (!msgs.empty()) { + auto& c = msgs.back().content; + if (auto* s = std::get_if(&c)) { + lastContent = *s; + } + } + return ChatResponse { + .content = { TextContent { "reply to: " + lastContent } }, + .stopReason = 
StopReason::EndOfTurn, + .usage = { .inputTokens = 10, .outputTokens = 5, .totalTokens = 15 }, + }; + } + + Task chat_async(const std::vector& msgs, const ChatParams& p) { + co_return chat(msgs, p); + } + + ChatResponse chat_stream(const std::vector& msgs, + const ChatParams& params, + std::function callback) { + auto resp = chat(msgs, params); + auto text = resp.text(); + for (std::size_t i = 0; i < text.size(); i += 5) { + auto len = std::min(std::size_t{5}, text.size() - i); + callback(std::string_view(text).substr(i, len)); + } + return resp; + } + + Task chat_stream_async(const std::vector& msgs, + const ChatParams& params, + std::function callback) { + co_return chat_stream(msgs, params, std::move(callback)); + } +}; + +static_assert(Provider); +static_assert(StreamableProvider); + +int main() { + auto client = Client(FullMockProvider{}); + + // Test 1: basic chat + auto resp = client.chat("hello"); + assert(resp.text() == "reply to: hello"); + + // Test 2: conversation auto-saved + assert(client.conversation().size() == 2); + + // Test 3: system message + client.clear(); + client.system("be helpful"); + auto resp2 = client.chat("hi"); + assert(client.conversation().size() == 3); + + // Test 4: streaming + client.clear(); + std::string streamed; + auto resp3 = client.chat_stream("test", [&streamed](std::string_view chunk) { + streamed += chunk; + }); + assert(!streamed.empty()); + assert(resp3.text() == streamed); + + // Test 5: async chat + client.clear(); + auto asyncResp = client.chat_async("async hello"); + auto result = asyncResp.get(); + assert(result.text() == "reply to: async hello"); + + // Test 6: default params + client.clear(); + client.default_params(ChatParams { .temperature = 0.5 }); + auto resp4 = client.chat("with params"); + assert(resp4.text().find("with params") != std::string::npos); + + // Test 7: save/load conversation + client.clear(); + client.chat("save me"); + client.save_conversation("/tmp/test_client_conv.json"); + auto client2 = 
Client(FullMockProvider{}); + client2.load_conversation("/tmp/test_client_conv.json"); + assert(client2.conversation().size() == 2); + std::filesystem::remove("/tmp/test_client_conv.json"); + + println("test_client: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_coro.cpp b/tests/llmapi/test_coro.cpp new file mode 100644 index 0000000..6068732 --- /dev/null +++ b/tests/llmapi/test_coro.cpp @@ -0,0 +1,80 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +Task compute_async(int a, int b) { + co_return a + b; +} + +Task chain_async() { + auto result = co_await compute_async(2, 3); + co_return "result=" + std::to_string(result); +} + +Task void_task() { + co_return; +} + +Task throwing_task() { + throw std::runtime_error("test error"); + co_return 0; +} + +// Mock Provider +struct MockProvider { + std::string_view name() const { return "mock"; } + + ChatResponse chat(const std::vector&, const ChatParams&) { + return ChatResponse { + .content = { TextContent { "mock response" } }, + .stopReason = StopReason::EndOfTurn, + }; + } + + Task chat_async(const std::vector&, const ChatParams&) { + co_return ChatResponse { + .content = { TextContent { "mock async" } }, + .stopReason = StopReason::EndOfTurn, + }; + } +}; + +static_assert(Provider); + +int main() { + // Test 1: Task sync get + auto t1 = compute_async(3, 4); + assert(t1.get() == 7); + + // Test 2: Task with co_await chain + auto t2 = chain_async(); + assert(t2.get() == "result=5"); + + // Test 3: Task + auto t3 = void_task(); + t3.get(); + + // Test 4: exception propagation + auto t4 = throwing_task(); + try { + t4.get(); + assert(false); + } catch (const std::runtime_error& e) { + assert(std::string(e.what()) == "test error"); + } + + // Test 5: MockProvider satisfies concept + MockProvider mock; + auto resp = mock.chat({}, {}); + assert(resp.text() == "mock response"); + + auto asyncResp = mock.chat_async({}, {}); + 
assert(asyncResp.get().text() == "mock async"); + + println("test_coro: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_embeddings.cpp b/tests/llmapi/test_embeddings.cpp new file mode 100644 index 0000000..85d1aa9 --- /dev/null +++ b/tests/llmapi/test_embeddings.cpp @@ -0,0 +1,33 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + println("OPENAI_API_KEY not set, skipping"); + return 0; + } + + auto provider = openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + }); + + auto resp = provider.embed( + {"Hello world", "How are you"}, + "text-embedding-3-small" + ); + + assert(resp.embeddings.size() == 2); + assert(!resp.embeddings[0].empty()); + assert(resp.usage.inputTokens > 0); + println("Embedding dim: ", resp.embeddings[0].size()); + + println("test_embeddings: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_llmapi_integration.cpp b/tests/llmapi/test_llmapi_integration.cpp new file mode 100644 index 0000000..b5c0de9 --- /dev/null +++ b/tests/llmapi/test_llmapi_integration.cpp @@ -0,0 +1,82 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +int main() { + // Test 1: compile-time — both providers satisfy concepts + static_assert(Provider); + static_assert(StreamableProvider); + static_assert(EmbeddableProvider); + static_assert(Provider); + static_assert(StreamableProvider); + + // Test 2: type system + auto msg = Message::user("hello"); + ChatParams params { .temperature = 0.7, .maxTokens = 100 }; + Conversation conv; + conv.push(msg); + assert(conv.size() == 1); + + // Test 3: Client compiles with both providers + auto openaiClient = Client(openai::OpenAI({ + .apiKey = "test", + .model = "gpt-4o", + })); + auto anthropicClient = Client(anthropic::Anthropic({ + .apiKey = "test", + .model = 
"claude-sonnet-4-20250514", + })); + + // Test 4: conversation serialization round-trip via save/load + Conversation conv2; + conv2.push(Message::system("You are helpful")); + conv2.push(Message::user("Hello")); + conv2.push(Message::assistant("Hi there!")); + + auto tmpPath = std::filesystem::temp_directory_path() / "llmapi_test_integration.json"; + conv2.save(tmpPath.string()); + auto conv3 = Conversation::load(tmpPath.string()); + std::filesystem::remove(tmpPath); + assert(conv3.size() == 3); + assert(conv3.messages[0].role == Role::System); + assert(conv3.messages[1].role == Role::User); + assert(conv3.messages[2].role == Role::Assistant); + + // Test 5: tool definitions + ToolDef tool { + .name = "search", + .description = "Search the web", + .inputSchema = R"({"type":"object","properties":{"query":{"type":"string"}}})", + }; + ChatParams toolParams { + .tools = std::vector{tool}, + .toolChoice = ToolChoicePolicy{ToolChoice::Auto}, + }; + assert(toolParams.tools->size() == 1); + + // Test 6: response format + ChatParams jsonParams { + .responseFormat = ResponseFormat { + .type = ResponseFormatType::JsonObject, + }, + }; + + // Test 7: ChatResponse helpers + ChatResponse resp; + resp.content.push_back(TextContent { .text = "Hello" }); + resp.content.push_back(ToolUseContent { + .id = "call_1", + .name = "search", + .inputJson = R"({"query":"test"})", + }); + assert(resp.text() == "Hello"); + assert(resp.tool_calls().size() == 1); + assert(resp.tool_calls()[0].name == "search"); + + println("test_integration: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_openai_live.cpp b/tests/llmapi/test_openai_live.cpp new file mode 100644 index 0000000..921f697 --- /dev/null +++ b/tests/llmapi/test_openai_live.cpp @@ -0,0 +1,45 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + println("OPENAI_API_KEY not set, 
skipping live test"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + + // Test 1: basic chat + auto resp = client.chat("Say exactly: HELLO_TEST_OK"); + println("Response: ", resp.text()); + assert(!resp.text().empty()); + assert(resp.usage.totalTokens > 0); + assert(resp.stopReason == StopReason::EndOfTurn); + + // Test 2: streaming + client.clear(); + std::string streamed; + auto resp2 = client.chat_stream("Say exactly: STREAM_OK", [&](std::string_view chunk) { + streamed += chunk; + print(chunk); + }); + println(); + assert(!streamed.empty()); + + // Test 3: conversation continuity + auto resp3 = client.chat("What did I just ask you to say?"); + assert(!resp3.text().empty()); + assert(client.conversation().messages.size() == 4); // 2 user + 2 assistant + + println("test_openai_live: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_openai_serialize.cpp b/tests/llmapi/test_openai_serialize.cpp new file mode 100644 index 0000000..994323f --- /dev/null +++ b/tests/llmapi/test_openai_serialize.cpp @@ -0,0 +1,39 @@ +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + // We can't test private methods directly, so test through the public API + // by creating an OpenAI provider and verifying it satisfies concepts + + openai::OpenAI provider(openai::Config { + .apiKey = "test-key", + .model = "gpt-4o", + }); + + // Test 1: Provider concept satisfaction + static_assert(Provider); + static_assert(StreamableProvider); + static_assert(EmbeddableProvider); + + // Test 2: name() + assert(provider.name() == "openai"); + + // Test 3: Client compiles + auto client = Client(openai::OpenAI({ + .apiKey = "test", + .model = "gpt-4o", + })); + + // Test 4: provider() access + assert(client.provider().name() == "openai"); + + println("test_openai_serialize: ALL 
PASSED"); + return 0; +} diff --git a/tests/llmapi/test_serialization.cpp b/tests/llmapi/test_serialization.cpp new file mode 100644 index 0000000..8203ebb --- /dev/null +++ b/tests/llmapi/test_serialization.cpp @@ -0,0 +1,46 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::llmapi; + + Conversation conv; + conv.push(Message::system("you are helpful")); + conv.push(Message::user("hello")); + conv.push(Message::assistant("hi there")); + conv.push(Message { + .role = Role::User, + .content = std::vector{ + TextContent { "look at this" }, + ImageContent { .data = "base64data", .mediaType = "image/png" }, + }, + }); + + // Save + std::string path = "/tmp/test_conv.json"; + conv.save(path); + + // Load + auto loaded = Conversation::load(path); + assert(loaded.size() == conv.size()); + + // Verify content preserved + assert(std::get(loaded.messages[0].content) == "you are helpful"); + assert(loaded.messages[0].role == Role::System); + assert(std::get(loaded.messages[1].content) == "hello"); + + // Verify multimodal preserved + auto& parts = std::get>(loaded.messages[3].content); + assert(parts.size() == 2); + auto& img = std::get(parts[1]); + assert(img.mediaType == "image/png"); + + // Cleanup + std::filesystem::remove(path); + + println("test_serialization: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_structured_output.cpp b/tests/llmapi/test_structured_output.cpp new file mode 100644 index 0000000..510d734 --- /dev/null +++ b/tests/llmapi/test_structured_output.cpp @@ -0,0 +1,38 @@ +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + println("OPENAI_API_KEY not set, skipping"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = 
"gpt-4o-mini", + })); + + // Test: JSON object mode + auto resp = client.chat("Generate a JSON object with fields: name (string), age (number)", + ChatParams { + .responseFormat = ResponseFormat { + .type = ResponseFormatType::JsonObject, + }, + }); + + auto json = Json::parse(resp.text()); + assert(json.contains("name")); + assert(json.contains("age")); + println("JSON output: ", resp.text()); + + println("test_structured_output: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_tool_calling.cpp b/tests/llmapi/test_tool_calling.cpp new file mode 100644 index 0000000..2e60265 --- /dev/null +++ b/tests/llmapi/test_tool_calling.cpp @@ -0,0 +1,67 @@ +import mcpplibs.llmapi; +import mcpplibs.llmapi.nlohmann.json; +import std; + +#include +#include "../test_print.hpp" + +using namespace mcpplibs::llmapi; +using Json = nlohmann::json; + +int main() { + auto apiKey = std::getenv("OPENAI_API_KEY"); + if (!apiKey) { + println("OPENAI_API_KEY not set, skipping"); + return 0; + } + + auto client = Client(openai::OpenAI({ + .apiKey = apiKey, + .model = "gpt-4o-mini", + })); + + auto params = ChatParams { + .tools = std::vector{{ + .name = "get_temperature", + .description = "Get the current temperature in a city", + .inputSchema = R"({"type":"object","properties":{"city":{"type":"string"}},"required":["city"]})", + }}, + .toolChoice = ToolChoice::Auto, + }; + + // Ask about weather — model should call the tool + auto resp = client.chat("What's the temperature in Tokyo?", params); + + if (resp.stopReason == StopReason::ToolUse) { + auto calls = resp.tool_calls(); + assert(!calls.empty()); + println("Tool called: ", calls[0].name, " with args: ", calls[0].arguments); + assert(calls[0].name == "get_temperature"); + + auto args = Json::parse(calls[0].arguments); + assert(args.contains("city")); + + // Send tool result back + client.add_message(Message { + .role = Role::Tool, + .content = std::vector{ + ToolResultContent { + .toolUseId = calls[0].id, + .content = 
R"({"temperature": "22°C", "condition": "sunny"})", + }, + }, + }); + + // Get final response — don't add another user message, just continue + auto messages = client.conversation().messages; + auto& provider = client.provider(); + auto finalResp = provider.chat(messages, params); + println("Final: ", finalResp.text()); + assert(!finalResp.text().empty()); + } else { + println("Model didn't call tool (non-deterministic), response: ", resp.text()); + } + + println("test_tool_calling: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/test_types.cpp b/tests/llmapi/test_types.cpp new file mode 100644 index 0000000..d94c558 --- /dev/null +++ b/tests/llmapi/test_types.cpp @@ -0,0 +1,87 @@ +import mcpplibs.llmapi; +import std; + +#include +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::llmapi; + + // Test 1: Message construction + auto msg = Message::user("hello"); + assert(msg.role == Role::User); + assert(std::holds_alternative(msg.content)); + assert(std::get(msg.content) == "hello"); + + // Test 2: System/Assistant messages + auto sys = Message::system("you are helpful"); + assert(sys.role == Role::System); + auto asst = Message::assistant("hi there"); + assert(asst.role == Role::Assistant); + + // Test 3: Multimodal content + auto multiMsg = Message{ + .role = Role::User, + .content = std::vector{ + TextContent{"describe this"}, + ImageContent{.data = "https://example.com/img.jpg", .isUrl = true}, + }, + }; + auto& parts = std::get>(multiMsg.content); + assert(parts.size() == 2); + assert(std::holds_alternative(parts[0])); + assert(std::holds_alternative(parts[1])); + + // Test 4: ToolDef + ToolDef tool{ + .name = "get_weather", + .description = "Get weather", + .inputSchema = R"({"type":"object"})", + }; + assert(tool.name == "get_weather"); + + // Test 5: ChatParams with optionals + ChatParams params{ + .temperature = 0.7, + .maxTokens = 1024, + }; + assert(params.temperature.has_value()); + assert(!params.topP.has_value()); + + // 
Test 6: ChatResponse text extraction + ChatResponse resp{ + .content = {TextContent{"hello"}, TextContent{" world"}}, + .stopReason = StopReason::EndOfTurn, + }; + assert(resp.text() == "hello world"); + + // Test 7: ChatResponse tool_calls extraction + ChatResponse toolResp{ + .content = + { + TextContent{"Let me check"}, + ToolUseContent{.id = "call_1", .name = "weather", .inputJson = "{}"}, + }, + }; + auto calls = toolResp.tool_calls(); + assert(calls.size() == 1); + assert(calls[0].name == "weather"); + + // Test 8: Conversation + Conversation conv; + conv.push(Message::user("hi")); + conv.push(Message::assistant("hello")); + assert(conv.size() == 2); + conv.clear(); + assert(conv.size() == 0); + + // Test 9: Usage + Usage usage{.inputTokens = 10, .outputTokens = 20, .totalTokens = 30}; + assert(usage.totalTokens == 30); + + // Test 10: StopReason enum + assert(StopReason::EndOfTurn != StopReason::ToolUse); + + println("test_types: ALL PASSED"); + return 0; +} diff --git a/tests/llmapi/xmake.lua b/tests/llmapi/xmake.lua new file mode 100644 index 0000000..329bb83 --- /dev/null +++ b/tests/llmapi/xmake.lua @@ -0,0 +1,83 @@ +target("test_types") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_types.cpp") + add_deps("llmapi") + +target("test_serialization") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_serialization.cpp") + add_deps("llmapi") + +target("test_coro") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_coro.cpp") + add_deps("llmapi") + +target("test_client") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_client.cpp") + add_deps("llmapi") + +target("test_openai_serialize") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_openai_serialize.cpp") + add_deps("llmapi") + 
+target("test_anthropic_serialize") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_anthropic_serialize.cpp") + add_deps("llmapi") + +target("test_openai_live") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_openai_live.cpp") + add_deps("llmapi") + +target("test_anthropic_live") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_anthropic_live.cpp") + add_deps("llmapi") + +target("test_tool_calling") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_tool_calling.cpp") + add_deps("llmapi") + +target("test_structured_output") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_structured_output.cpp") + add_deps("llmapi") + +target("test_embeddings") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_embeddings.cpp") + add_deps("llmapi") + +target("test_llmapi_integration") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_llmapi_integration.cpp") + add_deps("llmapi") diff --git a/tests/test_print.hpp b/tests/test_print.hpp new file mode 100644 index 0000000..0e88031 --- /dev/null +++ b/tests/test_print.hpp @@ -0,0 +1,12 @@ +#pragma once + +template +inline void print(Args&&... args) { + (std::cout << ... << std::forward(args)); +} + +template +inline void println(Args&&... 
args) { + print(std::forward(args)...); + std::cout << '\n'; +} diff --git a/tests/tinyhttps/test_http.cpp b/tests/tinyhttps/test_http.cpp new file mode 100644 index 0000000..b36b90e --- /dev/null +++ b/tests/tinyhttps/test_http.cpp @@ -0,0 +1,51 @@ +import mcpplibs.tinyhttps; +import std; + +#include +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client; + + // Test 1: simple GET + auto resp = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/get", + .headers = {{"Accept", "application/json"}}, + }); + assert(resp.ok()); + assert(resp.statusCode == 200); + assert(resp.body.find("\"url\"") != std::string::npos); + + // Test 2: POST with JSON body + auto resp2 = client.send(HttpRequest::post( + "https://httpbin.org/post", + R"({"key":"value"})" + )); + assert(resp2.ok()); + assert(resp2.body.find("\"key\"") != std::string::npos); + + // Test 3: custom headers + auto resp3 = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/headers", + .headers = {{"X-Custom", "test123"}}, + }); + assert(resp3.ok()); + assert(resp3.body.find("test123") != std::string::npos); + + // Test 4: 404 handling + auto resp4 = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/status/404", + }); + assert(!resp4.ok()); + assert(resp4.statusCode == 404); + + Socket::platform_cleanup(); + println("test_http: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_http_stream.cpp b/tests/tinyhttps/test_http_stream.cpp new file mode 100644 index 0000000..c7d79eb --- /dev/null +++ b/tests/tinyhttps/test_http_stream.cpp @@ -0,0 +1,62 @@ +import mcpplibs.tinyhttps; +import std; + +#include +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client; + + // Test 1: Stream from a server that returns chunked data + // httpbin.org/stream/3 returns 3 JSON 
lines (newline-delimited JSON, not SSE) + // The SseParser won't produce events from plain JSON lines (no "data:" prefix), + // but the streaming read path is exercised and should complete without error. + int lineCount { 0 }; + auto resp = client.send_stream( + HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/stream/3", + }, + [&lineCount](const SseEvent& event) -> bool { + lineCount++; + println("Event ", lineCount, ": ", event.data.substr(0, 50)); + return true; + } + ); + assert(resp.statusCode == 200); + println("Stream/3 completed, received ", lineCount, " events, status ", resp.statusCode); + + // Test 2: early stop (return false from callback) + int stopCount { 0 }; + auto resp2 = client.send_stream( + HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/stream/10", + }, + [&stopCount](const SseEvent&) -> bool { + stopCount++; + return stopCount < 2; // stop after processing 1 event + } + ); + assert(resp2.statusCode == 200); + println("Stopped after ", stopCount, " events"); + + // Test 3: verify response headers are captured + auto resp3 = client.send_stream( + HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/stream/1", + }, + [](const SseEvent&) -> bool { return true; } + ); + assert(resp3.statusCode == 200); + assert(!resp3.headers.empty()); + println("Response has ", resp3.headers.size(), " headers"); + + Socket::platform_cleanup(); + println("test_http_stream: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_integration.cpp b/tests/tinyhttps/test_integration.cpp new file mode 100644 index 0000000..482b3cd --- /dev/null +++ b/tests/tinyhttps/test_integration.cpp @@ -0,0 +1,52 @@ +import mcpplibs.tinyhttps; +import std; + +#include <cassert> +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + HttpClient client(HttpClientConfig { + .connectTimeoutMs = 10000, + .readTimeoutMs = 30000, + .verifySsl = true, + .keepAlive = true, + }); + 
+ // Test 1: full HTTPS POST (simulates LLM API call pattern) + auto resp = client.send(HttpRequest { + .method = Method::POST, + .url = "https://httpbin.org/post", + .headers = { + {"Content-Type", "application/json"}, + {"Authorization", "Bearer test-key"}, + {"X-Custom-Header", "custom-value"}, + }, + .body = R"({"model":"test","messages":[{"role":"user","content":"hi"}]})", + }); + + assert(resp.ok()); + assert(resp.statusCode == 200); + assert(resp.body.find("test-key") != std::string::npos); + assert(resp.body.find("custom-value") != std::string::npos); + assert(resp.body.find("\"model\"") != std::string::npos); + + // Test 2: connection reuse (second request to same host) + auto resp2 = client.send(HttpRequest { + .method = Method::GET, + .url = "https://httpbin.org/get", + }); + assert(resp2.ok()); + + // Test 3: SSE parser standalone verification + SseParser parser; + auto events = parser.feed("event: content_block_delta\ndata: {\"text\":\"hello\"}\n\n"); + assert(events.size() == 1); + assert(events[0].event == "content_block_delta"); + + Socket::platform_cleanup(); + println("test_integration (tinyhttps): ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_proxy.cpp b/tests/tinyhttps/test_proxy.cpp new file mode 100644 index 0000000..6ddf0e9 --- /dev/null +++ b/tests/tinyhttps/test_proxy.cpp @@ -0,0 +1,34 @@ +import mcpplibs.tinyhttps; +import std; + +#include <cassert> +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + // Test 1: proxy URL parsing + auto config = parse_proxy_url("http://proxy.example.com:8080"); + assert(config.host == "proxy.example.com"); + assert(config.port == 8080); + + // Test 2: proxy URL parsing without port (default 8080) + auto config2 = parse_proxy_url("http://myproxy.com"); + assert(config2.host == "myproxy.com"); + assert(config2.port == 8080); + + // Test 3: HttpClient stores proxy config + auto client = HttpClient(HttpClientConfig { + .proxy = 
"http://127.0.0.1:8080", + }); + assert(client.config().proxy.has_value()); + assert(client.config().proxy.value() == "http://127.0.0.1:8080"); + + // Note: actual proxy connection test requires a real proxy, + // so we only test config/parsing here + + Socket::platform_cleanup(); + println("test_proxy: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_socket.cpp b/tests/tinyhttps/test_socket.cpp new file mode 100644 index 0000000..9705da0 --- /dev/null +++ b/tests/tinyhttps/test_socket.cpp @@ -0,0 +1,50 @@ +import mcpplibs.tinyhttps; +import std; + +#include <cassert> +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + + // Test 1: platform init/cleanup + Socket::platform_init(); + + // Test 2: construct and validate + Socket s; + assert(!s.is_valid()); + + // Test 3: connect to known host (httpbin.org:80) + bool connected = s.connect("httpbin.org", 80, 5000); + assert(connected); + assert(s.is_valid()); + + // Test 4: write HTTP request + std::string req = "GET /get HTTP/1.1\r\nHost: httpbin.org\r\nConnection: close\r\n\r\n"; + int written = s.write(req.data(), static_cast<int>(req.size())); + assert(written > 0); + + // Test 5: read response + char buf[4096]; + int n = s.read(buf, sizeof(buf)); + assert(n > 0); + std::string_view resp(buf, n); + assert(resp.starts_with("HTTP/1.1 200")); + + // Test 6: close and validate + s.close(); + assert(!s.is_valid()); + + // Test 7: move semantics + Socket s1; + s1.connect("httpbin.org", 80, 5000); + Socket s2 = std::move(s1); + assert(!s1.is_valid()); + assert(s2.is_valid()); + s2.close(); + + Socket::platform_cleanup(); + + println("test_socket: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_sse.cpp b/tests/tinyhttps/test_sse.cpp new file mode 100644 index 0000000..a60200d --- /dev/null +++ b/tests/tinyhttps/test_sse.cpp @@ -0,0 +1,76 @@ +import mcpplibs.tinyhttps; +import std; + +#include <cassert> +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; 
+ + SseParser parser; + + // Test 1: simple data event + auto events = parser.feed("data: hello\n\n"); + assert(events.size() == 1); + assert(events[0].data == "hello"); + assert(events[0].event == "message"); + + // Test 2: named event + events = parser.feed("event: ping\ndata: {}\n\n"); + assert(events.size() == 1); + assert(events[0].event == "ping"); + assert(events[0].data == "{}"); + + // Test 3: multi-line data + events = parser.feed("data: line1\ndata: line2\n\n"); + assert(events.size() == 1); + assert(events[0].data == "line1\nline2"); + + // Test 4: chunked feed (data arrives in pieces) + parser.reset(); + events = parser.feed("data: hel"); + assert(events.empty()); + events = parser.feed("lo\n\n"); + assert(events.size() == 1); + assert(events[0].data == "hello"); + + // Test 5: multiple events in one chunk + events = parser.feed("data: first\n\ndata: second\n\n"); + assert(events.size() == 2); + assert(events[0].data == "first"); + assert(events[1].data == "second"); + + // Test 6: OpenAI format + parser.reset(); + events = parser.feed("data: {\"choices\":[{\"delta\":{\"content\":\"Hi\"}}]}\n\n"); + assert(events.size() == 1); + assert(events[0].data.find("Hi") != std::string::npos); + + // Test 7: Anthropic format (event type line) + parser.reset(); + events = parser.feed("event: content_block_delta\ndata: {\"type\":\"content_block_delta\"}\n\n"); + assert(events.size() == 1); + assert(events[0].event == "content_block_delta"); + + // Test 8: [DONE] sentinel + parser.reset(); + events = parser.feed("data: [DONE]\n\n"); + assert(events.size() == 1); + assert(events[0].data == "[DONE]"); + + // Test 9: comment lines (start with :) ignored + parser.reset(); + events = parser.feed(": this is a comment\ndata: actual\n\n"); + assert(events.size() == 1); + assert(events[0].data == "actual"); + + // Test 10: id field + parser.reset(); + events = parser.feed("id: 123\ndata: msg\n\n"); + assert(events.size() == 1); + assert(events[0].id == "123"); + 
assert(events[0].data == "msg"); + + println("test_sse: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/test_tls.cpp b/tests/tinyhttps/test_tls.cpp new file mode 100644 index 0000000..470d4b5 --- /dev/null +++ b/tests/tinyhttps/test_tls.cpp @@ -0,0 +1,47 @@ +import mcpplibs.tinyhttps; +import std; + +#include <cassert> +#include "../test_print.hpp" + +int main() { + using namespace mcpplibs::tinyhttps; + Socket::platform_init(); + + // Test 1: TLS connect to HTTPS host + TlsSocket tls; + bool connected = tls.connect("httpbin.org", 443, 5000, true); + assert(connected); + assert(tls.is_valid()); + + // Test 2: HTTPS request over TLS + std::string req = "GET /get HTTP/1.1\r\nHost: httpbin.org\r\nConnection: close\r\n\r\n"; + int written = tls.write(req.data(), static_cast<int>(req.size())); + assert(written > 0); + + // Test 3: Read HTTPS response + std::string response; + char buf[4096]; + int n; + while ((n = tls.read(buf, sizeof(buf))) > 0) { + response.append(buf, n); + } + assert(response.starts_with("HTTP/1.1 200")); + assert(response.find("\"url\"") != std::string::npos); + + // Test 4: close + tls.close(); + assert(!tls.is_valid()); + + // Test 5: move semantics + TlsSocket t1; + t1.connect("httpbin.org", 443, 5000, true); + TlsSocket t2 = std::move(t1); + assert(!t1.is_valid()); + assert(t2.is_valid()); + t2.close(); + + Socket::platform_cleanup(); + println("test_tls: ALL PASSED"); + return 0; +} diff --git a/tests/tinyhttps/xmake.lua b/tests/tinyhttps/xmake.lua new file mode 100644 index 0000000..5b0e91c --- /dev/null +++ b/tests/tinyhttps/xmake.lua @@ -0,0 +1,48 @@ +target("test_socket") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_socket.cpp") + add_deps("tinyhttps") + +target("test_tls") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_tls.cpp") + add_deps("tinyhttps") + +target("test_http") + set_kind("binary") + set_languages("c++23") + 
set_policy("build.c++.modules", true) + add_files("test_http.cpp") + add_deps("tinyhttps") + +target("test_sse") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_sse.cpp") + add_deps("tinyhttps") + +target("test_http_stream") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_http_stream.cpp") + add_deps("tinyhttps") + +target("test_proxy") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_proxy.cpp") + add_deps("tinyhttps") + +target("test_integration") + set_kind("binary") + set_languages("c++23") + set_policy("build.c++.modules", true) + add_files("test_integration.cpp") + add_deps("tinyhttps") diff --git a/tests/xmake.lua b/tests/xmake.lua new file mode 100644 index 0000000..4c1d20a --- /dev/null +++ b/tests/xmake.lua @@ -0,0 +1,2 @@ +includes("tinyhttps") +includes("llmapi") diff --git a/xmake.lua b/xmake.lua index 6785615..92ac765 100644 --- a/xmake.lua +++ b/xmake.lua @@ -1,19 +1,21 @@ ---add_rules("mode.debug", "mode.release") - -add_requires("libcurl 8.11.0") +set_languages("c++23") +set_policy("build.c++.modules", true) ---includes("src/json") +add_requires("mbedtls 3.6.1") -set_languages("c++23") +target("tinyhttps") + set_kind("static") + add_files("src/tinyhttps/*.cppm", { public = true }) + add_packages("mbedtls", { public = true }) target("llmapi") - --set_kind("moduleonly") -- link failed issue when other lib reference llmapi set_kind("static") add_files("src/*.cppm", { public = true, install = true }) - add_packages("libcurl") - --add_deps("__nlohmann_json") + add_files("src/providers/*.cppm", { public = true, install = true }) + add_deps("tinyhttps") add_includedirs("src/json") add_headerfiles("src/json/json.hpp") add_files("src/json/json.cppm", { public = true }) -includes("examples") \ No newline at end of file +includes("examples") +includes("tests")