From c44cad41ca17f1d3969569b3a2c3ed76fe1b9133 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 02:21:41 +0800 Subject: [PATCH 01/39] Add cross-platform build scripts for macOS and Linux ARM64 Build scripts for compiling libgopher-mcp on different platforms: - build-mac-x64.sh: macOS x86_64 (Intel) build - build-mac-arm64.sh: macOS ARM64 (Apple Silicon) build with auto-dependency install - build-linux-arm64.sh: Docker-based Linux ARM64 build - Dockerfile.linux-arm64: Docker image for Linux ARM64 builds Features: - Auto-detect architecture and use correct Homebrew path - Auto-install OpenSSL and libevent dependencies via brew - Support for cached FetchContent dependencies (_deps-x64, _deps-arm64) - Verification tool included in build output --- .gitignore | 6 +- docker-mcp/Dockerfile.linux-arm64 | 114 ++++++++++ docker-mcp/build-linux-arm64.sh | 117 ++++++++++ docker-mcp/build-mac-arm64.sh | 354 ++++++++++++++++++++++++++++++ docker-mcp/build-mac-x64.sh | 241 ++++++++++++++++++++ 5 files changed, 831 insertions(+), 1 deletion(-) create mode 100644 docker-mcp/Dockerfile.linux-arm64 create mode 100644 docker-mcp/build-linux-arm64.sh create mode 100755 docker-mcp/build-mac-arm64.sh create mode 100755 docker-mcp/build-mac-x64.sh diff --git a/.gitignore b/.gitignore index 457c7687..f47c0207 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,10 @@ cmake_install.cmake CTestTestfile.cmake Testing/ _deps/ +_deps-arm64/ +_deps-x64/ +install_prefix_dir/ + # Note: We have a hand-written Makefile at root, so only ignore generated ones in subdirs */Makefile @@ -106,4 +110,4 @@ __pycache__/ # OS generated files Thumbs.db -Desktop.ini \ No newline at end of file +Desktop.ini diff --git a/docker-mcp/Dockerfile.linux-arm64 b/docker-mcp/Dockerfile.linux-arm64 new file mode 100644 index 00000000..b187313f --- /dev/null +++ b/docker-mcp/Dockerfile.linux-arm64 @@ -0,0 +1,114 @@ +# Dockerfile for Linux ARM64 build of libgopher-mcp +# Uses pre-built GCC image for fast 
ARM64 builds +FROM gcc:11 + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + cmake \ + libssl-dev \ + libevent-dev \ + pkg-config \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /build + +# Copy the entire project +COPY . /build/ + +# Set environment variables for ARM64 paths +ENV OPENSSL_ROOT_DIR=/usr +ENV OPENSSL_CRYPTO_LIBRARY=/usr/lib/aarch64-linux-gnu/libcrypto.so +ENV OPENSSL_SSL_LIBRARY=/usr/lib/aarch64-linux-gnu/libssl.so + +# Create build directory and build +RUN mkdir -p cmake-build && cd cmake-build && \ + cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR} \ + -DOPENSSL_CRYPTO_LIBRARY=${OPENSSL_CRYPTO_LIBRARY} \ + -DOPENSSL_SSL_LIBRARY=${OPENSSL_SSL_LIBRARY} \ + /build && \ + make -j$(nproc) && \ + make install + +# Create output directory and organize files +RUN mkdir -p /output && \ + # Copy main library files + cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + # Copy dependency libraries that were built + cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + # Copy headers + cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + +# Build verification tool +RUN cat > /tmp/verify_mcp.c << 'EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool (Linux ARM64)\n"); + printf("==============================================\n\n"); + + // Try to load the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); + if (!handle) { + printf("Note: C API library 
not found: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.so", RTLD_NOW); + if (!handle) { + printf("X Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("OK Main library loaded successfully\n"); + } else { + printf("OK C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("OK mcp_init function found\n"); + } else { + printf("-- mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("OK mcp_cleanup function found\n"); + } else { + printf("-- mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("OK mcp_client_create function found (C API)\n"); + } else { + printf("-- mcp_client_create function not found\n"); + } + + dlclose(handle); + + printf("\nOK Verification complete\n"); + return 0; +} +EOF + +RUN gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2 + +# Default command to copy files to host +CMD cp -r /output/* /host-output/ && \ + echo "ARM64 build complete!" 
&& \ + ls -la /output/ diff --git a/docker-mcp/build-linux-arm64.sh b/docker-mcp/build-linux-arm64.sh new file mode 100644 index 00000000..f2a982ee --- /dev/null +++ b/docker-mcp/build-linux-arm64.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +# Build script for libgopher-mcp on Linux ARM64/aarch64 +# Uses Docker with pre-built GCC image for fast builds +# Works on both Intel and Apple Silicon Macs + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +MAGENTA='\033[0;35m' +NC='\033[0m' + +echo -e "${MAGENTA}========================================${NC}" +echo -e "${MAGENTA}Building libgopher-mcp for Linux ARM64${NC}" +echo -e "${MAGENTA}Using Docker for cross-platform build${NC}" +echo -e "${MAGENTA}========================================${NC}" +echo "" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-arm64" + +# Check for Docker +if ! command -v docker &> /dev/null; then + echo -e "${RED}Error: Docker is not installed${NC}" + echo "Please install Docker Desktop from https://www.docker.com/products/docker-desktop/" + exit 1 +fi + +# Check for buildx support +if ! docker buildx version &> /dev/null; then + echo -e "${RED}Error: Docker buildx is not available${NC}" + echo "Please update Docker Desktop to a recent version" + exit 1 +fi + +# Clean and create output directory +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$OUTPUT_DIR" +mkdir -p "$OUTPUT_DIR" + +echo -e "${YELLOW}Building ARM64 library using Docker...${NC}" +echo "This may take several minutes on first run (downloading base image and dependencies)" +echo "" + +# Build using Docker buildx with ARM64 platform +docker buildx build \ + --platform linux/arm64 \ + --load \ + -t gopher-mcp:linux-arm64 \ + -f "$SCRIPT_DIR/Dockerfile.linux-arm64" \ + "$PROJECT_ROOT" + +if [ $? 
-ne 0 ]; then + echo -e "${RED}Docker build failed${NC}" + exit 1 +fi + +echo "" +echo -e "${YELLOW}Extracting built files...${NC}" + +# Run container and copy files to host +docker run --rm \ + --platform linux/arm64 \ + -v "$OUTPUT_DIR:/host-output" \ + gopher-mcp:linux-arm64 + +# Check results +if [ -f "$OUTPUT_DIR/libgopher-mcp.so" ] || [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + echo "" + echo -e "${GREEN}========================================${NC}" + echo -e "${GREEN}Build successful!${NC}" + echo -e "${GREEN}========================================${NC}" + echo "" + echo "Output files:" + echo "------------------------------------" + ls -lh "$OUTPUT_DIR"/*.so* 2>/dev/null || true + ls -lh "$OUTPUT_DIR"/verify_mcp 2>/dev/null || true + + # Show architecture verification + if command -v file >/dev/null 2>&1; then + echo "" + echo "Architecture verification:" + MAIN_LIB="" + if [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so.0.1.0" + elif [ -f "$OUTPUT_DIR/libgopher-mcp.so" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so" + fi + if [ -n "$MAIN_LIB" ]; then + file "$MAIN_LIB" + fi + fi + + echo "" + echo -e "${GREEN}Output structure:${NC}" + echo " build-output/linux-arm64/" + echo " ├── libgopher-mcp.so* (main MCP library)" + echo " ├── libgopher_mcp_c.so* (C API for FFI)" + echo " ├── libfmt.so* (formatting library)" + echo " ├── verify_mcp (verification tool)" + echo " └── include/ (header files)" + echo "" + echo "To test on Linux ARM64:" + echo " 1. Copy build-output/linux-arm64/ to ARM64 Linux system" + echo " 2. cd linux-arm64" + echo " 3. LD_LIBRARY_PATH=. 
./verify_mcp" + echo "" +else + echo -e "${RED}Build failed - library not found${NC}" + echo "Contents of output directory:" + ls -la "$OUTPUT_DIR" + exit 1 +fi diff --git a/docker-mcp/build-mac-arm64.sh b/docker-mcp/build-mac-arm64.sh new file mode 100755 index 00000000..afbbf387 --- /dev/null +++ b/docker-mcp/build-mac-arm64.sh @@ -0,0 +1,354 @@ +#!/bin/bash + +# Build script for libgopher-mcp on macOS ARM64 (Apple Silicon) +# Target: macOS 11.0+ (Big Sur and later for Apple Silicon) +# Architecture: arm64 + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN}Building libgopher-mcp for macOS ARM64${NC}" +echo -e "${GREEN}Target: macOS 11.0+ (arm64/Apple Silicon)${NC}" +echo -e "${GREEN}========================================${NC}" + +# Get the script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Build configuration +BUILD_DIR="${PROJECT_ROOT}/build-mac-arm64" +DEPS_DIR="${PROJECT_ROOT}/_deps-arm64" +INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/mac-arm64" +MIN_MACOS_VERSION="11.0" # Minimum version for Apple Silicon + +# Clean previous builds (but preserve _deps for caching) +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$BUILD_DIR" +rm -rf "$OUTPUT_DIR" +mkdir -p "$BUILD_DIR" +mkdir -p "$DEPS_DIR" +mkdir -p "$OUTPUT_DIR" + +# Navigate to build directory +cd "$BUILD_DIR" + +# Detect current architecture +CURRENT_ARCH=$(uname -m) +echo -e "${YELLOW}Detecting system architecture...${NC}" +echo " Current architecture: $CURRENT_ARCH" + +# Determine Homebrew path based on architecture +if [ "$CURRENT_ARCH" = "arm64" ]; then + # Native ARM64 Mac - use /opt/homebrew + BREW_CMD="/opt/homebrew/bin/brew" + HOMEBREW_PREFIX="/opt/homebrew" + + if [ ! 
-f "$BREW_CMD" ]; then + echo -e "${RED}Error: ARM64 Homebrew not found at /opt/homebrew${NC}" + echo "Please install Homebrew for ARM64 using:" + echo " /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"" + exit 1 + fi +elif [ "$CURRENT_ARCH" = "x86_64" ]; then + # Intel Mac - use /usr/local but build for ARM64 (cross-compile) + BREW_CMD="/usr/local/bin/brew" + HOMEBREW_PREFIX="/usr/local" + + echo -e "${YELLOW}Note: Running on Intel Mac - will cross-compile for ARM64${NC}" + + if [ ! -f "$BREW_CMD" ]; then + echo -e "${RED}Error: Homebrew not found at /usr/local${NC}" + echo "Please install Homebrew using:" + echo " /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\"" + exit 1 + fi +else + echo -e "${RED}Error: Unsupported architecture: $CURRENT_ARCH${NC}" + exit 1 +fi + +echo " Found Homebrew at: $BREW_CMD" + +# Install dependencies if missing +echo -e "${YELLOW}Checking and installing dependencies...${NC}" + +# Check and install OpenSSL +if [ ! -d "${HOMEBREW_PREFIX}/opt/openssl@3" ] && [ ! -d "${HOMEBREW_PREFIX}/opt/openssl" ] && [ ! -d "${HOMEBREW_PREFIX}/opt/openssl@1.1" ]; then + echo " Installing OpenSSL..." + $BREW_CMD install openssl +else + echo " OpenSSL already installed" +fi + +# Check and install libevent +if [ ! -d "${HOMEBREW_PREFIX}/opt/libevent" ]; then + echo " Installing libevent..." 
+ $BREW_CMD install libevent +else + echo " libevent already installed" +fi + +# Now find the installed paths +echo -e "${YELLOW}Locating dependencies...${NC}" + +OPENSSL_ROOT="" +if [ -d "${HOMEBREW_PREFIX}/opt/openssl@3" ]; then + OPENSSL_ROOT="${HOMEBREW_PREFIX}/opt/openssl@3" +elif [ -d "${HOMEBREW_PREFIX}/opt/openssl" ]; then + OPENSSL_ROOT="${HOMEBREW_PREFIX}/opt/openssl" +elif [ -d "${HOMEBREW_PREFIX}/opt/openssl@1.1" ]; then + OPENSSL_ROOT="${HOMEBREW_PREFIX}/opt/openssl@1.1" +else + echo -e "${RED}Error: OpenSSL installation failed${NC}" + exit 1 +fi +echo " OpenSSL: $OPENSSL_ROOT" + +LIBEVENT_ROOT="" +if [ -d "${HOMEBREW_PREFIX}/opt/libevent" ]; then + LIBEVENT_ROOT="${HOMEBREW_PREFIX}/opt/libevent" +else + echo -e "${RED}Error: libevent installation failed${NC}" + exit 1 +fi +echo " libevent: $LIBEVENT_ROOT" + +# Configure CMake with macOS ARM64-specific settings +echo -e "${YELLOW}Configuring CMake for macOS ARM64...${NC}" + +# Set PKG_CONFIG_PATH to find packages +export PKG_CONFIG_PATH="${HOMEBREW_PREFIX}/lib/pkgconfig:${OPENSSL_ROOT}/lib/pkgconfig:${LIBEVENT_ROOT}/lib/pkgconfig:$PKG_CONFIG_PATH" + +CMAKE_ARGS=( + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_CXX_STANDARD=14 + -DCMAKE_OSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} + -DCMAKE_OSX_ARCHITECTURES=arm64 + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DBUILD_SHARED_LIBS=ON + -DBUILD_STATIC_LIBS=ON + -DBUILD_TESTS=OFF + -DBUILD_C_API=ON + -DBUILD_BINDINGS_EXAMPLES=OFF + -DBUILD_EXAMPLES=OFF + -DFETCHCONTENT_BASE_DIR="${DEPS_DIR}" + -DCMAKE_INSTALL_PREFIX="${BUILD_DIR}/install" + -DCMAKE_MACOSX_RPATH=ON + -DCMAKE_INSTALL_RPATH="@loader_path" + # Add Homebrew prefix path so CMake finds libraries first + -DCMAKE_PREFIX_PATH="${HOMEBREW_PREFIX};${OPENSSL_ROOT};${LIBEVENT_ROOT}" +) + +# Add explicit OpenSSL paths +CMAKE_ARGS+=( + -DOPENSSL_ROOT_DIR="$OPENSSL_ROOT" + -DOPENSSL_CRYPTO_LIBRARY="${OPENSSL_ROOT}/lib/libcrypto.dylib" + -DOPENSSL_SSL_LIBRARY="${OPENSSL_ROOT}/lib/libssl.dylib" + 
-DOPENSSL_INCLUDE_DIR="${OPENSSL_ROOT}/include" +) + +# Add explicit libevent paths +# These override the hard-coded /usr/local paths in CMakeLists.txt +CMAKE_ARGS+=( + -DLIBEVENT_INCLUDE_DIR="${LIBEVENT_ROOT}/include" + -DLIBEVENT_CORE_LIBRARY="${LIBEVENT_ROOT}/lib/libevent_core.dylib" + -DLIBEVENT_PTHREADS_LIBRARY="${LIBEVENT_ROOT}/lib/libevent_pthreads.dylib" +) + +cmake "${CMAKE_ARGS[@]}" "${PROJECT_ROOT}" + +# Build the library +echo -e "${YELLOW}Building library...${NC}" +make -j$(sysctl -n hw.ncpu 2>/dev/null || echo 4) + +# Install to temporary directory +make install + +# Copy output files +echo -e "${YELLOW}Organizing output files...${NC}" + +# Copy all gopher-mcp dylib files (including symlinks) +# This ensures all dependencies are included +cp -P "${INSTALL_DIR}"/lib/libgopher-mcp*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libgopher_mcp_c*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy third-party dependencies +cp -P "${INSTALL_DIR}"/lib/libfmt*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libllhttp*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy headers +if [ -d "${INSTALL_DIR}/include" ]; then + cp -R "${INSTALL_DIR}/include" "${OUTPUT_DIR}/" +fi + +# Build verification app for macOS ARM64 +echo -e "${YELLOW}Building verification app...${NC}" +cd "${OUTPUT_DIR}" + +# Create a simple verification program +cat > verify_mcp.c << 'VERIFY_EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool (ARM64)\n"); + printf("========================================\n\n"); + + // Try to load the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.dylib", RTLD_NOW); + if (!handle) { + printf("✗ Failed to load C API library: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.dylib", RTLD_NOW); + if (!handle) { + printf("✗ Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("✓ 
Main library loaded successfully\n"); + } else { + printf("✓ C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("✓ mcp_init function found\n"); + } else { + printf("• mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("✓ mcp_cleanup function found\n"); + } else { + printf("• mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("✓ mcp_client_create function found (C API)\n"); + } else { + printf("• mcp_client_create function not found\n"); + } + + // Check for mcp_json_parse function (C API JSON) + void* json_func = dlsym(handle, "mcp_json_parse"); + if (json_func) { + printf("✓ mcp_json_parse function found (C API JSON)\n"); + } else { + printf("• mcp_json_parse function not found\n"); + } + + dlclose(handle); + + printf("\n✓ Verification complete\n"); + return 0; +} +VERIFY_EOF + +# Build verification tool for ARM64 +MACOSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} cc -arch arm64 -o verify_mcp verify_mcp.c -ldl +rm -f verify_mcp.c + +# Strip extended attributes to avoid security issues +xattr -cr verify_mcp 2>/dev/null || true + +echo " Created verify_mcp (macOS ARM64 compatible)" + +# Clean up build directory +cd "$PROJECT_ROOT" +echo -e "${YELLOW}Cleaning up build directory...${NC}" +rm -rf "$BUILD_DIR" + +# Verify the output +echo "" +echo -e "${YELLOW}Verifying output...${NC}" +cd "$OUTPUT_DIR" + +MAIN_LIB="" +if [ -f "libgopher-mcp.0.1.0.dylib" ]; then + MAIN_LIB="libgopher-mcp.0.1.0.dylib" +elif [ -f "libgopher-mcp.dylib" ]; then + MAIN_LIB="libgopher-mcp.dylib" +fi + +if [ -n "$MAIN_LIB" ] && [ -f "verify_mcp" ]; then + echo -e "${GREEN}✅ Build successful!${NC}" + echo "" + echo "Output files:" + echo 
"------------------------------------" + ls -lah *.dylib 2>/dev/null || true + ls -lah verify_mcp 2>/dev/null || true + echo "" + + # Show library info + echo "Library information:" + file "$MAIN_LIB" + echo "" + + # Show architecture + echo "Architecture:" + lipo -info "$MAIN_LIB" + echo "" + + # Show minimum macOS version + echo "Minimum macOS version:" + otool -l "$MAIN_LIB" | grep -A 4 "LC_BUILD_VERSION\|LC_VERSION_MIN" | head -6 + echo "" + + echo -e "${GREEN}📦 Output contains:${NC}" + echo " - $MAIN_LIB (the MCP library, ARM64)" + [ -f "libgopher-mcp.dylib" ] && echo " - libgopher-mcp.dylib (symlink for compatibility)" + [ -f "libgopher-mcp-event.0.1.0.dylib" ] && echo " - libgopher-mcp-event.0.1.0.dylib (event library)" + [ -f "libgopher-mcp-event.dylib" ] && echo " - libgopher-mcp-event.dylib (symlink for compatibility)" + [ -f "libgopher_mcp_c.0.1.0.dylib" ] && echo " - libgopher_mcp_c.0.1.0.dylib (C API for FFI bindings)" + [ -f "libgopher_mcp_c.dylib" ] && echo " - libgopher_mcp_c.dylib (symlink for compatibility)" + echo " - verify_mcp (verification tool, macOS 11.0+ ARM64 compatible)" + [ -d "include" ] && echo " - include/ (header files)" + echo "" + + # Test verification app (only if running on ARM64) + if [[ $(uname -m) == "arm64" ]]; then + echo -e "${YELLOW}Testing verification app...${NC}" + if ./verify_mcp; then + echo -e "${GREEN}✓ Verification test passed${NC}" + else + echo -e "${YELLOW}⚠ Verification test failed or crashed${NC}" + echo "This may be due to missing dependencies or library issues" + echo "The build artifacts have been created successfully" + fi + else + echo -e "${YELLOW}Skipping verification test (not running on ARM64)${NC}" + fi +else + echo -e "${RED}❌ Build failed - required files not found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}✨ Build complete!${NC}" +echo "" +echo "Output structure:" +echo " build-output/mac-arm64/" +echo " ├── libgopher-mcp.0.1.0.dylib (ARM64)" +echo " ├── libgopher-mcp.dylib (symlink)" +echo " 
├── libgopher-mcp-event.*.dylib (if built)" +echo " ├── libgopher_mcp_c.0.1.0.dylib (C API for FFI)" +echo " ├── libgopher_mcp_c.dylib (symlink)" +echo " ├── verify_mcp (C verification for ARM64)" +echo " └── include/ (headers)" +echo "" +echo "To use on Apple Silicon Macs:" +echo " 1. Copy the entire build-output/mac-arm64/ directory to the target machine" +echo " 2. For C verification: ./verify_mcp" +echo "" diff --git a/docker-mcp/build-mac-x64.sh b/docker-mcp/build-mac-x64.sh new file mode 100755 index 00000000..7918cdf5 --- /dev/null +++ b/docker-mcp/build-mac-x64.sh @@ -0,0 +1,241 @@ +#!/bin/bash + +# Build script for libgopher-mcp on macOS x86_64 +# Target: macOS 10.14+ (Mojave and later) +# Architecture: x86_64 + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN}Building libgopher-mcp for macOS x86_64${NC}" +echo -e "${GREEN}Target: macOS 10.14+ (x86_64)${NC}" +echo -e "${GREEN}========================================${NC}" + +# Get the script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Build configuration +BUILD_DIR="${PROJECT_ROOT}/build-mac-x64" +DEPS_DIR="${PROJECT_ROOT}/_deps-x64" +INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/mac-x64" +MIN_MACOS_VERSION="10.14" + +# Clean previous builds (but preserve _deps for caching) +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$BUILD_DIR" +rm -rf "$OUTPUT_DIR" +mkdir -p "$BUILD_DIR" +mkdir -p "$DEPS_DIR" +mkdir -p "$OUTPUT_DIR" + +# Navigate to build directory +cd "$BUILD_DIR" + +# Configure CMake with macOS-specific settings +echo -e "${YELLOW}Configuring CMake for macOS x86_64...${NC}" + +cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_OSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} \ + 
-DCMAKE_OSX_ARCHITECTURES=x86_64 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DFETCHCONTENT_BASE_DIR="${DEPS_DIR}" \ + -DCMAKE_INSTALL_PREFIX="${BUILD_DIR}/install" \ + -DCMAKE_MACOSX_RPATH=ON \ + -DCMAKE_INSTALL_RPATH="@loader_path" \ + "${PROJECT_ROOT}" + +# Build the library +echo -e "${YELLOW}Building library...${NC}" +make -j$(sysctl -n hw.ncpu 2>/dev/null || echo 4) + +# Install to temporary directory +make install + +# Copy output files +echo -e "${YELLOW}Organizing output files...${NC}" + +# Copy all gopher-mcp dylib files (including symlinks) +# This ensures all dependencies are included +cp -P "${INSTALL_DIR}"/lib/libgopher-mcp*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libgopher_mcp_c*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy third-party dependencies +cp -P "${INSTALL_DIR}"/lib/libfmt*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libllhttp*.dylib "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy headers +if [ -d "${INSTALL_DIR}/include" ]; then + cp -R "${INSTALL_DIR}/include" "${OUTPUT_DIR}/" +fi + +# Build verification app for macOS +echo -e "${YELLOW}Building verification app...${NC}" +cd "${OUTPUT_DIR}" + +# Create a simple verification program +cat > verify_mcp.c << 'VERIFY_EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool\n"); + printf("================================\n\n"); + + // Try to load the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.dylib", RTLD_NOW); + if (!handle) { + printf("✗ Failed to load C API library: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.dylib", RTLD_NOW); + if (!handle) { + printf("✗ Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("✓ Main library loaded 
successfully\n"); + } else { + printf("✓ C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("✓ mcp_init function found\n"); + } else { + printf("• mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("✓ mcp_cleanup function found\n"); + } else { + printf("• mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("✓ mcp_client_create function found (C API)\n"); + } else { + printf("• mcp_client_create function not found\n"); + } + + // Check for mcp_json_parse function (C API JSON) + void* json_func = dlsym(handle, "mcp_json_parse"); + if (json_func) { + printf("✓ mcp_json_parse function found (C API JSON)\n"); + } else { + printf("• mcp_json_parse function not found\n"); + } + + dlclose(handle); + + printf("\n✓ Verification complete\n"); + return 0; +} +VERIFY_EOF + +# Build verification tool +MACOSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} cc -o verify_mcp verify_mcp.c -ldl +rm -f verify_mcp.c + +# Strip extended attributes to avoid security issues +xattr -cr verify_mcp 2>/dev/null || true + +echo " Created verify_mcp (macOS compatible)" + +# Clean up build directory +cd "$PROJECT_ROOT" +echo -e "${YELLOW}Cleaning up build directory...${NC}" +rm -rf "$BUILD_DIR" + +# Verify the output +echo "" +echo -e "${YELLOW}Verifying output...${NC}" +cd "$OUTPUT_DIR" + +MAIN_LIB="" +if [ -f "libgopher-mcp.0.1.0.dylib" ]; then + MAIN_LIB="libgopher-mcp.0.1.0.dylib" +elif [ -f "libgopher-mcp.dylib" ]; then + MAIN_LIB="libgopher-mcp.dylib" +fi + +if [ -n "$MAIN_LIB" ] && [ -f "verify_mcp" ]; then + echo -e "${GREEN}✅ Build successful!${NC}" + echo "" + echo "Output files:" + echo "------------------------------------" + ls -lah *.dylib 2>/dev/null || true 
+ ls -lah verify_mcp 2>/dev/null || true + echo "" + + # Show library info + echo "Library information:" + file "$MAIN_LIB" + echo "" + + # Show minimum macOS version + echo "Minimum macOS version:" + otool -l "$MAIN_LIB" | grep -A 4 "LC_BUILD_VERSION\|LC_VERSION_MIN" | head -6 + echo "" + + echo -e "${GREEN}📦 Output contains:${NC}" + echo " - $MAIN_LIB (the MCP library)" + [ -f "libgopher-mcp.dylib" ] && echo " - libgopher-mcp.dylib (symlink for compatibility)" + [ -f "libgopher-mcp-event.0.1.0.dylib" ] && echo " - libgopher-mcp-event.0.1.0.dylib (event library)" + [ -f "libgopher-mcp-event.dylib" ] && echo " - libgopher-mcp-event.dylib (symlink for compatibility)" + [ -f "libgopher_mcp_c.0.1.0.dylib" ] && echo " - libgopher_mcp_c.0.1.0.dylib (C API for FFI bindings)" + [ -f "libgopher_mcp_c.dylib" ] && echo " - libgopher_mcp_c.dylib (symlink for compatibility)" + echo " - verify_mcp (verification tool, macOS 10.14+ compatible)" + [ -d "include" ] && echo " - include/ (header files)" + echo "" + + # Test verification app + echo -e "${YELLOW}Testing verification app...${NC}" + if ./verify_mcp; then + echo -e "${GREEN}✓ Verification test passed${NC}" + else + echo -e "${YELLOW}⚠ Verification test failed or crashed${NC}" + echo "This may be due to missing dependencies or library issues" + echo "The build artifacts have been created successfully" + fi +else + echo -e "${RED}❌ Build failed - required files not found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}✨ Build complete!${NC}" +echo "" +echo "Output structure:" +echo " build-output/mac-x64/" +echo " ├── libgopher-mcp.0.1.0.dylib" +echo " ├── libgopher-mcp.dylib (symlink)" +echo " ├── libgopher-mcp-event.*.dylib (if built)" +echo " ├── libgopher_mcp_c.0.1.0.dylib (C API for FFI)" +echo " ├── libgopher_mcp_c.dylib (symlink)" +echo " ├── verify_mcp (C verification)" +echo " └── include/ (headers)" +echo "" +echo "To use on macOS:" +echo " 1. 
Copy the entire build-output/mac-x64/ directory to the target machine" +echo " 2. For C verification: ./verify_mcp" +echo "" From e1444b579d60b7d8a1a63d3b41442ec313d2cf88 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 02:28:19 +0800 Subject: [PATCH 02/39] Add native and Docker-based Linux build scripts Native build scripts (run directly on Linux): - build-linux-x64.sh: Native build on x86_64 Linux - build-linux-arm64.sh: Native build on ARM64 Linux - Auto-detect package manager (apt, dnf, yum, pacman, apk) - Auto-install dependencies (cmake, libssl-dev, libevent-dev) Docker-based build scripts (cross-compilation): - build-linux-x64-docker.sh: Build x64 from any platform - build-linux-arm64-docker.sh: Build ARM64 from any platform - Dockerfile.linux-x64: Docker image for x64 builds - Dockerfile.linux-arm64: Docker image for ARM64 builds --- docker-mcp/Dockerfile.linux-x64 | 106 ++++++++ docker-mcp/build-linux-arm64-docker.sh | 117 +++++++++ docker-mcp/build-linux-arm64.sh | 339 +++++++++++++++++++------ docker-mcp/build-linux-x64-docker.sh | 108 ++++++++ docker-mcp/build-linux-x64.sh | 305 ++++++++++++++++++++++ 5 files changed, 900 insertions(+), 75 deletions(-) create mode 100644 docker-mcp/Dockerfile.linux-x64 create mode 100755 docker-mcp/build-linux-arm64-docker.sh mode change 100644 => 100755 docker-mcp/build-linux-arm64.sh create mode 100755 docker-mcp/build-linux-x64-docker.sh create mode 100755 docker-mcp/build-linux-x64.sh diff --git a/docker-mcp/Dockerfile.linux-x64 b/docker-mcp/Dockerfile.linux-x64 new file mode 100644 index 00000000..2234ee4d --- /dev/null +++ b/docker-mcp/Dockerfile.linux-x64 @@ -0,0 +1,106 @@ +# Dockerfile for Linux x86_64 build of libgopher-mcp +# Uses pre-built GCC image for fast x86_64 builds +FROM gcc:11 + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + cmake \ + libssl-dev \ + libevent-dev \ + pkg-config \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /build + +# Copy the entire 
project +COPY . /build/ + +# Create build directory and build +RUN mkdir -p cmake-build && cd cmake-build && \ + cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + /build && \ + make -j$(nproc) && \ + make install + +# Create output directory and organize files +RUN mkdir -p /output && \ + # Copy main library files + cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + # Copy dependency libraries that were built + cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + # Copy headers + cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + +# Build verification tool +RUN cat > /tmp/verify_mcp.c << 'EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool (Linux x86_64)\n"); + printf("===============================================\n\n"); + + // Try to load the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); + if (!handle) { + printf("Note: C API library not found: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.so", RTLD_NOW); + if (!handle) { + printf("X Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("OK Main library loaded successfully\n"); + } else { + printf("OK C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("OK mcp_init function found\n"); + } else { + printf("-- mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* 
cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("OK mcp_cleanup function found\n"); + } else { + printf("-- mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("OK mcp_client_create function found (C API)\n"); + } else { + printf("-- mcp_client_create function not found\n"); + } + + dlclose(handle); + + printf("\nOK Verification complete\n"); + return 0; +} +EOF + +RUN gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2 + +# Default command to copy files to host +CMD cp -r /output/* /host-output/ && \ + echo "x86_64 build complete!" && \ + ls -la /output/ diff --git a/docker-mcp/build-linux-arm64-docker.sh b/docker-mcp/build-linux-arm64-docker.sh new file mode 100755 index 00000000..2f890385 --- /dev/null +++ b/docker-mcp/build-linux-arm64-docker.sh @@ -0,0 +1,117 @@ +#!/bin/bash + +# Cross-compile libgopher-mcp for Linux ARM64 using Docker +# This script can run on any platform with Docker (macOS, Linux x64, Windows) +# Uses Docker buildx for ARM64 emulation + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +MAGENTA='\033[0;35m' +NC='\033[0m' + +echo -e "${MAGENTA}========================================${NC}" +echo -e "${MAGENTA}Building libgopher-mcp for Linux ARM64${NC}" +echo -e "${MAGENTA}Using Docker for cross-platform build${NC}" +echo -e "${MAGENTA}========================================${NC}" +echo "" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-arm64" + +# Check for Docker +if ! command -v docker &> /dev/null; then + echo -e "${RED}Error: Docker is not installed${NC}" + echo "Please install Docker Desktop from https://www.docker.com/products/docker-desktop/" + exit 1 +fi + +# Check for buildx support +if ! 
docker buildx version &> /dev/null; then + echo -e "${RED}Error: Docker buildx is not available${NC}" + echo "Please update Docker Desktop to a recent version" + exit 1 +fi + +# Clean and create output directory +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$OUTPUT_DIR" +mkdir -p "$OUTPUT_DIR" + +echo -e "${YELLOW}Building ARM64 library using Docker...${NC}" +echo "This may take several minutes on first run (downloading base image and dependencies)" +echo "" + +# Build using Docker buildx with ARM64 platform +docker buildx build \ + --platform linux/arm64 \ + --load \ + -t gopher-mcp:linux-arm64 \ + -f "$SCRIPT_DIR/Dockerfile.linux-arm64" \ + "$PROJECT_ROOT" + +if [ $? -ne 0 ]; then + echo -e "${RED}Docker build failed${NC}" + exit 1 +fi + +echo "" +echo -e "${YELLOW}Extracting built files...${NC}" + +# Run container and copy files to host +docker run --rm \ + --platform linux/arm64 \ + -v "$OUTPUT_DIR:/host-output" \ + gopher-mcp:linux-arm64 + +# Check results +if [ -f "$OUTPUT_DIR/libgopher-mcp.so" ] || [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + echo "" + echo -e "${GREEN}========================================${NC}" + echo -e "${GREEN}Build successful!${NC}" + echo -e "${GREEN}========================================${NC}" + echo "" + echo "Output files:" + echo "------------------------------------" + ls -lh "$OUTPUT_DIR"/*.so* 2>/dev/null || true + ls -lh "$OUTPUT_DIR"/verify_mcp 2>/dev/null || true + + # Show architecture verification + if command -v file >/dev/null 2>&1; then + echo "" + echo "Architecture verification:" + MAIN_LIB="" + if [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so.0.1.0" + elif [ -f "$OUTPUT_DIR/libgopher-mcp.so" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so" + fi + if [ -n "$MAIN_LIB" ]; then + file "$MAIN_LIB" + fi + fi + + echo "" + echo -e "${GREEN}Output structure:${NC}" + echo " build-output/linux-arm64/" + echo " ├── libgopher-mcp.so* (main MCP library)" + 
echo " ├── libgopher_mcp_c.so* (C API for FFI)" + echo " ├── libfmt.so* (formatting library)" + echo " ├── verify_mcp (verification tool)" + echo " └── include/ (header files)" + echo "" + echo "To test on Linux ARM64:" + echo " 1. Copy build-output/linux-arm64/ to ARM64 Linux system" + echo " 2. cd linux-arm64" + echo " 3. LD_LIBRARY_PATH=. ./verify_mcp" + echo "" +else + echo -e "${RED}Build failed - library not found${NC}" + echo "Contents of output directory:" + ls -la "$OUTPUT_DIR" + exit 1 +fi diff --git a/docker-mcp/build-linux-arm64.sh b/docker-mcp/build-linux-arm64.sh old mode 100644 new mode 100755 index f2a982ee..fc87fcab --- a/docker-mcp/build-linux-arm64.sh +++ b/docker-mcp/build-linux-arm64.sh @@ -1,8 +1,8 @@ #!/bin/bash # Build script for libgopher-mcp on Linux ARM64/aarch64 -# Uses Docker with pre-built GCC image for fast builds -# Works on both Intel and Apple Silicon Macs +# Target: Linux ARM64 (glibc-based distributions) +# Architecture: aarch64/arm64 set -e @@ -10,108 +10,297 @@ set -e RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' -MAGENTA='\033[0;35m' -NC='\033[0m' +NC='\033[0m' # No Color -echo -e "${MAGENTA}========================================${NC}" -echo -e "${MAGENTA}Building libgopher-mcp for Linux ARM64${NC}" -echo -e "${MAGENTA}Using Docker for cross-platform build${NC}" -echo -e "${MAGENTA}========================================${NC}" -echo "" +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN}Building libgopher-mcp for Linux ARM64${NC}" +echo -e "${GREEN}========================================${NC}" +# Get the script directory SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Build configuration +BUILD_DIR="${PROJECT_ROOT}/build-linux-arm64" +DEPS_DIR="${PROJECT_ROOT}/_deps-linux-arm64" +INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-arm64" -# Check for Docker -if ! 
command -v docker &> /dev/null; then - echo -e "${RED}Error: Docker is not installed${NC}" - echo "Please install Docker Desktop from https://www.docker.com/products/docker-desktop/" +# Detect architecture +CURRENT_ARCH=$(uname -m) +echo -e "${YELLOW}Detecting system architecture...${NC}" +echo " Current architecture: $CURRENT_ARCH" + +if [ "$CURRENT_ARCH" != "aarch64" ] && [ "$CURRENT_ARCH" != "arm64" ]; then + echo -e "${RED}Error: This script is for ARM64/aarch64 Linux${NC}" + echo "Current architecture: $CURRENT_ARCH" + echo "Use build-linux-x64.sh for x86_64 systems" + echo "Or use build-linux-arm64-docker.sh for cross-compilation via Docker" exit 1 fi -# Check for buildx support -if ! docker buildx version &> /dev/null; then - echo -e "${RED}Error: Docker buildx is not available${NC}" - echo "Please update Docker Desktop to a recent version" - exit 1 +# Detect package manager and install dependencies +echo -e "${YELLOW}Checking and installing dependencies...${NC}" + +install_dependencies() { + if command -v apt-get &> /dev/null; then + echo " Detected Debian/Ubuntu - using apt-get" + sudo apt-get update + sudo apt-get install -y \ + build-essential \ + cmake \ + libssl-dev \ + libevent-dev \ + pkg-config \ + git + elif command -v dnf &> /dev/null; then + echo " Detected Fedora/RHEL - using dnf" + sudo dnf install -y \ + gcc-c++ \ + cmake \ + openssl-devel \ + libevent-devel \ + pkgconfig \ + git + elif command -v yum &> /dev/null; then + echo " Detected CentOS/RHEL - using yum" + sudo yum install -y \ + gcc-c++ \ + cmake \ + openssl-devel \ + libevent-devel \ + pkgconfig \ + git + elif command -v pacman &> /dev/null; then + echo " Detected Arch Linux - using pacman" + sudo pacman -Sy --noconfirm \ + base-devel \ + cmake \ + openssl \ + libevent \ + pkgconf \ + git + elif command -v apk &> /dev/null; then + echo " Detected Alpine Linux - using apk" + sudo apk add --no-cache \ + build-base \ + cmake \ + openssl-dev \ + libevent-dev \ + pkgconfig \ + git + else 
+ echo -e "${RED}Error: Could not detect package manager${NC}" + echo "Please install manually: cmake, libssl-dev, libevent-dev, pkg-config, git" + exit 1 + fi +} + +# Check if dependencies are installed +if ! command -v cmake &> /dev/null || ! pkg-config --exists openssl 2>/dev/null || ! pkg-config --exists libevent 2>/dev/null; then + echo " Some dependencies are missing, installing..." + install_dependencies +else + echo " All dependencies already installed" fi -# Clean and create output directory +# Clean previous builds (but preserve _deps for caching) echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$BUILD_DIR" rm -rf "$OUTPUT_DIR" +mkdir -p "$BUILD_DIR" +mkdir -p "$DEPS_DIR" mkdir -p "$OUTPUT_DIR" -echo -e "${YELLOW}Building ARM64 library using Docker...${NC}" -echo "This may take several minutes on first run (downloading base image and dependencies)" -echo "" +# Navigate to build directory +cd "$BUILD_DIR" -# Build using Docker buildx with ARM64 platform -docker buildx build \ - --platform linux/arm64 \ - --load \ - -t gopher-mcp:linux-arm64 \ - -f "$SCRIPT_DIR/Dockerfile.linux-arm64" \ - "$PROJECT_ROOT" +# Configure CMake +echo -e "${YELLOW}Configuring CMake for Linux ARM64...${NC}" -if [ $? 
-ne 0 ]; then - echo -e "${RED}Docker build failed${NC}" - exit 1 +cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DFETCHCONTENT_BASE_DIR="${DEPS_DIR}" \ + -DCMAKE_INSTALL_PREFIX="${BUILD_DIR}/install" \ + -DCMAKE_INSTALL_RPATH="\$ORIGIN" \ + "${PROJECT_ROOT}" + +# Build the library +echo -e "${YELLOW}Building library...${NC}" +make -j$(nproc 2>/dev/null || echo 4) + +# Install to temporary directory +make install + +# Copy output files +echo -e "${YELLOW}Organizing output files...${NC}" + +# Copy all gopher-mcp shared library files (including symlinks) +cp -P "${INSTALL_DIR}"/lib/libgopher-mcp*.so* "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libgopher_mcp_c*.so* "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy third-party dependencies +cp -P "${INSTALL_DIR}"/lib/libfmt*.so* "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libllhttp*.so* "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy headers +if [ -d "${INSTALL_DIR}/include" ]; then + cp -R "${INSTALL_DIR}/include" "${OUTPUT_DIR}/" fi +# Build verification app +echo -e "${YELLOW}Building verification app...${NC}" +cd "${OUTPUT_DIR}" + +# Create a simple verification program +cat > verify_mcp.c << 'VERIFY_EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool (Linux ARM64)\n"); + printf("==============================================\n\n"); + + // Try to load the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); + if (!handle) { + printf("Note: C API library not found: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.so", RTLD_NOW); + if (!handle) { + printf("X Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("OK Main 
library loaded successfully\n"); + } else { + printf("OK C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("OK mcp_init function found\n"); + } else { + printf("-- mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("OK mcp_cleanup function found\n"); + } else { + printf("-- mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("OK mcp_client_create function found (C API)\n"); + } else { + printf("-- mcp_client_create function not found\n"); + } + + // Check for mcp_json_parse function (C API JSON) + void* json_func = dlsym(handle, "mcp_json_parse"); + if (json_func) { + printf("OK mcp_json_parse function found (C API JSON)\n"); + } else { + printf("-- mcp_json_parse function not found\n"); + } + + dlclose(handle); + + printf("\nOK Verification complete\n"); + return 0; +} +VERIFY_EOF + +# Build verification tool +gcc -o verify_mcp verify_mcp.c -ldl -O2 +rm -f verify_mcp.c + +echo " Created verify_mcp (Linux ARM64)" + +# Clean up build directory +cd "$PROJECT_ROOT" +echo -e "${YELLOW}Cleaning up build directory...${NC}" +rm -rf "$BUILD_DIR" + +# Verify the output echo "" -echo -e "${YELLOW}Extracting built files...${NC}" +echo -e "${YELLOW}Verifying output...${NC}" +cd "$OUTPUT_DIR" -# Run container and copy files to host -docker run --rm \ - --platform linux/arm64 \ - -v "$OUTPUT_DIR:/host-output" \ - gopher-mcp:linux-arm64 +MAIN_LIB="" +if [ -f "libgopher-mcp.so.0.1.0" ]; then + MAIN_LIB="libgopher-mcp.so.0.1.0" +elif [ -f "libgopher-mcp.so" ]; then + MAIN_LIB="libgopher-mcp.so" +fi -# Check results -if [ -f "$OUTPUT_DIR/libgopher-mcp.so" ] || [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then - echo "" - echo -e 
"${GREEN}========================================${NC}" +if [ -n "$MAIN_LIB" ] && [ -f "verify_mcp" ]; then echo -e "${GREEN}Build successful!${NC}" - echo -e "${GREEN}========================================${NC}" echo "" echo "Output files:" echo "------------------------------------" - ls -lh "$OUTPUT_DIR"/*.so* 2>/dev/null || true - ls -lh "$OUTPUT_DIR"/verify_mcp 2>/dev/null || true - - # Show architecture verification - if command -v file >/dev/null 2>&1; then - echo "" - echo "Architecture verification:" - MAIN_LIB="" - if [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then - MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so.0.1.0" - elif [ -f "$OUTPUT_DIR/libgopher-mcp.so" ]; then - MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so" - fi - if [ -n "$MAIN_LIB" ]; then - file "$MAIN_LIB" - fi - fi - + ls -lah *.so* 2>/dev/null || true + ls -lah verify_mcp 2>/dev/null || true echo "" - echo -e "${GREEN}Output structure:${NC}" - echo " build-output/linux-arm64/" - echo " ├── libgopher-mcp.so* (main MCP library)" - echo " ├── libgopher_mcp_c.so* (C API for FFI)" - echo " ├── libfmt.so* (formatting library)" - echo " ├── verify_mcp (verification tool)" - echo " └── include/ (header files)" + + # Show library info + echo "Library information:" + file "$MAIN_LIB" echo "" - echo "To test on Linux ARM64:" - echo " 1. Copy build-output/linux-arm64/ to ARM64 Linux system" - echo " 2. cd linux-arm64" - echo " 3. LD_LIBRARY_PATH=. 
./verify_mcp" + + echo -e "${GREEN}Output contains:${NC}" + echo " - $MAIN_LIB (the MCP library, ARM64)" + [ -f "libgopher-mcp.so" ] && echo " - libgopher-mcp.so (symlink)" + [ -f "libgopher-mcp-event.so.0.1.0" ] && echo " - libgopher-mcp-event.so.0.1.0 (event library)" + [ -f "libgopher_mcp_c.so.0.1.0" ] && echo " - libgopher_mcp_c.so.0.1.0 (C API for FFI)" + [ -f "libgopher_mcp_c.so" ] && echo " - libgopher_mcp_c.so (symlink)" + echo " - verify_mcp (verification tool)" + [ -d "include" ] && echo " - include/ (header files)" echo "" + + # Test verification app + echo -e "${YELLOW}Testing verification app...${NC}" + export LD_LIBRARY_PATH="$OUTPUT_DIR:$LD_LIBRARY_PATH" + if ./verify_mcp; then + echo -e "${GREEN}Verification test passed${NC}" + else + echo -e "${YELLOW}Verification test failed or crashed${NC}" + echo "This may be due to missing dependencies or library issues" + echo "The build artifacts have been created successfully" + fi else - echo -e "${RED}Build failed - library not found${NC}" - echo "Contents of output directory:" - ls -la "$OUTPUT_DIR" + echo -e "${RED}Build failed - required files not found${NC}" exit 1 fi + +echo "" +echo -e "${GREEN}Build complete!${NC}" +echo "" +echo "Output structure:" +echo " build-output/linux-arm64/" +echo " ├── libgopher-mcp.so.0.1.0 (ARM64)" +echo " ├── libgopher-mcp.so (symlink)" +echo " ├── libgopher-mcp-event.*.so (if built)" +echo " ├── libgopher_mcp_c.so.0.1.0 (C API for FFI)" +echo " ├── libgopher_mcp_c.so (symlink)" +echo " ├── verify_mcp (verification tool)" +echo " └── include/ (headers)" +echo "" +echo "To use:" +echo " 1. Copy the entire build-output/linux-arm64/ directory" +echo " 2. Set LD_LIBRARY_PATH to include the directory" +echo " 3. Run: LD_LIBRARY_PATH=. 
./verify_mcp" +echo "" diff --git a/docker-mcp/build-linux-x64-docker.sh b/docker-mcp/build-linux-x64-docker.sh new file mode 100755 index 00000000..577bc3d5 --- /dev/null +++ b/docker-mcp/build-linux-x64-docker.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# Cross-compile libgopher-mcp for Linux x86_64 using Docker +# This script can run on any platform with Docker (macOS, Linux ARM64, Windows) + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +MAGENTA='\033[0;35m' +NC='\033[0m' + +echo -e "${MAGENTA}========================================${NC}" +echo -e "${MAGENTA}Building libgopher-mcp for Linux x86_64${NC}" +echo -e "${MAGENTA}Using Docker for cross-platform build${NC}" +echo -e "${MAGENTA}========================================${NC}" +echo "" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-x64" + +# Check for Docker +if ! command -v docker &> /dev/null; then + echo -e "${RED}Error: Docker is not installed${NC}" + echo "Please install Docker Desktop from https://www.docker.com/products/docker-desktop/" + exit 1 +fi + +# Clean and create output directory +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$OUTPUT_DIR" +mkdir -p "$OUTPUT_DIR" + +echo -e "${YELLOW}Building x86_64 library using Docker...${NC}" +echo "This may take several minutes on first run (downloading base image and dependencies)" +echo "" + +# Build using Docker with x86_64 platform +docker build \ + --platform linux/amd64 \ + -t gopher-mcp:linux-x64 \ + -f "$SCRIPT_DIR/Dockerfile.linux-x64" \ + "$PROJECT_ROOT" + +if [ $? 
-ne 0 ]; then + echo -e "${RED}Docker build failed${NC}" + exit 1 +fi + +echo "" +echo -e "${YELLOW}Extracting built files...${NC}" + +# Run container and copy files to host +docker run --rm \ + --platform linux/amd64 \ + -v "$OUTPUT_DIR:/host-output" \ + gopher-mcp:linux-x64 + +# Check results +if [ -f "$OUTPUT_DIR/libgopher-mcp.so" ] || [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + echo "" + echo -e "${GREEN}========================================${NC}" + echo -e "${GREEN}Build successful!${NC}" + echo -e "${GREEN}========================================${NC}" + echo "" + echo "Output files:" + echo "------------------------------------" + ls -lh "$OUTPUT_DIR"/*.so* 2>/dev/null || true + ls -lh "$OUTPUT_DIR"/verify_mcp 2>/dev/null || true + + # Show architecture verification + if command -v file >/dev/null 2>&1; then + echo "" + echo "Architecture verification:" + MAIN_LIB="" + if [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so.0.1.0" + elif [ -f "$OUTPUT_DIR/libgopher-mcp.so" ]; then + MAIN_LIB="$OUTPUT_DIR/libgopher-mcp.so" + fi + if [ -n "$MAIN_LIB" ]; then + file "$MAIN_LIB" + fi + fi + + echo "" + echo -e "${GREEN}Output structure:${NC}" + echo " build-output/linux-x64/" + echo " ├── libgopher-mcp.so* (main MCP library)" + echo " ├── libgopher_mcp_c.so* (C API for FFI)" + echo " ├── libfmt.so* (formatting library)" + echo " ├── verify_mcp (verification tool)" + echo " └── include/ (header files)" + echo "" + echo "To test on Linux x86_64:" + echo " 1. Copy build-output/linux-x64/ to x86_64 Linux system" + echo " 2. cd linux-x64" + echo " 3. LD_LIBRARY_PATH=. 
./verify_mcp" + echo "" +else + echo -e "${RED}Build failed - library not found${NC}" + echo "Contents of output directory:" + ls -la "$OUTPUT_DIR" + exit 1 +fi diff --git a/docker-mcp/build-linux-x64.sh b/docker-mcp/build-linux-x64.sh new file mode 100755 index 00000000..bb96ce13 --- /dev/null +++ b/docker-mcp/build-linux-x64.sh @@ -0,0 +1,305 @@ +#!/bin/bash + +# Build script for libgopher-mcp on Linux x86_64 +# Target: Linux x86_64 (glibc-based distributions) +# Architecture: x86_64 + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN}Building libgopher-mcp for Linux x86_64${NC}" +echo -e "${GREEN}========================================${NC}" + +# Get the script directory +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" + +# Build configuration +BUILD_DIR="${PROJECT_ROOT}/build-linux-x64" +DEPS_DIR="${PROJECT_ROOT}/_deps-linux-x64" +INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-x64" + +# Detect architecture +CURRENT_ARCH=$(uname -m) +echo -e "${YELLOW}Detecting system architecture...${NC}" +echo " Current architecture: $CURRENT_ARCH" + +if [ "$CURRENT_ARCH" != "x86_64" ]; then + echo -e "${RED}Error: This script is for x86_64 Linux${NC}" + echo "Current architecture: $CURRENT_ARCH" + echo "Use build-linux-arm64.sh for ARM64 systems" + exit 1 +fi + +# Detect package manager and install dependencies +echo -e "${YELLOW}Checking and installing dependencies...${NC}" + +install_dependencies() { + if command -v apt-get &> /dev/null; then + echo " Detected Debian/Ubuntu - using apt-get" + sudo apt-get update + sudo apt-get install -y \ + build-essential \ + cmake \ + libssl-dev \ + libevent-dev \ + pkg-config \ + git + elif command -v dnf &> /dev/null; then + echo " Detected Fedora/RHEL - using dnf" + sudo dnf install 
-y \ + gcc-c++ \ + cmake \ + openssl-devel \ + libevent-devel \ + pkgconfig \ + git + elif command -v yum &> /dev/null; then + echo " Detected CentOS/RHEL - using yum" + sudo yum install -y \ + gcc-c++ \ + cmake \ + openssl-devel \ + libevent-devel \ + pkgconfig \ + git + elif command -v pacman &> /dev/null; then + echo " Detected Arch Linux - using pacman" + sudo pacman -Sy --noconfirm \ + base-devel \ + cmake \ + openssl \ + libevent \ + pkgconf \ + git + elif command -v apk &> /dev/null; then + echo " Detected Alpine Linux - using apk" + sudo apk add --no-cache \ + build-base \ + cmake \ + openssl-dev \ + libevent-dev \ + pkgconfig \ + git + else + echo -e "${RED}Error: Could not detect package manager${NC}" + echo "Please install manually: cmake, libssl-dev, libevent-dev, pkg-config, git" + exit 1 + fi +} + +# Check if dependencies are installed +if ! command -v cmake &> /dev/null || ! pkg-config --exists openssl 2>/dev/null || ! pkg-config --exists libevent 2>/dev/null; then + echo " Some dependencies are missing, installing..." 
+ install_dependencies +else + echo " All dependencies already installed" +fi + +# Clean previous builds (but preserve _deps for caching) +echo -e "${YELLOW}Cleaning previous builds...${NC}" +rm -rf "$BUILD_DIR" +rm -rf "$OUTPUT_DIR" +mkdir -p "$BUILD_DIR" +mkdir -p "$DEPS_DIR" +mkdir -p "$OUTPUT_DIR" + +# Navigate to build directory +cd "$BUILD_DIR" + +# Configure CMake +echo -e "${YELLOW}Configuring CMake for Linux x86_64...${NC}" + +cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DFETCHCONTENT_BASE_DIR="${DEPS_DIR}" \ + -DCMAKE_INSTALL_PREFIX="${BUILD_DIR}/install" \ + -DCMAKE_INSTALL_RPATH="\$ORIGIN" \ + "${PROJECT_ROOT}" + +# Build the library +echo -e "${YELLOW}Building library...${NC}" +make -j$(nproc 2>/dev/null || echo 4) + +# Install to temporary directory +make install + +# Copy output files +echo -e "${YELLOW}Organizing output files...${NC}" + +# Copy all gopher-mcp shared library files (including symlinks) +cp -P "${INSTALL_DIR}"/lib/libgopher-mcp*.so* "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libgopher_mcp_c*.so* "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy third-party dependencies +cp -P "${INSTALL_DIR}"/lib/libfmt*.so* "${OUTPUT_DIR}/" 2>/dev/null || true +cp -P "${INSTALL_DIR}"/lib/libllhttp*.so* "${OUTPUT_DIR}/" 2>/dev/null || true + +# Copy headers +if [ -d "${INSTALL_DIR}/include" ]; then + cp -R "${INSTALL_DIR}/include" "${OUTPUT_DIR}/" +fi + +# Build verification app +echo -e "${YELLOW}Building verification app...${NC}" +cd "${OUTPUT_DIR}" + +# Create a simple verification program +cat > verify_mcp.c << 'VERIFY_EOF' +#include +#include +#include + +int main() { + printf("libgopher-mcp verification tool (Linux x86_64)\n"); + printf("===============================================\n\n"); + + // Try to load 
the C API library (used for FFI bindings) + void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); + if (!handle) { + printf("Note: C API library not found: %s\n", dlerror()); + // Try the main library as fallback + handle = dlopen("./libgopher-mcp.so", RTLD_NOW); + if (!handle) { + printf("X Failed to load main library: %s\n", dlerror()); + return 1; + } + printf("OK Main library loaded successfully\n"); + } else { + printf("OK C API library loaded successfully\n"); + } + + // Check for mcp_init function + void* init_func = dlsym(handle, "mcp_init"); + if (init_func) { + printf("OK mcp_init function found\n"); + } else { + printf("-- mcp_init function not found\n"); + } + + // Check for mcp_cleanup function + void* cleanup_func = dlsym(handle, "mcp_cleanup"); + if (cleanup_func) { + printf("OK mcp_cleanup function found\n"); + } else { + printf("-- mcp_cleanup function not found\n"); + } + + // Check for mcp_client_create function (C API) + void* create_func = dlsym(handle, "mcp_client_create"); + if (create_func) { + printf("OK mcp_client_create function found (C API)\n"); + } else { + printf("-- mcp_client_create function not found\n"); + } + + // Check for mcp_json_parse function (C API JSON) + void* json_func = dlsym(handle, "mcp_json_parse"); + if (json_func) { + printf("OK mcp_json_parse function found (C API JSON)\n"); + } else { + printf("-- mcp_json_parse function not found\n"); + } + + dlclose(handle); + + printf("\nOK Verification complete\n"); + return 0; +} +VERIFY_EOF + +# Build verification tool +gcc -o verify_mcp verify_mcp.c -ldl -O2 +rm -f verify_mcp.c + +echo " Created verify_mcp (Linux x86_64)" + +# Clean up build directory +cd "$PROJECT_ROOT" +echo -e "${YELLOW}Cleaning up build directory...${NC}" +rm -rf "$BUILD_DIR" + +# Verify the output +echo "" +echo -e "${YELLOW}Verifying output...${NC}" +cd "$OUTPUT_DIR" + +MAIN_LIB="" +if [ -f "libgopher-mcp.so.0.1.0" ]; then + MAIN_LIB="libgopher-mcp.so.0.1.0" +elif [ -f "libgopher-mcp.so" ]; then + 
MAIN_LIB="libgopher-mcp.so" +fi + +if [ -n "$MAIN_LIB" ] && [ -f "verify_mcp" ]; then + echo -e "${GREEN}Build successful!${NC}" + echo "" + echo "Output files:" + echo "------------------------------------" + ls -lah *.so* 2>/dev/null || true + ls -lah verify_mcp 2>/dev/null || true + echo "" + + # Show library info + echo "Library information:" + file "$MAIN_LIB" + echo "" + + echo -e "${GREEN}Output contains:${NC}" + echo " - $MAIN_LIB (the MCP library, x86_64)" + [ -f "libgopher-mcp.so" ] && echo " - libgopher-mcp.so (symlink)" + [ -f "libgopher-mcp-event.so.0.1.0" ] && echo " - libgopher-mcp-event.so.0.1.0 (event library)" + [ -f "libgopher_mcp_c.so.0.1.0" ] && echo " - libgopher_mcp_c.so.0.1.0 (C API for FFI)" + [ -f "libgopher_mcp_c.so" ] && echo " - libgopher_mcp_c.so (symlink)" + echo " - verify_mcp (verification tool)" + [ -d "include" ] && echo " - include/ (header files)" + echo "" + + # Test verification app + echo -e "${YELLOW}Testing verification app...${NC}" + export LD_LIBRARY_PATH="$OUTPUT_DIR:$LD_LIBRARY_PATH" + if ./verify_mcp; then + echo -e "${GREEN}Verification test passed${NC}" + else + echo -e "${YELLOW}Verification test failed or crashed${NC}" + echo "This may be due to missing dependencies or library issues" + echo "The build artifacts have been created successfully" + fi +else + echo -e "${RED}Build failed - required files not found${NC}" + exit 1 +fi + +echo "" +echo -e "${GREEN}Build complete!${NC}" +echo "" +echo "Output structure:" +echo " build-output/linux-x64/" +echo " ├── libgopher-mcp.so.0.1.0 (x86_64)" +echo " ├── libgopher-mcp.so (symlink)" +echo " ├── libgopher-mcp-event.*.so (if built)" +echo " ├── libgopher_mcp_c.so.0.1.0 (C API for FFI)" +echo " ├── libgopher_mcp_c.so (symlink)" +echo " ├── verify_mcp (verification tool)" +echo " └── include/ (headers)" +echo "" +echo "To use:" +echo " 1. Copy the entire build-output/linux-x64/ directory" +echo " 2. Set LD_LIBRARY_PATH to include the directory" +echo " 3. 
Run: LD_LIBRARY_PATH=. ./verify_mcp" +echo "" From 2f6d6bc7722b4f1912df67b8a82a72613d70ed3d Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 02:33:42 +0800 Subject: [PATCH 03/39] Fix Dockerfile heredoc syntax error Heredocs don't work in Dockerfiles - replaced with printf for generating the verification tool C source code. --- docker-mcp/Dockerfile.linux-arm64 | 96 ++++++++++++------------------- docker-mcp/Dockerfile.linux-x64 | 96 ++++++++++++------------------- 2 files changed, 74 insertions(+), 118 deletions(-) diff --git a/docker-mcp/Dockerfile.linux-arm64 b/docker-mcp/Dockerfile.linux-arm64 index b187313f..cf23c09b 100644 --- a/docker-mcp/Dockerfile.linux-arm64 +++ b/docker-mcp/Dockerfile.linux-arm64 @@ -41,72 +41,50 @@ RUN mkdir -p cmake-build && cd cmake-build && \ # Create output directory and organize files RUN mkdir -p /output && \ - # Copy main library files cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ - # Copy dependency libraries that were built cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ - # Copy headers cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true # Build verification tool -RUN cat > /tmp/verify_mcp.c << 'EOF' -#include -#include -#include - -int main() { - printf("libgopher-mcp verification tool (Linux ARM64)\n"); - printf("==============================================\n\n"); - - // Try to load the C API library (used for FFI bindings) - void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); - if (!handle) { - printf("Note: C API library not found: %s\n", dlerror()); - // Try the main library as fallback - handle = dlopen("./libgopher-mcp.so", RTLD_NOW); - if (!handle) { - printf("X Failed to load main library: %s\n", dlerror()); - return 1; - } - printf("OK Main 
library loaded successfully\n"); - } else { - printf("OK C API library loaded successfully\n"); - } - - // Check for mcp_init function - void* init_func = dlsym(handle, "mcp_init"); - if (init_func) { - printf("OK mcp_init function found\n"); - } else { - printf("-- mcp_init function not found\n"); - } - - // Check for mcp_cleanup function - void* cleanup_func = dlsym(handle, "mcp_cleanup"); - if (cleanup_func) { - printf("OK mcp_cleanup function found\n"); - } else { - printf("-- mcp_cleanup function not found\n"); - } - - // Check for mcp_client_create function (C API) - void* create_func = dlsym(handle, "mcp_client_create"); - if (create_func) { - printf("OK mcp_client_create function found (C API)\n"); - } else { - printf("-- mcp_client_create function not found\n"); - } - - dlclose(handle); - - printf("\nOK Verification complete\n"); - return 0; -} -EOF - -RUN gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2 +RUN printf '%s\n' \ + '#include ' \ + '#include ' \ + '#include ' \ + '' \ + 'int main() {' \ + ' printf("libgopher-mcp verification tool (Linux ARM64)\\n");' \ + ' printf("==============================================\\n\\n");' \ + ' void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW);' \ + ' if (!handle) {' \ + ' printf("Note: C API library not found: %s\\n", dlerror());' \ + ' handle = dlopen("./libgopher-mcp.so", RTLD_NOW);' \ + ' if (!handle) {' \ + ' printf("X Failed to load main library: %s\\n", dlerror());' \ + ' return 1;' \ + ' }' \ + ' printf("OK Main library loaded successfully\\n");' \ + ' } else {' \ + ' printf("OK C API library loaded successfully\\n");' \ + ' }' \ + ' void* init_func = dlsym(handle, "mcp_init");' \ + ' if (init_func) {' \ + ' printf("OK mcp_init function found\\n");' \ + ' } else {' \ + ' printf("-- mcp_init function not found\\n");' \ + ' }' \ + ' void* cleanup_func = dlsym(handle, "mcp_cleanup");' \ + ' if (cleanup_func) {' \ + ' printf("OK mcp_cleanup function found\\n");' \ + ' } else {' \ + ' printf("-- 
mcp_cleanup function not found\\n");' \ + ' }' \ + ' dlclose(handle);' \ + ' printf("\\nOK Verification complete\\n");' \ + ' return 0;' \ + '}' > /tmp/verify_mcp.c && \ + gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2 # Default command to copy files to host CMD cp -r /output/* /host-output/ && \ diff --git a/docker-mcp/Dockerfile.linux-x64 b/docker-mcp/Dockerfile.linux-x64 index 2234ee4d..5c48509e 100644 --- a/docker-mcp/Dockerfile.linux-x64 +++ b/docker-mcp/Dockerfile.linux-x64 @@ -33,72 +33,50 @@ RUN mkdir -p cmake-build && cd cmake-build && \ # Create output directory and organize files RUN mkdir -p /output && \ - # Copy main library files cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ - # Copy dependency libraries that were built cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ - # Copy headers cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true # Build verification tool -RUN cat > /tmp/verify_mcp.c << 'EOF' -#include -#include -#include - -int main() { - printf("libgopher-mcp verification tool (Linux x86_64)\n"); - printf("===============================================\n\n"); - - // Try to load the C API library (used for FFI bindings) - void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW); - if (!handle) { - printf("Note: C API library not found: %s\n", dlerror()); - // Try the main library as fallback - handle = dlopen("./libgopher-mcp.so", RTLD_NOW); - if (!handle) { - printf("X Failed to load main library: %s\n", dlerror()); - return 1; - } - printf("OK Main library loaded successfully\n"); - } else { - printf("OK C API library loaded successfully\n"); - } - - // Check for mcp_init function - void* init_func = dlsym(handle, "mcp_init"); - if (init_func) { - printf("OK mcp_init function 
found\n");
-  } else {
-    printf("-- mcp_init function not found\n");
-  }
-
-  // Check for mcp_cleanup function
-  void* cleanup_func = dlsym(handle, "mcp_cleanup");
-  if (cleanup_func) {
-    printf("OK mcp_cleanup function found\n");
-  } else {
-    printf("-- mcp_cleanup function not found\n");
-  }
-
-  // Check for mcp_client_create function (C API)
-  void* create_func = dlsym(handle, "mcp_client_create");
-  if (create_func) {
-    printf("OK mcp_client_create function found (C API)\n");
-  } else {
-    printf("-- mcp_client_create function not found\n");
-  }
-
-  dlclose(handle);
-
-  printf("\nOK Verification complete\n");
-  return 0;
-}
-EOF
-
-RUN gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2
+RUN printf '%s\n' \
+  '#include <stdio.h>' \
+  '#include <stdlib.h>' \
+  '#include <dlfcn.h>' \
+  '' \
+  'int main() {' \
+  '  printf("libgopher-mcp verification tool (Linux x86_64)\\n");' \
+  '  printf("===============================================\\n\\n");' \
+  '  void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW);' \
+  '  if (!handle) {' \
+  '    printf("Note: C API library not found: %s\\n", dlerror());' \
+  '    handle = dlopen("./libgopher-mcp.so", RTLD_NOW);' \
+  '    if (!handle) {' \
+  '      printf("X Failed to load main library: %s\\n", dlerror());' \
+  '      return 1;' \
+  '    }' \
+  '    printf("OK Main library loaded successfully\\n");' \
+  '  } else {' \
+  '    printf("OK C API library loaded successfully\\n");' \
+  '  }' \
+  '  void* init_func = dlsym(handle, "mcp_init");' \
+  '  if (init_func) {' \
+  '    printf("OK mcp_init function found\\n");' \
+  '  } else {' \
+  '    printf("-- mcp_init function not found\\n");' \
+  '  }' \
+  '  void* cleanup_func = dlsym(handle, "mcp_cleanup");' \
+  '  if (cleanup_func) {' \
+  '    printf("OK mcp_cleanup function found\\n");' \
+  '  } else {' \
+  '    printf("-- mcp_cleanup function not found\\n");' \
+  '  }' \
+  '  dlclose(handle);' \
+  '  printf("\\nOK Verification complete\\n");' \
+  '  return 0;' \
+  '}' > /tmp/verify_mcp.c && \
+  gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2
 
 # Default 
command to copy files to host CMD cp -r /output/* /host-output/ && \ From 640779686a87b0602cd0bef0568a179166a06fa9 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 03:02:28 +0800 Subject: [PATCH 04/39] Fix C++ standard: use C++17 for std::optional support The code uses std::optional which requires C++17. Updated all build scripts and Dockerfiles to use -DCMAKE_CXX_STANDARD=17. --- docker-mcp/Dockerfile.linux-arm64 | 2 +- docker-mcp/Dockerfile.linux-x64 | 2 +- docker-mcp/build-linux-arm64.sh | 2 +- docker-mcp/build-linux-x64.sh | 2 +- docker-mcp/build-mac-arm64.sh | 2 +- docker-mcp/build-mac-x64.sh | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docker-mcp/Dockerfile.linux-arm64 b/docker-mcp/Dockerfile.linux-arm64 index cf23c09b..744468f6 100644 --- a/docker-mcp/Dockerfile.linux-arm64 +++ b/docker-mcp/Dockerfile.linux-arm64 @@ -24,7 +24,7 @@ ENV OPENSSL_SSL_LIBRARY=/usr/lib/aarch64-linux-gnu/libssl.so # Create build directory and build RUN mkdir -p cmake-build && cd cmake-build && \ cmake -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_CXX_STANDARD=17 \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DBUILD_SHARED_LIBS=ON \ -DBUILD_STATIC_LIBS=ON \ diff --git a/docker-mcp/Dockerfile.linux-x64 b/docker-mcp/Dockerfile.linux-x64 index 5c48509e..c1b22780 100644 --- a/docker-mcp/Dockerfile.linux-x64 +++ b/docker-mcp/Dockerfile.linux-x64 @@ -19,7 +19,7 @@ COPY . 
/build/ # Create build directory and build RUN mkdir -p cmake-build && cd cmake-build && \ cmake -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_CXX_STANDARD=17 \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DBUILD_SHARED_LIBS=ON \ -DBUILD_STATIC_LIBS=ON \ diff --git a/docker-mcp/build-linux-arm64.sh b/docker-mcp/build-linux-arm64.sh index fc87fcab..67d87ecb 100755 --- a/docker-mcp/build-linux-arm64.sh +++ b/docker-mcp/build-linux-arm64.sh @@ -120,7 +120,7 @@ echo -e "${YELLOW}Configuring CMake for Linux ARM64...${NC}" cmake \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_CXX_STANDARD=17 \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DBUILD_SHARED_LIBS=ON \ -DBUILD_STATIC_LIBS=ON \ diff --git a/docker-mcp/build-linux-x64.sh b/docker-mcp/build-linux-x64.sh index bb96ce13..60a043fb 100755 --- a/docker-mcp/build-linux-x64.sh +++ b/docker-mcp/build-linux-x64.sh @@ -119,7 +119,7 @@ echo -e "${YELLOW}Configuring CMake for Linux x86_64...${NC}" cmake \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_CXX_STANDARD=17 \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ -DBUILD_SHARED_LIBS=ON \ -DBUILD_STATIC_LIBS=ON \ diff --git a/docker-mcp/build-mac-arm64.sh b/docker-mcp/build-mac-arm64.sh index afbbf387..0d678a2d 100755 --- a/docker-mcp/build-mac-arm64.sh +++ b/docker-mcp/build-mac-arm64.sh @@ -128,7 +128,7 @@ export PKG_CONFIG_PATH="${HOMEBREW_PREFIX}/lib/pkgconfig:${OPENSSL_ROOT}/lib/pkg CMAKE_ARGS=( -DCMAKE_BUILD_TYPE=Release - -DCMAKE_CXX_STANDARD=14 + -DCMAKE_CXX_STANDARD=17 -DCMAKE_OSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_POSITION_INDEPENDENT_CODE=ON diff --git a/docker-mcp/build-mac-x64.sh b/docker-mcp/build-mac-x64.sh index 7918cdf5..1e0c9d77 100755 --- a/docker-mcp/build-mac-x64.sh +++ b/docker-mcp/build-mac-x64.sh @@ -44,7 +44,7 @@ echo -e "${YELLOW}Configuring CMake for macOS x86_64...${NC}" cmake \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=14 \ + -DCMAKE_CXX_STANDARD=17 \ 
-DCMAKE_OSX_DEPLOYMENT_TARGET=${MIN_MACOS_VERSION} \ -DCMAKE_OSX_ARCHITECTURES=x86_64 \ -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ From 04fdcad6627a69413f4a10f7beacc82b6777086a Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 03:08:36 +0800 Subject: [PATCH 05/39] Fix missing #include in buffer.h Added missing include for std::runtime_error used in readLEInt() and peekLEInt() methods. --- include/mcp/buffer.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/mcp/buffer.h b/include/mcp/buffer.h index fd1e50f5..50951293 100644 --- a/include/mcp/buffer.h +++ b/include/mcp/buffer.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include From 808be8265b62e2a3a53b614582973520e42c5ca5 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 27 Jan 2026 03:21:04 +0800 Subject: [PATCH 06/39] Add .dockerignore to reduce Docker build context size Excludes build directories, IDE files, node_modules, and other artifacts that shouldn't be copied into the Docker container. --- .dockerignore | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..c83ac31c --- /dev/null +++ b/.dockerignore @@ -0,0 +1,30 @@ +# Build directories +build*/ +cmake-build*/ +_deps*/ +install_prefix_dir/ +build-output/ + +# IDE and editor files +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# Git +.git/ + +# OS files +.DS_Store +Thumbs.db + +# Test artifacts +tests/**/build*/ + +# SDK node_modules +sdk/**/node_modules/ + +# Temporary files +*.tmp +*.log From d4668c3dfc8e543c393757f4312cdf6d6e682935 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 11:27:12 +0800 Subject: [PATCH 07/39] Fix Windows ARM64 cross-compilation build Add conditional compilation guards for llhttp-dependent code and fix Windows compatibility issues to enable cross-compilation for Windows ARM64 using LLVM-MinGW. 
Changes: - Move https_sse_transport_factory.cc to conditional compilation when LLHTTP_FOUND to avoid linker errors when llhttp is disabled - Add MCP_HAS_LLHTTP guards in http_codec_filter.cc around LLHttpParser usage with graceful fallback logging when llhttp unavailable - Add MCP_HAS_LLHTTP guards in mcp_c_api_connection.cc for HTTP+SSE transport case with proper error handling - Add POSIX type definitions (mode_t, pid_t, ssize_t, useconds_t) in compat.h for MSVC compatibility - Add Windows socket libraries (ws2_32, crypt32, iphlpapi) linking - Fix ioctl/ioctlsocket usage in connection_impl.cc for Windows - Add missing mutex include in logger.h - Add Docker build infrastructure for Windows ARM64 cross-compilation --- CMakeLists.txt | 11 +- docker-mcp/Dockerfile.windows-arm64-llvm | 156 ++++++++++++++++ docker-mcp/Dockerfile.windows-arm64-simple | 33 ++++ docker-mcp/Dockerfile.windows-x64 | 197 +++++++++++++++++++++ docker-mcp/build-windows-arm64.sh | 94 ++++++++++ docker-mcp/build-windows-x64.sh | 79 +++++++++ docker-mcp/windows-x64/README.md | 71 ++++++++ docker-mcp/windows-x64/verify_mcp.c | 193 ++++++++++++++++++++ include/mcp/logging/logger.h | 1 + src/c_api/mcp_c_api_connection.cc | 10 ++ src/filter/http_codec_filter.cc | 15 ++ src/network/connection_impl.cc | 5 + 12 files changed, 864 insertions(+), 1 deletion(-) create mode 100644 docker-mcp/Dockerfile.windows-arm64-llvm create mode 100644 docker-mcp/Dockerfile.windows-arm64-simple create mode 100644 docker-mcp/Dockerfile.windows-x64 create mode 100755 docker-mcp/build-windows-arm64.sh create mode 100755 docker-mcp/build-windows-x64.sh create mode 100644 docker-mcp/windows-x64/README.md create mode 100644 docker-mcp/windows-x64/verify_mcp.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 923f52cc..a91eee3b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -429,7 +429,6 @@ set(MCP_CORE_SOURCES src/transport/ssl_context.cc src/transport/ssl_state_machine.cc src/transport/ssl_transport_socket.cc - 
src/transport/https_sse_transport_factory.cc src/transport/transport_socket_state_machine.cc src/transport/tcp_transport_socket_state_machine.cc src/transport/tcp_transport_socket.cc @@ -484,6 +483,7 @@ if(LLHTTP_FOUND) list(APPEND MCP_HTTP_SOURCES src/http/llhttp_parser.cc src/transport/http_sse_transport_socket.cc # HTTP+SSE with layered architecture + src/transport/https_sse_transport_factory.cc # HTTPS+SSE factory ) endif() @@ -651,6 +651,15 @@ else() endforeach() endif() +# Add Windows-specific socket libraries +if(WIN32) + foreach(lib_target ${REAL_TARGETS}) + if(TARGET ${lib_target}) + target_link_libraries(${lib_target} PRIVATE ws2_32 crypt32 iphlpapi) + endif() + endforeach() +endif() + # Create Gopher MCP Echo Advanced libraries if(BUILD_STATIC_LIBS) add_library(gopher-mcp-echo-advanced-static STATIC ${MCP_ECHO_ADVANCED_SOURCES}) diff --git a/docker-mcp/Dockerfile.windows-arm64-llvm b/docker-mcp/Dockerfile.windows-arm64-llvm new file mode 100644 index 00000000..e7be4969 --- /dev/null +++ b/docker-mcp/Dockerfile.windows-arm64-llvm @@ -0,0 +1,156 @@ +# Dockerfile for cross-compiling libgopher-mcp for Windows ARM64 +# Uses LLVM-MinGW for ARM64 Windows cross-compilation +FROM ubuntu:22.04 + +ENV DEBIAN_FRONTEND=noninteractive + +# Install basic build tools +RUN apt-get update && apt-get install -y \ + build-essential \ + cmake \ + git \ + curl \ + wget \ + xz-utils \ + ca-certificates \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* + +# Download and install LLVM-MinGW (includes ARM64 Windows support) +WORKDIR /tools +RUN wget -q https://github.com/mstorsjo/llvm-mingw/releases/download/20231128/llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz && \ + tar -xf llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz && \ + rm llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64.tar.xz && \ + mv llvm-mingw-20231128-ucrt-ubuntu-20.04-x86_64 llvm-mingw + +# Add LLVM-MinGW to PATH +ENV PATH="/tools/llvm-mingw/bin:${PATH}" + +# Create dependencies directory +WORKDIR /deps + 
+# Download and cross-compile OpenSSL for Windows ARM64 +# Note: OpenSSL 1.1.x doesn't have native mingw-arm64 target, so we configure manually +# We only build libraries (build_libs target) to avoid resource file architecture mismatch in apps +RUN wget -q https://www.openssl.org/source/openssl-1.1.1w.tar.gz && \ + tar xzf openssl-1.1.1w.tar.gz && \ + cd openssl-1.1.1w && \ + ./Configure mingw64 \ + --cross-compile-prefix=aarch64-w64-mingw32- \ + --prefix=/deps/openssl \ + no-asm \ + no-shared && \ + make -j$(nproc) build_libs && \ + mkdir -p /deps/openssl/lib /deps/openssl/include && \ + cp libssl.a libcrypto.a /deps/openssl/lib/ && \ + cp -r include/openssl /deps/openssl/include/ && \ + cd .. && rm -rf openssl-1.1.1w* + +# Download and cross-compile libevent for Windows ARM64 +RUN wget -q https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz && \ + tar xzf libevent-2.1.12-stable.tar.gz && \ + cd libevent-2.1.12-stable && \ + mkdir build && cd build && \ + cmake .. \ + -DCMAKE_SYSTEM_NAME=Windows \ + -DCMAKE_SYSTEM_PROCESSOR=aarch64 \ + -DCMAKE_C_COMPILER=/tools/llvm-mingw/bin/aarch64-w64-mingw32-clang \ + -DCMAKE_CXX_COMPILER=/tools/llvm-mingw/bin/aarch64-w64-mingw32-clang++ \ + -DCMAKE_RC_COMPILER=/tools/llvm-mingw/bin/aarch64-w64-mingw32-windres \ + -DCMAKE_AR=/tools/llvm-mingw/bin/aarch64-w64-mingw32-ar \ + -DCMAKE_RANLIB=/tools/llvm-mingw/bin/aarch64-w64-mingw32-ranlib \ + -DCMAKE_BUILD_TYPE=Release \ + -DEVENT__DISABLE_OPENSSL=ON \ + -DEVENT__DISABLE_BENCHMARK=ON \ + -DEVENT__DISABLE_TESTS=ON \ + -DEVENT__DISABLE_SAMPLES=ON \ + -DEVENT__LIBRARY_TYPE=STATIC \ + -DCMAKE_INSTALL_PREFIX=/deps/libevent && \ + make -j$(nproc) && \ + make install && \ + cd ../.. && rm -rf libevent-2.1.12-stable* + +# Set working directory for main build +WORKDIR /build + +# Copy the entire project +COPY . 
/build/ + +# Copy Windows-specific files +COPY docker-mcp/windows-x64 /docker/windows-x64 + +# Create Windows ARM64 toolchain file +RUN echo 'set(CMAKE_SYSTEM_NAME Windows)' > /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_SYSTEM_PROCESSOR aarch64)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_C_COMPILER /tools/llvm-mingw/bin/aarch64-w64-mingw32-clang)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_CXX_COMPILER /tools/llvm-mingw/bin/aarch64-w64-mingw32-clang++)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_RC_COMPILER /tools/llvm-mingw/bin/aarch64-w64-mingw32-windres)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_AR /tools/llvm-mingw/bin/aarch64-w64-mingw32-ar)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_RANLIB /tools/llvm-mingw/bin/aarch64-w64-mingw32-ranlib)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH /tools/llvm-mingw/aarch64-w64-mingw32 /deps/openssl /deps/libevent)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_SHARED_LIBRARY_PREFIX "")' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_SHARED_LIBRARY_SUFFIX ".dll")' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_EXECUTABLE_SUFFIX ".exe")' >> /build/toolchain-win-arm64.cmake && \ + echo '' >> /build/toolchain-win-arm64.cmake && \ + echo '# OpenSSL paths' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(OPENSSL_ROOT_DIR /deps/openssl)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(OPENSSL_INCLUDE_DIR /deps/openssl/include)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(OPENSSL_CRYPTO_LIBRARY /deps/openssl/lib/libcrypto.a)' >> /build/toolchain-win-arm64.cmake && \ + echo 
'set(OPENSSL_SSL_LIBRARY /deps/openssl/lib/libssl.a)' >> /build/toolchain-win-arm64.cmake && \ + echo '' >> /build/toolchain-win-arm64.cmake && \ + echo '# libevent paths' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(LIBEVENT_INCLUDE_DIRS /deps/libevent/include)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(LIBEVENT_LIBRARIES /deps/libevent/lib/libevent_core.a /deps/libevent/lib/libevent_extra.a)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(LIBEVENT_FOUND TRUE)' >> /build/toolchain-win-arm64.cmake + +# Build the library for Windows ARM64 +RUN mkdir -p cmake-build && cd cmake-build && \ + cmake \ + -DCMAKE_TOOLCHAIN_FILE=/build/toolchain-win-arm64.cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=17 \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=OFF \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DMCP_USE_LLHTTP=OFF \ + -DMCP_USE_NGHTTP2=OFF \ + -DCMAKE_INSTALL_PREFIX=/install \ + -DCMAKE_SYSTEM_NAME=Windows \ + /build && \ + make -j$(nproc) VERBOSE=1 || make -j1 VERBOSE=1 && \ + make install || true + +# Create output directory +RUN mkdir -p /output + +# Copy library files +RUN find /install -name "*.dll" -exec cp {} /output/ \; 2>/dev/null || true && \ + find /build/cmake-build -name "*.dll" -exec cp {} /output/ \; 2>/dev/null || true && \ + find /install -name "*.dll.a" -exec sh -c 'cp "$1" "/output/$(basename "$1" .dll.a).lib"' _ {} \; 2>/dev/null || true && \ + find /build/cmake-build -name "*.dll.a" -exec sh -c 'cp "$1" "/output/$(basename "$1" .dll.a).lib"' _ {} \; 2>/dev/null || true + +# Copy headers +RUN mkdir -p /output/include && \ + cp -r /build/include/mcp /output/include/ 2>/dev/null || true + +# Build verification tool using LLVM-MinGW +WORKDIR /output +RUN aarch64-w64-mingw32-clang -o verify_mcp.exe /docker/windows-x64/verify_mcp.c \ + -O2 \ + -lpsapi \ + -static || echo "Verification tool build skipped" + +# Strip symbols to reduce size +RUN 
aarch64-w64-mingw32-strip --strip-unneeded *.dll 2>/dev/null || true && \ + aarch64-w64-mingw32-strip --strip-unneeded *.exe 2>/dev/null || true + +# List final output +RUN echo "=== Output files ===" && ls -la /output/ + +CMD ["/bin/bash"] diff --git a/docker-mcp/Dockerfile.windows-arm64-simple b/docker-mcp/Dockerfile.windows-arm64-simple new file mode 100644 index 00000000..4d896e5a --- /dev/null +++ b/docker-mcp/Dockerfile.windows-arm64-simple @@ -0,0 +1,33 @@ +# Simple Windows ARM64 build - note this creates stub DLLs +# Real ARM64 support requires LLVM-MinGW or Visual Studio +FROM busybox:latest + +WORKDIR /output + +# Create stub files for demonstration +RUN echo "Windows ARM64 support requires LLVM-MinGW toolchain" > README_ARM64.txt && \ + echo "This is a placeholder build. For real ARM64 support:" >> README_ARM64.txt && \ + echo "1. Use Dockerfile.windows-arm64-llvm (downloads LLVM-MinGW)" >> README_ARM64.txt && \ + echo "2. Or build on Windows with Visual Studio 2022 ARM64 tools" >> README_ARM64.txt && \ + echo "" >> README_ARM64.txt && \ + echo "To build with real ARM64 support:" >> README_ARM64.txt && \ + echo " ./docker-mcp/build-windows-arm64.sh" >> README_ARM64.txt && \ + echo " (without --stub flag)" >> README_ARM64.txt && \ + touch gopher-mcp_arm64_STUB.dll && \ + touch gopher_mcp_c_arm64_STUB.dll && \ + touch verify_mcp_arm64_STUB.exe + +# Create a batch file that explains the situation +RUN echo '@echo off' > /output/test_arm64.bat && \ + echo 'echo =====================================' >> /output/test_arm64.bat && \ + echo 'echo libgopher-mcp Windows ARM64 Build Status' >> /output/test_arm64.bat && \ + echo 'echo =====================================' >> /output/test_arm64.bat && \ + echo 'echo.' >> /output/test_arm64.bat && \ + echo 'echo This is a STUB build for demonstration.' >> /output/test_arm64.bat && \ + echo 'echo For real ARM64 support, use one of:' >> /output/test_arm64.bat && \ + echo 'echo 1. 
LLVM-MinGW toolchain (Dockerfile.windows-arm64-llvm)' >> /output/test_arm64.bat && \ + echo 'echo 2. Visual Studio 2022 with ARM64 tools' >> /output/test_arm64.bat && \ + echo 'echo.' >> /output/test_arm64.bat && \ + echo 'type README_ARM64.txt' >> /output/test_arm64.bat + +CMD ["sh", "-c", "ls -la /output/"] diff --git a/docker-mcp/Dockerfile.windows-x64 b/docker-mcp/Dockerfile.windows-x64 new file mode 100644 index 00000000..339de556 --- /dev/null +++ b/docker-mcp/Dockerfile.windows-x64 @@ -0,0 +1,197 @@ +# Dockerfile for cross-compiling libgopher-mcp for Windows x86_64 +# Uses MinGW-w64 for cross-compilation +FROM ubuntu:22.04 + +# Prevent interactive prompts during package installation +ENV DEBIAN_FRONTEND=noninteractive + +# Install build dependencies and MinGW-w64 for cross-compilation +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + build-essential \ + cmake \ + gcc-mingw-w64-x86-64 \ + g++-mingw-w64-x86-64 \ + wget \ + unzip \ + git \ + pkg-config \ + ca-certificates \ + curl \ + perl \ + && rm -rf /var/lib/apt/lists/* + +# Configure MinGW to use POSIX threading model (required for std::thread, std::mutex) +RUN update-alternatives --set x86_64-w64-mingw32-gcc /usr/bin/x86_64-w64-mingw32-gcc-posix && \ + update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix + +# Create directories +WORKDIR /deps + +# Build OpenSSL from source for MinGW-w64 +# Note: Do NOT set CC/CXX env vars here as OpenSSL configure adds cross-compile prefix itself +RUN wget -q https://www.openssl.org/source/openssl-1.1.1w.tar.gz && \ + tar xzf openssl-1.1.1w.tar.gz && \ + cd openssl-1.1.1w && \ + ./Configure mingw64 \ + --cross-compile-prefix=x86_64-w64-mingw32- \ + --prefix=/deps/openssl \ + no-asm \ + shared && \ + make -j$(nproc) && \ + make install_sw && \ + cd .. 
&& rm -rf openssl-1.1.1w* + +# Download and cross-compile llhttp for Windows +RUN git clone --depth 1 --branch release/v9.2.1 https://github.com/nodejs/llhttp.git && \ + cd llhttp && \ + mkdir build && cd build && \ + cmake .. \ + -DCMAKE_SYSTEM_NAME=Windows \ + -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \ + -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \ + -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DBUILD_STATIC_LIBS=ON \ + -DCMAKE_INSTALL_PREFIX=/deps/llhttp && \ + make -j$(nproc) && \ + make install && \ + cd ../.. && rm -rf llhttp + +# Download and cross-compile libevent for Windows +RUN wget -q https://github.com/libevent/libevent/releases/download/release-2.1.12-stable/libevent-2.1.12-stable.tar.gz && \ + tar xzf libevent-2.1.12-stable.tar.gz && \ + cd libevent-2.1.12-stable && \ + mkdir build && cd build && \ + cmake .. \ + -DCMAKE_SYSTEM_NAME=Windows \ + -DCMAKE_C_COMPILER=x86_64-w64-mingw32-gcc \ + -DCMAKE_CXX_COMPILER=x86_64-w64-mingw32-g++ \ + -DCMAKE_RC_COMPILER=x86_64-w64-mingw32-windres \ + -DCMAKE_FIND_ROOT_PATH=/usr/x86_64-w64-mingw32 \ + -DCMAKE_BUILD_TYPE=Release \ + -DEVENT__DISABLE_OPENSSL=ON \ + -DEVENT__DISABLE_BENCHMARK=ON \ + -DEVENT__DISABLE_TESTS=ON \ + -DEVENT__DISABLE_SAMPLES=ON \ + -DEVENT__LIBRARY_TYPE=STATIC \ + -DCMAKE_INSTALL_PREFIX=/deps/libevent && \ + make -j$(nproc) && \ + make install && \ + cd ../.. && rm -rf libevent-2.1.12-stable* + +# Verify dependencies are built +RUN echo "=== Checking dependencies ===" && \ + ls -la /deps/openssl/lib/ && \ + ls -la /deps/openssl/bin/ && \ + ls -la /deps/libevent/lib/ + +# Set working directory for main build +WORKDIR /build + +# Copy the entire project +COPY . 
/build/ + +# Copy Windows-specific CMake toolchain and files +COPY docker-mcp/windows-x64 /docker/windows-x64 + +# Create CMake toolchain file for MinGW cross-compilation +RUN echo 'set(CMAKE_SYSTEM_NAME Windows)' > /build/toolchain.cmake && \ + echo 'set(CMAKE_SYSTEM_PROCESSOR x86_64)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_C_COMPILER x86_64-w64-mingw32-gcc)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_CXX_COMPILER x86_64-w64-mingw32-g++)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_RC_COMPILER x86_64-w64-mingw32-windres)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH /usr/x86_64-w64-mingw32 /deps/openssl /deps/libevent)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_SHARED_LIBRARY_PREFIX "lib")' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_SHARED_LIBRARY_SUFFIX ".dll")' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_EXECUTABLE_SUFFIX ".exe")' >> /build/toolchain.cmake && \ + echo '' >> /build/toolchain.cmake && \ + echo '# Windows defines' >> /build/toolchain.cmake && \ + echo 'add_definitions(-D_WIN32 -DWIN32 -D_WIN32_WINNT=0x0601 -DWINVER=0x0601)' >> /build/toolchain.cmake && \ + echo '' >> /build/toolchain.cmake && \ + echo '# OpenSSL paths' >> /build/toolchain.cmake && \ + echo 'set(OPENSSL_ROOT_DIR /deps/openssl)' >> /build/toolchain.cmake && \ + echo 'set(OPENSSL_INCLUDE_DIR /deps/openssl/include)' >> /build/toolchain.cmake && \ + echo 'set(OPENSSL_CRYPTO_LIBRARY /deps/openssl/lib/libcrypto.dll.a)' >> /build/toolchain.cmake && \ + echo 'set(OPENSSL_SSL_LIBRARY /deps/openssl/lib/libssl.dll.a)' >> /build/toolchain.cmake && \ + echo 'set(OPENSSL_FOUND TRUE)' >> /build/toolchain.cmake && \ + echo '' >> /build/toolchain.cmake && \ + echo '# libevent paths' >> 
/build/toolchain.cmake && \ + echo 'set(LIBEVENT_INCLUDE_DIRS /deps/libevent/include)' >> /build/toolchain.cmake && \ + echo 'set(LIBEVENT_LIBRARIES /deps/libevent/lib/libevent_core.a /deps/libevent/lib/libevent_extra.a)' >> /build/toolchain.cmake && \ + echo 'set(LIBEVENT_FOUND TRUE)' >> /build/toolchain.cmake && \ + echo 'include_directories(/deps/libevent/include)' >> /build/toolchain.cmake && \ + echo '' >> /build/toolchain.cmake && \ + echo '# llhttp paths' >> /build/toolchain.cmake && \ + echo 'set(LLHTTP_INCLUDE_DIR /deps/llhttp/include)' >> /build/toolchain.cmake && \ + echo 'set(LLHTTP_LIBRARY /deps/llhttp/lib/libllhttp.a)' >> /build/toolchain.cmake && \ + echo 'include_directories(/deps/llhttp/include)' >> /build/toolchain.cmake && \ + echo '' >> /build/toolchain.cmake && \ + echo '# Windows-specific linking - add socket libraries' >> /build/toolchain.cmake && \ + echo 'link_libraries(ws2_32 crypt32 iphlpapi)' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lws2_32 -lcrypt32 -liphlpapi")' >> /build/toolchain.cmake && \ + echo 'set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -lws2_32 -lcrypt32 -liphlpapi")' >> /build/toolchain.cmake + +# Build the library +RUN mkdir -p cmake-build && cd cmake-build && \ + cmake \ + -DCMAKE_TOOLCHAIN_FILE=/build/toolchain.cmake \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_STANDARD=17 \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=OFF \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + -DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DMCP_USE_LLHTTP=ON \ + -DLLHTTP_INCLUDE_DIR=/deps/llhttp/include \ + -DLLHTTP_LIBRARY=/deps/llhttp/lib/libllhttp.a \ + -DMCP_USE_NGHTTP2=OFF \ + -DCMAKE_INSTALL_PREFIX=/install \ + -DCMAKE_SYSTEM_NAME=Windows \ + /build 2>&1 | tee /tmp/cmake_output.log && \ + make -j$(nproc) VERBOSE=1 2>&1 | tee /tmp/make_output.log || \ + (echo "=== Build failed, showing logs ===" && cat /tmp/cmake_output.log && cat /tmp/make_output.log && 
make -j1 VERBOSE=1) && \ + make install || true + +# Create output directory +RUN mkdir -p /output + +# Copy library files +RUN find /install -name "*.dll" -exec cp {} /output/ \; 2>/dev/null || true && \ + find /build/cmake-build -name "*.dll" -exec cp {} /output/ \; 2>/dev/null || true && \ + find /install -name "*.dll.a" -exec sh -c 'cp "$1" "/output/$(basename "$1" .dll.a).lib"' _ {} \; 2>/dev/null || true && \ + find /build/cmake-build -name "*.dll.a" -exec sh -c 'cp "$1" "/output/$(basename "$1" .dll.a).lib"' _ {} \; 2>/dev/null || true + +# Copy OpenSSL DLLs (needed at runtime) +RUN cp /deps/openssl/bin/*.dll /output/ 2>/dev/null || \ + cp /deps/openssl/lib/*.dll /output/ 2>/dev/null || true + +# Copy headers +RUN mkdir -p /output/include && \ + cp -r /build/include/mcp /output/include/ 2>/dev/null || true + +# Build verification tool +WORKDIR /output +RUN x86_64-w64-mingw32-gcc -o verify_mcp.exe /docker/windows-x64/verify_mcp.c \ + -O2 \ + -lpsapi \ + -static-libgcc \ + -static || echo "Verification tool build skipped" + +# Strip symbols to reduce size +RUN x86_64-w64-mingw32-strip --strip-unneeded *.dll 2>/dev/null || true && \ + x86_64-w64-mingw32-strip --strip-unneeded *.exe 2>/dev/null || true + +# List final output +RUN echo "=== Output files ===" && ls -la /output/ + +# Keep logs for debugging +RUN cp /tmp/*.log /output/ 2>/dev/null || true + +# Default command +CMD ["/bin/bash"] diff --git a/docker-mcp/build-windows-arm64.sh b/docker-mcp/build-windows-arm64.sh new file mode 100755 index 00000000..44616e71 --- /dev/null +++ b/docker-mcp/build-windows-arm64.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +# Build script for libgopher-mcp on Windows ARM64 +# Cross-compiles using LLVM-MinGW in Docker + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +CYAN='\033[0;36m' +NC='\033[0m' + +echo -e "${CYAN}========================================${NC}" +echo -e "${CYAN}Building libgopher-mcp for Windows ARM64${NC}" +echo -e 
"${CYAN}========================================${NC}" +echo "" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/windows-arm64" + +# Clean and create output directory +rm -rf "$OUTPUT_DIR" +mkdir -p "$OUTPUT_DIR" + +echo -e "${YELLOW}Building Windows ARM64 DLL...${NC}" +echo "Note: This creates ARM64 binaries for Windows on ARM devices" +echo "" + +# Check if user wants fast (stub) build or full build +if [ "${1}" = "--stub" ] || [ "${1}" = "--fast" ]; then + echo "Using fast stub build (not real ARM64, for testing only)..." + DOCKERFILE="Dockerfile.windows-arm64-simple" +else + echo "Using LLVM-MinGW for real ARM64 support (downloads ~100MB toolchain)..." + echo "Tip: Use '$0 --stub' for a quick stub build" + DOCKERFILE="Dockerfile.windows-arm64-llvm" +fi + +# Build using selected Dockerfile +docker build \ + -t gopher-mcp:windows-arm64 \ + -f "$SCRIPT_DIR/$DOCKERFILE" \ + "$PROJECT_ROOT" + +if [ $? 
-ne 0 ]; then + echo -e "${RED}Docker build failed${NC}" + exit 1 +fi + +echo -e "${YELLOW}Extracting built files...${NC}" + +# Create temporary container and copy files +CONTAINER_ID=$(docker create gopher-mcp:windows-arm64) +docker cp "$CONTAINER_ID:/output/gopher-mcp.dll" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher-mcp.lib" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher_mcp_c.dll" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher_mcp_c.lib" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/verify_mcp.exe" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/include" "$OUTPUT_DIR/" 2>/dev/null || true +docker rm "$CONTAINER_ID" > /dev/null + +# Check results +if ls "$OUTPUT_DIR"/*.dll >/dev/null 2>&1 || ls "$OUTPUT_DIR"/*.exe >/dev/null 2>&1; then + echo -e "${GREEN}Build successful!${NC}" + echo "" + echo "Files created:" + ls -lh "$OUTPUT_DIR" + + if command -v file >/dev/null 2>&1; then + echo "" + echo "File information:" + for f in "$OUTPUT_DIR"/*.dll "$OUTPUT_DIR"/*.exe; do + [ -f "$f" ] && file "$f" + done + fi + + echo "" + echo -e "${GREEN}Windows ARM64 build complete!${NC}" + echo "" + echo "To test on Windows ARM64 (Surface Pro X, Windows Dev Kit 2023, etc.):" + echo " 1. Copy build-output/windows-arm64/ to Windows ARM64 device" + echo " 2. Run verify_mcp.exe" + echo "" + echo "Note: These binaries are specifically for ARM64 Windows." + echo "They will NOT run on x86/x64 Windows machines." 
+else + echo -e "${RED}Build failed - no DLL or EXE files found${NC}" + echo "Contents of output directory:" + ls -la "$OUTPUT_DIR" + exit 1 +fi diff --git a/docker-mcp/build-windows-x64.sh b/docker-mcp/build-windows-x64.sh new file mode 100755 index 00000000..69a25d8c --- /dev/null +++ b/docker-mcp/build-windows-x64.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +# Build script for libgopher-mcp on Windows x64 +# Cross-compiles using MinGW-w64 in Docker + +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +echo -e "${BLUE}=======================================${NC}" +echo -e "${BLUE}Building libgopher-mcp for Windows x64${NC}" +echo -e "${BLUE}=======================================${NC}" +echo "" + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" +OUTPUT_DIR="${PROJECT_ROOT}/build-output/windows-x64" + +# Clean and create output directory +rm -rf "$OUTPUT_DIR" +mkdir -p "$OUTPUT_DIR" + +echo -e "${YELLOW}Building Windows x64 DLL using MinGW-w64...${NC}" + +# Build using the Dockerfile +docker build \ + -t gopher-mcp:windows-x64 \ + -f "$SCRIPT_DIR/Dockerfile.windows-x64" \ + "$PROJECT_ROOT" + +if [ $? 
-ne 0 ]; then + echo -e "${RED}Docker build failed${NC}" + exit 1 +fi + +echo -e "${YELLOW}Extracting built files...${NC}" + +# Create temporary container and copy files +CONTAINER_ID=$(docker create gopher-mcp:windows-x64) +docker cp "$CONTAINER_ID:/output/gopher-mcp.dll" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher-mcp.lib" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher_mcp_c.dll" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/gopher_mcp_c.lib" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/verify_mcp.exe" "$OUTPUT_DIR/" 2>/dev/null || true +docker cp "$CONTAINER_ID:/output/include" "$OUTPUT_DIR/" 2>/dev/null || true +docker rm "$CONTAINER_ID" > /dev/null + +# Check results +if ls "$OUTPUT_DIR"/*.dll >/dev/null 2>&1 || ls "$OUTPUT_DIR"/*.exe >/dev/null 2>&1; then + echo -e "${GREEN}Build successful!${NC}" + echo "" + echo "Files created:" + ls -lh "$OUTPUT_DIR" + + if command -v file >/dev/null 2>&1; then + echo "" + echo "File information:" + for f in "$OUTPUT_DIR"/*.dll "$OUTPUT_DIR"/*.exe; do + [ -f "$f" ] && file "$f" + done + fi + + echo "" + echo -e "${GREEN}Windows x64 build complete!${NC}" + echo "" + echo "To test on Windows:" + echo " 1. Copy build-output/windows-x64/ to Windows system" + echo " 2. Run verify_mcp.exe" +else + echo -e "${RED}Build failed - no DLL or EXE files found${NC}" + echo "Contents of output directory:" + ls -la "$OUTPUT_DIR" + exit 1 +fi diff --git a/docker-mcp/windows-x64/README.md b/docker-mcp/windows-x64/README.md new file mode 100644 index 00000000..392de5de --- /dev/null +++ b/docker-mcp/windows-x64/README.md @@ -0,0 +1,71 @@ +# Windows Cross-Compilation Support + +This directory contains files for cross-compiling libgopher-mcp for Windows. + +## Build Scripts + +### Windows x64 (AMD64) +```bash +./docker-mcp/build-windows-x64.sh +``` + +Uses MinGW-w64 to cross-compile for 64-bit Windows (x86_64). 
+ +### Windows ARM64 +```bash +./docker-mcp/build-windows-arm64.sh +``` + +Uses LLVM-MinGW to cross-compile for Windows on ARM64 devices (Surface Pro X, Windows Dev Kit 2023, etc.). + +For a faster stub build (testing only): +```bash +./docker-mcp/build-windows-arm64.sh --stub +``` + +## Output + +Built files are placed in: +- `build-output/windows-x64/` for x64 builds +- `build-output/windows-arm64/` for ARM64 builds + +### Output Contents +- `gopher-mcp.dll` - Main MCP library +- `gopher_mcp_c.dll` - C API library (for FFI bindings) +- `*.lib` - Import libraries for linking +- `verify_mcp.exe` - Verification tool +- `include/` - Header files + +## Dependencies + +The Windows builds require: +- **OpenSSL** - Pre-built or cross-compiled +- **libevent** - Cross-compiled for Windows + +These are automatically downloaded and built in the Docker containers. + +## Testing on Windows + +1. Copy the entire `build-output/windows-x64/` (or `windows-arm64/`) directory to a Windows machine +2. Run `verify_mcp.exe` to test the libraries +3. Use the DLLs in your application + +## Notes + +- HTTP/2 support (nghttp2) is disabled in Windows builds +- llhttp support is disabled in Windows builds +- The libraries use Windows native threading (Win32 threads) +- SSL/TLS is provided by OpenSSL (included DLLs) + +## Troubleshooting + +### Missing DLL errors +Ensure all DLLs are in the same directory or in the system PATH. + +### Architecture mismatch +Make sure you're using the correct build for your Windows architecture: +- x64 builds only work on 64-bit Windows (x86_64) +- ARM64 builds only work on Windows ARM64 devices + +### Dependency checking +Use `dumpbin /dependents gopher-mcp.dll` on Windows to see required dependencies. 
diff --git a/docker-mcp/windows-x64/verify_mcp.c b/docker-mcp/windows-x64/verify_mcp.c new file mode 100644 index 00000000..2f03c7fc --- /dev/null +++ b/docker-mcp/windows-x64/verify_mcp.c @@ -0,0 +1,193 @@ +/** + * Verification tool for libgopher-mcp on Windows + * + * This tool verifies that the MCP library was built correctly by: + * 1. Loading the DLL + * 2. Checking for exported symbols + * 3. Verifying basic functionality + */ + +#include +#include +#include + +#ifdef _WIN32 +#include +#include +#else +#include +#endif + +#define GREEN "\033[0;32m" +#define RED "\033[0;31m" +#define YELLOW "\033[1;33m" +#define NC "\033[0m" + +#ifdef _WIN32 +typedef HMODULE lib_handle_t; +#define LIB_OPEN(name) LoadLibraryA(name) +#define LIB_SYM(handle, name) GetProcAddress(handle, name) +#define LIB_CLOSE(handle) FreeLibrary(handle) +#define LIB_ERROR() "LoadLibrary failed" +#else +typedef void* lib_handle_t; +#define LIB_OPEN(name) dlopen(name, RTLD_NOW) +#define LIB_SYM(handle, name) dlsym(handle, name) +#define LIB_CLOSE(handle) dlclose(handle) +#define LIB_ERROR() dlerror() +#endif + +void print_header(void) { + printf("\n"); + printf("===============================================\n"); + printf(" libgopher-mcp Verification Tool (Windows)\n"); + printf("===============================================\n"); + printf("\n"); +} + +void print_system_info(void) { + printf("System Information:\n"); + printf("-------------------\n"); + +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + + const char* arch = "Unknown"; + switch (si.wProcessorArchitecture) { + case PROCESSOR_ARCHITECTURE_AMD64: + arch = "x86_64 (AMD64)"; + break; + case PROCESSOR_ARCHITECTURE_ARM64: + arch = "ARM64"; + break; + case PROCESSOR_ARCHITECTURE_INTEL: + arch = "x86 (Intel)"; + break; + case PROCESSOR_ARCHITECTURE_ARM: + arch = "ARM"; + break; + } + printf(" Architecture: %s\n", arch); + printf(" Processors: %lu\n", si.dwNumberOfProcessors); + + OSVERSIONINFOA osvi; + osvi.dwOSVersionInfoSize = 
sizeof(OSVERSIONINFOA); + if (GetVersionExA(&osvi)) { + printf(" Windows Version: %lu.%lu (Build %lu)\n", + osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.dwBuildNumber); + } +#else + printf(" Platform: Non-Windows (POSIX)\n"); +#endif + printf("\n"); +} + +int check_library(const char* lib_name, const char* display_name) { + printf("Checking %s...\n", display_name); + + lib_handle_t handle = LIB_OPEN(lib_name); + if (!handle) { + printf(" " RED "X" NC " Failed to load: %s\n", LIB_ERROR()); + return 0; + } + + printf(" " GREEN "OK" NC " Library loaded successfully\n"); + + // Check for common MCP symbols + const char* symbols[] = { + "mcp_init", + "mcp_cleanup", + "mcp_client_create", + "mcp_client_destroy", + "mcp_client_connect", + "mcp_server_create", + "mcp_server_destroy", + "mcp_json_parse", + "mcp_json_stringify", + NULL + }; + + int found = 0; + int total = 0; + + for (int i = 0; symbols[i] != NULL; i++) { + total++; + void* sym = LIB_SYM(handle, symbols[i]); + if (sym) { + printf(" " GREEN "OK" NC " %s found\n", symbols[i]); + found++; + } else { + printf(" " YELLOW "--" NC " %s not found\n", symbols[i]); + } + } + + LIB_CLOSE(handle); + + printf("\n Summary: %d/%d symbols found\n", found, total); + return found > 0 ? 
1 : 0; +} + +int main(int argc, char* argv[]) { + print_header(); + print_system_info(); + + int success = 0; + + // Try to load the main library + printf("Library Verification:\n"); + printf("---------------------\n"); + + // Try different library names + const char* main_libs[] = { + "gopher-mcp.dll", + "./gopher-mcp.dll", + "libgopher-mcp.dll", + "./libgopher-mcp.dll", + NULL + }; + + for (int i = 0; main_libs[i] != NULL; i++) { + if (check_library(main_libs[i], "Main Library")) { + success = 1; + break; + } + } + + printf("\n"); + + // Try to load the C API library + const char* c_api_libs[] = { + "gopher_mcp_c.dll", + "./gopher_mcp_c.dll", + "libgopher_mcp_c.dll", + "./libgopher_mcp_c.dll", + NULL + }; + + for (int i = 0; c_api_libs[i] != NULL; i++) { + if (check_library(c_api_libs[i], "C API Library")) { + success = 1; + break; + } + } + + printf("\n"); + printf("===============================================\n"); + if (success) { + printf(" " GREEN "Verification PASSED" NC "\n"); + printf(" At least one library loaded successfully.\n"); + } else { + printf(" " RED "Verification FAILED" NC "\n"); + printf(" No libraries could be loaded.\n"); + printf("\n"); + printf(" Troubleshooting:\n"); + printf(" 1. Ensure DLL files are in the same directory\n"); + printf(" 2. Check that all dependencies are present\n"); + printf(" 3. Run 'dumpbin /dependents *.dll' to check deps\n"); + } + printf("===============================================\n"); + printf("\n"); + + return success ? 
0 : 1; +} diff --git a/include/mcp/logging/logger.h b/include/mcp/logging/logger.h index 7b731e35..5e43ae3c 100644 --- a/include/mcp/logging/logger.h +++ b/include/mcp/logging/logger.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include diff --git a/src/c_api/mcp_c_api_connection.cc b/src/c_api/mcp_c_api_connection.cc index 610d9be8..db7a5459 100644 --- a/src/c_api/mcp_c_api_connection.cc +++ b/src/c_api/mcp_c_api_connection.cc @@ -9,7 +9,9 @@ #include "mcp/c_api/mcp_c_raii.h" #include "mcp/network/connection_impl.h" #include "mcp/network/server_listener_impl.h" +#if MCP_HAS_LLHTTP #include "mcp/transport/http_sse_transport_socket.h" +#endif #include "mcp/transport/ssl_transport_socket.h" #include "mcp/transport/stdio_transport_socket.h" #include "mcp/transport/tcp_transport_socket_state_machine.h" @@ -50,6 +52,7 @@ mcp_connection_t mcp_connection_create_client_ex( std::unique_ptr transport_socket; switch (transport_config->type) { +#if MCP_HAS_LLHTTP case MCP_TRANSPORT_HTTP_SSE: { // TODO: Full implementation of HTTP+SSE configuration // Note: Use configuration from transport_config @@ -67,6 +70,13 @@ mcp_connection_t mcp_connection_create_client_ex( ); break; } +#else + case MCP_TRANSPORT_HTTP_SSE: { + ErrorManager::SetError(MCP_ERROR_NOT_IMPLEMENTED, + "HTTP+SSE transport requires llhttp library"); + return nullptr; + } +#endif case MCP_TRANSPORT_STDIO: { // TODO: Full implementation of stdio configuration diff --git a/src/filter/http_codec_filter.cc b/src/filter/http_codec_filter.cc index 7008363a..9379b3fc 100644 --- a/src/filter/http_codec_filter.cc +++ b/src/filter/http_codec_filter.cc @@ -16,7 +16,9 @@ #include #include +#if MCP_HAS_LLHTTP #include "mcp/http/llhttp_parser.h" +#endif #include "mcp/logging/log_macros.h" #include "mcp/network/connection.h" @@ -88,11 +90,18 @@ HttpCodecFilter::HttpCodecFilter(MessageCallbacks& callbacks, // Create HTTP/1.1 parser using llhttp // Parser type depends on mode: REQUEST for server, RESPONSE for 
client +#if MCP_HAS_LLHTTP http::HttpParserType parser_type = is_server_ ? http::HttpParserType::REQUEST : http::HttpParserType::RESPONSE; parser_ = std::make_unique( parser_type, parser_callbacks_.get(), http::HttpVersion::HTTP_1_1); +#else + // llhttp not available - parser will be null + // HTTP codec operations will fail at runtime + GOPHER_LOG_WARN( + "HttpCodecFilter created without llhttp support - HTTP parsing disabled"); +#endif // Initialize message encoder message_encoder_ = std::make_unique(*this); @@ -133,11 +142,17 @@ HttpCodecFilter::HttpCodecFilter(const filter::FilterCreationContext& context, parser_callbacks_ = std::make_unique(*this); // Create HTTP/1.1 parser using llhttp +#if MCP_HAS_LLHTTP http::HttpParserType parser_type = is_server_ ? http::HttpParserType::REQUEST : http::HttpParserType::RESPONSE; parser_ = std::make_unique( parser_type, parser_callbacks_.get(), http::HttpVersion::HTTP_1_1); +#else + // llhttp not available - parser will be null + GOPHER_LOG_WARN( + "HttpCodecFilter created without llhttp support - HTTP parsing disabled"); +#endif // Initialize message encoder message_encoder_ = std::make_unique(*this); diff --git a/src/network/connection_impl.cc b/src/network/connection_impl.cc index 4a4b01df..020b9725 100644 --- a/src/network/connection_impl.cc +++ b/src/network/connection_impl.cc @@ -1443,8 +1443,13 @@ void ConnectionImpl::doWrite() { enableFileEvents(static_cast(event::FileReadyType::Read)); // Debug: Check if socket has pending data if (socket_) { +#ifdef _WIN32 + u_long bytes_available = 0; + if (ioctlsocket(socket_->ioHandle().fd(), FIONREAD, &bytes_available) == 0) { +#else int bytes_available = 0; if (ioctl(socket_->ioHandle().fd(), FIONREAD, &bytes_available) == 0) { +#endif GOPHER_LOG_TRACE("doWrite(): socket has {} bytes pending", bytes_available); } From 501b564fd7339f55aef8c793747876f2a4e6fedf Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 11:40:44 +0800 Subject: [PATCH 08/39] Add GitHub 
Actions workflow for macOS builds Add workflow to build macOS ARM64 and x64 binaries using GitHub-hosted runners with Apple Silicon (macos-14) and Intel (macos-13) respectively. Changes: - Use existing build scripts (build-mac-arm64.sh, build-mac-x64.sh) - Upload build artifacts with 30-day retention - Trigger on push to dev_cross_build or manual dispatch --- .github/workflows/build-macos.yml | 51 +++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 .github/workflows/build-macos.yml diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml new file mode 100644 index 00000000..973e3206 --- /dev/null +++ b/.github/workflows/build-macos.yml @@ -0,0 +1,51 @@ +name: Build macOS + +on: + push: + branches: [ dev_cross_build ] + workflow_dispatch: # Allow manual triggering + +jobs: + build-macos-arm64: + name: macOS ARM64 (Apple Silicon) + runs-on: macos-14 # M1 runner + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Build using script + run: | + chmod +x docker-mcp/build-mac-arm64.sh + ./docker-mcp/build-mac-arm64.sh + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: gopher-mcp-macos-arm64 + path: build-output/mac-arm64/ + retention-days: 30 + + build-macos-x64: + name: macOS x64 (Intel) + runs-on: macos-13 # Intel runner + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Build using script + run: | + chmod +x docker-mcp/build-mac-x64.sh + ./docker-mcp/build-mac-x64.sh + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: gopher-mcp-macos-x64 + path: build-output/mac-x64/ + retention-days: 30 From 6836f71c4661fc75bd64a3cbe78e0088bf84d3d8 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 11:45:31 +0800 Subject: [PATCH 09/39] Add comprehensive build workflow for all platforms Add GitHub Actions workflow to build libgopher-mcp 
for all supported platforms (Linux, Windows, macOS) in both x64 and ARM64 architectures. Features: - Linux x64/ARM64: Docker-based builds with QEMU for ARM64 - Windows x64/ARM64: Docker cross-compilation with MinGW/LLVM-MinGW - macOS x64/ARM64: Native builds on Intel (macos-13) and M1 (macos-14) - Automatic release creation with versioned tags - Build artifacts uploaded for 7 days - Release archives (.tar.gz for Linux/macOS, .zip for Windows) - Build summary report with platform matrix --- .github/workflows/build-all.yml | 358 ++++++++++++++++++++++++++++++++ 1 file changed, 358 insertions(+) create mode 100644 .github/workflows/build-all.yml diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml new file mode 100644 index 00000000..4ca5f504 --- /dev/null +++ b/.github/workflows/build-all.yml @@ -0,0 +1,358 @@ +name: Build All Platforms + +on: + push: + branches: [ dev_cross_build ] + workflow_dispatch: + inputs: + build_type: + description: 'Build type' + required: false + default: 'Release' + type: choice + options: + - Release + - Debug + version: + description: 'Library version (e.g., 0.1.0)' + required: false + default: '0.1.0' + type: string + create_release: + description: 'Create GitHub Release' + required: false + default: true + type: boolean + +permissions: + contents: write + packages: write + +env: + BUILD_TYPE: ${{ github.event.inputs.build_type || 'Release' }} + LIB_VERSION: ${{ github.event.inputs.version || '0.1.0' }} + +jobs: + # Linux builds (x64 and ARM64) + build-linux: + name: Build Linux ${{ matrix.arch }} + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + arch: [x64, arm64] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: linux/arm64 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + platforms: linux/amd64,linux/arm64 + + - name: Make build script 
executable + run: chmod +x docker-mcp/build-linux-${{ matrix.arch }}.sh + + - name: Build library + run: | + echo "Building for Linux ${{ matrix.arch }}..." + ./docker-mcp/build-linux-${{ matrix.arch }}.sh + + - name: Verify build output + run: | + echo "Checking build output..." + ls -la build-output/linux-${{ matrix.arch }}/ + # Check for main library + if ls build-output/linux-${{ matrix.arch }}/libgopher-mcp*.so* 1>/dev/null 2>&1; then + echo "✓ Library found" + else + echo "Error: Library not found!" + exit 1 + fi + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: linux-${{ matrix.arch }}-libs + path: build-output/linux-${{ matrix.arch }}/* + retention-days: 7 + + # Windows builds (x64 and ARM64) + build-windows: + name: Build Windows ${{ matrix.arch }} + runs-on: ubuntu-latest # Using Linux with Docker for cross-compilation + strategy: + fail-fast: false + matrix: + arch: [x64, arm64] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Make build script executable + run: chmod +x docker-mcp/build-windows-${{ matrix.arch }}.sh + + - name: Build library + run: | + echo "Building for Windows ${{ matrix.arch }}..." + ./docker-mcp/build-windows-${{ matrix.arch }}.sh + + - name: Verify build output + run: | + echo "Checking build output..." + ls -la build-output/windows-${{ matrix.arch }}/ + # Check for main library + if ls build-output/windows-${{ matrix.arch }}/gopher-mcp*.dll 1>/dev/null 2>&1; then + echo "✓ DLL found" + else + echo "Error: DLL not found!" 
+ exit 1 + fi + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: windows-${{ matrix.arch }}-libs + path: build-output/windows-${{ matrix.arch }}/* + retention-days: 7 + + # macOS builds (x64 and ARM64) + build-macos: + name: Build macOS ${{ matrix.arch }} + runs-on: ${{ matrix.runner }} + strategy: + fail-fast: false + matrix: + include: + - arch: x64 + runner: macos-13 # Intel runner + - arch: arm64 + runner: macos-14 # Apple Silicon runner (M1) + + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Make build script executable + run: chmod +x docker-mcp/build-mac-${{ matrix.arch }}.sh + + - name: Build library + run: | + echo "Building for macOS ${{ matrix.arch }}..." + ./docker-mcp/build-mac-${{ matrix.arch }}.sh + + - name: Verify build output + run: | + echo "Checking build output..." + ls -la build-output/mac-${{ matrix.arch }}/ + # Check for main library + if ls build-output/mac-${{ matrix.arch }}/libgopher-mcp*.dylib 1>/dev/null 2>&1; then + echo "✓ Library found" + else + echo "Error: Library not found!" + exit 1 + fi + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: macos-${{ matrix.arch }}-libs + path: build-output/mac-${{ matrix.arch }}/* + retention-days: 7 + + # Create a release with all artifacts + create-release: + name: Create Release + runs-on: ubuntu-latest + needs: [build-linux, build-windows, build-macos] + if: | + (github.event_name == 'push' && github.ref == 'refs/heads/dev_cross_build') || + (github.event_name == 'workflow_dispatch' && github.event.inputs.create_release == 'true') + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts/ + + - name: Create archive for each platform + run: | + cd artifacts + + # Linux x64 + if [ -d "linux-x64-libs" ]; then + tar -czf ../libgopher-mcp-linux-x64.tar.gz -C linux-x64-libs . 
+ echo "✓ Created libgopher-mcp-linux-x64.tar.gz" + fi + + # Linux ARM64 + if [ -d "linux-arm64-libs" ]; then + tar -czf ../libgopher-mcp-linux-arm64.tar.gz -C linux-arm64-libs . + echo "✓ Created libgopher-mcp-linux-arm64.tar.gz" + fi + + # Windows x64 + if [ -d "windows-x64-libs" ]; then + zip -r ../libgopher-mcp-windows-x64.zip windows-x64-libs/* + echo "✓ Created libgopher-mcp-windows-x64.zip" + fi + + # Windows ARM64 + if [ -d "windows-arm64-libs" ]; then + zip -r ../libgopher-mcp-windows-arm64.zip windows-arm64-libs/* + echo "✓ Created libgopher-mcp-windows-arm64.zip" + fi + + # macOS x64 + if [ -d "macos-x64-libs" ]; then + tar -czf ../libgopher-mcp-macos-x64.tar.gz -C macos-x64-libs . + echo "✓ Created libgopher-mcp-macos-x64.tar.gz" + fi + + # macOS ARM64 + if [ -d "macos-arm64-libs" ]; then + tar -czf ../libgopher-mcp-macos-arm64.tar.gz -C macos-arm64-libs . + echo "✓ Created libgopher-mcp-macos-arm64.tar.gz" + fi + + cd .. + echo "" + echo "=== Release Archives ===" + ls -la *.tar.gz *.zip 2>/dev/null || true + + - name: Generate build report + run: | + cat > BUILD_REPORT.md << EOF + # Build Report - libgopher-mcp + + ## Build Information + - **Date:** $(date -u +"%Y-%m-%d %H:%M:%S UTC") + - **Commit:** ${{ github.sha }} + - **Branch:** ${{ github.ref_name }} + - **Build Type:** ${{ env.BUILD_TYPE }} + - **Library Version:** ${{ env.LIB_VERSION }} + + ## Platforms Built + + ### Linux + - ✅ x64 (Ubuntu 20.04+, GLIBC 2.31+) + - ✅ ARM64 (Ubuntu 20.04+, GLIBC 2.31+) + + ### Windows + - ✅ x64 (Windows 7+, MinGW-w64) + - ✅ ARM64 (Windows 10+, LLVM-MinGW) + + ### macOS + - ✅ x64 (macOS 10.15+, Intel) + - ✅ ARM64 (macOS 11.0+, Apple Silicon) + + ## Package Contents + + Each platform package includes: + - Main library (\`.so\`, \`.dll\`, or \`.dylib\`) + - C API library for FFI bindings + - Import library (\`.lib\` for Windows) + - Header files (\`include/\`) + - Verification tool + + ## Usage + + ### Linux/macOS + \`\`\`bash + tar -xzf libgopher-mcp-.tar.gz + 
./verify_mcp # Test the library + \`\`\` + + ### Windows + \`\`\`powershell + Expand-Archive libgopher-mcp-windows-.zip + .\verify_mcp.exe # Test the library + \`\`\` + EOF + + - name: Generate release tag + id: tag + run: | + VERSION="${{ env.LIB_VERSION }}" + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + TAG="v${VERSION}-${TIMESTAMP}" + echo "tag=${TAG}" >> $GITHUB_OUTPUT + echo "Release tag: ${TAG}" + + - name: Create GitHub Release + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ steps.tag.outputs.tag }} + name: libgopher-mcp ${{ steps.tag.outputs.tag }} + body_path: BUILD_REPORT.md + draft: false + prerelease: false + files: | + libgopher-mcp-linux-x64.tar.gz + libgopher-mcp-linux-arm64.tar.gz + libgopher-mcp-windows-x64.zip + libgopher-mcp-windows-arm64.zip + libgopher-mcp-macos-x64.tar.gz + libgopher-mcp-macos-arm64.tar.gz + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # Summary job + summary: + name: Build Summary + runs-on: ubuntu-latest + needs: [build-linux, build-windows, build-macos] + if: always() + + steps: + - name: Check build results + run: | + echo "## Build Summary" + echo "" + + if [ "${{ needs.build-linux.result }}" == "success" ]; then + echo "✅ Linux builds: SUCCESS" + else + echo "❌ Linux builds: FAILED" + fi + + if [ "${{ needs.build-windows.result }}" == "success" ]; then + echo "✅ Windows builds: SUCCESS" + else + echo "❌ Windows builds: FAILED" + fi + + if [ "${{ needs.build-macos.result }}" == "success" ]; then + echo "✅ macOS builds: SUCCESS" + else + echo "❌ macOS builds: FAILED" + fi + + echo "" + echo "### Platform Matrix" + echo "| Platform | x64 | ARM64 |" + echo "|----------|-----|-------|" + echo "| Linux | ✓ | ✓ |" + echo "| Windows | ✓ | ✓ |" + echo "| macOS | ✓ | ✓ |" + echo "" + echo "Total configurations: 6" From 129e4c89d624741034ae8d277b37398046542fb4 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 11:47:49 +0800 Subject: [PATCH 10/39] Remove redundant macOS workflow The build-all.yml workflow 
now covers macOS builds for both x64 and ARM64. --- .github/workflows/build-macos.yml | 51 ------------------------------- 1 file changed, 51 deletions(-) delete mode 100644 .github/workflows/build-macos.yml diff --git a/.github/workflows/build-macos.yml b/.github/workflows/build-macos.yml deleted file mode 100644 index 973e3206..00000000 --- a/.github/workflows/build-macos.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Build macOS - -on: - push: - branches: [ dev_cross_build ] - workflow_dispatch: # Allow manual triggering - -jobs: - build-macos-arm64: - name: macOS ARM64 (Apple Silicon) - runs-on: macos-14 # M1 runner - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Build using script - run: | - chmod +x docker-mcp/build-mac-arm64.sh - ./docker-mcp/build-mac-arm64.sh - - - name: Upload artifacts - uses: actions/upload-artifact@v4 - with: - name: gopher-mcp-macos-arm64 - path: build-output/mac-arm64/ - retention-days: 30 - - build-macos-x64: - name: macOS x64 (Intel) - runs-on: macos-13 # Intel runner - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Build using script - run: | - chmod +x docker-mcp/build-mac-x64.sh - ./docker-mcp/build-mac-x64.sh - - - name: Upload artifacts - uses: actions/upload-artifact@v4 - with: - name: gopher-mcp-macos-x64 - path: build-output/mac-x64/ - retention-days: 30 From 98b5e763aa913710a3af53c8e61f863d66d00e9a Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 21:22:01 +0800 Subject: [PATCH 11/39] Add Linux builds to use Docker-based scripts Use build-linux-*-docker.sh scripts for cross-compilation on GitHub runners since the native scripts require matching host architecture. 
--- .github/workflows/build-all.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 4ca5f504..72a88306 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -59,12 +59,12 @@ jobs: platforms: linux/amd64,linux/arm64 - name: Make build script executable - run: chmod +x docker-mcp/build-linux-${{ matrix.arch }}.sh + run: chmod +x docker-mcp/build-linux-${{ matrix.arch }}-docker.sh - name: Build library run: | echo "Building for Linux ${{ matrix.arch }}..." - ./docker-mcp/build-linux-${{ matrix.arch }}.sh + ./docker-mcp/build-linux-${{ matrix.arch }}-docker.sh - name: Verify build output run: | From 20cb6a5e4b138cc86d5691b308c51029131ab4ef Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 21:24:57 +0800 Subject: [PATCH 12/39] Improve CMake compatibility with yaml-cpp on newer CMake Add CMAKE_POLICY_VERSION_MINIMUM=3.5 to macOS build scripts to fix compatibility issue with yaml-cpp's older CMakeLists.txt on GitHub runners with newer CMake versions. 
--- docker-mcp/build-mac-arm64.sh | 2 ++ docker-mcp/build-mac-x64.sh | 1 + 2 files changed, 3 insertions(+) diff --git a/docker-mcp/build-mac-arm64.sh b/docker-mcp/build-mac-arm64.sh index 0d678a2d..99716c80 100755 --- a/docker-mcp/build-mac-arm64.sh +++ b/docker-mcp/build-mac-arm64.sh @@ -144,6 +144,8 @@ CMAKE_ARGS=( -DCMAKE_INSTALL_RPATH="@loader_path" # Add Homebrew prefix path so CMake finds libraries first -DCMAKE_PREFIX_PATH="${HOMEBREW_PREFIX};${OPENSSL_ROOT};${LIBEVENT_ROOT}" + # Fix compatibility with older CMakeLists.txt in dependencies (yaml-cpp) + -DCMAKE_POLICY_VERSION_MINIMUM=3.5 ) # Add explicit OpenSSL paths diff --git a/docker-mcp/build-mac-x64.sh b/docker-mcp/build-mac-x64.sh index 1e0c9d77..e474827e 100755 --- a/docker-mcp/build-mac-x64.sh +++ b/docker-mcp/build-mac-x64.sh @@ -58,6 +58,7 @@ cmake \ -DCMAKE_INSTALL_PREFIX="${BUILD_DIR}/install" \ -DCMAKE_MACOSX_RPATH=ON \ -DCMAKE_INSTALL_RPATH="@loader_path" \ + -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ "${PROJECT_ROOT}" # Build the library From 96cad75e6b7a3d1876cebd74344ef6d770b2cfea Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 22:52:11 +0800 Subject: [PATCH 13/39] Update macOS x64 runner from macos-13 to macos-14 The macOS-13 runner images have been retired by GitHub Actions. Update to use macos-14 for x64 builds, which uses cross-compilation via CMAKE_OSX_ARCHITECTURES=x86_64. 
--- .github/workflows/build-all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 72a88306..0f8e332e 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -139,7 +139,7 @@ jobs: matrix: include: - arch: x64 - runner: macos-13 # Intel runner + runner: macos-14 # Apple Silicon runner (cross-compile for x64) - arch: arm64 runner: macos-14 # Apple Silicon runner (M1) From 8aebebe251b8c1253b5623b90fedbd26c4a87ceb Mon Sep 17 00:00:00 2001 From: RahulHere Date: Wed, 28 Jan 2026 22:58:28 +0800 Subject: [PATCH 14/39] Add cross-compilation for Linux ARM64 and macOS x64 Linux ARM64: - Use cross-compilation instead of QEMU emulation (much faster) - Add Dockerfile.linux-arm64-cross with aarch64-linux-gnu toolchain - Install libssl-dev:arm64 and zlib1g-dev:arm64 for cross-compilation - Remove QEMU setup from GitHub Actions (no longer needed) macOS x64: - Install x86_64 Homebrew and dependencies when cross-compiling on Apple Silicon - Point CMake to use /usr/local (x86_64) instead of /opt/homebrew (ARM64) - Add workflow step to install x86_64 OpenSSL and libevent --- .github/workflows/build-all.yml | 29 +++-- docker-mcp/Dockerfile.linux-arm64-cross | 159 ++++++++++++++++++++++++ docker-mcp/build-linux-arm64-docker.sh | 31 ++--- docker-mcp/build-mac-x64.sh | 37 ++++++ 4 files changed, 225 insertions(+), 31 deletions(-) create mode 100644 docker-mcp/Dockerfile.linux-arm64-cross diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 0f8e332e..4e75d947 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -34,6 +34,7 @@ env: jobs: # Linux builds (x64 and ARM64) + # ARM64 uses cross-compilation (no QEMU emulation) for fast builds build-linux: name: Build Linux ${{ matrix.arch }} runs-on: ubuntu-latest @@ -48,16 +49,6 @@ jobs: with: submodules: recursive - - name: Set up QEMU - uses: 
docker/setup-qemu-action@v3 - with: - platforms: linux/arm64 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - platforms: linux/amd64,linux/arm64 - - name: Make build script executable run: chmod +x docker-mcp/build-linux-${{ matrix.arch }}-docker.sh @@ -149,6 +140,24 @@ jobs: with: submodules: recursive + - name: Install x86_64 dependencies (for cross-compilation) + if: matrix.arch == 'x64' + run: | + echo "Installing x86_64 Homebrew and dependencies for cross-compilation..." + # Install x86_64 Homebrew if not present + if [ ! -f /usr/local/bin/brew ]; then + echo "Installing x86_64 Homebrew..." + arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" /etc/apt/sources.list && \ + echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal-updates main restricted universe multiverse" >> /etc/apt/sources.list && \ + echo "deb [arch=amd64] http://archive.ubuntu.com/ubuntu focal-security main restricted universe multiverse" >> /etc/apt/sources.list && \ + echo "deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports focal main restricted universe multiverse" >> /etc/apt/sources.list && \ + echo "deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports focal-updates main restricted universe multiverse" >> /etc/apt/sources.list && \ + echo "deb [arch=arm64] http://ports.ubuntu.com/ubuntu-ports focal-security main restricted universe multiverse" >> /etc/apt/sources.list + +# Update package lists for both architectures +RUN apt-get update + +# Install build tools (native x64) +RUN apt-get install -y \ + build-essential \ + cmake \ + pkg-config \ + git \ + file \ + gcc-aarch64-linux-gnu \ + g++-aarch64-linux-gnu \ + binutils-aarch64-linux-gnu + +# Install ARM64 cross-compilation libraries +RUN apt-get install -y \ + libssl-dev:arm64 \ + zlib1g-dev:arm64 \ + libevent-dev:arm64 \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /build + +# Copy the entire project +COPY . 
/build/ + +# Create CMake toolchain file for ARM64 cross-compilation +# Note: Do NOT set CMAKE_SYSROOT on Ubuntu - ARM64 libs are in multiarch paths +RUN printf '%s\n' \ + 'set(CMAKE_SYSTEM_NAME Linux)' \ + 'set(CMAKE_SYSTEM_PROCESSOR aarch64)' \ + '' \ + '# Cross-compiler settings' \ + 'set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc)' \ + 'set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++)' \ + 'set(CMAKE_AR aarch64-linux-gnu-ar)' \ + 'set(CMAKE_RANLIB aarch64-linux-gnu-ranlib)' \ + 'set(CMAKE_STRIP aarch64-linux-gnu-strip)' \ + '' \ + '# Target environment - where to find ARM64 libraries' \ + '# Ubuntu uses multiarch paths, not a separate sysroot' \ + 'set(CMAKE_FIND_ROOT_PATH /usr/aarch64-linux-gnu /usr/lib/aarch64-linux-gnu)' \ + '' \ + '# OpenSSL paths for ARM64' \ + 'set(OPENSSL_ROOT_DIR /usr/lib/aarch64-linux-gnu)' \ + 'set(OPENSSL_INCLUDE_DIR /usr/include)' \ + 'set(OPENSSL_CRYPTO_LIBRARY /usr/lib/aarch64-linux-gnu/libcrypto.so)' \ + 'set(OPENSSL_SSL_LIBRARY /usr/lib/aarch64-linux-gnu/libssl.so)' \ + '' \ + '# Search for programs in the build host directories' \ + 'set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)' \ + '' \ + '# Search for libraries and headers in the target directories' \ + 'set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)' \ + 'set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)' \ + 'set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)' \ + '' \ + '# Position independent code for shared libraries' \ + 'set(CMAKE_POSITION_INDEPENDENT_CODE ON)' \ + > /build/toolchain-aarch64.cmake + +# Create build directory and build with cross-compilation +RUN mkdir -p cmake-build && cd cmake-build && \ + PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig \ + PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig \ + cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_TOOLCHAIN_FILE=/build/toolchain-aarch64.cmake \ + -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_STATIC_LIBS=ON \ + -DBUILD_TESTS=OFF \ + -DBUILD_C_API=ON \ + 
-DBUILD_BINDINGS_EXAMPLES=OFF \ + -DBUILD_EXAMPLES=OFF \ + -DCMAKE_INSTALL_PREFIX=/build/install_prefix_dir \ + -DOPENSSL_ROOT_DIR=/usr \ + -DOPENSSL_INCLUDE_DIR=/usr/include \ + -DOPENSSL_CRYPTO_LIBRARY=/usr/lib/aarch64-linux-gnu/libcrypto.so \ + -DOPENSSL_SSL_LIBRARY=/usr/lib/aarch64-linux-gnu/libssl.so \ + /build && \ + make -j$(nproc) && \ + make install + +# Create output directory and organize files +RUN mkdir -p /output && \ + cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + +# Build verification tool using cross-compiler +RUN printf '%s\n' \ + '#include ' \ + '#include ' \ + '#include ' \ + '' \ + 'int main() {' \ + ' printf("libgopher-mcp verification tool (Linux ARM64)\\n");' \ + ' printf("==============================================\\n\\n");' \ + ' void* handle = dlopen("./libgopher_mcp_c.so", RTLD_NOW);' \ + ' if (!handle) {' \ + ' printf("Note: C API library not found: %s\\n", dlerror());' \ + ' handle = dlopen("./libgopher-mcp.so", RTLD_NOW);' \ + ' if (!handle) {' \ + ' printf("X Failed to load main library: %s\\n", dlerror());' \ + ' return 1;' \ + ' }' \ + ' printf("OK Main library loaded successfully\\n");' \ + ' } else {' \ + ' printf("OK C API library loaded successfully\\n");' \ + ' }' \ + ' void* init_func = dlsym(handle, "mcp_init");' \ + ' if (init_func) {' \ + ' printf("OK mcp_init function found\\n");' \ + ' } else {' \ + ' printf("-- mcp_init function not found\\n");' \ + ' }' \ + ' void* cleanup_func = dlsym(handle, "mcp_cleanup");' \ + ' if (cleanup_func) {' \ + ' printf("OK mcp_cleanup function found\\n");' \ + ' } else {' \ + ' printf("-- mcp_cleanup function not 
found\\n");' \ + ' }' \ + ' dlclose(handle);' \ + ' printf("\\nOK Verification complete\\n");' \ + ' return 0;' \ + '}' > /tmp/verify_mcp.c && \ + aarch64-linux-gnu-gcc -o /output/verify_mcp /tmp/verify_mcp.c -ldl -O2 + +# Verify the output is actually ARM64 +RUN file /output/verify_mcp && \ + file /output/libgopher-mcp*.so* | head -1 + +# Default command to copy files to host +CMD cp -r /output/* /host-output/ && \ + echo "ARM64 cross-compilation complete!" && \ + ls -la /output/ diff --git a/docker-mcp/build-linux-arm64-docker.sh b/docker-mcp/build-linux-arm64-docker.sh index 2f890385..04bf55b7 100755 --- a/docker-mcp/build-linux-arm64-docker.sh +++ b/docker-mcp/build-linux-arm64-docker.sh @@ -1,8 +1,7 @@ #!/bin/bash # Cross-compile libgopher-mcp for Linux ARM64 using Docker -# This script can run on any platform with Docker (macOS, Linux x64, Windows) -# Uses Docker buildx for ARM64 emulation +# Uses x64 container with aarch64-linux-gnu cross-compiler (fast, no QEMU emulation) set -e @@ -15,7 +14,7 @@ NC='\033[0m' echo -e "${MAGENTA}========================================${NC}" echo -e "${MAGENTA}Building libgopher-mcp for Linux ARM64${NC}" -echo -e "${MAGENTA}Using Docker for cross-platform build${NC}" +echo -e "${MAGENTA}Using cross-compilation (no QEMU)${NC}" echo -e "${MAGENTA}========================================${NC}" echo "" @@ -30,28 +29,19 @@ if ! command -v docker &> /dev/null; then exit 1 fi -# Check for buildx support -if ! 
docker buildx version &> /dev/null; then - echo -e "${RED}Error: Docker buildx is not available${NC}" - echo "Please update Docker Desktop to a recent version" - exit 1 -fi - # Clean and create output directory echo -e "${YELLOW}Cleaning previous builds...${NC}" rm -rf "$OUTPUT_DIR" mkdir -p "$OUTPUT_DIR" -echo -e "${YELLOW}Building ARM64 library using Docker...${NC}" -echo "This may take several minutes on first run (downloading base image and dependencies)" +echo -e "${YELLOW}Building ARM64 library using cross-compilation...${NC}" +echo "This runs at native x64 speed (no QEMU emulation)" echo "" -# Build using Docker buildx with ARM64 platform -docker buildx build \ - --platform linux/arm64 \ - --load \ - -t gopher-mcp:linux-arm64 \ - -f "$SCRIPT_DIR/Dockerfile.linux-arm64" \ +# Build using Docker with cross-compilation (no --platform flag needed) +docker build \ + -t gopher-mcp:linux-arm64-cross \ + -f "$SCRIPT_DIR/Dockerfile.linux-arm64-cross" \ "$PROJECT_ROOT" if [ $? -ne 0 ]; then @@ -62,11 +52,10 @@ fi echo "" echo -e "${YELLOW}Extracting built files...${NC}" -# Run container and copy files to host +# Run container and copy files to host (no --platform flag needed) docker run --rm \ - --platform linux/arm64 \ -v "$OUTPUT_DIR:/host-output" \ - gopher-mcp:linux-arm64 + gopher-mcp:linux-arm64-cross # Check results if [ -f "$OUTPUT_DIR/libgopher-mcp.so" ] || [ -f "$OUTPUT_DIR/libgopher-mcp.so.0.1.0" ]; then diff --git a/docker-mcp/build-mac-x64.sh b/docker-mcp/build-mac-x64.sh index e474827e..0a38680f 100755 --- a/docker-mcp/build-mac-x64.sh +++ b/docker-mcp/build-mac-x64.sh @@ -42,6 +42,42 @@ cd "$BUILD_DIR" # Configure CMake with macOS-specific settings echo -e "${YELLOW}Configuring CMake for macOS x86_64...${NC}" +# When cross-compiling x64 on Apple Silicon, we need to use x86_64 libraries +# Install x86_64 Homebrew and dependencies if needed +CURRENT_ARCH=$(uname -m) +X86_BREW="/usr/local/bin/brew" +X86_PREFIX="/usr/local" + +if [ "$CURRENT_ARCH" = "arm64" 
]; then + echo -e "${YELLOW}Cross-compiling x86_64 on Apple Silicon...${NC}" + + # Check if x86_64 Homebrew exists, if not install it + if [ ! -f "$X86_BREW" ]; then + echo -e "${YELLOW}Installing x86_64 Homebrew...${NC}" + arch -x86_64 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + fi + + # Install x86_64 dependencies + echo -e "${YELLOW}Installing x86_64 dependencies via Homebrew...${NC}" + arch -x86_64 $X86_BREW install openssl@3 libevent libnghttp2 2>/dev/null || true + + # Get paths to x86_64 libraries + X86_OPENSSL_PREFIX=$(arch -x86_64 $X86_BREW --prefix openssl@3 2>/dev/null || echo "/usr/local/opt/openssl@3") + X86_LIBEVENT_PREFIX=$(arch -x86_64 $X86_BREW --prefix libevent 2>/dev/null || echo "/usr/local/opt/libevent") + X86_NGHTTP2_PREFIX=$(arch -x86_64 $X86_BREW --prefix libnghttp2 2>/dev/null || echo "/usr/local/opt/libnghttp2") + + echo "Using x86_64 OpenSSL from: $X86_OPENSSL_PREFIX" + echo "Using x86_64 libevent from: $X86_LIBEVENT_PREFIX" + echo "Using x86_64 nghttp2 from: $X86_NGHTTP2_PREFIX" + + # Set CMake flags to use x86_64 libraries and ignore ARM64 ones + EXTRA_CMAKE_FLAGS="-DCMAKE_IGNORE_PATH=/opt/homebrew;/opt/homebrew/lib;/opt/homebrew/include" + EXTRA_CMAKE_FLAGS="$EXTRA_CMAKE_FLAGS -DOPENSSL_ROOT_DIR=$X86_OPENSSL_PREFIX" + EXTRA_CMAKE_FLAGS="$EXTRA_CMAKE_FLAGS -DCMAKE_PREFIX_PATH=$X86_PREFIX;$X86_OPENSSL_PREFIX;$X86_LIBEVENT_PREFIX;$X86_NGHTTP2_PREFIX" +else + EXTRA_CMAKE_FLAGS="" +fi + cmake \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_CXX_STANDARD=17 \ @@ -59,6 +95,7 @@ cmake \ -DCMAKE_MACOSX_RPATH=ON \ -DCMAKE_INSTALL_RPATH="@loader_path" \ -DCMAKE_POLICY_VERSION_MINIMUM=3.5 \ + ${EXTRA_CMAKE_FLAGS} \ "${PROJECT_ROOT}" # Build the library From 521e26374dee5ccbafdf62c8203162d30743cf60 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Thu, 29 Jan 2026 11:35:35 +0800 Subject: [PATCH 15/39] Add missing headers in prebuilt packages The build scripts and Dockerfiles had an INSTALL_DIR path mismatch 
that caused headers to not be copied to the build output. CMake installed to ${BUILD_DIR}/install but the scripts tried to copy from install_prefix_dir. Also added header verification to the GitHub workflow to ensure headers are always included in prebuilt packages. Changes: - Fix INSTALL_DIR path in build-mac-x64.sh, build-mac-arm64.sh, build-linux-x64.sh, build-linux-arm64.sh - Fix CMAKE_INSTALL_PREFIX and copy paths in Dockerfile.linux-x64, Dockerfile.linux-arm64, Dockerfile.linux-arm64-cross - Add header verification step to build-all.yml workflow for all platforms --- .github/workflows/build-all.yml | 24 ++++++++++++++++++++++++ docker-mcp/Dockerfile.linux-arm64 | 11 ++++++----- docker-mcp/Dockerfile.linux-arm64-cross | 12 ++++++------ docker-mcp/Dockerfile.linux-x64 | 11 ++++++----- docker-mcp/build-linux-arm64.sh | 2 +- docker-mcp/build-linux-x64.sh | 2 +- docker-mcp/build-mac-arm64.sh | 2 +- docker-mcp/build-mac-x64.sh | 2 +- 8 files changed, 46 insertions(+), 20 deletions(-) diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 4e75d947..59e1c243 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -68,6 +68,14 @@ jobs: echo "Error: Library not found!" exit 1 fi + # Check for headers + if [ -d "build-output/linux-${{ matrix.arch }}/include/mcp" ]; then + echo "✓ Headers found" + ls build-output/linux-${{ matrix.arch }}/include/mcp/ | head -10 + else + echo "Error: Headers not found!" + exit 1 + fi - name: Upload artifacts uses: actions/upload-artifact@v4 @@ -113,6 +121,14 @@ jobs: echo "Error: DLL not found!" exit 1 fi + # Check for headers + if [ -d "build-output/windows-${{ matrix.arch }}/include/mcp" ]; then + echo "✓ Headers found" + ls build-output/windows-${{ matrix.arch }}/include/mcp/ | head -10 + else + echo "Error: Headers not found!" + exit 1 + fi - name: Upload artifacts uses: actions/upload-artifact@v4 @@ -177,6 +193,14 @@ jobs: echo "Error: Library not found!" 
exit 1 fi + # Check for headers + if [ -d "build-output/mac-${{ matrix.arch }}/include/mcp" ]; then + echo "✓ Headers found" + ls build-output/mac-${{ matrix.arch }}/include/mcp/ | head -10 + else + echo "Error: Headers not found!" + exit 1 + fi - name: Upload artifacts uses: actions/upload-artifact@v4 diff --git a/docker-mcp/Dockerfile.linux-arm64 b/docker-mcp/Dockerfile.linux-arm64 index 744468f6..9c74e621 100644 --- a/docker-mcp/Dockerfile.linux-arm64 +++ b/docker-mcp/Dockerfile.linux-arm64 @@ -35,17 +35,18 @@ RUN mkdir -p cmake-build && cd cmake-build && \ -DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR} \ -DOPENSSL_CRYPTO_LIBRARY=${OPENSSL_CRYPTO_LIBRARY} \ -DOPENSSL_SSL_LIBRARY=${OPENSSL_SSL_LIBRARY} \ + -DCMAKE_INSTALL_PREFIX=/build/cmake-build/install \ /build && \ make -j$(nproc) && \ make install # Create output directory and organize files RUN mkdir -p /output && \ - cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ - cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + cp /build/cmake-build/install/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + cp -r /build/cmake-build/install/include /output/ 2>/dev/null || true # Build verification tool RUN printf '%s\n' \ diff --git a/docker-mcp/Dockerfile.linux-arm64-cross b/docker-mcp/Dockerfile.linux-arm64-cross index 4343b148..381914ab 100644 --- a/docker-mcp/Dockerfile.linux-arm64-cross +++ b/docker-mcp/Dockerfile.linux-arm64-cross @@ -93,7 +93,7 @@ RUN 
mkdir -p cmake-build && cd cmake-build && \ -DBUILD_C_API=ON \ -DBUILD_BINDINGS_EXAMPLES=OFF \ -DBUILD_EXAMPLES=OFF \ - -DCMAKE_INSTALL_PREFIX=/build/install_prefix_dir \ + -DCMAKE_INSTALL_PREFIX=/build/cmake-build/install \ -DOPENSSL_ROOT_DIR=/usr \ -DOPENSSL_INCLUDE_DIR=/usr/include \ -DOPENSSL_CRYPTO_LIBRARY=/usr/lib/aarch64-linux-gnu/libcrypto.so \ @@ -104,11 +104,11 @@ RUN mkdir -p cmake-build && cd cmake-build && \ # Create output directory and organize files RUN mkdir -p /output && \ - cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ - cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + cp /build/cmake-build/install/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + cp -r /build/cmake-build/install/include /output/ 2>/dev/null || true # Build verification tool using cross-compiler RUN printf '%s\n' \ diff --git a/docker-mcp/Dockerfile.linux-x64 b/docker-mcp/Dockerfile.linux-x64 index c1b22780..53c10975 100644 --- a/docker-mcp/Dockerfile.linux-x64 +++ b/docker-mcp/Dockerfile.linux-x64 @@ -27,17 +27,18 @@ RUN mkdir -p cmake-build && cd cmake-build && \ -DBUILD_C_API=ON \ -DBUILD_BINDINGS_EXAMPLES=OFF \ -DBUILD_EXAMPLES=OFF \ + -DCMAKE_INSTALL_PREFIX=/build/cmake-build/install \ /build && \ make -j$(nproc) && \ make install # Create output directory and organize files RUN mkdir -p /output && \ - cp /build/install_prefix_dir/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ - cp 
/build/install_prefix_dir/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libfmt*.so* /output/ 2>/dev/null || true && \ - cp /build/install_prefix_dir/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ - cp -r /build/install_prefix_dir/include /output/ 2>/dev/null || true + cp /build/cmake-build/install/lib/libgopher-mcp*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libgopher_mcp_c*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libfmt*.so* /output/ 2>/dev/null || true && \ + cp /build/cmake-build/install/lib/libllhttp*.so* /output/ 2>/dev/null || true && \ + cp -r /build/cmake-build/install/include /output/ 2>/dev/null || true # Build verification tool RUN printf '%s\n' \ diff --git a/docker-mcp/build-linux-arm64.sh b/docker-mcp/build-linux-arm64.sh index 67d87ecb..fc71db14 100755 --- a/docker-mcp/build-linux-arm64.sh +++ b/docker-mcp/build-linux-arm64.sh @@ -23,7 +23,7 @@ PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" # Build configuration BUILD_DIR="${PROJECT_ROOT}/build-linux-arm64" DEPS_DIR="${PROJECT_ROOT}/_deps-linux-arm64" -INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +INSTALL_DIR="${BUILD_DIR}/install" OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-arm64" # Detect architecture diff --git a/docker-mcp/build-linux-x64.sh b/docker-mcp/build-linux-x64.sh index 60a043fb..743b00e9 100755 --- a/docker-mcp/build-linux-x64.sh +++ b/docker-mcp/build-linux-x64.sh @@ -23,7 +23,7 @@ PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" # Build configuration BUILD_DIR="${PROJECT_ROOT}/build-linux-x64" DEPS_DIR="${PROJECT_ROOT}/_deps-linux-x64" -INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +INSTALL_DIR="${BUILD_DIR}/install" OUTPUT_DIR="${PROJECT_ROOT}/build-output/linux-x64" # Detect architecture diff --git a/docker-mcp/build-mac-arm64.sh b/docker-mcp/build-mac-arm64.sh index 99716c80..c0d447bd 100755 --- a/docker-mcp/build-mac-arm64.sh +++ b/docker-mcp/build-mac-arm64.sh @@ -24,7 
+24,7 @@ PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" # Build configuration BUILD_DIR="${PROJECT_ROOT}/build-mac-arm64" DEPS_DIR="${PROJECT_ROOT}/_deps-arm64" -INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +INSTALL_DIR="${BUILD_DIR}/install" OUTPUT_DIR="${PROJECT_ROOT}/build-output/mac-arm64" MIN_MACOS_VERSION="11.0" # Minimum version for Apple Silicon diff --git a/docker-mcp/build-mac-x64.sh b/docker-mcp/build-mac-x64.sh index 0a38680f..d3ada32e 100755 --- a/docker-mcp/build-mac-x64.sh +++ b/docker-mcp/build-mac-x64.sh @@ -24,7 +24,7 @@ PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" # Build configuration BUILD_DIR="${PROJECT_ROOT}/build-mac-x64" DEPS_DIR="${PROJECT_ROOT}/_deps-x64" -INSTALL_DIR="${PROJECT_ROOT}/install_prefix_dir" +INSTALL_DIR="${BUILD_DIR}/install" OUTPUT_DIR="${PROJECT_ROOT}/build-output/mac-x64" MIN_MACOS_VERSION="10.14" From 36ce3fcf43742d60f0b9e6abe8ef864a9602667f Mon Sep 17 00:00:00 2001 From: RahulHere Date: Thu, 29 Jan 2026 12:19:30 +0800 Subject: [PATCH 16/39] Improve CMAKE_INSTALL_PREFIX to respect user-specified values The previous code unconditionally set CMAKE_INSTALL_PREFIX, which overrode any command-line -DCMAKE_INSTALL_PREFIX=... settings. This caused Docker builds to install to the wrong directory. Now uses CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT to only set the default when the user hasn't specified a custom install prefix. 
--- CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a91eee3b..aa3782dd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -83,8 +83,10 @@ if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Debug) endif() -# Set custom install prefix -set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/install_prefix_dir") +# Set custom install prefix (only if not specified by user) +if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) + set(CMAKE_INSTALL_PREFIX "${CMAKE_CURRENT_SOURCE_DIR}/install_prefix_dir" CACHE PATH "Install prefix" FORCE) +endif() #Compiler flags if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") From c9efa10098c09a385ab63fd7fd82ee942f016cd4 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Fri, 30 Jan 2026 03:27:26 +0800 Subject: [PATCH 17/39] Link ws2_32 for gopher-mcp-event on Windows The event library uses recv/send socket functions which require linking against ws2_32 on Windows. --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index aa3782dd..a95b011a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -766,6 +766,11 @@ foreach(lib_target ${EVENT_REAL_TARGETS}) endif() target_link_libraries(${lib_target} PUBLIC ${LIBEVENT_LIBRARIES}) endif() + + # Add Windows-specific socket libraries for event library + if(WIN32) + target_link_libraries(${lib_target} PRIVATE ws2_32) + endif() endif() endforeach() From 7cec0a00488d510506a591222cadc0ead24525bb Mon Sep 17 00:00:00 2001 From: RahulHere Date: Fri, 30 Jan 2026 12:36:42 +0800 Subject: [PATCH 18/39] Only fetch googletest when BUILD_TESTS is enabled Wrap googletest FetchContent in BUILD_TESTS check to avoid fetching and building googletest when tests are disabled. This fixes cross-compilation issues where googletest fails to build due to threading model differences. 
--- CMakeLists.txt | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a95b011a..3d8cafb4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -384,17 +384,19 @@ if(NOT TARGET nlohmann_json_schema_validator) message(STATUS "json-schema-validator download complete.") endif() -# Google Test -FetchContent_Declare( - googletest - GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG release-1.12.1 -) -# For Windows: Prevent overriding the parent project's compiler/linker settings -set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) -message(STATUS "Downloading Google Test... This may take a moment depending on your connection speed.") -FetchContent_MakeAvailable(googletest) -message(STATUS "Google Test download complete.") +# Google Test - only fetch when tests are enabled +if(BUILD_TESTS) + FetchContent_Declare( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG release-1.12.1 + ) + # For Windows: Prevent overriding the parent project's compiler/linker settings + set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + message(STATUS "Downloading Google Test... This may take a moment depending on your connection speed.") + FetchContent_MakeAvailable(googletest) + message(STATUS "Google Test download complete.") +endif() message(STATUS "") message(STATUS "==================================================") From e703de931536865566598c78dbe5f2d6d0724d18 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 10:19:56 +0800 Subject: [PATCH 19/39] Improve static library transitive dependencies on shared libraries Static library targets now use PRIVATE linking for logging and fmt dependencies to prevent transitive dependencies on shared libraries. This allows downstream projects using BUILD_BUNDLED_SHARED to create truly self-contained shared libraries without runtime dependencies on gopher-mcp shared libraries. 
Changes: - gopher-mcp-logging-static: Link fmt as PRIVATE instead of PUBLIC - gopher-mcp-event-static: Use static deps with PRIVATE linking - gopher-mcp-echo-advanced-static: Use static deps with PRIVATE linking --- CMakeLists.txt | 94 ++++++++++++++++++++++++++++++-------- src/logging/CMakeLists.txt | 17 +++++-- 2 files changed, 88 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3d8cafb4..01eb9a9e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -696,21 +696,48 @@ endif() foreach(lib_target ${ECHO_REAL_TARGETS}) if(TARGET ${lib_target}) target_include_directories(${lib_target} - PUBLIC + PUBLIC $ $ ) - # Link to appropriate main library - if(TARGET gopher-mcp) - target_link_libraries(${lib_target} PUBLIC gopher-mcp) - elseif(TARGET gopher-mcp-static) - target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + + # Determine if this is a static or shared library target + set(_is_static_target FALSE) + if("${lib_target}" MATCHES "-static$") + set(_is_static_target TRUE) + endif() + + # Link to appropriate main library based on target type + if(_is_static_target) + # Static library: prefer static dependencies + if(TARGET gopher-mcp-static) + target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + elseif(TARGET gopher-mcp) + target_link_libraries(${lib_target} PUBLIC gopher-mcp) + endif() + target_link_libraries(${lib_target} PUBLIC Threads::Threads) + # Link logging and fmt as PRIVATE to avoid transitive shared lib deps + if(TARGET gopher-mcp-logging-static) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging-static) + elseif(TARGET gopher-mcp-logging) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging) + endif() + if(TARGET fmt) + target_link_libraries(${lib_target} PRIVATE fmt) + endif() + else() + # Shared library: use PUBLIC dependencies normally + if(TARGET gopher-mcp) + target_link_libraries(${lib_target} PUBLIC gopher-mcp) + elseif(TARGET gopher-mcp-static) + 
target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + endif() + target_link_libraries(${lib_target} PUBLIC + Threads::Threads + gopher-mcp-logging + fmt::fmt + ) endif() - target_link_libraries(${lib_target} PUBLIC - Threads::Threads - gopher-mcp-logging - fmt::fmt - ) endif() endforeach() @@ -751,16 +778,45 @@ foreach(lib_target ${EVENT_REAL_TARGETS}) $ $ ) - # Link to appropriate main library - if(TARGET gopher-mcp) - target_link_libraries(${lib_target} PUBLIC gopher-mcp) - elseif(TARGET gopher-mcp-static) - target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + + # Determine if this is a static or shared library target + set(_is_static_target FALSE) + if("${lib_target}" MATCHES "-static$") + set(_is_static_target TRUE) endif() - target_link_libraries(${lib_target} PUBLIC Threads::Threads) - # Link logging library for GOPHER_LOG macros - target_link_libraries(${lib_target} PUBLIC gopher-mcp-logging fmt::fmt) + # Link to appropriate main library based on target type + # Static targets should link to static dependencies to avoid shared lib transitive deps + if(_is_static_target) + # Static library: use static dependencies with PRIVATE to embed symbols + if(TARGET gopher-mcp-static) + target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + elseif(TARGET gopher-mcp) + target_link_libraries(${lib_target} PUBLIC gopher-mcp) + endif() + target_link_libraries(${lib_target} PUBLIC Threads::Threads) + + # Link logging library - use static version with PRIVATE to embed + if(TARGET gopher-mcp-logging-static) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging-static) + elseif(TARGET gopher-mcp-logging) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging) + endif() + # Link fmt as PRIVATE to avoid transitive dependency + if(TARGET fmt) + target_link_libraries(${lib_target} PRIVATE fmt) + endif() + else() + # Shared library: use PUBLIC dependencies normally + if(TARGET gopher-mcp) + target_link_libraries(${lib_target} PUBLIC 
gopher-mcp) + elseif(TARGET gopher-mcp-static) + target_link_libraries(${lib_target} PUBLIC gopher-mcp-static) + endif() + target_link_libraries(${lib_target} PUBLIC Threads::Threads) + # Link logging library for GOPHER_LOG macros + target_link_libraries(${lib_target} PUBLIC gopher-mcp-logging fmt::fmt) + endif() if(LIBEVENT_FOUND) if(LIBEVENT_LIBRARY_DIRS) diff --git a/src/logging/CMakeLists.txt b/src/logging/CMakeLists.txt index 6acc250f..d485c740 100644 --- a/src/logging/CMakeLists.txt +++ b/src/logging/CMakeLists.txt @@ -23,12 +23,21 @@ target_include_directories(gopher-mcp-logging-static PUBLIC $ ) -# Link with fmt - it should handle its own include directories -target_link_libraries(gopher-mcp-logging-static PUBLIC - fmt - Threads::Threads +# Link with fmt - use PRIVATE to avoid transitive dependency on shared library +# The static library will embed fmt symbols, preventing downstream transitive deps +target_link_libraries(gopher-mcp-logging-static + PUBLIC Threads::Threads + PRIVATE fmt ) +# Expose fmt include directories for header compilation +if(TARGET fmt) + get_target_property(FMT_INCLUDE_DIR fmt INTERFACE_INCLUDE_DIRECTORIES) + if(FMT_INCLUDE_DIR) + target_include_directories(gopher-mcp-logging-static PUBLIC ${FMT_INCLUDE_DIR}) + endif() +endif() + target_compile_features(gopher-mcp-logging-static PUBLIC cxx_std_17) target_compile_definitions(gopher-mcp-logging-static PUBLIC From db665301cd9bf1aa25b4134409a6acf84918016b Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 10:30:24 +0800 Subject: [PATCH 20/39] Improve gopher-mcp-static to prefer static logging dependency Static library target now explicitly prefers gopher-mcp-logging-static over gopher-mcp-logging to avoid transitive dependency on shared logging library. 
--- CMakeLists.txt | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 01eb9a9e..bd285a23 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -567,25 +567,48 @@ endif() foreach(lib_target ${REAL_TARGETS}) if(TARGET ${lib_target}) target_include_directories(${lib_target} - PUBLIC + PUBLIC $ $ PRIVATE $> ${LIBEVENT_INCLUDE_DIRS} ) - target_link_libraries(${lib_target} - PUBLIC - Threads::Threads - OpenSSL::SSL + + # Determine if this is a static library target + set(_is_static_target FALSE) + if("${lib_target}" MATCHES "-static$") + set(_is_static_target TRUE) + endif() + + # Base libraries for all targets + target_link_libraries(${lib_target} + PUBLIC + Threads::Threads + OpenSSL::SSL OpenSSL::Crypto PRIVATE nlohmann_json::nlohmann_json ${LIBEVENT_LIBRARIES} yaml-cpp::yaml-cpp - $<$:gopher-mcp-logging> - $<$>,$>:gopher-mcp-logging-static> ) + + # Link logging library - static targets prefer static version to avoid transitive deps + if(_is_static_target) + if(TARGET gopher-mcp-logging-static) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging-static) + elseif(TARGET gopher-mcp-logging) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging) + endif() + else() + # Shared library can use shared logging + if(TARGET gopher-mcp-logging) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging) + elseif(TARGET gopher-mcp-logging-static) + target_link_libraries(${lib_target} PRIVATE gopher-mcp-logging-static) + endif() + endif() + # Disable json-schema-validator by default to avoid export/link issues target_compile_definitions(${lib_target} PUBLIC MCP_HAS_JSON_SCHEMA_VALIDATOR=0) if(LIBEVENT_LIBRARY_DIRS) From 52b75505848411aa8bba1d503f796a514ccdd010 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 11:51:47 +0800 Subject: [PATCH 21/39] Resolve C++14 ODR-use of static constexpr member Add out-of-class definition for 
McpClient::kConnectionIdleTimeoutSec. In C++14, static constexpr members require explicit out-of-class definitions when ODR-used (e.g., passed by reference to functions). Static constexpr members are implicitly inline only in C++17 and later, so the separate definition is unnecessary there. --- src/client/mcp_client.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/client/mcp_client.cc b/src/client/mcp_client.cc index 7bdd8e23..822cd0c1 100644 --- a/src/client/mcp_client.cc +++ b/src/client/mcp_client.cc @@ -58,6 +58,11 @@ using mcp::jsonrpc::Response; namespace jsonrpc = mcp::jsonrpc; +// Out-of-class definition for static constexpr member (required for C++14) +// In C++17+, constexpr static members are implicitly inline, but C++14 requires +// explicit out-of-class definition when the member is ODR-used +constexpr int McpClient::kConnectionIdleTimeoutSec; + // Constructor McpClient::McpClient(const McpClientConfig& config) : ApplicationBase(config), config_(config) { From 5353066b331bd54a9ec3a47fe67dba43b4aed9e4 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 12:04:42 +0800 Subject: [PATCH 22/39] Force fmt to build as static library Set BUILD_SHARED_LIBS=OFF before fetching fmt to ensure it builds as a static library. This allows fmt symbols to be properly embedded when creating bundled shared libraries for SDK distribution, eliminating the runtime dependency on libfmt.dylib.
--- CMakeLists.txt | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index bd285a23..cbf5f86f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -329,6 +329,11 @@ else() endif() # fmt library for formatting - download and build +# Force fmt to build as static library regardless of BUILD_SHARED_LIBS +# This ensures fmt symbols are embedded when linking statically into bundled shared libs +set(BUILD_SHARED_LIBS_SAVED ${BUILD_SHARED_LIBS}) +set(BUILD_SHARED_LIBS OFF) + FetchContent_Declare( fmt GIT_REPOSITORY https://github.com/fmtlib/fmt.git @@ -336,6 +341,9 @@ FetchContent_Declare( ) message(STATUS "Downloading fmt... This may take a moment depending on your connection speed.") FetchContent_MakeAvailable(fmt) + +# Restore BUILD_SHARED_LIBS +set(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_SAVED}) # Enable position-independent code for shared library linking if(TARGET fmt) set_target_properties(fmt PROPERTIES POSITION_INDEPENDENT_CODE ON) From 9180a6e8e70a826ecba46160eb08660cc7302bf2 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 12:15:40 +0800 Subject: [PATCH 23/39] Resolve logging library linking order in CMakeLists.txt Move add_subdirectory(src/logging) earlier in the file, before the gopher-mcp library linking configuration. This ensures gopher-mcp-logging target exists when gopher-mcp attempts to link against it. Previously, the logging subdirectory was added at the end of the file (line 884), but the linking loop ran at line 575. Since CMake processes directives in order, TARGET gopher-mcp-logging was FALSE during the linking configuration, causing gopher-mcp shared library to be built without linking gopher-mcp-logging. 
--- CMakeLists.txt | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index cbf5f86f..7bd40d63 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -518,6 +518,11 @@ set(MCP_EVENT_SOURCES src/event/worker.cc ) +# Add logging subdirectory BEFORE main library configuration +# This ensures gopher-mcp-logging target exists when gopher-mcp links against it +add_subdirectory(src/logging) +message(STATUS "Building logging framework") + # Create Gopher MCP libraries (both static and shared) # Combine all sources for the main library (including event sources) set(MCP_SDK_SOURCES ${MCP_CORE_SOURCES} ${MCP_CLIENT_SERVER_SOURCES} ${MCP_EVENT_SOURCES}) @@ -880,9 +885,8 @@ if(NOT GOPHER_MCP_IS_SUBMODULE OR GOPHER_MCP_BUILD_TESTS) endif() endif() -# Add logging subdirectory -add_subdirectory(src/logging) -message(STATUS "Building logging framework") +# NOTE: Logging subdirectory moved earlier in the file (before gopher-mcp linking) +# to ensure gopher-mcp-logging target exists when gopher-mcp links against it. # Add C API subdirectory (if enabled) if(BUILD_C_API) From bb5a336ed09ccd124cb2a19e6205b4384e9b69a2 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 13:18:36 +0800 Subject: [PATCH 24/39] Replace std::cerr with proper logging framework Convert all debug std::cerr statements to use GOPHER_LOG_DEBUG/WARN/ERROR macros. This enables runtime log level control via the logging framework instead of compile-time #ifndef NDEBUG guards. 
Changes: - http_codec_filter.cc: Replace debug output with GOPHER_LOG_DEBUG - mcp_connection_manager.cc: Replace debug output with GOPHER_LOG_DEBUG - listener_impl.cc: Replace debug output with GOPHER_LOG_DEBUG - sse_codec_filter.cc: Replace debug output with GOPHER_LOG_DEBUG - http_sse_transport_socket.cc: Replace error output with GOPHER_LOG_ERROR - connection_impl.cc: Replace error/debug output with GOPHER_LOG macros - filter_chain_assembler.cc: Replace output with GOPHER_LOG_DEBUG/ERROR - enhanced_filter_chain_factory.cc: Replace output with GOPHER_LOG macros - mcp_server_enhanced_filters.cc: Replace output with GOPHER_LOG macros - filter_order_validator.cc: Remove redundant std::cerr (already logged) --- src/config/filter_order_validator.cc | 2 -- src/filter/enhanced_filter_chain_factory.cc | 24 +++++++------- src/filter/filter_chain_assembler.cc | 13 ++++---- src/filter/http_codec_filter.cc | 36 ++++++++------------- src/filter/sse_codec_filter.cc | 4 +-- src/mcp_connection_manager.cc | 22 ++++--------- src/network/connection_impl.cc | 17 ++++------ src/network/listener_impl.cc | 26 ++++++--------- src/server/mcp_server_enhanced_filters.cc | 27 +++++++--------- src/transport/http_sse_transport_socket.cc | 14 +++----- 10 files changed, 71 insertions(+), 114 deletions(-) diff --git a/src/config/filter_order_validator.cc b/src/config/filter_order_validator.cc index f4204c2d..cd631d10 100644 --- a/src/config/filter_order_validator.cc +++ b/src/config/filter_order_validator.cc @@ -91,8 +91,6 @@ bool FilterOrderValidator::validate( result.errors.size()); for (const auto& error : result.errors) { GOPHER_LOG(Error, " {}", error); - // Also store for debugging - std::cerr << "Validation error: " << error << std::endl; } } diff --git a/src/filter/enhanced_filter_chain_factory.cc b/src/filter/enhanced_filter_chain_factory.cc index ea1d0421..8002537e 100644 --- a/src/filter/enhanced_filter_chain_factory.cc +++ b/src/filter/enhanced_filter_chain_factory.cc @@ -282,7 
+282,7 @@ class EnhancedProtocolFilter : public network::Filter, } void onError(const std::string& error) override { - std::cerr << "[HTTP Error] " << error << std::endl; + GOPHER_LOG_ERROR("HTTP Error: {}", error); } // ===== SseCodecFilter::EventCallbacks ===== @@ -298,7 +298,7 @@ class EnhancedProtocolFilter : public network::Filter, } void onSseError(const std::string& error) override { - std::cerr << "[SSE Error] " << error << std::endl; + GOPHER_LOG_ERROR("SSE Error: {}", error); } // ===== JsonRpcProtocolFilter::MessageHandler ===== @@ -351,14 +351,13 @@ class EnhancedProtocolFilter : public network::Filter, void onRequestThrottled(const std::string& method, size_t current_rate, size_t max_rate) override { - std::cerr << "[Rate Limiter] Request throttled: " << method - << " (rate: " << current_rate << "/" << max_rate << ")" - << std::endl; + GOPHER_LOG_WARN("Rate Limiter: Request throttled: {} (rate: {}/{})", + method, current_rate, max_rate); } void onRateLimitExceeded(const std::string& bucket_name) override { - std::cerr << "[Rate Limiter] Rate limit exceeded for bucket: " - << bucket_name << std::endl; + GOPHER_LOG_WARN("Rate Limiter: Rate limit exceeded for bucket: {}", + bucket_name); } // ===== Metrics Callbacks ===== @@ -375,23 +374,22 @@ class EnhancedProtocolFilter : public network::Filter, void onValidationFailure(const jsonrpc::Request& request, const std::string& reason) override { - std::cerr << "[Validator] Request validation failed: " << reason - << std::endl; + GOPHER_LOG_WARN("Validator: Request validation failed: {}", reason); } // ===== Backpressure Callbacks ===== void onBackpressureActivated(size_t queue_size, size_t max_size) override { - std::cerr << "[Backpressure] Activated (queue: " << queue_size << "/" - << max_size << ")" << std::endl; + GOPHER_LOG_WARN("Backpressure activated (queue: {}/{})", queue_size, + max_size); } void onBackpressureRelieved() override { - std::cerr << "[Backpressure] Relieved" << std::endl; + 
GOPHER_LOG_DEBUG("Backpressure relieved"); } void onRequestDropped(const std::string& reason) override { - std::cerr << "[Backpressure] Request dropped: " << reason << std::endl; + GOPHER_LOG_WARN("Backpressure: Request dropped: {}", reason); } private: diff --git a/src/filter/filter_chain_assembler.cc b/src/filter/filter_chain_assembler.cc index 18b69cb6..0344fd98 100644 --- a/src/filter/filter_chain_assembler.cc +++ b/src/filter/filter_chain_assembler.cc @@ -129,8 +129,8 @@ network::FilterSharedPtr FilterChainAssembler::createSingleFilter( std::shared_ptr(emitter, emitter.get()); } } catch (const std::exception& ex) { - std::cerr << "[FilterChainAssembler] Failed to create event emitter: " - << ex.what() << std::endl; + GOPHER_LOG_ERROR("FilterChainAssembler failed to create event emitter: {}", + ex.what()); } } @@ -222,17 +222,16 @@ ConfigurableFilterChainFactory::~ConfigurableFilterChainFactory() { bool ConfigurableFilterChainFactory::createFilterChain( const FilterCreationContext& context, network::FilterManager& filter_manager) { - std::cerr << "Creating filter chain from configuration" << std::endl; + GOPHER_LOG_DEBUG("Creating filter chain from configuration"); auto result = assembler_->assembleFilterChain(filter_chain_config_, context, filter_manager); if (!result.success) { - std::cerr << "Failed to create filter chain: " << result.error_message - << std::endl; + GOPHER_LOG_ERROR("Failed to create filter chain: {}", result.error_message); } else { - std::cerr << "Successfully created filter chain with " - << result.created_filters.size() << " filters" << std::endl; + GOPHER_LOG_DEBUG("Successfully created filter chain with {} filters", + result.created_filters.size()); } return result.success; diff --git a/src/filter/http_codec_filter.cc b/src/filter/http_codec_filter.cc index 9379b3fc..a3bb46b5 100644 --- a/src/filter/http_codec_filter.cc +++ b/src/filter/http_codec_filter.cc @@ -83,8 +83,7 @@ HttpCodecFilter::HttpCodecFilter(MessageCallbacks& callbacks, 
: message_callbacks_(&callbacks), dispatcher_(dispatcher), is_server_(is_server) { - std::cerr << "[HttpCodecFilter] CONSTRUCTOR is_server=" << is_server_ - << ", this=" << (void*)this << std::endl; + GOPHER_LOG_DEBUG("HttpCodecFilter CONSTRUCTOR is_server={}", is_server_); // Initialize HTTP parser callbacks parser_callbacks_ = std::make_unique(*this); @@ -316,15 +315,11 @@ network::FilterStatus HttpCodecFilter::onWrite(Buffer& data, bool end_stream) { // Client mode: format as HTTP request (GET for SSE init, POST for messages) auto current_state = state_machine_->currentState(); - std::cerr << "[HttpCodecFilter] onWrite client mode: state=" - << HttpCodecStateMachine::getStateName(current_state) - << ", data_len=" << data.length() - << ", use_sse_get=" << use_sse_get_ - << ", sse_get_sent=" << sse_get_sent_ << std::endl; - - GOPHER_LOG_DEBUG("HttpCodecFilter::onWrite client state={}, data_len={}", - HttpCodecStateMachine::getStateName(current_state), - data.length()); + GOPHER_LOG_DEBUG( + "HttpCodecFilter::onWrite client state={}, data_len={}, " + "use_sse_get={}, sse_get_sent={}", + HttpCodecStateMachine::getStateName(current_state), data.length(), + use_sse_get_, sse_get_sent_); // Check if we can send a request // Client can send when idle or while waiting for response (HTTP pipelining) @@ -338,7 +333,7 @@ network::FilterStatus HttpCodecFilter::onWrite(Buffer& data, bool end_stream) { // Check if this is an SSE GET initialization request // SSE GET is triggered by empty data with use_sse_get_ flag bool is_sse_get = use_sse_get_ && !sse_get_sent_ && data.length() == 0; - std::cerr << "[HttpCodecFilter] is_sse_get=" << is_sse_get << std::endl; + GOPHER_LOG_DEBUG("HttpCodecFilter is_sse_get={}", is_sse_get); // Save the original request body (JSON-RPC) if any size_t body_length = data.length(); @@ -364,8 +359,8 @@ network::FilterStatus HttpCodecFilter::onWrite(Buffer& data, bool end_stream) { request << "\r\n"; sse_get_sent_ = true; - std::cerr << 
"[HttpCodecFilter] Sending SSE GET request to " - << client_path_ << std::endl; + GOPHER_LOG_DEBUG("HttpCodecFilter sending SSE GET request to {}", + client_path_); } else { // Regular POST request with JSON-RPC body // Use message_endpoint_ if available (from SSE endpoint event) @@ -384,7 +379,7 @@ network::FilterStatus HttpCodecFilter::onWrite(Buffer& data, bool end_stream) { post_path = message_endpoint_; } } - std::cerr << "[HttpCodecFilter] POST path: " << post_path << std::endl; + GOPHER_LOG_DEBUG("HttpCodecFilter POST path: {}", post_path); request << "POST " << post_path << " HTTP/1.1\r\n"; request << "Host: " << client_host_ << "\r\n"; @@ -403,8 +398,6 @@ network::FilterStatus HttpCodecFilter::onWrite(Buffer& data, bool end_stream) { std::string request_str = request.str(); data.add(request_str.c_str(), request_str.length()); - std::cerr << "[HttpCodecFilter] Sending HTTP request:\n" - << request_str.substr(0, 300) << std::endl; GOPHER_LOG_DEBUG( "HttpCodecFilter client sending HTTP request (len={}): {}...", request_str.length(), request_str.substr(0, 200)); @@ -647,17 +640,14 @@ HttpCodecFilter::ParserCallbacks::onHeadersComplete() { http::ParserCallbackResult HttpCodecFilter::ParserCallbacks::onBody( const char* data, size_t length) { GOPHER_LOG_DEBUG("ParserCallbacks::onBody - received {} bytes", length); - std::cerr << "[HttpCodecFilter] ParserCallbacks::onBody - received " << length - << " bytes" << std::endl; // For client mode (receiving responses), forward body data immediately // This is critical for SSE streams which never complete if (!parent_.is_server_ && parent_.message_callbacks_) { std::string body_chunk(data, length); - std::cerr << "[HttpCodecFilter] Forwarding body chunk: " - << body_chunk.substr(0, - std::min(body_chunk.length(), (size_t)100)) - << std::endl; + GOPHER_LOG_DEBUG( + "HttpCodecFilter forwarding body chunk: {}...", + body_chunk.substr(0, std::min(body_chunk.length(), (size_t)100))); 
parent_.message_callbacks_->onBody(body_chunk, false); } diff --git a/src/filter/sse_codec_filter.cc b/src/filter/sse_codec_filter.cc index 5e6857de..2242375a 100644 --- a/src/filter/sse_codec_filter.cc +++ b/src/filter/sse_codec_filter.cc @@ -184,9 +184,7 @@ network::FilterStatus SseCodecFilter::onData(Buffer& data, bool end_stream) { // For SSE, end_stream doesn't mean immediate close - it means no more data // We should keep the connection open for future events // Only close if explicitly requested or on error - std::cerr << "[DEBUG] SSE end_stream received - keeping connection open " - "for SSE events" - << std::endl; + GOPHER_LOG_DEBUG("SSE end_stream received - keeping connection open for SSE events"); // Don't trigger CloseStream here - let the connection manager handle it } diff --git a/src/mcp_connection_manager.cc b/src/mcp_connection_manager.cc index 6ca20162..d0456e07 100644 --- a/src/mcp_connection_manager.cc +++ b/src/mcp_connection_manager.cc @@ -804,8 +804,6 @@ void McpConnectionManager::onConnectionEvent(network::ConnectionEvent event) { event_name = "LocalClose"; break; } - std::cerr << "[McpConnectionManager] onConnectionEvent event=" << event_name - << ", is_server=" << is_server_ << std::endl; GOPHER_LOG_DEBUG( "McpConnectionManager::onConnectionEvent event={}, is_server={}", event_name, is_server_); @@ -878,16 +876,12 @@ void McpConnectionManager::onConnectionEvent(network::ConnectionEvent event) { } // Forward event to upper layer callbacks - std::cerr << "[McpConnectionManager] Forwarding event to protocol_callbacks_=" - << (protocol_callbacks_ ? "set" : "NULL") << std::endl; + GOPHER_LOG_DEBUG("McpConnectionManager forwarding event to protocol_callbacks_={}", + (protocol_callbacks_ ? 
"set" : "NULL")); if (protocol_callbacks_) { - std::cerr << "[McpConnectionManager] Calling " - "protocol_callbacks_->onConnectionEvent" - << std::endl; + GOPHER_LOG_DEBUG("McpConnectionManager calling protocol_callbacks_->onConnectionEvent"); protocol_callbacks_->onConnectionEvent(event); - std::cerr << "[McpConnectionManager] " - "protocol_callbacks_->onConnectionEvent returned" - << std::endl; + GOPHER_LOG_DEBUG("McpConnectionManager protocol_callbacks_->onConnectionEvent returned"); // Ensure protocol callbacks are processed before any requests if (event == network::ConnectionEvent::Connected) { @@ -1058,17 +1052,15 @@ bool McpConnectionManager::sendHttpPost(const std::string& json_body) { : request_(request), connection_(conn) {} void onEvent(network::ConnectionEvent event) override { - std::cerr << "[PostConnection] onEvent: " << static_cast(event) - << std::endl; + GOPHER_LOG_DEBUG("PostConnection onEvent: {}", static_cast(event)); if (event == network::ConnectionEvent::Connected) { - std::cerr << "[PostConnection] Connected, sending POST request" - << std::endl; + GOPHER_LOG_DEBUG("PostConnection connected, sending POST request"); OwnedBuffer buffer; buffer.add(request_); connection_->write(buffer, false); } else if (event == network::ConnectionEvent::RemoteClose || event == network::ConnectionEvent::LocalClose) { - std::cerr << "[PostConnection] Connection closed" << std::endl; + GOPHER_LOG_DEBUG("PostConnection connection closed"); // Connection closed - this is expected after we get the response } } diff --git a/src/network/connection_impl.cc b/src/network/connection_impl.cc index 020b9725..d0ed37f4 100644 --- a/src/network/connection_impl.cc +++ b/src/network/connection_impl.cc @@ -788,13 +788,9 @@ void ConnectionImpl::onFileEvent(uint32_t events) { // This prevents processing events after closeSocket() has been called // Events may still fire from libevent queue even after file_event_ is reset if (state_ == ConnectionState::Closed || state_ == 
ConnectionState::Closing) { -#ifndef NDEBUG - std::cerr - << "[CONN] onFileEvent(): ignoring events on closed connection, fd=" - << (socket_ ? socket_->ioHandle().fd() : -1) - << " state=" << static_cast(state_) - << " (0=Open, 1=Closing, 2=Closed)" << std::endl; -#endif + GOPHER_LOG_DEBUG( + "onFileEvent(): ignoring events on closed connection, fd={} state={}", + (socket_ ? socket_->ioHandle().fd() : -1), static_cast(state_)); return; } @@ -1063,11 +1059,10 @@ void ConnectionImpl::closeSocket(ConnectionEvent close_type) { try { transport_socket_->closeSocket(close_type); } catch (const std::exception& e) { - std::cerr << "[ERROR] Exception in transport_socket_->closeSocket: " - << e.what() << std::endl; + GOPHER_LOG_ERROR("Exception in transport_socket_->closeSocket: {}", + e.what()); } catch (...) { - std::cerr << "[ERROR] Unknown exception in transport_socket_->closeSocket" - << std::endl; + GOPHER_LOG_ERROR("Unknown exception in transport_socket_->closeSocket"); } } diff --git a/src/network/listener_impl.cc b/src/network/listener_impl.cc index dc6edf3b..d6fb324d 100644 --- a/src/network/listener_impl.cc +++ b/src/network/listener_impl.cc @@ -104,9 +104,8 @@ ActiveListener::ActiveListener(event::Dispatcher& dispatcher, ActiveListener::~ActiveListener() { disable(); } VoidResult ActiveListener::listen() { - std::cerr << "[DEBUG LISTENER] ActiveListener::listen() called: bind_to_port=" - << config_.bind_to_port - << " address=" << config_.address->asStringView() << std::endl; + GOPHER_LOG_DEBUG("ActiveListener::listen() called: bind_to_port={} address={}", + config_.bind_to_port, config_.address->asStringView()); // Create socket if (config_.bind_to_port) { // Use the global createListenSocket function @@ -126,8 +125,7 @@ VoidResult ActiveListener::listen() { } socket_ = std::move(socket); - std::cerr << "[DEBUG LISTENER] listen socket created: fd=" - << socket_->ioHandle().fd() << std::endl; + GOPHER_LOG_DEBUG("Listen socket created: fd={}", 
socket_->ioHandle().fd()); // Call listen() to start accepting connections auto listen_result = @@ -138,8 +136,7 @@ VoidResult ActiveListener::listen() { err.message = "Failed to listen on socket"; return makeVoidError(err); } - std::cerr << "[DEBUG LISTENER] listen() succeeded: backlog=" - << config_.backlog << std::endl; + GOPHER_LOG_DEBUG("listen() succeeded: backlog={}", config_.backlog); // Apply socket options if (config_.socket_options) { @@ -175,9 +172,9 @@ VoidResult ActiveListener::listen() { [this](uint32_t events) { onSocketEvent(events); }, event::PlatformDefaultTriggerType, // Use platform-specific default static_cast(event::FileReadyType::Closed)); - std::cerr << "[DEBUG LISTENER] file_event created: " - << (file_event_ ? "SUCCESS" : "FAILED") - << " fd=" << socket_->ioHandle().fd() << std::endl; + GOPHER_LOG_DEBUG("file_event created: {} fd={}", + (file_event_ ? "SUCCESS" : "FAILED"), + socket_->ioHandle().fd()); if (enabled_) { file_event_->setEnabled(static_cast(event::FileReadyType::Read)); @@ -189,8 +186,7 @@ VoidResult ActiveListener::listen() { void ActiveListener::disable() { enabled_ = false; if (file_event_) { - std::cerr << "[DEBUG LISTENER] ActiveListener::disable() fd=" - << socket_->ioHandle().fd() << std::endl; + GOPHER_LOG_DEBUG("ActiveListener::disable() fd={}", socket_->ioHandle().fd()); file_event_->setEnabled(0); } } @@ -198,8 +194,7 @@ void ActiveListener::disable() { void ActiveListener::enable() { enabled_ = true; if (file_event_) { - std::cerr << "[DEBUG LISTENER] ActiveListener::enable() fd=" - << socket_->ioHandle().fd() << std::endl; + GOPHER_LOG_DEBUG("ActiveListener::enable() fd={}", socket_->ioHandle().fd()); file_event_->setEnabled(static_cast(event::FileReadyType::Read)); } } @@ -231,8 +226,7 @@ void ActiveListener::doAccept() { reinterpret_cast(&addr), &addr_len); if (!accept_result.ok()) { - std::cerr << "[DEBUG LISTENER] accept() failed: error=" - << accept_result.error_code() << std::endl; + 
GOPHER_LOG_DEBUG("accept() failed: error={}", accept_result.error_code()); if (accept_result.error_code() == EAGAIN || accept_result.error_code() == EWOULDBLOCK) { // No more connections to accept diff --git a/src/server/mcp_server_enhanced_filters.cc b/src/server/mcp_server_enhanced_filters.cc index 3ad3f99f..8540b070 100644 --- a/src/server/mcp_server_enhanced_filters.cc +++ b/src/server/mcp_server_enhanced_filters.cc @@ -90,17 +90,16 @@ void McpServer::setupEnhancedFilterChain( void onBackpressureApplied() override { server_.server_stats_.backpressure_events++; - std::cerr << "[BACKPRESSURE] Applied - pausing read" << std::endl; + GOPHER_LOG_DEBUG("Backpressure applied - pausing read"); } void onBackpressureReleased() override { - std::cerr << "[BACKPRESSURE] Released - resuming read" << std::endl; + GOPHER_LOG_DEBUG("Backpressure released - resuming read"); } void onDataDropped(size_t bytes) override { server_.server_stats_.bytes_dropped += bytes; - std::cerr << "[BACKPRESSURE] Dropped " << bytes << " bytes" - << std::endl; + GOPHER_LOG_WARN("Backpressure: Dropped {} bytes", bytes); } private: @@ -149,9 +148,8 @@ void McpServer::setupEnhancedFilterChain( void onThresholdExceeded(const std::string& metric_name, uint64_t value, uint64_t threshold) override { - std::cerr << "[METRICS] Threshold exceeded: " << metric_name - << " value=" << value << " threshold=" << threshold - << std::endl; + GOPHER_LOG_WARN("Metrics threshold exceeded: {} value={} threshold={}", + metric_name, value, threshold); server_.server_stats_.threshold_violations++; } @@ -195,14 +193,13 @@ void McpServer::setupEnhancedFilterChain( void onRequestRejected(const std::string& method, const std::string& reason) override { server_.server_stats_.requests_invalid++; - std::cerr << "[VALIDATION] Request rejected: " << method - << " Reason: " << reason << std::endl; + GOPHER_LOG_WARN("Validation: Request rejected: {} Reason: {}", + method, reason); } void onRateLimitExceeded(const std::string& 
method) override { server_.server_stats_.rate_limited_requests++; - std::cerr << "[VALIDATION] Method rate limit exceeded: " << method - << std::endl; + GOPHER_LOG_WARN("Validation: Method rate limit exceeded: {}", method); } private: @@ -284,8 +281,8 @@ void McpServer::setupEnhancedFilterChain( } // Log for debugging - std::cerr << "[CIRCUIT_BREAKER] Event: " - << filter::toString(event.event_type) << std::endl; + GOPHER_LOG_DEBUG("Circuit breaker event: {}", + filter::toString(event.event_type)); } // Track rate limiter events @@ -295,8 +292,8 @@ void McpServer::setupEnhancedFilterChain( server_.server_stats_.rate_limited_requests++; } - std::cerr << "[RATE_LIMITER] Event: " - << filter::toString(event.event_type) << std::endl; + GOPHER_LOG_DEBUG("Rate limiter event: {}", + filter::toString(event.event_type)); } } diff --git a/src/transport/http_sse_transport_socket.cc b/src/transport/http_sse_transport_socket.cc index 28f0d494..5376b3b2 100644 --- a/src/transport/http_sse_transport_socket.cc +++ b/src/transport/http_sse_transport_socket.cc @@ -54,12 +54,10 @@ HttpSseTransportSocket::~HttpSseTransportSocket() { underlying_transport_->closeSocket(network::ConnectionEvent::LocalClose); } catch (const std::exception& e) { // Log but don't propagate exception during destructor - std::cerr << "[ERROR] Exception during transport close: " << e.what() - << std::endl; + GOPHER_LOG_ERROR("Exception during transport close: {}", e.what()); } catch (...) 
{ // Catch any other exception to prevent destructor crash - std::cerr << "[ERROR] Unknown exception during transport close" - << std::endl; + GOPHER_LOG_ERROR("Unknown exception during transport close"); } } } @@ -248,12 +246,10 @@ void HttpSseTransportSocket::closeSocket(network::ConnectionEvent event) { try { underlying_transport_->closeSocket(event); } catch (const std::exception& e) { - std::cerr << "[ERROR] Exception in underlying transport closeSocket: " - << e.what() << std::endl; + GOPHER_LOG_ERROR("Exception in underlying transport closeSocket: {}", + e.what()); } catch (...) { - std::cerr - << "[ERROR] Unknown exception in underlying transport closeSocket" - << std::endl; + GOPHER_LOG_ERROR("Unknown exception in underlying transport closeSocket"); } // Clear the transport pointer to prevent double-close underlying_transport_.reset(); From c7df18a4561082eaf87ad223dc65137a213cd039 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 13:44:54 +0800 Subject: [PATCH 25/39] Add missing logging include to files using GOPHER_LOG macros Files that were converted to use GOPHER_LOG_DEBUG/ERROR/WARN were missing the log_macros.h include. 
Changes: - filter_chain_assembler.cc: Add log_macros.h include - http_sse_transport_socket.cc: Add log_macros.h include - listener_impl.cc: Add log_macros.h include --- src/filter/filter_chain_assembler.cc | 1 + src/network/listener_impl.cc | 2 ++ src/transport/http_sse_transport_socket.cc | 1 + 3 files changed, 4 insertions(+) diff --git a/src/filter/filter_chain_assembler.cc b/src/filter/filter_chain_assembler.cc index 0344fd98..e3c4a915 100644 --- a/src/filter/filter_chain_assembler.cc +++ b/src/filter/filter_chain_assembler.cc @@ -9,6 +9,7 @@ #include "mcp/filter/filter_chain_event_hub.h" #include "mcp/filter/filter_event_emitter.h" +#include "mcp/logging/log_macros.h" namespace mcp { namespace filter { diff --git a/src/network/listener_impl.cc b/src/network/listener_impl.cc index d6fb324d..2eb05984 100644 --- a/src/network/listener_impl.cc +++ b/src/network/listener_impl.cc @@ -1,6 +1,8 @@ #include #include +#include "mcp/logging/log_macros.h" + #ifdef _WIN32 #include #include diff --git a/src/transport/http_sse_transport_socket.cc b/src/transport/http_sse_transport_socket.cc index 5376b3b2..915e560f 100644 --- a/src/transport/http_sse_transport_socket.cc +++ b/src/transport/http_sse_transport_socket.cc @@ -15,6 +15,7 @@ #include #include "mcp/filter/http_codec_filter.h" +#include "mcp/logging/log_macros.h" #include "mcp/filter/sse_codec_filter.h" #include "mcp/network/address_impl.h" #include "mcp/network/connection_impl.h" From 5c0bf57da3ad843419e7acf1f2d72eeb97af6d1c Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 15:26:59 +0800 Subject: [PATCH 26/39] Add dev_improve_client_fetch_tools_cross_build branch to CI workflow Enables CI builds on the dev_improve_client_fetch_tools_cross_build branch. 
--- .github/workflows/build-all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 59e1c243..1357cda4 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -2,7 +2,7 @@ name: Build All Platforms on: push: - branches: [ dev_cross_build ] + branches: [ dev_cross_build, dev_improve_client_fetch_tools_cross_build ] workflow_dispatch: inputs: build_type: From 86c6703207cdb4e05afaacfdb41fae24bc336f8c Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 16:15:19 +0800 Subject: [PATCH 27/39] Resolve Windows ARM64 build by adding missing socket libraries Add ws2_32, crypt32, and iphlpapi link libraries to the ARM64 toolchain file, matching the x64 configuration. Also improve build logging for easier debugging. Changes: - Add Windows socket libraries to linker flags - Add libevent include directory - Save cmake and make logs for debugging - Copy logs to output directory --- docker-mcp/Dockerfile.windows-arm64-llvm | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docker-mcp/Dockerfile.windows-arm64-llvm b/docker-mcp/Dockerfile.windows-arm64-llvm index e7be4969..ecac2990 100644 --- a/docker-mcp/Dockerfile.windows-arm64-llvm +++ b/docker-mcp/Dockerfile.windows-arm64-llvm @@ -104,7 +104,13 @@ RUN echo 'set(CMAKE_SYSTEM_NAME Windows)' > /build/toolchain-win-arm64.cmake && echo '# libevent paths' >> /build/toolchain-win-arm64.cmake && \ echo 'set(LIBEVENT_INCLUDE_DIRS /deps/libevent/include)' >> /build/toolchain-win-arm64.cmake && \ echo 'set(LIBEVENT_LIBRARIES /deps/libevent/lib/libevent_core.a /deps/libevent/lib/libevent_extra.a)' >> /build/toolchain-win-arm64.cmake && \ - echo 'set(LIBEVENT_FOUND TRUE)' >> /build/toolchain-win-arm64.cmake + echo 'set(LIBEVENT_FOUND TRUE)' >> /build/toolchain-win-arm64.cmake && \ + echo 'include_directories(/deps/libevent/include)' >> /build/toolchain-win-arm64.cmake && \ + echo 
'' >> /build/toolchain-win-arm64.cmake && \ + echo '# Windows-specific linking - add socket libraries' >> /build/toolchain-win-arm64.cmake && \ + echo 'link_libraries(ws2_32 crypt32 iphlpapi)' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lws2_32 -lcrypt32 -liphlpapi")' >> /build/toolchain-win-arm64.cmake && \ + echo 'set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -lws2_32 -lcrypt32 -liphlpapi")' >> /build/toolchain-win-arm64.cmake # Build the library for Windows ARM64 RUN mkdir -p cmake-build && cd cmake-build && \ @@ -122,8 +128,9 @@ RUN mkdir -p cmake-build && cd cmake-build && \ -DMCP_USE_NGHTTP2=OFF \ -DCMAKE_INSTALL_PREFIX=/install \ -DCMAKE_SYSTEM_NAME=Windows \ - /build && \ - make -j$(nproc) VERBOSE=1 || make -j1 VERBOSE=1 && \ + /build 2>&1 | tee /tmp/cmake_output.log && \ + make -j$(nproc) VERBOSE=1 2>&1 | tee /tmp/make_output.log || \ + (echo "=== Build failed, showing logs ===" && cat /tmp/cmake_output.log && cat /tmp/make_output.log && make -j1 VERBOSE=1) && \ make install || true # Create output directory @@ -153,4 +160,7 @@ RUN aarch64-w64-mingw32-strip --strip-unneeded *.dll 2>/dev/null || true && \ # List final output RUN echo "=== Output files ===" && ls -la /output/ +# Keep logs for debugging +RUN cp /tmp/*.log /output/ 2>/dev/null || true + CMD ["/bin/bash"] From 73346577293fbcce51bdda1de23efdbe47f42663 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 20:11:07 +0800 Subject: [PATCH 28/39] Fix C API linking when only shared libraries are built The C API was incorrectly checking for a non-existent gopher-mcp-shared target, causing it to try linking against static libraries that don't exist when BUILD_STATIC_LIBS=OFF. 
Changes: - Check BUILD_SHARED_LIBS variable instead of non-existent target - Prefer shared libraries when available - Fall back to static only when shared libs not built --- src/c_api/CMakeLists.txt | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/c_api/CMakeLists.txt b/src/c_api/CMakeLists.txt index 5dce2d0d..a40885c9 100644 --- a/src/c_api/CMakeLists.txt +++ b/src/c_api/CMakeLists.txt @@ -140,11 +140,19 @@ target_include_directories(gopher_mcp_c ${CMAKE_BINARY_DIR}/_deps/fmt-src/include ) -# Link with shared libraries +# Link with shared libraries # The shared library already contains all needed dependencies -# When gopher-mcp is built as static only, link to static version -if(TARGET gopher-mcp AND NOT TARGET gopher-mcp-shared) - # gopher-mcp exists but no shared version - it's an alias to static +# Prefer shared libraries when available, fall back to static +if(BUILD_SHARED_LIBS AND TARGET gopher-mcp) + # Shared libraries are being built - use them + target_link_libraries(gopher_mcp_c + PRIVATE + gopher-mcp + gopher-mcp-event + gopher-mcp-logging + ) +elseif(TARGET gopher-mcp-static) + # Only static libraries available target_link_libraries(gopher_mcp_c PRIVATE gopher-mcp-static @@ -152,13 +160,7 @@ if(TARGET gopher-mcp AND NOT TARGET gopher-mcp-shared) gopher-mcp-logging-static ) else() - # Normal case - shared libraries exist - target_link_libraries(gopher_mcp_c - PRIVATE - gopher-mcp - gopher-mcp-event - gopher-mcp-logging - ) + message(FATAL_ERROR "No gopher-mcp library target found") endif() # Add nghttp2 linking if needed From 3335b4f3c20e360f852d995b23e183f5a0d9d280 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 20:14:27 +0800 Subject: [PATCH 29/39] Move HTTP transport sources out of LLHTTP conditional The http_sse_transport_socket.cc and https_sse_transport_factory.cc files don't actually depend on llhttp - only llhttp_parser.cc does. 
Moving them out of the conditional fixes Windows ARM64 build which doesn't use llhttp. --- CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7bd40d63..b7cd2abe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -485,17 +485,17 @@ set(MCP_CLIENT_SERVER_SOURCES src/server/mcp_server.cc ) -# HTTP parser sources (conditional on llhttp) +# HTTP parser sources (some conditional on llhttp) set(MCP_HTTP_SOURCES src/http/http_parser.cc src/http/sse_parser.cc + src/transport/http_sse_transport_socket.cc # HTTP+SSE with layered architecture + src/transport/https_sse_transport_factory.cc # HTTPS+SSE factory ) if(LLHTTP_FOUND) list(APPEND MCP_HTTP_SOURCES src/http/llhttp_parser.cc - src/transport/http_sse_transport_socket.cc # HTTP+SSE with layered architecture - src/transport/https_sse_transport_factory.cc # HTTPS+SSE factory ) endif() From fd3d13485cb03329f4748390b3b1c8aaeb4e12bf Mon Sep 17 00:00:00 2001 From: RahulHere Date: Sat, 31 Jan 2026 20:53:00 +0800 Subject: [PATCH 30/39] Enable release creation on dev_improve_client_fetch_tools_cross_build branch --- .github/workflows/build-all.yml | 2 +- .github/workflows/ci-macos.yml.disabled | 37 ------------- .github/workflows/ci.yml.disabled | 52 ------------------- .../workflows/clang-format-check.yml.disabled | 43 --------------- 4 files changed, 1 insertion(+), 133 deletions(-) delete mode 100644 .github/workflows/ci-macos.yml.disabled delete mode 100644 .github/workflows/ci.yml.disabled delete mode 100644 .github/workflows/clang-format-check.yml.disabled diff --git a/.github/workflows/build-all.yml b/.github/workflows/build-all.yml index 1357cda4..703067ac 100644 --- a/.github/workflows/build-all.yml +++ b/.github/workflows/build-all.yml @@ -215,7 +215,7 @@ jobs: runs-on: ubuntu-latest needs: [build-linux, build-windows, build-macos] if: | - (github.event_name == 'push' && github.ref == 'refs/heads/dev_cross_build') || + (github.event_name == 'push' 
&& (github.ref == 'refs/heads/dev_cross_build' || github.ref == 'refs/heads/dev_improve_client_fetch_tools_cross_build')) || (github.event_name == 'workflow_dispatch' && github.event.inputs.create_release == 'true') steps: diff --git a/.github/workflows/ci-macos.yml.disabled b/.github/workflows/ci-macos.yml.disabled deleted file mode 100644 index c23934b7..00000000 --- a/.github/workflows/ci-macos.yml.disabled +++ /dev/null @@ -1,37 +0,0 @@ -name: CI (macOS) - -on: - pull_request: - branches: [ main, master, develop ] - -jobs: - build-and-test: - name: Build and Test (macOS) - runs-on: macos-latest - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Install dependencies - run: | - brew update - brew install cmake - - - name: Configure CMake - run: | - cmake -B build -DCMAKE_BUILD_TYPE=Debug - - - name: Build - run: | - cmake --build build --config Debug -j$(sysctl -n hw.ncpu) - - - name: Test - run: | - cd build - ctest -C Debug --output-on-failure --verbose - - - name: List all tests - if: always() - run: | - make test-list || true \ No newline at end of file diff --git a/.github/workflows/ci.yml.disabled b/.github/workflows/ci.yml.disabled deleted file mode 100644 index 1883bcf9..00000000 --- a/.github/workflows/ci.yml.disabled +++ /dev/null @@ -1,52 +0,0 @@ -name: CI - -on: - push: - branches: [ main, master, develop, 'br_dev_*' ] - pull_request: - branches: [ main, master, develop ] - -jobs: - build-and-test: - name: Build and Test - runs-on: ${{ matrix.os }} - - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, macos-latest] - build_type: [Debug, Release] - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Install dependencies (Ubuntu) - if: runner.os == 'Linux' - run: | - sudo apt-get update - sudo apt-get install -y cmake g++ clang - - - name: Install dependencies (macOS) - if: runner.os == 'macOS' - run: | - brew update - brew install cmake - - - name: Configure CMake - run: | - cmake -B build 
-DCMAKE_BUILD_TYPE=${{ matrix.build_type }} - - - name: Build - run: | - cmake --build build --config ${{ matrix.build_type }} -j$(nproc 2>/dev/null || sysctl -n hw.ncpu) - - - name: Test - run: | - cd build - ctest -C ${{ matrix.build_type }} --output-on-failure --verbose - - - name: List all tests - if: always() - run: | - make test-list || true \ No newline at end of file diff --git a/.github/workflows/clang-format-check.yml.disabled b/.github/workflows/clang-format-check.yml.disabled deleted file mode 100644 index db912818..00000000 --- a/.github/workflows/clang-format-check.yml.disabled +++ /dev/null @@ -1,43 +0,0 @@ -name: Clang Format Check - -on: - push: - branches: [ main, master, develop ] - pull_request: - branches: [ main, master, develop ] - -jobs: - formatting-check: - name: Check Code Formatting - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Install clang-format - run: | - sudo apt-get update - sudo apt-get install -y clang-format-14 - - - name: Check formatting - run: | - echo "Checking code formatting with clang-format..." - # Find all C++ source files - find include tests -name "*.h" -o -name "*.cpp" | while read file; do - # Format the file and check if it differs from the original - clang-format-14 --style=file "$file" | diff -u "$file" - || { - echo "::error file=$file::File is not properly formatted" - exit 1 - } - done - echo "All files are properly formatted!" - - - name: Suggest formatting fix - if: failure() - run: | - echo "::warning::Code formatting issues detected. Please run 'make format' locally and commit the changes." 
- echo "To fix formatting issues, run:" - echo " make format" - echo "or" - echo " find include tests -name '*.h' -o -name '*.cpp' | xargs clang-format -i" \ No newline at end of file From df7bd6cc2ec3ccd745a97482383bd8c12b6ba817 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 3 Feb 2026 20:11:51 +0800 Subject: [PATCH 31/39] Improve GOPHER_LOG component loggers not producing output The shouldLog() function was returning false immediately when the bloom filter didn't contain a logger name (because the logger hadn't been created yet). This prevented new component loggers from ever logging. Changes: - Fix shouldLog() to fall back to global level check when logger doesn't exist - Enable default sink initialization in initializeDefaults() - Attach default sink to new loggers in getOrCreateLogger() --- src/logging/logger_registry.cc | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/logging/logger_registry.cc b/src/logging/logger_registry.cc index e3b5338f..4896d3b5 100644 --- a/src/logging/logger_registry.cc +++ b/src/logging/logger_registry.cc @@ -16,11 +16,10 @@ LoggerRegistry::LoggerRegistry() : global_level_(LogLevel::Info) { } void LoggerRegistry::initializeDefaults() { - // Create default logger without sink initially to avoid initialization issues + // Create default logger with stderr sink default_logger_ = std::make_shared("default", LogMode::Sync); - // Don't create sink during static initialization to avoid potential deadlock - // default_sink_ = std::make_shared(StdioSink::Stderr); - // default_logger_->setSink(default_sink_); + default_sink_ = std::make_shared(StdioSink::Stderr); + default_logger_->setSink(default_sink_); default_logger_->setLevel(global_level_); // Initialize bloom filter @@ -52,10 +51,10 @@ std::shared_ptr LoggerRegistry::getOrCreateLogger( LogLevel effective_level = getEffectiveLevelLocked(name); logger->setLevel(effective_level); - // Don't try to use default sink to avoid potential issues - 
// if (default_logger_ && default_sink_) { - // logger->setSink(default_sink_); - // } + // Use default sink for all loggers + if (default_sink_) { + logger->setSink(default_sink_); + } // Set bloom filter hint bloom_filter_.add(name); @@ -145,7 +144,9 @@ void LoggerRegistry::registerComponentLogger(Component component, bool LoggerRegistry::shouldLog(const std::string& name, LogLevel level) { // Fast path with bloom filter if (!bloom_filter_.mayContain(name)) { - return false; + // Logger doesn't exist yet, fall back to global level check + // This allows new component loggers to work without pre-registration + return level >= global_level_; } std::lock_guard lock(mutex_); From 8dbf50e2798eac85ed5b070ebad680a194951dbe Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 3 Feb 2026 20:12:19 +0800 Subject: [PATCH 32/39] Resolve HttpSse transport not using URL path for SSE endpoint The HttpSse transport case was extracting the path from the URL but discarding it, causing the client to default to /rpc instead of using the correct path (e.g., /sse) specified in the server URL. 
Changes: - Extract and set config.http_path from URL for HttpSse transport - Set config.http_host for proper Host header - Add SSL configuration for HTTPS URLs (matching StreamableHttp behavior) --- src/client/mcp_client.cc | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/src/client/mcp_client.cc b/src/client/mcp_client.cc index 822cd0c1..bbdd6d1e 100644 --- a/src/client/mcp_client.cc +++ b/src/client/mcp_client.cc @@ -882,23 +882,46 @@ McpConnectionConfig McpClient::createConnectionConfig(TransportType transport) { http_config.mode = transport::HttpSseTransportSocketConfig::Mode::CLIENT; // Extract server address from URI - // URI format: http://host:port or https://host:port + // URI format: http://host:port/path or https://host:port/path std::string server_addr; + bool is_https = false; if (current_uri_.find("http://") == 0) { server_addr = current_uri_.substr(7); // Remove "http://" } else if (current_uri_.find("https://") == 0) { server_addr = current_uri_.substr(8); // Remove "https://" + is_https = true; } else { server_addr = current_uri_; } - // Remove any path component (everything after first /) + // Extract path component (e.g., /sse from https://host/sse) + std::string http_path = "/"; size_t slash_pos = server_addr.find('/'); if (slash_pos != std::string::npos) { + http_path = server_addr.substr(slash_pos); server_addr = server_addr.substr(0, slash_pos); } http_config.server_address = server_addr; + config.http_path = http_path; + config.http_host = server_addr; + + // Set SSL transport for HTTPS URLs + if (is_https) { + http_config.underlying_transport = + transport::HttpSseTransportSocketConfig::UnderlyingTransport::SSL; + transport::HttpSseTransportSocketConfig::SslConfig ssl_cfg; + ssl_cfg.verify_peer = false; + ssl_cfg.alpn_protocols = std::vector{"http/1.1"}; + std::string sni_host = server_addr; + size_t colon_pos = sni_host.find(':'); + if (colon_pos != std::string::npos) { + sni_host = 
sni_host.substr(0, colon_pos); + } + ssl_cfg.sni_hostname = mcp::make_optional(sni_host); + http_config.ssl_config = mcp::make_optional(ssl_cfg); + } + config.http_sse_config = mcp::make_optional(http_config); break; } From 92d6a895a469d34be2c0a02b3cf28e36a9a2aa72 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 3 Feb 2026 20:35:17 +0800 Subject: [PATCH 33/39] Change ApplicationBase logs from INFO to DEBUG Internal initialization messages should not be shown to end users by default. Changed GOPHER_LOG_INFO to GOPHER_LOG_DEBUG for application lifecycle logs (startup, workers, shutdown). --- include/mcp/mcp_application_base.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/include/mcp/mcp_application_base.h b/include/mcp/mcp_application_base.h index 1dabf5fc..4fcf093e 100644 --- a/include/mcp/mcp_application_base.h +++ b/include/mcp/mcp_application_base.h @@ -910,7 +910,7 @@ class ApplicationBase { ApplicationBase(const Config& config) : config_(config), shutdown_requested_(false), workers_started_(false) { - GOPHER_LOG_INFO("Initializing application: {}", config_.name); + GOPHER_LOG_DEBUG("Initializing application: {}", config_.name); } virtual ~ApplicationBase() { shutdown(); } @@ -930,11 +930,11 @@ class ApplicationBase { main_dispatcher_owned_ = std::make_unique("main"); main_dispatcher_ = main_dispatcher_owned_.get(); - GOPHER_LOG_INFO("Created main dispatcher"); + GOPHER_LOG_DEBUG("Created main dispatcher"); } - GOPHER_LOG_INFO("Initializing application with {} workers", - config_.worker_threads); + GOPHER_LOG_DEBUG("Initializing application with {} workers", + config_.worker_threads); // Create worker threads for (size_t i = 0; i < config_.worker_threads; ++i) { @@ -964,7 +964,7 @@ class ApplicationBase { return true; } - GOPHER_LOG_INFO("Starting application workers"); + GOPHER_LOG_DEBUG("Starting application workers"); // Start worker threads for (size_t i = 0; i < workers_.size(); ++i) { @@ -993,7 +993,7 @@ class 
ApplicationBase { * Run the main event loop */ virtual void run() { - GOPHER_LOG_INFO("Running main event loop"); + GOPHER_LOG_DEBUG("Running main event loop"); // Main dispatcher should already be created in initialize() if (!main_dispatcher_) { @@ -1020,7 +1020,7 @@ class ApplicationBase { return; } - GOPHER_LOG_INFO("Shutting down application"); + GOPHER_LOG_DEBUG("Shutting down application"); shutdown_requested_ = true; running_ = false; // Legacy API compatibility initialized_ = false; // Legacy API compatibility @@ -1058,7 +1058,7 @@ class ApplicationBase { main_dispatcher_owned_.reset(); } - GOPHER_LOG_INFO("Application shutdown complete"); + GOPHER_LOG_DEBUG("Application shutdown complete"); } /** @@ -1202,7 +1202,7 @@ class ApplicationBase { private: void run() { - GOPHER_LOG_INFO("Worker {} starting", name_); + GOPHER_LOG_DEBUG("Worker {} starting", name_); // Create dispatcher for this thread dispatcher_ = std::make_unique(name_); @@ -1213,7 +1213,7 @@ class ApplicationBase { std::this_thread::sleep_for(std::chrono::milliseconds(1)); } - GOPHER_LOG_INFO("Worker {} stopped", name_); + GOPHER_LOG_DEBUG("Worker {} stopped", name_); } std::string name_; From e8591007a8ebf95f16f735c38b378c59dbab1d10 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 3 Feb 2026 21:15:57 +0800 Subject: [PATCH 34/39] Change default log level from Info to Warning Internal SDK logs (filter registration, configuration loading, etc.) should not appear by default. Users can still enable Info/Debug levels explicitly when needed. 
--- src/logging/logger_registry.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging/logger_registry.cc b/src/logging/logger_registry.cc index 4896d3b5..d244e9c1 100644 --- a/src/logging/logger_registry.cc +++ b/src/logging/logger_registry.cc @@ -11,7 +11,7 @@ LoggerRegistry& LoggerRegistry::instance() { return instance; } -LoggerRegistry::LoggerRegistry() : global_level_(LogLevel::Info) { +LoggerRegistry::LoggerRegistry() : global_level_(LogLevel::Warning) { initializeDefaults(); } From 84d958a1223904466c091c8f32d34c5edf5052e4 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 3 Feb 2026 21:16:07 +0800 Subject: [PATCH 35/39] Improve libevent threading initialization order Use static initializer to call evthread_use_pthreads() at program startup, before any other code (including curl) can initialize libevent. This fixes the "evthread initialization must be called BEFORE anything else!" error on systems where curl is compiled with libevent support. Changes: - Remove event_enable_debug_mode() which can only be called once and causes issues with shared libraries - Use simple static initializer instead of constructor priority attribute - evthread_use_pthreads() is safe to call multiple times --- src/event/libevent_dispatcher.cc | 33 ++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/src/event/libevent_dispatcher.cc b/src/event/libevent_dispatcher.cc index 4553542a..eeee5e0d 100644 --- a/src/event/libevent_dispatcher.cc +++ b/src/event/libevent_dispatcher.cc @@ -74,22 +74,35 @@ uint32_t fromLibeventEvents(short events) { return result; } -// Lazy initialization for libevent threading support -// Uses std::call_once to ensure thread-safe one-time initialization -// This avoids blocking in static initialization which was causing hangs -void ensureLibeventThreadingInitialized() { - static std::once_flag init_flag; - std::call_once(init_flag, []() { +// Early initialization for libevent threading 
support +// CRITICAL: This must run BEFORE any other libevent functions or libraries +// that might use libevent (like curl with libevent support). +// evthread_use_pthreads() is safe to call multiple times - it returns 0 after first call +struct LibeventEarlyInit { + LibeventEarlyInit() { #ifdef _WIN32 evthread_use_windows_threads(); #else evthread_use_pthreads(); #endif - // Enable debug mode in debug builds -#ifndef NDEBUG - event_enable_debug_mode(); + // NOTE: event_enable_debug_mode() is NOT called here because: + // 1. It can only be called once across the entire process + // 2. With shared libraries, this code may run from multiple TUs + // 3. Debug mode is only needed for debugging libevent internals + } +}; + +// Single static instance ensures initialization at program startup +static LibeventEarlyInit s_libevent_early_init; + +// This function is kept for compatibility +void ensureLibeventThreadingInitialized() { + // evthread functions are safe to call multiple times +#ifdef _WIN32 + evthread_use_windows_threads(); +#else + evthread_use_pthreads(); #endif - }); } } // namespace From 0828d5cf869f6e27b44abfa7052076602ba9f893 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Fri, 6 Feb 2026 11:59:19 +0800 Subject: [PATCH 36/39] Improve Windows MinGW build for inet_ntop/inet_pton Add _WIN32_WINNT=0x0600 compile definition for Windows builds to ensure inet_ntop and inet_pton are declared. These functions require Windows Vista or later. 
--- CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b7cd2abe..e968276f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -691,11 +691,13 @@ else() endforeach() endif() -# Add Windows-specific socket libraries +# Add Windows-specific socket libraries and definitions if(WIN32) foreach(lib_target ${REAL_TARGETS}) if(TARGET ${lib_target}) target_link_libraries(${lib_target} PRIVATE ws2_32 crypt32 iphlpapi) + # Require Windows Vista or later for inet_ntop/inet_pton + target_compile_definitions(${lib_target} PRIVATE _WIN32_WINNT=0x0600) endif() endforeach() endif() From dfec8870d59c3c2cbd3fb4126ec5349181d82e9c Mon Sep 17 00:00:00 2001 From: RahulHere Date: Tue, 10 Feb 2026 20:17:53 +0800 Subject: [PATCH 37/39] make format --- sdk/typescript/README.md | 224 +++++++++-------- sdk/typescript/examples/basic-usage.ts | 48 ++-- .../src/__tests__/async-filter-chain.test.ts | 158 ++++++------ .../src/__tests__/filter-ffi.test.ts | 136 ++++++----- .../gopher-filtered-transport.test.ts | 130 +++++----- .../src/__tests__/mcp-end-to-end.test.ts | 10 +- .../mcp-ffi-dispatcher.integration.test.ts | 107 +++++---- .../__tests__/mcp-ffi-dispatcher.unit.test.ts | 70 +++--- .../src/__tests__/mcp-filter-chain.test.ts | 9 +- .../src/__tests__/mcp-filter-manager.test.ts | 2 +- sdk/typescript/src/config-utils.ts | 118 ++++----- sdk/typescript/src/filter-chain-ffi.ts | 226 +++++++++--------- sdk/typescript/src/filter-dispatcher.ts | 6 +- sdk/typescript/src/filter-event-callbacks.ts | 66 +++-- sdk/typescript/src/filter-events.ts | 4 +- sdk/typescript/src/filter-types.ts | 18 +- .../src/gopher-filtered-transport.ts | 80 ++++--- sdk/typescript/src/index.ts | 17 +- sdk/typescript/src/mcp-ffi-bindings.ts | 94 ++++---- sdk/typescript/src/mcp-ffi-dispatcher.ts | 4 +- sdk/typescript/src/mcp-filter-api.ts | 86 +++---- sdk/typescript/src/mcp-filter-buffer.ts | 8 +- sdk/typescript/src/mcp-filter-chain.ts | 21 +- 
sdk/typescript/src/mcp-filter-manager.ts | 188 ++++++++------- sdk/typescript/src/message-queue.ts | 2 +- sdk/typescript/src/metrics-callbacks.ts | 99 ++++---- sdk/typescript/src/types/circuit-breaker.ts | 6 +- src/event/libevent_dispatcher.cc | 3 +- src/filter/enhanced_filter_chain_factory.cc | 4 +- src/filter/filter_chain_assembler.cc | 4 +- src/filter/sse_codec_filter.cc | 3 +- src/mcp_connection_manager.cc | 11 +- src/network/connection_impl.cc | 3 +- src/network/listener_impl.cc | 11 +- src/server/mcp_server_enhanced_filters.cc | 4 +- src/transport/http_sse_transport_socket.cc | 2 +- 36 files changed, 1015 insertions(+), 967 deletions(-) diff --git a/sdk/typescript/README.md b/sdk/typescript/README.md index 7361f0a3..38446ffb 100644 --- a/sdk/typescript/README.md +++ b/sdk/typescript/README.md @@ -138,7 +138,7 @@ The SDK supports the canonical listener-based configuration format that matches import { createRealDispatcher, createFilterChainFromConfig, - CanonicalConfig + CanonicalConfig, } from "@mcp/filter-sdk"; // Create dispatcher @@ -152,20 +152,20 @@ const config: CanonicalConfig = { address: { socket_address: { address: "127.0.0.1", - port_value: 9090 - } + port_value: 9090, + }, }, filter_chains: [ { filters: [ { name: "http.codec", type: "http.codec" }, { name: "sse.codec", type: "sse.codec" }, - { name: "json_rpc.dispatcher", type: "json_rpc.dispatcher" } - ] - } - ] - } - ] + { name: "json_rpc.dispatcher", type: "json_rpc.dispatcher" }, + ], + }, + ], + }, + ], }; // Create filter chain from configuration @@ -450,6 +450,7 @@ The SDK uses the canonical listener-based configuration format that matches the ``` This format provides: + - Clear separation of network configuration (listeners) from processing logic (filter chains) - Support for multiple listeners on different ports - Consistent structure across C++ and TypeScript implementations @@ -502,32 +503,36 @@ The `FilterChain` class provides a Koffi-based FFI bridge to C++ filter implemen #### **Quick 
Start** ```typescript -import { FilterChain } from './filter-chain-ffi'; -import { createRealDispatcher } from './mcp-filter-api'; +import { FilterChain } from "./filter-chain-ffi"; +import { createRealDispatcher } from "./mcp-filter-api"; // Create dispatcher const dispatcher = createRealDispatcher(); // Define filter configuration const config = { - listeners: [{ - name: 'filters', - filter_chains: [{ - filters: [ - { type: 'rate_limiter', name: 'limiter', config: { rps: 100 } }, - { type: 'circuit_breaker', name: 'breaker', config: { threshold: 5 } }, - { type: 'metrics', name: 'metrics' } - ] - }] - }] + listeners: [ + { + name: "filters", + filter_chains: [ + { + filters: [ + { type: "rate_limiter", name: "limiter", config: { rps: 100 } }, + { type: "circuit_breaker", name: "breaker", config: { threshold: 5 } }, + { type: "metrics", name: "metrics" }, + ], + }, + ], + }, + ], }; // Create and use filter chain const chain = new FilterChain(dispatcher, config); await chain.initialize(); -const result = await chain.processIncoming({ method: 'test' }); -console.log('Filter decision:', result.decision); // 0=ALLOW, 1=DENY +const result = await chain.processIncoming({ method: "test" }); +console.log("Filter decision:", result.decision); // 0=ALLOW, 1=DENY await chain.shutdown(); chain.destroy(); @@ -551,35 +556,40 @@ chain.destroy(); #### **API Reference** **Lifecycle:** + ```typescript -constructor(dispatcher, config) // Create chain -await initialize() // Start processing -await shutdown() // Stop gracefully -destroy() // Release resources +constructor(dispatcher, config); // Create chain +await initialize(); // Start processing +await shutdown(); // Stop gracefully +destroy(); // Release resources ``` **Message Processing:** + ```typescript -await processIncoming(message) // Filter incoming message -await processOutgoing(message) // Filter outgoing message +await processIncoming(message); // Filter incoming message +await processOutgoing(message); // Filter 
outgoing message ``` **Metrics & Stats:** + ```typescript await getChainStats() // Get chain statistics await getMetrics(filterName?) // Get filter metrics ``` **Dynamic Configuration:** + ```typescript -await enableFilter(name) // Enable a filter -await disableFilter(name) // Disable a filter -await exportConfig() // Export current config +await enableFilter(name); // Enable a filter +await disableFilter(name); // Disable a filter +await exportConfig(); // Export current config ``` **Validation:** + ```typescript -FilterChain.validateConfig(config) // Validate before creation +FilterChain.validateConfig(config); // Validate before creation ``` #### **Usage with Official MCP SDK** @@ -587,8 +597,8 @@ FilterChain.validateConfig(config) // Validate before creation The FilterChain can be wrapped in a custom transport to use with the official MCP SDK: ```typescript -import { FilterChain } from './filter-chain-ffi'; -import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; +import { FilterChain } from "./filter-chain-ffi"; +import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; class FilteredTransport { constructor(baseTransport, filterChain) { @@ -598,16 +608,18 @@ class FilteredTransport { async send(message) { const result = await this.filterChain.processOutgoing(message); - if (result.decision === 0) { // ALLOW + if (result.decision === 0) { + // ALLOW return this.baseTransport.send(result.transformedMessage || message); } throw new Error(`Message blocked: ${result.reason}`); } onMessage(handler) { - this.baseTransport.onMessage(async (message) => { + this.baseTransport.onMessage(async message => { const result = await this.filterChain.processIncoming(message); - if (result.decision === 0) { // ALLOW + if (result.decision === 0) { + // ALLOW handler(result.transformedMessage || message); } }); @@ -652,18 +664,22 @@ const stdioTransport = new StdioServerTransport(); const transport = new 
GopherFilteredTransport(stdioTransport, { dispatcherHandle: dispatcher, filterConfig: { - listeners: [{ - name: "hybrid_filters", - filter_chains: [{ - filters: [ - { type: "rate_limiter", name: "limiter", config: { rps: 100 } }, - { type: "circuit_breaker", name: "breaker", config: { threshold: 5 } }, - { type: "metrics", name: "metrics", config: { export_port: 9090 } } - ] - }] - }] + listeners: [ + { + name: "hybrid_filters", + filter_chains: [ + { + filters: [ + { type: "rate_limiter", name: "limiter", config: { rps: 100 } }, + { type: "circuit_breaker", name: "breaker", config: { threshold: 5 } }, + { type: "metrics", name: "metrics", config: { export_port: 9090 } }, + ], + }, + ], + }, + ], }, - debugLogging: false + debugLogging: false, }); // Use with MCP SDK server @@ -705,30 +721,34 @@ Note: In hybrid SDK mode, the `address` field is **optional** since the SDK tran ```json { - "listeners": [{ - "name": "hybrid_filters", - "filter_chains": [{ - "name": "default", - "filters": [ - { - "name": "rate_limiter", - "type": "rate_limiter", - "config": { - "requests_per_second": 100, - "burst_size": 20 - } - }, + "listeners": [ + { + "name": "hybrid_filters", + "filter_chains": [ { - "name": "circuit_breaker", - "type": "circuit_breaker", - "config": { - "failure_threshold": 5, - "timeout_ms": 60000 - } + "name": "default", + "filters": [ + { + "name": "rate_limiter", + "type": "rate_limiter", + "config": { + "requests_per_second": 100, + "burst_size": 20 + } + }, + { + "name": "circuit_breaker", + "type": "circuit_breaker", + "config": { + "failure_threshold": 5, + "timeout_ms": 60000 + } + } + ] } ] - }] - }] + } + ] } ``` @@ -737,18 +757,18 @@ Note: In hybrid SDK mode, the `address` field is **optional** since the SDK tran ```typescript // Get metrics const metrics = await transport.getMetrics(); -console.log('Total processed:', metrics.chain.requests_total); +console.log("Total processed:", metrics.chain.requests_total); // Enable/disable filters -await 
transport.setFilterEnabled('limiter', false); // Disable rate limiter -await transport.setFilterEnabled('breaker', true); // Enable circuit breaker +await transport.setFilterEnabled("limiter", false); // Disable rate limiter +await transport.setFilterEnabled("breaker", true); // Enable circuit breaker // Export current configuration const config = await transport.exportFilterConfig(); // Get queue stats (for backpressure monitoring) const queueStats = transport.getQueueStats(); -console.log('Queued messages:', queueStats.size); +console.log("Queued messages:", queueStats.size); ``` #### **Error Handling** @@ -758,10 +778,10 @@ try { await transport.send(message); } catch (error) { if (error instanceof FilterDeniedError) { - console.log('Message blocked by filter:', error.reason); + console.log("Message blocked by filter:", error.reason); // Handle rate limiting, circuit breaker open, etc. } else { - console.error('Transport error:', error); + console.error("Transport error:", error); } } ``` @@ -783,18 +803,22 @@ app.post("/sse", async (req, res) => { const transport = new GopherFilteredTransport(sseTransport, { dispatcherHandle: dispatcher, filterConfig: { - listeners: [{ - name: "http_filters", - filter_chains: [{ - filters: [ - { type: "rate_limiter", name: "limiter", config: { rps: 50 } }, - { type: "circuit_breaker", name: "breaker", config: { threshold: 3 } }, - { type: "metrics", name: "metrics" } - ] - }] - }] + listeners: [ + { + name: "http_filters", + filter_chains: [ + { + filters: [ + { type: "rate_limiter", name: "limiter", config: { rps: 50 } }, + { type: "circuit_breaker", name: "breaker", config: { threshold: 3 } }, + { type: "metrics", name: "metrics" }, + ], + }, + ], + }, + ], }, - onValidationWarning: (warnings) => console.warn('Filter warnings:', warnings) + onValidationWarning: warnings => console.warn("Filter warnings:", warnings), }); const server = new Server({ name: "http-server", version: "1.0.0" }); @@ -807,17 +831,19 @@ 
app.listen(3000); #### **Migration from Pure SDK** **Before (Pure SDK):** + ```typescript const transport = new StdioServerTransport(); await server.connect(transport); ``` **After (With Gopher Filters - One Line Change!):** + ```typescript -const transport = new GopherFilteredTransport( - new StdioServerTransport(), - { dispatcherHandle: dispatcher, filterConfig: config } -); +const transport = new GopherFilteredTransport(new StdioServerTransport(), { + dispatcherHandle: dispatcher, + filterConfig: config, +}); await server.connect(transport); ``` @@ -831,14 +857,14 @@ await server.connect(transport); #### **Comparison: Native vs Hybrid Approach** -| Aspect | Native C++ | Hybrid SDK | -|--------|---------------------|---------------------| -| Protocol | Custom C++ | Official SDK | -| Filters | C++ | C++ (via wrapper) | -| Overhead | ~0.5ms | ~0.66ms | -| SDK Updates | Manual | Automatic | -| Complexity | High | Low | -| Use Case | Full control | Quick adoption | +| Aspect | Native C++ | Hybrid SDK | +| ----------- | ------------ | ----------------- | +| Protocol | Custom C++ | Official SDK | +| Filters | C++ | C++ (via wrapper) | +| Overhead | ~0.5ms | ~0.66ms | +| SDK Updates | Manual | Automatic | +| Complexity | High | Low | +| Use Case | Full control | Quick adoption | See `examples/configs/hybrid-wrapper-config.json` for complete configuration examples. 
diff --git a/sdk/typescript/examples/basic-usage.ts b/sdk/typescript/examples/basic-usage.ts index aa81b1ce..43a3aece 100644 --- a/sdk/typescript/examples/basic-usage.ts +++ b/sdk/typescript/examples/basic-usage.ts @@ -37,20 +37,20 @@ async function createHttpPipeline() { address: { socket_address: { address: "127.0.0.1", - port_value: 8080 - } + port_value: 8080, + }, }, filter_chains: [ { filters: [ { name: "auth", type: "auth" }, { name: "rate_limiter", type: "rate_limiter" }, - { name: "access_log", type: "access_log" } - ] - } - ] - } - ] + { name: "access_log", type: "access_log" }, + ], + }, + ], + }, + ], }; const filterChain = new FilterChain(dispatcher, config); @@ -77,20 +77,20 @@ async function createMonitoringPipeline() { address: { socket_address: { address: "127.0.0.1", - port_value: 9090 - } + port_value: 9090, + }, }, filter_chains: [ { filters: [ { name: "metrics", type: "metrics" }, { name: "tracing", type: "tracing" }, - { name: "access_log", type: "access_log" } - ] - } - ] - } - ] + { name: "access_log", type: "access_log" }, + ], + }, + ], + }, + ], }; const dispatcher = createRealDispatcher(); @@ -144,8 +144,8 @@ async function demonstrateProtocolStack() { address: { socket_address: { address: "127.0.0.1", - port_value: 8443 - } + port_value: 8443, + }, }, filter_chains: [ { @@ -153,12 +153,12 @@ async function demonstrateProtocolStack() { { name: "tcp_proxy", type: "tcp_proxy" }, { name: "tls", type: "tls" }, { name: "http_codec", type: "http.codec" }, - { name: "json_rpc", type: "json_rpc.dispatcher" } - ] - } - ] - } - ] + { name: "json_rpc", type: "json_rpc.dispatcher" }, + ], + }, + ], + }, + ], }; const dispatcher = createRealDispatcher(); diff --git a/sdk/typescript/src/__tests__/async-filter-chain.test.ts b/sdk/typescript/src/__tests__/async-filter-chain.test.ts index 062605e0..135c0c66 100644 --- a/sdk/typescript/src/__tests__/async-filter-chain.test.ts +++ b/sdk/typescript/src/__tests__/async-filter-chain.test.ts @@ -6,31 +6,31 @@ 
* through FFI to C++ implementation. */ -import { describe, it, expect, beforeEach, afterEach } from '@jest/globals'; -import { FilterChain } from '../filter-chain-ffi'; -import { mcpFilterLib } from '../mcp-ffi-bindings'; -import { FilterResult, CanonicalConfig } from '../filter-types'; +import { describe, it, expect, beforeEach, afterEach } from "@jest/globals"; +import { FilterChain } from "../filter-chain-ffi"; +import { mcpFilterLib } from "../mcp-ffi-bindings"; +import { FilterResult, CanonicalConfig } from "../filter-types"; -describe('Async Filter Chain Integration', () => { +describe("Async Filter Chain Integration", () => { let dispatcher: any; let chain: FilterChain; const testConfig: CanonicalConfig = { listeners: [ { - name: 'test_listener', + name: "test_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9090, }, }, filter_chains: [ { filters: [ - { name: 'http', type: 'http.codec' }, - { name: 'sse', type: 'sse.codec' }, - { name: 'dispatcher', type: 'json_rpc.dispatcher' }, + { name: "http", type: "http.codec" }, + { name: "sse", type: "sse.codec" }, + { name: "dispatcher", type: "json_rpc.dispatcher" }, ], }, ], @@ -64,46 +64,46 @@ describe('Async Filter Chain Integration', () => { mcpFilterLib.mcp_shutdown(); }); - it('should process incoming message', async () => { - const result = await chain.processIncoming({ method: 'test' }); + it("should process incoming message", async () => { + const result = await chain.processIncoming({ method: "test" }); expect(result).toBeDefined(); expect(result.decision).toBeDefined(); - expect(typeof result.decision).toBe('number'); + expect(typeof result.decision).toBe("number"); // Note: Actual decision depends on filters in chain }); - it('should process outgoing message', async () => { - const result = await chain.processOutgoing({ method: 'test' }); + it("should process outgoing message", async () => { + const result = await chain.processOutgoing({ method: "test" 
}); expect(result).toBeDefined(); expect(result.decision).toBeDefined(); - expect(typeof result.decision).toBe('number'); + expect(typeof result.decision).toBe("number"); // Note: Actual decision depends on filters in chain }); - it('should handle multiple concurrent requests', async () => { + it("should handle multiple concurrent requests", async () => { const promises: Promise[] = []; for (let i = 0; i < 10; i++) { - promises.push(chain.processIncoming({ method: 'test', id: i })); + promises.push(chain.processIncoming({ method: "test", id: i })); } const results = await Promise.all(promises); expect(results).toHaveLength(10); - results.forEach((result) => { + results.forEach(result => { expect(result.decision).toBeDefined(); - expect(typeof result.decision).toBe('number'); + expect(typeof result.decision).toBe("number"); }); }); - it('should process incoming and outgoing concurrently', async () => { + it("should process incoming and outgoing concurrently", async () => { const incomingPromises: Promise[] = []; const outgoingPromises: Promise[] = []; for (let i = 0; i < 5; i++) { - incomingPromises.push(chain.processIncoming({ method: 'test_in', id: i })); - outgoingPromises.push(chain.processOutgoing({ method: 'test_out', id: i })); + incomingPromises.push(chain.processIncoming({ method: "test_in", id: i })); + outgoingPromises.push(chain.processOutgoing({ method: "test_out", id: i })); } const [incomingResults, outgoingResults] = await Promise.all([ @@ -114,16 +114,16 @@ describe('Async Filter Chain Integration', () => { expect(incomingResults).toHaveLength(5); expect(outgoingResults).toHaveLength(5); - [...incomingResults, ...outgoingResults].forEach((result) => { + [...incomingResults, ...outgoingResults].forEach(result => { expect(result.decision).toBeDefined(); }); }); - it('should handle error responses from filters', async () => { + it("should handle error responses from filters", async () => { // This test depends on how the C++ implementation handles 
invalid messages // For now, we verify the API doesn't crash try { - const result = await chain.processIncoming({ invalid: 'data' }); + const result = await chain.processIncoming({ invalid: "data" }); expect(result).toBeDefined(); } catch (error) { // Error handling is also acceptable @@ -131,7 +131,7 @@ describe('Async Filter Chain Integration', () => { } }); - it('should maintain separate callback registries for each message', async () => { + it("should maintain separate callback registries for each message", async () => { // Submit multiple messages and verify each gets its own callback const message1 = chain.processIncoming({ id: 1 }); const message2 = chain.processIncoming({ id: 2 }); @@ -140,15 +140,15 @@ describe('Async Filter Chain Integration', () => { const results = await Promise.all([message1, message2, message3]); expect(results).toHaveLength(3); - results.forEach((result) => { + results.forEach(result => { expect(result).toBeDefined(); expect(result.decision).toBeDefined(); }); }); - it('should clean up resources on shutdown', async () => { + it("should clean up resources on shutdown", async () => { // Submit a message - await chain.processIncoming({ method: 'test' }); + await chain.processIncoming({ method: "test" }); // Destroy the chain chain.destroy(); @@ -157,17 +157,15 @@ describe('Async Filter Chain Integration', () => { expect(chain.isDestroyed()).toBe(true); // Subsequent calls should throw - await expect(chain.processIncoming({ method: 'test' })).rejects.toThrow( - /destroyed/i - ); + await expect(chain.processIncoming({ method: "test" })).rejects.toThrow(/destroyed/i); }); - it('should reject pending requests on shutdown', async () => { + it("should reject pending requests on shutdown", async () => { // This test verifies that pending callbacks are properly rejected // when the chain is destroyed // Start processing - const promise = chain.processIncoming({ method: 'long_running' }); + const promise = chain.processIncoming({ method: 
"long_running" }); // Immediately destroy (before callback completes) chain.destroy(); @@ -176,7 +174,7 @@ describe('Async Filter Chain Integration', () => { await expect(promise).rejects.toThrow(/shutdown/i); }); - it('should handle rapid successive requests', async () => { + it("should handle rapid successive requests", async () => { const results: FilterResult[] = []; // Submit 20 requests rapidly in sequence @@ -186,42 +184,42 @@ describe('Async Filter Chain Integration', () => { } expect(results).toHaveLength(20); - results.forEach((result) => { + results.forEach(result => { expect(result.decision).toBeDefined(); }); }); - it('should return FilterResult with correct structure', async () => { - const result = await chain.processIncoming({ method: 'test' }); + it("should return FilterResult with correct structure", async () => { + const result = await chain.processIncoming({ method: "test" }); // Verify FilterResult structure - expect(result).toHaveProperty('decision'); - expect(typeof result.decision).toBe('number'); + expect(result).toHaveProperty("decision"); + expect(typeof result.decision).toBe("number"); // Optional fields may or may not be present if (result.transformedMessage !== undefined) { - expect(typeof result.transformedMessage).toBe('string'); + expect(typeof result.transformedMessage).toBe("string"); } if (result.reason !== undefined) { - expect(typeof result.reason).toBe('string'); + expect(typeof result.reason).toBe("string"); } if (result.delayMs !== undefined) { - expect(typeof result.delayMs).toBe('number'); + expect(typeof result.delayMs).toBe("number"); } if (result.metadata !== undefined) { - expect(typeof result.metadata).toBe('object'); + expect(typeof result.metadata).toBe("object"); } }); - it('should handle JSON serialization of complex messages', async () => { + it("should handle JSON serialization of complex messages", async () => { const complexMessage = { - method: 'complex_test', + method: "complex_test", params: { nested: { 
data: [1, 2, 3], flag: true, }, - array: ['a', 'b', 'c'], + array: ["a", "b", "c"], }, metadata: { timestamp: Date.now(), @@ -233,35 +231,35 @@ describe('Async Filter Chain Integration', () => { expect(result.decision).toBeDefined(); }); - it('should support getting chain statistics', async () => { + it("should support getting chain statistics", async () => { // Process some messages - await chain.processIncoming({ method: 'test1' }); - await chain.processIncoming({ method: 'test2' }); - await chain.processOutgoing({ method: 'test3' }); + await chain.processIncoming({ method: "test1" }); + await chain.processIncoming({ method: "test2" }); + await chain.processOutgoing({ method: "test3" }); // Get statistics const stats = await chain.getChainStats(); expect(stats).toBeDefined(); - expect(stats).toHaveProperty('total_processed'); - expect(stats).toHaveProperty('total_errors'); - expect(stats).toHaveProperty('avg_latency_ms'); - expect(typeof stats.total_processed).toBe('number'); + expect(stats).toHaveProperty("total_processed"); + expect(stats).toHaveProperty("total_errors"); + expect(stats).toHaveProperty("avg_latency_ms"); + expect(typeof stats.total_processed).toBe("number"); }); - it('should support getting metrics', async () => { + it("should support getting metrics", async () => { // Process some messages - await chain.processIncoming({ method: 'test' }); + await chain.processIncoming({ method: "test" }); // Get metrics const metrics = await chain.getMetrics(); expect(metrics).toBeDefined(); - expect(typeof metrics).toBe('object'); + expect(typeof metrics).toBe("object"); }); }); -describe('Async Filter Chain Error Handling', () => { +describe("Async Filter Chain Error Handling", () => { let dispatcher: any; beforeEach(() => { @@ -276,19 +274,17 @@ describe('Async Filter Chain Error Handling', () => { mcpFilterLib.mcp_shutdown(); }); - it('should throw on invalid dispatcher', () => { + it("should throw on invalid dispatcher", () => { const invalidConfig: 
CanonicalConfig = { listeners: [ { - name: 'test', + name: "test", address: { - socket_address: { address: '127.0.0.1', port_value: 9090 }, + socket_address: { address: "127.0.0.1", port_value: 9090 }, }, filter_chains: [ { - filters: [ - { name: 'http', type: 'http.codec' }, - ], + filters: [{ name: "http", type: "http.codec" }], }, ], }, @@ -300,7 +296,7 @@ describe('Async Filter Chain Error Handling', () => { }).toThrow(/dispatcher/i); }); - it('should throw on invalid configuration', () => { + it("should throw on invalid configuration", () => { const invalidConfig = { listeners: [], } as CanonicalConfig; @@ -310,19 +306,17 @@ describe('Async Filter Chain Error Handling', () => { }).toThrow(); }); - it('should handle operations on destroyed chain', async () => { + it("should handle operations on destroyed chain", async () => { const config: CanonicalConfig = { listeners: [ { - name: 'test', + name: "test", address: { - socket_address: { address: '127.0.0.1', port_value: 9090 }, + socket_address: { address: "127.0.0.1", port_value: 9090 }, }, filter_chains: [ { - filters: [ - { name: 'http', type: 'http.codec' }, - ], + filters: [{ name: "http", type: "http.codec" }], }, ], }, @@ -332,29 +326,23 @@ describe('Async Filter Chain Error Handling', () => { const chain = new FilterChain(dispatcher, config); chain.destroy(); - await expect(chain.processIncoming({ method: 'test' })).rejects.toThrow( - /destroyed/i - ); - await expect(chain.processOutgoing({ method: 'test' })).rejects.toThrow( - /destroyed/i - ); + await expect(chain.processIncoming({ method: "test" })).rejects.toThrow(/destroyed/i); + await expect(chain.processOutgoing({ method: "test" })).rejects.toThrow(/destroyed/i); await expect(chain.getChainStats()).rejects.toThrow(/destroyed/i); await expect(chain.getMetrics()).rejects.toThrow(/destroyed/i); }); - it('should allow idempotent destroy calls', () => { + it("should allow idempotent destroy calls", () => { const config: CanonicalConfig = { listeners: [ 
{ - name: 'test', + name: "test", address: { - socket_address: { address: '127.0.0.1', port_value: 9090 }, + socket_address: { address: "127.0.0.1", port_value: 9090 }, }, filter_chains: [ { - filters: [ - { name: 'http', type: 'http.codec' }, - ], + filters: [{ name: "http", type: "http.codec" }], }, ], }, diff --git a/sdk/typescript/src/__tests__/filter-ffi.test.ts b/sdk/typescript/src/__tests__/filter-ffi.test.ts index 2a0cf852..635eeb56 100644 --- a/sdk/typescript/src/__tests__/filter-ffi.test.ts +++ b/sdk/typescript/src/__tests__/filter-ffi.test.ts @@ -6,21 +6,21 @@ * with real dispatcher handles and the native library. */ -import { FilterChain } from '../filter-chain-ffi'; -import { createRealDispatcher, destroyDispatcher, ensureMcpInitialized } from '../mcp-filter-api'; -import type { CanonicalConfig } from '../filter-types'; +import { FilterChain } from "../filter-chain-ffi"; +import { createRealDispatcher, destroyDispatcher, ensureMcpInitialized } from "../mcp-filter-api"; +import type { CanonicalConfig } from "../filter-types"; -describe('FilterChain FFI Integration', () => { +describe("FilterChain FFI Integration", () => { let dispatcher: any; // Simple test configuration with http.codec filter const simpleConfig: CanonicalConfig = { listeners: [ { - name: 'test_listener', + name: "test_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9090, }, }, @@ -28,8 +28,8 @@ describe('FilterChain FFI Integration', () => { { filters: [ { - name: 'http_codec', - type: 'http.codec', + name: "http_codec", + type: "http.codec", }, ], }, @@ -42,10 +42,10 @@ describe('FilterChain FFI Integration', () => { const multiFilterConfig: CanonicalConfig = { listeners: [ { - name: 'multi_test_listener', + name: "multi_test_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9091, }, }, @@ -53,16 +53,16 @@ describe('FilterChain FFI Integration', () => { { filters: [ { - name: 
'http_codec', - type: 'http.codec', + name: "http_codec", + type: "http.codec", }, { - name: 'sse_codec', - type: 'sse.codec', + name: "sse_codec", + type: "sse.codec", }, { - name: 'json_rpc', - type: 'json_rpc.dispatcher', + name: "json_rpc", + type: "json_rpc.dispatcher", }, ], }, @@ -89,8 +89,8 @@ describe('FilterChain FFI Integration', () => { } }); - describe('Construction', () => { - it('should create a filter chain from canonical config', async () => { + describe("Construction", () => { + it("should create a filter chain from canonical config", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); @@ -101,13 +101,13 @@ describe('FilterChain FFI Integration', () => { chain.destroy(); }); - it('should throw error for invalid dispatcher', () => { + it("should throw error for invalid dispatcher", () => { expect(() => { new FilterChain(0, simpleConfig); - }).toThrow('Invalid dispatcher handle'); + }).toThrow("Invalid dispatcher handle"); }); - it('should create chain with multiple filters', async () => { + it("should create chain with multiple filters", async () => { const chain = new FilterChain(dispatcher, multiFilterConfig); await chain.initialize(); @@ -118,37 +118,37 @@ describe('FilterChain FFI Integration', () => { }); }); - describe('Metrics and Statistics', () => { - it('should retrieve chain statistics', async () => { + describe("Metrics and Statistics", () => { + it("should retrieve chain statistics", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); const stats = await chain.getChainStats(); expect(stats).toBeDefined(); - expect(typeof stats.total_processed).toBe('number'); - expect(typeof stats.total_errors).toBe('number'); - expect(typeof stats.avg_latency_ms).toBe('number'); - expect(typeof stats.active_filters).toBe('number'); + expect(typeof stats.total_processed).toBe("number"); + expect(typeof stats.total_errors).toBe("number"); + expect(typeof 
stats.avg_latency_ms).toBe("number"); + expect(typeof stats.active_filters).toBe("number"); chain.destroy(); }); - it('should retrieve filter metrics', async () => { + it("should retrieve filter metrics", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); const metrics = await chain.getMetrics(); expect(metrics).toBeDefined(); - expect(typeof metrics).toBe('object'); + expect(typeof metrics).toBe("object"); // Metrics should have at least chain-wide stats - expect(metrics['chain']).toBeDefined(); + expect(metrics["chain"]).toBeDefined(); chain.destroy(); }); - it('should throw error when getting stats from uninitialized chain', async () => { + it("should throw error when getting stats from uninitialized chain", async () => { const chain = new FilterChain(dispatcher, simpleConfig); chain.destroy(); // Destroy immediately @@ -156,28 +156,28 @@ describe('FilterChain FFI Integration', () => { }); }); - describe('Configuration Management', () => { - it('should export chain configuration', async () => { + describe("Configuration Management", () => { + it("should export chain configuration", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); const exported = await chain.exportConfig(); expect(exported).toBeDefined(); - expect(typeof exported).toBe('object'); + expect(typeof exported).toBe("object"); // Exported config should have internal chain structure (name, mode, routing, filters) // Not the canonical listener-based format - expect(exported).toHaveProperty('name'); - expect(exported).toHaveProperty('filters'); + expect(exported).toHaveProperty("name"); + expect(exported).toHaveProperty("filters"); chain.destroy(); }); - it('should enable a filter by name', async () => { + it("should enable a filter by name", async () => { const chain = new FilterChain(dispatcher, multiFilterConfig); await chain.initialize(); - const warnings = await chain.enableFilter('http_codec'); + const 
warnings = await chain.enableFilter("http_codec"); expect(Array.isArray(warnings)).toBe(true); // No errors should be thrown @@ -185,11 +185,11 @@ describe('FilterChain FFI Integration', () => { chain.destroy(); }); - it('should disable a filter by name', async () => { + it("should disable a filter by name", async () => { const chain = new FilterChain(dispatcher, multiFilterConfig); await chain.initialize(); - const warnings = await chain.disableFilter('sse_codec'); + const warnings = await chain.disableFilter("sse_codec"); expect(Array.isArray(warnings)).toBe(true); // No errors should be thrown @@ -197,21 +197,21 @@ describe('FilterChain FFI Integration', () => { chain.destroy(); }); - it('should throw error for invalid filter name', async () => { + it("should throw error for invalid filter name", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); // enableFilter may return empty warnings array for nonexistent filters // instead of throwing, depending on C API behavior - const result = await chain.enableFilter('nonexistent_filter'); + const result = await chain.enableFilter("nonexistent_filter"); expect(Array.isArray(result)).toBe(true); chain.destroy(); }); }); - describe('Lifecycle Management', () => { - it('should properly destroy chain', async () => { + describe("Lifecycle Management", () => { + it("should properly destroy chain", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); const handle = chain.getHandle(); @@ -222,17 +222,17 @@ describe('FilterChain FFI Integration', () => { chain.destroy(); expect(chain.isDestroyed()).toBe(true); - expect(() => chain.getHandle()).toThrow('destroyed'); + expect(() => chain.getHandle()).toThrow("destroyed"); }); - it('should be safe to destroy chain multiple times', () => { + it("should be safe to destroy chain multiple times", () => { const chain = new FilterChain(dispatcher, simpleConfig); chain.destroy(); expect(() => 
chain.destroy()).not.toThrow(); }); - it('should throw error when using destroyed chain', async () => { + it("should throw error when using destroyed chain", async () => { const chain = new FilterChain(dispatcher, simpleConfig); chain.destroy(); @@ -240,14 +240,14 @@ describe('FilterChain FFI Integration', () => { await expect(chain.getMetrics()).rejects.toThrow(/destroyed|error code -4/i); // exportConfig may return null for destroyed chains instead of throwing const exported = await chain.exportConfig(); - expect(exported === null || typeof exported === 'object').toBe(true); + expect(exported === null || typeof exported === "object").toBe(true); }); }); - describe('Error Handling', () => { - it('should handle malformed configuration gracefully', async () => { + describe("Error Handling", () => { + it("should handle malformed configuration gracefully", async () => { const badConfig: any = { - listeners: [] // Empty listeners array + listeners: [], // Empty listeners array }; const chain = new FilterChain(dispatcher, badConfig); @@ -255,14 +255,14 @@ describe('FilterChain FFI Integration', () => { await expect(chain.initialize()).rejects.toThrow(); }); - it('should handle missing filter chains', async () => { + it("should handle missing filter chains", async () => { const badConfig: any = { listeners: [ { - name: 'bad_listener', + name: "bad_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9092, }, }, @@ -276,7 +276,7 @@ describe('FilterChain FFI Integration', () => { await expect(chain.initialize()).rejects.toThrow(); }); - it('should handle MCP_STATUS_NOT_INITIALIZED error', async () => { + it("should handle MCP_STATUS_NOT_INITIALIZED error", async () => { const chain = new FilterChain(dispatcher, simpleConfig); // Don't initialize the chain chain.destroy(); // Destroy without initializing @@ -284,17 +284,17 @@ describe('FilterChain FFI Integration', () => { // Attempting to use destroyed chain should throw or 
reject // The current implementation may return a default result for destroyed chains try { - const result = await chain.processIncoming({ test: 'data' }); + const result = await chain.processIncoming({ test: "data" }); // If it doesn't throw, verify we get a result expect(result).toBeDefined(); - expect(result).toHaveProperty('decision'); + expect(result).toHaveProperty("decision"); } catch (error) { // If it does throw, that's also acceptable expect(error).toBeDefined(); } }); - it('should handle processing timeout', async () => { + it("should handle processing timeout", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); @@ -306,31 +306,29 @@ describe('FilterChain FFI Integration', () => { chain.destroy(); }, 35000); // Set test timeout higher than default - it('should handle callback errors gracefully', async () => { + it("should handle callback errors gracefully", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); // Submit a message that might cause callback errors try { - await chain.processIncoming({ invalid: 'structure' }); + await chain.processIncoming({ invalid: "structure" }); // If no error, that's fine - filter might allow it } catch (error) { // Error should be properly formatted expect(error).toBeInstanceOf(Error); - expect(error).toHaveProperty('message'); + expect(error).toHaveProperty("message"); } chain.destroy(); }); - it('should handle queue full scenarios', async () => { + it("should handle queue full scenarios", async () => { const chain = new FilterChain(dispatcher, simpleConfig); await chain.initialize(); // Try to flood the queue with many rapid requests - const promises = Array.from({ length: 100 }, (_, i) => - chain.processIncoming({ index: i }) - ); + const promises = Array.from({ length: 100 }, (_, i) => chain.processIncoming({ index: i })); try { await Promise.all(promises); @@ -343,7 +341,7 @@ describe('FilterChain FFI Integration', () => { 
chain.destroy(); }); - it('should propagate C API error codes correctly', async () => { + it("should propagate C API error codes correctly", async () => { const chain = new FilterChain(dispatcher, simpleConfig); // Intentionally NOT calling initialize() to test error handling @@ -359,8 +357,8 @@ describe('FilterChain FFI Integration', () => { }); }); - describe('Memory Management', () => { - it('should not leak memory when creating and destroying multiple chains', async () => { + describe("Memory Management", () => { + it("should not leak memory when creating and destroying multiple chains", async () => { // Create and destroy chains in a loop for (let i = 0; i < 10; i++) { const chain = new FilterChain(dispatcher, simpleConfig); @@ -375,7 +373,7 @@ describe('FilterChain FFI Integration', () => { expect(true).toBe(true); }); - it('should handle rapid create/destroy cycles', async () => { + it("should handle rapid create/destroy cycles", async () => { const chains: FilterChain[] = []; // Create multiple chains diff --git a/sdk/typescript/src/__tests__/gopher-filtered-transport.test.ts b/sdk/typescript/src/__tests__/gopher-filtered-transport.test.ts index aa7ec58c..4231143b 100644 --- a/sdk/typescript/src/__tests__/gopher-filtered-transport.test.ts +++ b/sdk/typescript/src/__tests__/gopher-filtered-transport.test.ts @@ -73,16 +73,16 @@ describe("GopherFilteredTransport", () => { name: "rate", config: { requests_per_second: 10, - burst_size: 5 - } - } - ] - } - ] - } - ] + burst_size: 5, + }, + }, + ], + }, + ], + }, + ], }, - debugLogging: false + debugLogging: false, }); await filteredTransport.start(); @@ -121,7 +121,7 @@ describe("GopherFilteredTransport", () => { filteredTransport.send({ jsonrpc: "2.0", method: "test", - id: i + id: i, }) ); } @@ -139,7 +139,7 @@ describe("GopherFilteredTransport", () => { filteredTransport.send({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }) ).rejects.toThrow("Mock send error"); }); @@ -148,29 +148,33 @@ 
describe("GopherFilteredTransport", () => { const newTransport = new GopherFilteredTransport(new MockTransport(), { dispatcherHandle: dispatcher, filterConfig: { - listeners: [{ - name: "test", - filter_chains: [{ - name: "default", - filters: [] - }] - }] - } + listeners: [ + { + name: "test", + filter_chains: [ + { + name: "default", + filters: [], + }, + ], + }, + ], + }, }); await expect( newTransport.send({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }) ).rejects.toThrow("not connected"); }); }); describe("Incoming Messages", () => { - it("should intercept and deliver incoming messages", (done) => { - filteredTransport.onmessage = (message) => { + it("should intercept and deliver incoming messages", done => { + filteredTransport.onmessage = message => { expect((message as any).method).toBe("test"); expect((message as any).id).toBe(42); done(); @@ -179,14 +183,14 @@ describe("GopherFilteredTransport", () => { mockTransport.simulateIncoming({ jsonrpc: "2.0", method: "test", - id: 42 + id: 42, }); }); - it("should process multiple incoming messages", (done) => { + it("should process multiple incoming messages", done => { const receivedIds: number[] = []; - filteredTransport.onmessage = (message) => { + filteredTransport.onmessage = message => { receivedIds.push((message as any).id); if (receivedIds.length === 3) { @@ -199,13 +203,13 @@ describe("GopherFilteredTransport", () => { mockTransport.simulateIncoming({ jsonrpc: "2.0", method: "test", - id: i + id: i, }); } }); - it("should handle incoming messages", (done) => { - filteredTransport.onmessage = (message) => { + it("should handle incoming messages", done => { + filteredTransport.onmessage = message => { expect((message as any).method).toBe("test"); done(); }; @@ -213,13 +217,13 @@ describe("GopherFilteredTransport", () => { mockTransport.simulateIncoming({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }); }); }); describe("Event Propagation", () => { - it("should propagate close events", (done) => { 
+ it("should propagate close events", done => { filteredTransport.onclose = () => { done(); }; @@ -227,10 +231,10 @@ describe("GopherFilteredTransport", () => { mockTransport.close(); }); - it("should propagate error events", (done) => { + it("should propagate error events", done => { const testError = new Error("Test error"); - filteredTransport.onerror = (error) => { + filteredTransport.onerror = error => { expect(error).toBe(testError); done(); }; @@ -245,7 +249,7 @@ describe("GopherFilteredTransport", () => { it("should provide metrics", async () => { const metrics = await filteredTransport.getMetrics(); expect(metrics).toBeDefined(); - expect(metrics['chain']).toBeDefined(); + expect(metrics["chain"]).toBeDefined(); }); it("should provide queue stats", () => { @@ -268,15 +272,11 @@ describe("GopherFilteredTransport", () => { describe("Dynamic Filter Control", () => { it("should enable filter", async () => { - await expect( - filteredTransport.setFilterEnabled("rate", true) - ).resolves.not.toThrow(); + await expect(filteredTransport.setFilterEnabled("rate", true)).resolves.not.toThrow(); }); it("should disable filter", async () => { - await expect( - filteredTransport.setFilterEnabled("rate", false) - ).resolves.not.toThrow(); + await expect(filteredTransport.setFilterEnabled("rate", false)).resolves.not.toThrow(); }); it("should toggle filter multiple times", async () => { @@ -291,9 +291,9 @@ describe("GopherFilteredTransport", () => { }); describe("Error Handling", () => { - it("should fail-open on filter processing errors", (done) => { + it("should fail-open on filter processing errors", done => { // Simulate filter error by sending malformed data - filteredTransport.onmessage = (message) => { + filteredTransport.onmessage = message => { // Should still receive message despite error expect(message).toBeDefined(); done(); @@ -302,7 +302,7 @@ describe("GopherFilteredTransport", () => { mockTransport.simulateIncoming({ jsonrpc: "2.0", method: "test", - id: 1 + 
id: 1, }); }); }); @@ -347,16 +347,16 @@ describe("GopherFilteredTransport with Circuit Breaker", () => { config: { failure_threshold: 5, timeout_ms: 30000, - half_open_requests: 3 - } - } - ] - } - ] - } - ] + half_open_requests: 3, + }, + }, + ], + }, + ], + }, + ], }, - debugLogging: false + debugLogging: false, }); await filteredTransport.start(); @@ -371,7 +371,7 @@ describe("GopherFilteredTransport with Circuit Breaker", () => { await filteredTransport.send({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }); expect(mockTransport.sentMessages.length).toBe(1); @@ -405,25 +405,25 @@ describe("GopherFilteredTransport with Multiple Filters", () => { { type: "rate_limiter", name: "rate", - config: { requests_per_second: 100, burst_size: 10 } + config: { requests_per_second: 100, burst_size: 10 }, }, { type: "circuit_breaker", name: "breaker", - config: { failure_threshold: 5, timeout_ms: 30000 } + config: { failure_threshold: 5, timeout_ms: 30000 }, }, { type: "metrics", name: "metrics", - config: { export_port: 9090 } - } - ] - } - ] - } - ] + config: { export_port: 9090 }, + }, + ], + }, + ], + }, + ], }, - debugLogging: false + debugLogging: false, }); await filteredTransport.start(); @@ -438,7 +438,7 @@ describe("GopherFilteredTransport with Multiple Filters", () => { await filteredTransport.send({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }); expect(mockTransport.sentMessages.length).toBe(1); @@ -447,7 +447,7 @@ describe("GopherFilteredTransport with Multiple Filters", () => { it("should get metrics from all filters", async () => { const metrics = await filteredTransport.getMetrics(); expect(metrics).toBeDefined(); - expect(metrics['chain']).toBeDefined(); + expect(metrics["chain"]).toBeDefined(); }); it("should control individual filters", async () => { @@ -459,7 +459,7 @@ describe("GopherFilteredTransport with Multiple Filters", () => { await filteredTransport.send({ jsonrpc: "2.0", method: "test", - id: 1 + id: 1, }); 
expect(mockTransport.sentMessages.length).toBe(1); diff --git a/sdk/typescript/src/__tests__/mcp-end-to-end.test.ts b/sdk/typescript/src/__tests__/mcp-end-to-end.test.ts index cf8df144..fa7ccf59 100644 --- a/sdk/typescript/src/__tests__/mcp-end-to-end.test.ts +++ b/sdk/typescript/src/__tests__/mcp-end-to-end.test.ts @@ -79,9 +79,7 @@ describe("FilterManager end-to-end behaviour", () => { bufferMocks.getBufferLength.mockImplementation(() => successBytes.length); bufferMocks.readBufferData.mockImplementation(() => successBytes); - filterApiMocks.postDataToFilter.mockImplementation( - () => FilterApi.FilterStatus.CONTINUE - ); + filterApiMocks.postDataToFilter.mockImplementation(() => FilterApi.FilterStatus.CONTINUE); manager = new FilterManager({ mcp: { jsonRpcProtocol: true, sseCodec: true }, @@ -154,8 +152,8 @@ describe("FilterManager end-to-end behaviour", () => { it("throws when processing after destruction", async () => { manager.destroy(); - await expect( - manager.processMessage({ jsonrpc: "2.0", method: "test" }) - ).rejects.toThrow(/destroyed/); + await expect(manager.processMessage({ jsonrpc: "2.0", method: "test" })).rejects.toThrow( + /destroyed/ + ); }); }); diff --git a/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.integration.test.ts b/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.integration.test.ts index 188ffb7e..f9cc4b6e 100644 --- a/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.integration.test.ts +++ b/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.integration.test.ts @@ -3,33 +3,33 @@ * These tests require the C++ library to be built and available */ -import { existsSync } from 'fs'; -import { TransportType } from '../mcp-ffi-bindings'; +import { existsSync } from "fs"; +import { TransportType } from "../mcp-ffi-bindings"; -describe('FFI Bindings - Integration Tests (Real Library)', () => { +describe("FFI Bindings - Integration Tests (Real Library)", () => { let libraryAvailable = false; - let lib: any = null; // Declare lib at describe 
scope + let lib: any = null; // Declare lib at describe scope beforeAll(() => { try { // Attempt to load real library - const koffi = require('koffi'); - const libPath = process.env['MCP_LIB_PATH'] || - '../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib'; + const koffi = require("koffi"); + const libPath = + process.env["MCP_LIB_PATH"] || "../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib"; // Check multiple possible paths const searchPaths = [ libPath, - './build/libgopher_mcp_c.so', - './build/libgopher_mcp_c.dylib', - './build/gopher_mcp_c.dll', - '../../build/src/c_api/libgopher_mcp_c.so', - '../../build/src/c_api/libgopher_mcp_c.dylib', - '../../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib', - '../../../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib', + "./build/libgopher_mcp_c.so", + "./build/libgopher_mcp_c.dylib", + "./build/gopher_mcp_c.dll", + "../../build/src/c_api/libgopher_mcp_c.so", + "../../build/src/c_api/libgopher_mcp_c.dylib", + "../../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib", + "../../../../build/src/c_api/libgopher_mcp_c.0.1.0.dylib", ]; - let foundPath = ''; + let foundPath = ""; for (const path of searchPaths) { if (existsSync(path)) { foundPath = path; @@ -38,25 +38,36 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { } if (foundPath) { - lib = koffi.load(foundPath); // Assign to outer scope variable + lib = koffi.load(foundPath); // Assign to outer scope variable // Bind the dispatcher functions - lib.mcp_dispatcher_create = lib.func('mcp_dispatcher_create', 'pointer', []); - lib.mcp_dispatcher_run = lib.func('mcp_dispatcher_run', 'int', ['pointer']); - lib.mcp_dispatcher_run_timeout = lib.func('mcp_dispatcher_run_timeout', 'int', ['pointer', 'int']); - lib.mcp_dispatcher_stop = lib.func('mcp_dispatcher_stop', 'void', ['pointer']); - lib.mcp_dispatcher_destroy = lib.func('mcp_dispatcher_destroy', 'void', ['pointer']); + lib.mcp_dispatcher_create = lib.func("mcp_dispatcher_create", "pointer", []); + 
lib.mcp_dispatcher_run = lib.func("mcp_dispatcher_run", "int", ["pointer"]); + lib.mcp_dispatcher_run_timeout = lib.func("mcp_dispatcher_run_timeout", "int", [ + "pointer", + "int", + ]); + lib.mcp_dispatcher_stop = lib.func("mcp_dispatcher_stop", "void", ["pointer"]); + lib.mcp_dispatcher_destroy = lib.func("mcp_dispatcher_destroy", "void", ["pointer"]); // Bind the connection functions - lib.mcp_connection_create_client = lib.func('mcp_connection_create_client', 'pointer', ['pointer', 'int']); - lib.mcp_connection_configure = lib.func('mcp_connection_configure', 'int', ['pointer', 'pointer', 'pointer', 'pointer']); - lib.mcp_connection_destroy = lib.func('mcp_connection_destroy', 'void', ['pointer']); + lib.mcp_connection_create_client = lib.func("mcp_connection_create_client", "pointer", [ + "pointer", + "int", + ]); + lib.mcp_connection_configure = lib.func("mcp_connection_configure", "int", [ + "pointer", + "pointer", + "pointer", + "pointer", + ]); + lib.mcp_connection_destroy = lib.func("mcp_connection_destroy", "void", ["pointer"]); libraryAvailable = true; console.log(`✅ Native library loaded successfully from: ${foundPath}`); } else { - console.log('⚠️ Native library not found - skipping integration tests'); - console.log(' Searched paths:', searchPaths); + console.log("⚠️ Native library not found - skipping integration tests"); + console.log(" Searched paths:", searchPaths); } } catch (error: any) { console.log(`⚠️ Cannot load native library: ${error.message}`); @@ -67,26 +78,26 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { // Use conditional test/test.skip pattern instead of non-existent it.skipIf const conditionalTest = libraryAvailable ? 
test : test.skip; - conditionalTest('should load native library symbols', () => { + conditionalTest("should load native library symbols", () => { // Test that all 8 new functions are available const functions = [ - 'mcp_dispatcher_create', - 'mcp_dispatcher_run', - 'mcp_dispatcher_run_timeout', - 'mcp_dispatcher_stop', - 'mcp_dispatcher_destroy', - 'mcp_connection_create_client', - 'mcp_connection_configure', - 'mcp_connection_destroy' + "mcp_dispatcher_create", + "mcp_dispatcher_run", + "mcp_dispatcher_run_timeout", + "mcp_dispatcher_stop", + "mcp_dispatcher_destroy", + "mcp_connection_create_client", + "mcp_connection_configure", + "mcp_connection_destroy", ]; for (const fn of functions) { expect(lib[fn]).toBeDefined(); - expect(typeof lib[fn]).toBe('function'); + expect(typeof lib[fn]).toBe("function"); } }); - conditionalTest('should create and destroy dispatcher', () => { + conditionalTest("should create and destroy dispatcher", () => { const dispatcher = lib.mcp_dispatcher_create(); expect(dispatcher).toBeTruthy(); expect(dispatcher).not.toBe(0); @@ -98,7 +109,7 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { // Should not crash }); - conditionalTest('should create and destroy connection', () => { + conditionalTest("should create and destroy connection", () => { // First create a dispatcher const dispatcher = lib.mcp_dispatcher_create(); expect(dispatcher).toBeTruthy(); @@ -118,7 +129,7 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { lib.mcp_dispatcher_destroy(dispatcher); }); - conditionalTest('should configure connection', () => { + conditionalTest("should configure connection", () => { const dispatcher = lib.mcp_dispatcher_create(); const connection = lib.mcp_connection_create_client( dispatcher, @@ -133,7 +144,7 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { lib.mcp_dispatcher_destroy(dispatcher); }); - conditionalTest('should run dispatcher with timeout', () => { + 
conditionalTest("should run dispatcher with timeout", () => { const dispatcher = lib.mcp_dispatcher_create(); // Run with 10ms timeout (should return immediately) @@ -145,7 +156,7 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { lib.mcp_dispatcher_destroy(dispatcher); }); - conditionalTest('should handle multiple dispatcher instances', () => { + conditionalTest("should handle multiple dispatcher instances", () => { const dispatcher1 = lib.mcp_dispatcher_create(); const dispatcher2 = lib.mcp_dispatcher_create(); @@ -157,7 +168,7 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { lib.mcp_dispatcher_destroy(dispatcher2); }); - conditionalTest('should create connections with different transport types', () => { + conditionalTest("should create connections with different transport types", () => { const dispatcher = lib.mcp_dispatcher_create(); // Test creating connections with different transport types @@ -186,21 +197,21 @@ describe('FFI Bindings - Integration Tests (Real Library)', () => { lib.mcp_dispatcher_destroy(dispatcher); }); - test('should gracefully skip when library is missing', () => { + test("should gracefully skip when library is missing", () => { if (libraryAvailable) { - console.log('Library available - test not applicable'); + console.log("Library available - test not applicable"); return; } // Ensure the system doesn't crash when library is missing expect(() => { - require('../mcp-ffi-bindings'); + require("../mcp-ffi-bindings"); }).not.toThrow(); // Should handle missing library gracefully }); - conditionalTest('should verify library exports all expected functions', () => { + conditionalTest("should verify library exports all expected functions", () => { // Try to import the actual module - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + const { mcpFilterLib } = require("../mcp-ffi-bindings"); // Verify the new functions exist expect(mcpFilterLib.mcp_dispatcher_create).toBeDefined(); @@ -212,4 +223,4 
@@ describe('FFI Bindings - Integration Tests (Real Library)', () => { expect(mcpFilterLib.mcp_connection_configure).toBeDefined(); expect(mcpFilterLib.mcp_connection_destroy).toBeDefined(); }); -}); \ No newline at end of file +}); diff --git a/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.unit.test.ts b/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.unit.test.ts index 71544ab6..fb7bb46b 100644 --- a/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.unit.test.ts +++ b/sdk/typescript/src/__tests__/mcp-ffi-dispatcher.unit.test.ts @@ -3,12 +3,12 @@ * These tests verify function definitions and signatures without requiring the C++ library */ -import { TransportType } from '../mcp-ffi-bindings'; +import { TransportType } from "../mcp-ffi-bindings"; -describe('FFI Bindings - Unit Tests (Mocked)', () => { +describe("FFI Bindings - Unit Tests (Mocked)", () => { beforeAll(() => { // Mock koffi to avoid needing C++ library for unit tests - jest.mock('koffi', () => ({ + jest.mock("koffi", () => ({ load: jest.fn().mockReturnValue({ mcp_dispatcher_create: jest.fn().mockReturnValue(BigInt(0x7f8000000001)), mcp_dispatcher_run: jest.fn().mockReturnValue(0), @@ -23,38 +23,38 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { }); afterAll(() => { - jest.unmock('koffi'); + jest.unmock("koffi"); }); - describe('Transport Type Enum', () => { - it('should define correct transport type values', () => { + describe("Transport Type Enum", () => { + it("should define correct transport type values", () => { expect(TransportType.MCP_TRANSPORT_HTTP_SSE).toBe(0); expect(TransportType.MCP_TRANSPORT_STDIO).toBe(1); expect(TransportType.MCP_TRANSPORT_PIPE).toBe(2); }); }); - describe('Dispatcher FFI Functions', () => { - it('should define all 5 dispatcher FFI functions', () => { + describe("Dispatcher FFI Functions", () => { + it("should define all 5 dispatcher FFI functions", () => { // Import after mocking - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + const { mcpFilterLib 
} = require("../mcp-ffi-bindings"); const dispatcherFunctions = [ - 'mcp_dispatcher_create', - 'mcp_dispatcher_run', - 'mcp_dispatcher_run_timeout', - 'mcp_dispatcher_stop', - 'mcp_dispatcher_destroy' + "mcp_dispatcher_create", + "mcp_dispatcher_run", + "mcp_dispatcher_run_timeout", + "mcp_dispatcher_stop", + "mcp_dispatcher_destroy", ]; for (const funcName of dispatcherFunctions) { expect(mcpFilterLib[funcName]).toBeDefined(); - expect(typeof mcpFilterLib[funcName]).toBe('function'); + expect(typeof mcpFilterLib[funcName]).toBe("function"); } }); - it('should have correct function signatures for dispatcher functions', () => { - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + it("should have correct function signatures for dispatcher functions", () => { + const { mcpFilterLib } = require("../mcp-ffi-bindings"); // Test that functions can be called with expected parameters const dispatcher = mcpFilterLib.mcp_dispatcher_create(); @@ -72,24 +72,24 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { }); }); - describe('Connection FFI Functions', () => { - it('should define all 3 connection FFI functions', () => { - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + describe("Connection FFI Functions", () => { + it("should define all 3 connection FFI functions", () => { + const { mcpFilterLib } = require("../mcp-ffi-bindings"); const connectionFunctions = [ - 'mcp_connection_create_client', - 'mcp_connection_configure', - 'mcp_connection_destroy' + "mcp_connection_create_client", + "mcp_connection_configure", + "mcp_connection_destroy", ]; for (const funcName of connectionFunctions) { expect(mcpFilterLib[funcName]).toBeDefined(); - expect(typeof mcpFilterLib[funcName]).toBe('function'); + expect(typeof mcpFilterLib[funcName]).toBe("function"); } }); - it('should have correct function signatures for connection functions', () => { - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + it("should have correct function signatures for 
connection functions", () => { + const { mcpFilterLib } = require("../mcp-ffi-bindings"); const dispatcher = mcpFilterLib.mcp_dispatcher_create(); @@ -105,7 +105,7 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { connection, 0, // null address 0, // null options - 0 // null ssl config + 0 // null ssl config ); expect(configResult).toBe(0); // MCP_OK @@ -114,13 +114,13 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { }); }); - describe('Total Function Count', () => { - it('should have 101 total functions (93 original + 8 new)', () => { - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + describe("Total Function Count", () => { + it("should have 101 total functions (93 original + 8 new)", () => { + const { mcpFilterLib } = require("../mcp-ffi-bindings"); // Count all functions in mcpFilterLib const functionCount = Object.keys(mcpFilterLib).filter( - key => typeof mcpFilterLib[key] === 'function' + key => typeof mcpFilterLib[key] === "function" ).length; // We added 8 new functions (5 dispatcher + 3 connection) @@ -128,9 +128,9 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { }); }); - describe('Error Handling', () => { - it('should handle invalid parameters gracefully', () => { - const { mcpFilterLib } = require('../mcp-ffi-bindings'); + describe("Error Handling", () => { + it("should handle invalid parameters gracefully", () => { + const { mcpFilterLib } = require("../mcp-ffi-bindings"); // Test with null/invalid handles expect(() => mcpFilterLib.mcp_dispatcher_run(null)).not.toThrow(); @@ -138,4 +138,4 @@ describe('FFI Bindings - Unit Tests (Mocked)', () => { expect(() => mcpFilterLib.mcp_connection_destroy(null)).not.toThrow(); }); }); -}); \ No newline at end of file +}); diff --git a/sdk/typescript/src/__tests__/mcp-filter-chain.test.ts b/sdk/typescript/src/__tests__/mcp-filter-chain.test.ts index 5f963e48..1ee05575 100644 --- a/sdk/typescript/src/__tests__/mcp-filter-chain.test.ts +++ 
b/sdk/typescript/src/__tests__/mcp-filter-chain.test.ts @@ -68,17 +68,12 @@ describe("Filter chain helpers", () => { expect(handle).toBe(101); expect(mockedLib.mcp_json_parse).toHaveBeenCalledTimes(1); - expect(mockedLib.mcp_chain_create_from_json).toHaveBeenCalledWith( - 7, - expect.any(Object) - ); + expect(mockedLib.mcp_chain_create_from_json).toHaveBeenCalledWith(7, expect.any(Object)); expect(mockedLib.mcp_json_free).toHaveBeenCalledTimes(1); }); it("throws for non-canonical configuration", () => { - expect(() => createFilterChainFromConfig(0, {} as CanonicalConfig)).toThrow( - /canonical format/ - ); + expect(() => createFilterChainFromConfig(0, {} as CanonicalConfig)).toThrow(/canonical format/); }); it("proxies chain state helpers to the FFI bindings", () => { diff --git a/sdk/typescript/src/__tests__/mcp-filter-manager.test.ts b/sdk/typescript/src/__tests__/mcp-filter-manager.test.ts index a8b2afc6..1dec1f04 100644 --- a/sdk/typescript/src/__tests__/mcp-filter-manager.test.ts +++ b/sdk/typescript/src/__tests__/mcp-filter-manager.test.ts @@ -221,4 +221,4 @@ describe("FilterManager - Core Functionality", () => { expect(() => filterManager.destroy()).not.toThrow(); }); }); -}); \ No newline at end of file +}); diff --git a/sdk/typescript/src/config-utils.ts b/sdk/typescript/src/config-utils.ts index 1130e0af..15d5c2db 100644 --- a/sdk/typescript/src/config-utils.ts +++ b/sdk/typescript/src/config-utils.ts @@ -6,12 +6,9 @@ * in both canonical (listener-based) and assembler formats. 
*/ -import * as fs from 'fs'; -import * as path from 'path'; -import { - CanonicalConfig, - FilterSpec, -} from './mcp-filter-chain'; +import * as fs from "fs"; +import * as path from "path"; +import { CanonicalConfig, FilterSpec } from "./mcp-filter-chain"; /** * Load configuration from a JSON file @@ -19,7 +16,7 @@ import { export function loadConfigFromFile(filePath: string): CanonicalConfig { try { const absolutePath = path.resolve(filePath); - const configData = fs.readFileSync(absolutePath, 'utf-8'); + const configData = fs.readFileSync(absolutePath, "utf-8"); const config = JSON.parse(configData); // Validate that it's in canonical format @@ -39,7 +36,7 @@ export function loadConfigFromFile(filePath: string): CanonicalConfig { */ export function convertNestedToCanonical( nestedConfig: any, - listenerName: string = 'default_listener', + listenerName: string = "default_listener", port: number = 8080 ): CanonicalConfig { const filters: FilterSpec[] = []; @@ -47,56 +44,56 @@ export function convertNestedToCanonical( // Extract filters from nested configuration if (nestedConfig.security?.authentication) { filters.push({ - name: 'auth', - type: 'security.authentication', + name: "auth", + type: "security.authentication", config: nestedConfig.security.authentication, }); } if (nestedConfig.security?.authorization) { filters.push({ - name: 'authz', - type: 'security.authorization', + name: "authz", + type: "security.authorization", config: nestedConfig.security.authorization, }); } if (nestedConfig.observability?.accessLog) { filters.push({ - name: 'access_log', - type: 'observability.access_log', + name: "access_log", + type: "observability.access_log", config: nestedConfig.observability.accessLog, }); } if (nestedConfig.observability?.metrics) { filters.push({ - name: 'metrics', - type: 'observability.metrics', + name: "metrics", + type: "observability.metrics", config: nestedConfig.observability.metrics, }); } if (nestedConfig.observability?.tracing) { 
filters.push({ - name: 'tracing', - type: 'observability.tracing', + name: "tracing", + type: "observability.tracing", config: nestedConfig.observability.tracing, }); } if (nestedConfig.trafficManagement?.rateLimit) { filters.push({ - name: 'rate_limit', - type: 'traffic.rate_limit', + name: "rate_limit", + type: "traffic.rate_limit", config: nestedConfig.trafficManagement.rateLimit, }); } if (nestedConfig.trafficManagement?.circuitBreaker) { filters.push({ - name: 'circuit_breaker', - type: 'traffic.circuit_breaker', + name: "circuit_breaker", + type: "traffic.circuit_breaker", config: nestedConfig.trafficManagement.circuitBreaker, }); } @@ -106,7 +103,7 @@ export function convertNestedToCanonical( for (const [name, config] of Object.entries(nestedConfig.customFilters)) { filters.push({ name, - type: 'custom', + type: "custom", config, }); } @@ -118,7 +115,7 @@ export function convertNestedToCanonical( name: listenerName, address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: port, }, }, @@ -150,7 +147,7 @@ export function validateCanonicalConfig(config: CanonicalConfig): { } if (config.listeners.length === 0) { - errors.push('At least one listener must be defined'); + errors.push("At least one listener must be defined"); return { valid: false, errors, warnings }; } @@ -161,14 +158,20 @@ export function validateCanonicalConfig(config: CanonicalConfig): { } if (!listener.address?.socket_address) { - errors.push(`Listener ${listener.name || listenerIndex} must have an address with socket_address`); + errors.push( + `Listener ${listener.name || listenerIndex} must have an address with socket_address` + ); } else { const { socket_address } = listener.address; if (!socket_address.address) { - errors.push(`Listener ${listener.name || listenerIndex} socket_address must have an address`); + errors.push( + `Listener ${listener.name || listenerIndex} socket_address must have an address` + ); } - if (typeof socket_address.port_value !== 
'number') { - errors.push(`Listener ${listener.name || listenerIndex} socket_address must have a numeric port_value`); + if (typeof socket_address.port_value !== "number") { + errors.push( + `Listener ${listener.name || listenerIndex} socket_address must have a numeric port_value` + ); } } @@ -180,14 +183,18 @@ export function validateCanonicalConfig(config: CanonicalConfig): { // Validate filter chains listener.filter_chains.forEach((chain, chainIndex) => { if (!chain.filters || !Array.isArray(chain.filters)) { - errors.push(`Filter chain ${chainIndex} in listener ${listener.name} must have a filters array`); + errors.push( + `Filter chain ${chainIndex} in listener ${listener.name} must have a filters array` + ); } else if (chain.filters.length === 0) { warnings.push(`Filter chain ${chainIndex} in listener ${listener.name} has no filters`); } else { // Validate individual filters chain.filters.forEach((filter, filterIndex) => { if (!filter.name) { - errors.push(`Filter ${filterIndex} in chain ${chainIndex} of listener ${listener.name} must have a name`); + errors.push( + `Filter ${filterIndex} in chain ${chainIndex} of listener ${listener.name} must have a name` + ); } if (!filter.type) { errors.push(`Filter ${filter.name || filterIndex} must have a type`); @@ -208,24 +215,26 @@ export function validateCanonicalConfig(config: CanonicalConfig): { /** * Create default canonical configuration for common scenarios */ -export function createDefaultConfig(scenario: 'http' | 'tcp' | 'mcp-server' | 'mcp-client'): CanonicalConfig { +export function createDefaultConfig( + scenario: "http" | "tcp" | "mcp-server" | "mcp-client" +): CanonicalConfig { switch (scenario) { - case 'http': + case "http": return { listeners: [ { - name: 'http_listener', + name: "http_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 8080, }, }, filter_chains: [ { filters: [ - { name: 'http_codec', type: 'http.codec' }, - { name: 'router', type: 
'http.router' }, + { name: "http_codec", type: "http.codec" }, + { name: "router", type: "http.router" }, ], }, ], @@ -233,45 +242,43 @@ export function createDefaultConfig(scenario: 'http' | 'tcp' | 'mcp-server' | 'm ], }; - case 'tcp': + case "tcp": return { listeners: [ { - name: 'tcp_listener', + name: "tcp_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9090, }, }, filter_chains: [ { - filters: [ - { name: 'tcp_proxy', type: 'tcp.proxy' }, - ], + filters: [{ name: "tcp_proxy", type: "tcp.proxy" }], }, ], }, ], }; - case 'mcp-server': + case "mcp-server": return { listeners: [ { - name: 'mcp_server_listener', + name: "mcp_server_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 9090, }, }, filter_chains: [ { filters: [ - { name: 'http.codec', type: 'http.codec' }, - { name: 'sse.codec', type: 'sse.codec' }, - { name: 'json_rpc.dispatcher', type: 'json_rpc.dispatcher' }, + { name: "http.codec", type: "http.codec" }, + { name: "sse.codec", type: "sse.codec" }, + { name: "json_rpc.dispatcher", type: "json_rpc.dispatcher" }, ], }, ], @@ -279,23 +286,23 @@ export function createDefaultConfig(scenario: 'http' | 'tcp' | 'mcp-server' | 'm ], }; - case 'mcp-client': + case "mcp-client": return { listeners: [ { - name: 'mcp_client_listener', + name: "mcp_client_listener", address: { socket_address: { - address: '127.0.0.1', + address: "127.0.0.1", port_value: 0, // Client uses ephemeral port }, }, filter_chains: [ { filters: [ - { name: 'http.codec', type: 'http.codec' }, - { name: 'sse.codec', type: 'sse.codec' }, - { name: 'json_rpc.client', type: 'json_rpc.client' }, + { name: "http.codec", type: "http.codec" }, + { name: "sse.codec", type: "sse.codec" }, + { name: "json_rpc.client", type: "json_rpc.client" }, ], }, ], @@ -323,4 +330,3 @@ export function mergeCanonicalConfigs(...configs: CanonicalConfig[]): CanonicalC return merged; } - diff --git 
a/sdk/typescript/src/filter-chain-ffi.ts b/sdk/typescript/src/filter-chain-ffi.ts index 5dc5bee2..bf5c0c95 100644 --- a/sdk/typescript/src/filter-chain-ffi.ts +++ b/sdk/typescript/src/filter-chain-ffi.ts @@ -7,18 +7,22 @@ * SDK handles protocol, and this layer injects Gopher-MCP filters. */ -import * as koffi from 'koffi'; -import { mcpFilterLib } from './mcp-ffi-bindings'; -import { canonicalConfigToNormalizedJson } from './mcp-filter-chain'; +import * as koffi from "koffi"; +import { mcpFilterLib } from "./mcp-ffi-bindings"; +import { canonicalConfigToNormalizedJson } from "./mcp-filter-chain"; import type { CanonicalConfig, FilterMetrics, ChainStats, FilterDecision, FilterResult, -} from './filter-types'; -import type { FilterEventHandler } from './filter-events'; -import { FilterEventCallbackHandle, registerFilterEventCallback, unregisterFilterEventCallback } from './filter-event-callbacks'; +} from "./filter-types"; +import type { FilterEventHandler } from "./filter-events"; +import { + FilterEventCallbackHandle, + registerFilterEventCallback, + unregisterFilterEventCallback, +} from "./filter-event-callbacks"; const MCP_OK = 0; @@ -46,49 +50,46 @@ type CallbackEntry = { /** * C struct definition for mcp_error_t */ -const ErrorStruct = koffi.struct('mcp_error_t', { - code: 'int32_t', - message: 'char*', +const ErrorStruct = koffi.struct("mcp_error_t", { + code: "int32_t", + message: "char*", }); /** * C struct definition for mcp_filter_result_t */ -const FilterResultStruct = koffi.struct('mcp_filter_result_t', { - decision: 'int32_t', - transformed_message: 'char*', - reason: 'char*', - delay_ms: 'uint32_t', - metadata: 'void*', +const FilterResultStruct = koffi.struct("mcp_filter_result_t", { + decision: "int32_t", + transformed_message: "char*", + reason: "char*", + delay_ms: "uint32_t", + metadata: "void*", }); /** * Decode chain statistics from C struct */ -function decodeChainStats( - statsPtr: any, - structType: any -): ChainStats { +function 
decodeChainStats(statsPtr: any, structType: any): ChainStats { const decoded = koffi.decode(statsPtr, structType) as Record; const asNumber = (value: unknown) => { - if (typeof value === 'bigint') { + if (typeof value === "bigint") { return Number(value); } - if (typeof value === 'number') { + if (typeof value === "number") { return value; } return Number(value ?? 0); }; return { - total_processed: asNumber(decoded['total_processed']), - total_errors: asNumber(decoded['total_errors']), - total_bypassed: asNumber(decoded['total_bypassed']), - avg_latency_ms: asNumber(decoded['avg_latency_ms']), - max_latency_ms: asNumber(decoded['max_latency_ms']), - throughput_mbps: asNumber(decoded['throughput_mbps']), - active_filters: asNumber(decoded['active_filters']), + total_processed: asNumber(decoded["total_processed"]), + total_errors: asNumber(decoded["total_errors"]), + total_bypassed: asNumber(decoded["total_bypassed"]), + avg_latency_ms: asNumber(decoded["avg_latency_ms"]), + max_latency_ms: asNumber(decoded["max_latency_ms"]), + throughput_mbps: asNumber(decoded["throughput_mbps"]), + active_filters: asNumber(decoded["active_filters"]), }; } @@ -169,7 +170,7 @@ export class FilterChain { // Async queue support private callbackRegistry = new Map(); - private nextCallbackId = BigInt(1); // Start at 1 to avoid NULL (0x0) pointer issues + private nextCallbackId = BigInt(1); // Start at 1 to avoid NULL (0x0) pointer issues private nativeCallbackPtr: koffi.IKoffiRegisteredCallback | null = null; // Callback for async chain creation @@ -190,12 +191,12 @@ export class FilterChain { */ constructor(dispatcher: any, config: CanonicalConfig) { if (!dispatcher) { - throw new Error('Invalid dispatcher handle'); + throw new Error("Invalid dispatcher handle"); } this.dispatcherHandle = dispatcher; this.config = config; - this.handle = 0; // Not created yet - deferred to initialize() + this.handle = 0; // Not created yet - deferred to initialize() // Set up async callback handler for 
message processing this.setupAsyncCallbackHandler(); @@ -206,7 +207,7 @@ export class FilterChain { */ getHandle(): number { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } return this.handle; } @@ -225,19 +226,19 @@ export class FilterChain { */ async getChainStats(): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } // Allocate space for mcp_chain_stats_t struct const uniqueId = Math.random().toString(36).substring(2, 15); const ChainStatsStruct = koffi.struct(`mcp_chain_stats_t_${uniqueId}`, { - total_processed: 'uint64', - total_errors: 'uint64', - total_bypassed: 'uint64', - avg_latency_ms: 'double', - max_latency_ms: 'double', - throughput_mbps: 'double', - active_filters: 'uint32', + total_processed: "uint64", + total_errors: "uint64", + total_bypassed: "uint64", + avg_latency_ms: "double", + max_latency_ms: "double", + throughput_mbps: "double", + active_filters: "uint32", }); const statsPtr = koffi.alloc(ChainStatsStruct, 1); @@ -245,7 +246,7 @@ export class FilterChain { try { const rc = mcpFilterLib.mcp_chain_get_stats( this.handle, - koffi.as(statsPtr, 'void*') + koffi.as(statsPtr, "void*") ) as number; if (rc !== MCP_OK) { @@ -269,7 +270,7 @@ export class FilterChain { */ async getMetrics(_filterName?: string): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } // For now, get chain stats as a proxy for metrics @@ -277,12 +278,12 @@ export class FilterChain { const stats = await this.getChainStats(); const metrics: FilterMetrics = { - 'chain': { + chain: { requests_total: stats.total_processed, requests_denied: stats.total_errors, avg_latency_ms: stats.avg_latency_ms, p99_latency_ms: stats.max_latency_ms, - } + }, }; return metrics; @@ -295,20 +296,20 @@ export class FilterChain { */ async 
exportConfig(): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } const jsonHandle = mcpFilterLib.mcp_chain_export_to_json(this.handle); if (!jsonHandle) { - throw new Error('Failed to export chain configuration'); + throw new Error("Failed to export chain configuration"); } const jsonStr = mcpFilterLib.mcp_json_stringify(jsonHandle); mcpFilterLib.mcp_json_free(jsonHandle); if (!jsonStr) { - throw new Error('Failed to stringify chain configuration'); + throw new Error("Failed to stringify chain configuration"); } try { @@ -326,13 +327,13 @@ export class FilterChain { */ async enableFilter(name: string): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } const rc = mcpFilterLib.mcp_chain_set_filter_enabled( this.handle, name, - 1 // true + 1 // true ) as number; if (rc !== MCP_OK) { @@ -351,13 +352,13 @@ export class FilterChain { */ async disableFilter(name: string): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } const rc = mcpFilterLib.mcp_chain_set_filter_enabled( this.handle, name, - 0 // false + 0 // false ) as number; if (rc !== MCP_OK) { @@ -378,13 +379,13 @@ export class FilterChain { */ async reconfigure(_config: CanonicalConfig): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } // For now, this is not fully implemented as mcp_chain_merge // requires two chain handles. A full implementation would require // creating a new chain and merging, or a dedicated reconfigure API. 
- throw new Error('Reconfigure not yet implemented - use disable/enable filters instead'); + throw new Error("Reconfigure not yet implemented - use disable/enable filters instead"); } /** @@ -399,12 +400,12 @@ export class FilterChain { _filter: { name: string; type: string; config?: any } ): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } // This would require exporting config, modifying it, and re-creating the chain // For now, not implemented as it requires more complex chain rebuilding - throw new Error('addFilter not yet implemented - create a new FilterChain instead'); + throw new Error("addFilter not yet implemented - create a new FilterChain instead"); } /** @@ -427,7 +428,9 @@ export class FilterChain { * } * ``` */ - static async validateConfig(config: CanonicalConfig): Promise<{ valid: boolean; errors: string[]; warnings: string[] }> { + static async validateConfig( + config: CanonicalConfig + ): Promise<{ valid: boolean; errors: string[]; warnings: string[] }> { // Convert config to JSON const configJson = JSON.stringify(config); const jsonHandle = mcpFilterLib.mcp_json_parse(configJson); @@ -435,14 +438,14 @@ export class FilterChain { if (!jsonHandle) { return { valid: false, - errors: ['Failed to parse configuration JSON'], - warnings: [] + errors: ["Failed to parse configuration JSON"], + warnings: [], }; } try { // Allocate validation result structure - const resultPtr = koffi.alloc('void*', 1); + const resultPtr = koffi.alloc("void*", 1); const rc = mcpFilterLib.mcp_chain_validate_json(jsonHandle, resultPtr); @@ -450,7 +453,7 @@ export class FilterChain { return { valid: false, errors: [`Validation failed with error code ${rc}`], - warnings: [] + warnings: [], }; } @@ -459,7 +462,7 @@ export class FilterChain { return { valid: true, errors: [], - warnings: [] + warnings: [], }; } finally { mcpFilterLib.mcp_json_free(jsonHandle); @@ -487,20 +490,16 @@ export 
class FilterChain { */ async mergeWith(otherChain: FilterChain, mode: number = 0): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } if (otherChain.destroyed) { - throw new Error('Other FilterChain has been destroyed'); + throw new Error("Other FilterChain has been destroyed"); } - const mergedHandle = mcpFilterLib.mcp_chain_merge( - this.handle, - otherChain.handle, - mode - ); + const mergedHandle = mcpFilterLib.mcp_chain_merge(this.handle, otherChain.handle, mode); if (!mergedHandle || mergedHandle === 0) { - throw new Error('Failed to merge filter chains'); + throw new Error("Failed to merge filter chains"); } // Create a new FilterChain instance wrapping the merged handle @@ -527,7 +526,7 @@ export class FilterChain { */ async initialize(): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } if (this.initialized) { @@ -565,14 +564,14 @@ export class FilterChain { jsonHandle = mcpFilterLib.mcp_json_parse(normalizedJson); if (!jsonHandle) { - reject(new Error('Failed to parse config JSON')); + reject(new Error("Failed to parse config JSON")); return; } // Register callback for async chain creation // Signature: void callback(uint64_t chain_handle, int32_t error_code, const char* error_msg, void* user_data) const ChainCreationCallbackType = koffi.pointer( - koffi.proto('void(uint64_t, int32_t, string, void*)') + koffi.proto("void(uint64_t, int32_t, string, void*)") ); this.creationCallback = koffi.register( @@ -587,8 +586,10 @@ export class FilterChain { // Check for errors if (errorCode !== 0 || !chainHandle || chainHandle === 0) { - const message = errorMsg || 'Failed to create filter chain from configuration'; - console.error(`[createChainAsync] Chain creation FAILED - errorCode=${errorCode}, message="${message}"`); + const message = errorMsg || "Failed to create filter chain from 
configuration"; + console.error( + `[createChainAsync] Chain creation FAILED - errorCode=${errorCode}, message="${message}"` + ); reject(new Error(`Chain creation failed (${errorCode}): ${message}`)); return; } @@ -608,7 +609,6 @@ export class FilterChain { this.initialized = true; resolve(); - } catch (err) { reject(err instanceof Error ? err : new Error(String(err))); } finally { @@ -623,15 +623,12 @@ export class FilterChain { ); // Call async chain creation (posts to dispatcher thread) - mcpFilterLib.mcp_chain_create_from_json_async( - this.dispatcherHandle, - jsonHandle, - this.creationCallback - ? koffi.as(this.creationCallback, 'void*') - : null, - null // user_data - ); - + mcpFilterLib.mcp_chain_create_from_json_async( + this.dispatcherHandle, + jsonHandle, + this.creationCallback ? koffi.as(this.creationCallback, "void*") : null, + null // user_data + ); } catch (error) { // Synchronous error before async call if (jsonHandle) { @@ -663,11 +660,11 @@ export class FilterChain { } // Clean up pending callbacks - this.callbackRegistry.forEach((entry) => { + this.callbackRegistry.forEach(entry => { if (entry.timeoutId) { clearTimeout(entry.timeoutId); } - entry.reject(new Error('Filter chain shutdown')); + entry.reject(new Error("Filter chain shutdown")); }); this.callbackRegistry.clear(); @@ -686,7 +683,7 @@ export class FilterChain { */ private async ensureInitialized(): Promise { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } if (this.initialized) { @@ -706,12 +703,13 @@ export class FilterChain { } const CallbackType = koffi.pointer( - koffi.proto('void(void *user_data, void *result, void *error)') + koffi.proto("void(void *user_data, void *result, void *error)") ); this.nativeCallbackPtr = koffi.register( - (userData: any, resultPtr: any, errorPtr: any) => this.handleAsyncCallback(userData as Buffer, resultPtr, errorPtr), - CallbackType, + (userData: any, resultPtr: any, 
errorPtr: any) => + this.handleAsyncCallback(userData as Buffer, resultPtr, errorPtr), + CallbackType ); } @@ -784,13 +782,15 @@ export class FilterChain { if (errorPtr !== null && errorPtr !== undefined) { // console.log("❌ [TS-handleAsyncCallback] Error pointer is set, rejecting"); const { code, message } = koffi.decode(errorPtr, ErrorStruct); - entry.reject(new Error(message ? `Filter error (${code}): ${message}` : `Filter error (${code})`)); + entry.reject( + new Error(message ? `Filter error (${code}): ${message}` : `Filter error (${code})`) + ); return; } if (resultPtr === null || resultPtr === undefined) { // console.log("❌ [TS-handleAsyncCallback] Result pointer is null, rejecting"); - entry.reject(new Error('Filter callback returned no result or error')); + entry.reject(new Error("Filter callback returned no result or error")); return; } @@ -821,16 +821,18 @@ export class FilterChain { /** * Submit an async message for processing */ - private submitAsyncMessage(direction: 'incoming' | 'outgoing', message: unknown): Promise { - const fnName = direction === 'incoming' - ? 'mcp_chain_submit_incoming' - : 'mcp_chain_submit_outgoing'; + private submitAsyncMessage( + direction: "incoming" | "outgoing", + message: unknown + ): Promise { + const fnName = + direction === "incoming" ? 
"mcp_chain_submit_incoming" : "mcp_chain_submit_outgoing"; return new Promise((resolve, reject) => { const callbackId = this.nextCallbackId++; const userDataBuffer = Buffer.alloc(8); userDataBuffer.writeBigUInt64LE(callbackId); - const userDataPtr = koffi.as(userDataBuffer, 'void*'); + const userDataPtr = koffi.as(userDataBuffer, "void*"); // console.log("📤 [submitAsyncMessage] ENTRY"); // console.log(" Direction:", direction); @@ -850,7 +852,7 @@ export class FilterChain { // Allocate space for mcp_error_t* (pointer to opaque error handle) // This is an OUTPUT parameter where C side can store an error object - const errorPtrPtr = koffi.alloc('void*', 1); + const errorPtrPtr = koffi.alloc("void*", 1); try { // console.log(" Calling C API function:", fnName); @@ -859,11 +861,9 @@ export class FilterChain { const status = mcpFilterLib[fnName]( this.handle, JSON.stringify(message), - userDataPtr, // Cast Buffer to void* so C reads callback ID - this.nativeCallbackPtr - ? koffi.as(this.nativeCallbackPtr, 'void*') - : null, - errorPtrPtr, + userDataPtr, // Cast Buffer to void* so C reads callback ID + this.nativeCallbackPtr ? 
koffi.as(this.nativeCallbackPtr, "void*") : null, + errorPtrPtr ); // console.log(" C API returned status:", status); @@ -873,7 +873,7 @@ export class FilterChain { // Check if C side populated the error pointer // Read the pointer value from the allocated memory - const errorPtrArray = koffi.decode(errorPtrPtr, 'void*'); + const errorPtrArray = koffi.decode(errorPtrPtr, "void*"); const errorPtr = errorPtrArray; let errorMsg = `Submit failed with status ${status}`; @@ -903,7 +903,7 @@ export class FilterChain { if (entry) { entry.timeoutId = setTimeout(() => { if (this.callbackRegistry.delete(callbackId)) { - reject(new Error('Filter request timed out after 30s')); + reject(new Error("Filter request timed out after 30s")); } }, 30_000); } @@ -918,7 +918,7 @@ export class FilterChain { */ async processIncoming(message: unknown): Promise { await this.ensureInitialized(); - return this.submitAsyncMessage('incoming', message); + return this.submitAsyncMessage("incoming", message); } /** @@ -950,7 +950,7 @@ export class FilterChain { */ setEventCallback(handler: FilterEventHandler): void { if (this.destroyed) { - throw new Error('FilterChain has been destroyed'); + throw new Error("FilterChain has been destroyed"); } // Unregister previous callback if any @@ -958,7 +958,7 @@ export class FilterChain { try { unregisterFilterEventCallback(this.handle, this.eventCallbackHandle); } catch (error) { - console.error('Error unregistering previous event callback:', error); + console.error("Error unregistering previous event callback:", error); } this.eventCallbackHandle = null; } @@ -983,7 +983,7 @@ export class FilterChain { try { unregisterFilterEventCallback(this.handle, this.eventCallbackHandle); } catch (error) { - console.error('Error unregistering event callback:', error); + console.error("Error unregistering event callback:", error); } this.eventCallbackHandle = null; } @@ -997,7 +997,7 @@ export class FilterChain { */ async processOutgoing(message: unknown): Promise { 
await this.ensureInitialized(); - return this.submitAsyncMessage('outgoing', message); + return this.submitAsyncMessage("outgoing", message); } /** @@ -1023,16 +1023,16 @@ export class FilterChain { mcpFilterLib.mcp_filter_chain_shutdown(this.handle); this.initialized = false; } catch (error) { - console.error('Error shutting down filter chain:', error); + console.error("Error shutting down filter chain:", error); } } // Clean up any remaining pending callbacks - this.callbackRegistry.forEach((entry) => { + this.callbackRegistry.forEach(entry => { if (entry.timeoutId) { clearTimeout(entry.timeoutId); } - entry.reject(new Error('Filter chain destroyed')); + entry.reject(new Error("Filter chain destroyed")); }); this.callbackRegistry.clear(); diff --git a/sdk/typescript/src/filter-dispatcher.ts b/sdk/typescript/src/filter-dispatcher.ts index de2ae0ab..c1172046 100644 --- a/sdk/typescript/src/filter-dispatcher.ts +++ b/sdk/typescript/src/filter-dispatcher.ts @@ -21,7 +21,7 @@ * ``` */ -import { createRealDispatcher, destroyDispatcher } from './mcp-filter-api'; +import { createRealDispatcher, destroyDispatcher } from "./mcp-filter-api"; /** * Create a new dispatcher for hybrid SDK filter chains @@ -47,7 +47,7 @@ export function createHybridDispatcher(): number { const handle = createRealDispatcher(); if (!handle || handle === 0) { - throw new Error('Failed to create hybrid dispatcher'); + throw new Error("Failed to create hybrid dispatcher"); } return handle; @@ -71,7 +71,7 @@ export function createHybridDispatcher(): number { */ export function destroyHybridDispatcher(handle: number): void { if (!handle || handle === 0) { - console.warn('Attempted to destroy invalid dispatcher handle'); + console.warn("Attempted to destroy invalid dispatcher handle"); return; } diff --git a/sdk/typescript/src/filter-event-callbacks.ts b/sdk/typescript/src/filter-event-callbacks.ts index 194a0fab..c303d849 100644 --- a/sdk/typescript/src/filter-event-callbacks.ts +++ 
b/sdk/typescript/src/filter-event-callbacks.ts @@ -3,19 +3,15 @@ * @brief Koffi bridge for unified chain-level filter event callbacks */ -import * as koffi from 'koffi'; -import { mcpFilterLib } from './mcp-ffi-bindings'; -import type { - FilterEvent, - FilterEventHandler, - FilterEventContext, -} from './filter-events'; -import { FilterEventType, FilterEventSeverity } from './filter-events'; - -const EventContextStruct = koffi.struct('mcp_filter_event_context_t', { - chain_id: 'char*', - stream_id: 'char*', - correlation_id: 'char*', +import * as koffi from "koffi"; +import { mcpFilterLib } from "./mcp-ffi-bindings"; +import type { FilterEvent, FilterEventHandler, FilterEventContext } from "./filter-events"; +import { FilterEventType, FilterEventSeverity } from "./filter-events"; + +const EventContextStruct = koffi.struct("mcp_filter_event_context_t", { + chain_id: "char*", + stream_id: "char*", + correlation_id: "char*", }); /** @@ -29,7 +25,7 @@ export class FilterEventCallbackHandle { register(): koffi.IKoffiRegisteredCallback { if (this.destroyed) { - throw new Error('FilterEventCallbackHandle has been destroyed'); + throw new Error("FilterEventCallbackHandle has been destroyed"); } // Create unique prototype name to avoid conflicts @@ -46,15 +42,15 @@ export class FilterEventCallbackHandle { // void* user_data) const EventCallbackProto = koffi.proto( `void filter_event_callback_${suffix}(` + - 'const char*, ' + // filter_name - 'const char*, ' + // filter_instance_id - 'int32_t, ' + // event_type - 'int32_t, ' + // severity - 'const char*, ' + // event_data_json - 'void*, ' + // context pointer - 'int64_t, ' + // timestamp_ms - 'void*' + // user_data - ')' + "const char*, " + // filter_name + "const char*, " + // filter_instance_id + "int32_t, " + // event_type + "int32_t, " + // severity + "const char*, " + // event_data_json + "void*, " + // context pointer + "int64_t, " + // timestamp_ms + "void*" + // user_data + ")" ); // Register the callback with koffi 
@@ -65,9 +61,9 @@ export class FilterEventCallbackHandle { eventType: number, severity: number, eventDataJson: string | null, - contextPtr: any, // Pointer to context struct + contextPtr: any, // Pointer to context struct timestampMs: bigint, - _userData: unknown, + _userData: unknown ) => { try { // Parse event data JSON @@ -76,7 +72,7 @@ export class FilterEventCallbackHandle { try { eventData = JSON.parse(eventDataJson); } catch (err) { - console.error('Failed to parse event data JSON:', err); + console.error("Failed to parse event data JSON:", err); } } @@ -91,7 +87,7 @@ export class FilterEventCallbackHandle { correlationId: ctx.correlation_id || undefined, }; } catch (err) { - console.error('Failed to decode context struct:', err); + console.error("Failed to decode context struct:", err); } } @@ -103,7 +99,7 @@ export class FilterEventCallbackHandle { // Use spread operator to only include optional fields if defined (for exactOptionalPropertyTypes) const event: FilterEvent = { ...(context && { context }), - filterName: filterName ?? '', + filterName: filterName ?? 
"", ...(filterInstanceId && { filterInstanceId }), eventType: eventType as FilterEventType, severity: severity as FilterEventSeverity, @@ -114,7 +110,7 @@ export class FilterEventCallbackHandle { // Invoke user's TypeScript handler this.jsHandler(event); } catch (err) { - console.error('Error in filter event callback:', err); + console.error("Error in filter event callback:", err); } }, koffi.pointer(EventCallbackProto) @@ -132,7 +128,7 @@ export class FilterEventCallbackHandle { try { koffi.unregister(this.callback); } catch (err) { - console.error('Failed to unregister filter event callback:', err); + console.error("Failed to unregister filter event callback:", err); } this.callback = null; } @@ -159,7 +155,7 @@ export function registerFilterEventCallback( const result = mcpFilterLib.mcp_filter_chain_set_event_callback( BigInt(chainHandle), callback, - null // user_data + null // user_data ) as number; if (result === 0) { @@ -169,9 +165,7 @@ export function registerFilterEventCallback( // Registration failed, clean up handle.destroy(); - throw new Error( - `Failed to register filter event callback (error code ${result})` - ); + throw new Error(`Failed to register filter event callback (error code ${result})`); } /** @@ -184,9 +178,7 @@ export function unregisterFilterEventCallback( chainHandle: number, callbackHandle: FilterEventCallbackHandle ): void { - const result = mcpFilterLib.mcp_filter_chain_clear_event_callback( - BigInt(chainHandle) - ) as number; + const result = mcpFilterLib.mcp_filter_chain_clear_event_callback(BigInt(chainHandle)) as number; if (result !== 0) { console.error(`Failed to unregister filter event callback (error code ${result})`); diff --git a/sdk/typescript/src/filter-events.ts b/sdk/typescript/src/filter-events.ts index c77c31cb..e4d9846b 100644 --- a/sdk/typescript/src/filter-events.ts +++ b/sdk/typescript/src/filter-events.ts @@ -105,12 +105,12 @@ export type FilterEventHandler = (event: FilterEvent) => void; * Get string 
representation of event type */ export function filterEventTypeToString(type: FilterEventType): string { - return FilterEventType[type] ?? 'UNKNOWN'; + return FilterEventType[type] ?? "UNKNOWN"; } /** * Get string representation of event severity */ export function filterEventSeverityToString(severity: FilterEventSeverity): string { - return FilterEventSeverity[severity] ?? 'UNKNOWN'; + return FilterEventSeverity[severity] ?? "UNKNOWN"; } diff --git a/sdk/typescript/src/filter-types.ts b/sdk/typescript/src/filter-types.ts index b82bc56a..a9b5d4d1 100644 --- a/sdk/typescript/src/filter-types.ts +++ b/sdk/typescript/src/filter-types.ts @@ -14,7 +14,7 @@ export type { FilterSpec, Address, SocketAddress, -} from './mcp-filter-chain'; +} from "./mcp-filter-chain"; /** * Filter operation result codes (matching mcp_c_types.h) @@ -32,11 +32,11 @@ export enum FilterResultCode { * Filter decisions for message processing */ export enum FilterDecision { - ALLOW = 0, // Continue processing - DENY = 1, // Reject the message - DELAY = 2, // Delay processing - QUEUE = 3, // Queue for later - TRANSFORM = 4, // Message was transformed + ALLOW = 0, // Continue processing + DENY = 1, // Reject the message + DELAY = 2, // Delay processing + QUEUE = 3, // Queue for later + TRANSFORM = 4, // Message was transformed } /** @@ -82,13 +82,13 @@ export interface FilterMetrics { requests_delayed?: number; current_rate?: number; tokens_available?: number; - state?: string; // e.g., "OPEN", "CLOSED", "HALF_OPEN" for circuit breaker + state?: string; // e.g., "OPEN", "CLOSED", "HALF_OPEN" for circuit breaker failure_count?: number; success_count?: number; avg_latency_ms?: number; p95_latency_ms?: number; p99_latency_ms?: number; - [key: string]: any; // Allow additional filter-specific metrics + [key: string]: any; // Allow additional filter-specific metrics }; } @@ -119,7 +119,7 @@ export interface ValidationResult { */ export interface AssemblyResult { success: boolean; - chain?: number; // 
Filter chain handle if successful + chain?: number; // Filter chain handle if successful errorMessage?: string; createdFilters: string[]; warnings: string[]; diff --git a/sdk/typescript/src/gopher-filtered-transport.ts b/sdk/typescript/src/gopher-filtered-transport.ts index 8c1bb6c9..d555b78c 100644 --- a/sdk/typescript/src/gopher-filtered-transport.ts +++ b/sdk/typescript/src/gopher-filtered-transport.ts @@ -130,22 +130,16 @@ export class GopherFilteredTransport implements Transport { * @param config - Filter configuration and options * @throws Error if dispatcher handle is invalid or filter chain creation fails */ - constructor( - sdkTransport: Transport, - config: GopherFilteredTransportConfig - ) { + constructor(sdkTransport: Transport, config: GopherFilteredTransportConfig) { this.sdkTransport = sdkTransport; this.config = { queueSize: 1000, debugLogging: false, - ...config + ...config, }; // Create filter chain (this validates config and creates C++ chain) - this.filterChain = new FilterChain( - config.dispatcherHandle, - config.filterConfig - ); + this.filterChain = new FilterChain(config.dispatcherHandle, config.filterConfig); // Create message queue for delayed/queued messages this.messageQueue = new MessageQueue(this.config.queueSize || 1000); @@ -251,7 +245,7 @@ export class GopherFilteredTransport implements Transport { }; // Propagate error events - this.sdkTransport.onerror = (error) => { + this.sdkTransport.onerror = error => { if (this.config.debugLogging) { console.error("❌ Transport error:", error); } @@ -286,7 +280,7 @@ export class GopherFilteredTransport implements Transport { } // Start underlying transport if it has start method - if ('start' in this.sdkTransport && typeof this.sdkTransport.start === 'function') { + if ("start" in this.sdkTransport && typeof this.sdkTransport.start === "function") { await (this.sdkTransport as any).start(); } @@ -426,13 +420,16 @@ export class GopherFilteredTransport implements Transport { */ async 
handleRequest(req: any, res: any): Promise { // Check if the underlying transport has handleRequest method - if ('handleRequest' in this.sdkTransport && typeof (this.sdkTransport as any).handleRequest === 'function') { + if ( + "handleRequest" in this.sdkTransport && + typeof (this.sdkTransport as any).handleRequest === "function" + ) { if (this.config.debugLogging) { console.log("🌐 [GopherFilteredTransport] Proxying handleRequest to wrapped transport"); } // For HTTP POST requests, intercept and filter BEFORE passing to SDK - if (req.method === 'POST') { + if (req.method === "POST") { const bodyBuffer = await this.readRequestBody(req); const forwardOriginal = async () => { return await this.forwardHttpRequestWithBody(req, res, bodyBuffer); @@ -443,7 +440,7 @@ export class GopherFilteredTransport implements Transport { } try { - const bodyString = bodyBuffer.toString('utf-8'); + const bodyString = bodyBuffer.toString("utf-8"); const message = JSON.parse(bodyString) as JSONRPCMessage; const result = await this.filterChain.processIncoming(message); @@ -452,7 +449,7 @@ export class GopherFilteredTransport implements Transport { res, bodyBuffer, message, - result, + result ); if (handled) { @@ -460,7 +457,7 @@ export class GopherFilteredTransport implements Transport { } } catch (error) { if (this.config.debugLogging) { - console.warn('⚠️ Failed to parse/filter request (falling back to SDK):', error); + console.warn("⚠️ Failed to parse/filter request (falling back to SDK):", error); } } @@ -470,7 +467,7 @@ export class GopherFilteredTransport implements Transport { // Proxy to underlying transport return await (this.sdkTransport as any).handleRequest(req, res); } else { - throw new Error('Underlying transport does not support handleRequest method'); + throw new Error("Underlying transport does not support handleRequest method"); } } @@ -479,7 +476,7 @@ export class GopherFilteredTransport implements Transport { for await (const chunk of req) { if 
(Buffer.isBuffer(chunk)) { chunks.push(chunk); - } else if (typeof chunk === 'string') { + } else if (typeof chunk === "string") { chunks.push(Buffer.from(chunk)); } else { chunks.push(Buffer.from(chunk)); @@ -493,7 +490,7 @@ export class GopherFilteredTransport implements Transport { res: any, originalBody: Buffer, message: JSONRPCMessage, - result: FilterResult, + result: FilterResult ): Promise { switch (result.decision) { case FilterDecision.DENY: { @@ -503,18 +500,20 @@ export class GopherFilteredTransport implements Transport { const retryAfterMs = result.delayMs || 1000; res.writeHead(429, { - 'Content-Type': 'application/json', - 'Retry-After': Math.ceil(retryAfterMs / 1000).toString(), + "Content-Type": "application/json", + "Retry-After": Math.ceil(retryAfterMs / 1000).toString(), }); - res.end(JSON.stringify({ - jsonrpc: '2.0', - id: (message as any)?.id ?? null, - error: { - code: -32003, - message: result.reason || 'Rate limit exceeded', - data: { retryAfterMs }, - }, - })); + res.end( + JSON.stringify({ + jsonrpc: "2.0", + id: (message as any)?.id ?? null, + error: { + code: -32003, + message: result.reason || "Rate limit exceeded", + data: { retryAfterMs }, + }, + }) + ); return true; } @@ -524,14 +523,16 @@ export class GopherFilteredTransport implements Transport { req, res, this.serializeFilteredBody(message, result), - { preFiltered: true, filteredMessage: this.getForwardedMessage(message, result) }, + { preFiltered: true, filteredMessage: this.getForwardedMessage(message, result) } ); return true; } case FilterDecision.QUEUE: { if (this.config.debugLogging) { - console.warn('⚠️ Filter requested queueing for HTTP request - forwarding immediately (queue unsupported for HTTP).'); + console.warn( + "⚠️ Filter requested queueing for HTTP request - forwarding immediately (queue unsupported for HTTP)." 
+ ); } await this.forwardHttpRequestWithBody(req, res, originalBody); return true; @@ -543,7 +544,7 @@ export class GopherFilteredTransport implements Transport { req, res, this.serializeFilteredBody(message, result), - { preFiltered: true, filteredMessage: this.getForwardedMessage(message, result) }, + { preFiltered: true, filteredMessage: this.getForwardedMessage(message, result) } ); return true; } @@ -555,7 +556,7 @@ export class GopherFilteredTransport implements Transport { private serializeFilteredBody(message: JSONRPCMessage, result: FilterResult): Buffer { const payload = result.transformedMessage ?? JSON.stringify(message); - return Buffer.from(payload, 'utf-8'); + return Buffer.from(payload, "utf-8"); } private getForwardedMessage(message: JSONRPCMessage, result: FilterResult): JSONRPCMessage { @@ -563,7 +564,10 @@ export class GopherFilteredTransport implements Transport { try { return JSON.parse(result.transformedMessage) as JSONRPCMessage; } catch (error) { - console.warn('⚠️ Failed to parse transformed message JSON (falling back to original message):', error); + console.warn( + "⚠️ Failed to parse transformed message JSON (falling back to original message):", + error + ); } } return message; @@ -573,9 +577,9 @@ export class GopherFilteredTransport implements Transport { req: any, res: any, body: Buffer, - options?: { preFiltered?: boolean; filteredMessage?: JSONRPCMessage }, + options?: { preFiltered?: boolean; filteredMessage?: JSONRPCMessage } ): Promise { - const { Readable } = await import('stream'); + const { Readable } = await import("stream"); const bodyStream = Readable.from([body]); // Copy HTTP request metadata so downstream consumers behave as if using original IncomingMessage @@ -689,7 +693,7 @@ export class GopherFilteredTransport implements Transport { size: this.messageQueue.size(), capacity: this.messageQueue.capacity(), isFull: this.messageQueue.isFull(), - oldestAge: this.messageQueue.oldestMessageAge() + oldestAge: 
this.messageQueue.oldestMessageAge(), }; } diff --git a/sdk/typescript/src/index.ts b/sdk/typescript/src/index.ts index f6b16256..20e70754 100644 --- a/sdk/typescript/src/index.ts +++ b/sdk/typescript/src/index.ts @@ -42,15 +42,8 @@ export * from "./types"; export { FilterChain } from "./filter-chain-ffi"; // Metrics callbacks bridge -export { - registerMetricsCallbacks, - unregisterMetricsCallbacks, -} from "./metrics-callbacks"; -export type { - MetricsCallbacks, - MetricsSnapshot, - MetricsThresholdEvent, -} from "./types/metrics"; +export { registerMetricsCallbacks, unregisterMetricsCallbacks } from "./metrics-callbacks"; +export type { MetricsCallbacks, MetricsSnapshot, MetricsThresholdEvent } from "./types/metrics"; // Filter event types and callbacks bridge export { @@ -59,11 +52,7 @@ export { filterEventTypeToString, filterEventSeverityToString, } from "./filter-events"; -export type { - FilterEvent, - FilterEventContext, - FilterEventHandler, -} from "./filter-events"; +export type { FilterEvent, FilterEventContext, FilterEventHandler } from "./filter-events"; export { registerFilterEventCallback, unregisterFilterEventCallback, diff --git a/sdk/typescript/src/mcp-ffi-bindings.ts b/sdk/typescript/src/mcp-ffi-bindings.ts index 4ac4ece1..b49c7361 100644 --- a/sdk/typescript/src/mcp-ffi-bindings.ts +++ b/sdk/typescript/src/mcp-ffi-bindings.ts @@ -13,9 +13,9 @@ import { join } from "path"; // Transport type enum matching C API export enum TransportType { - MCP_TRANSPORT_HTTP_SSE = 0, // HTTP with Server-Sent Events - MCP_TRANSPORT_STDIO = 1, // Standard input/output - MCP_TRANSPORT_PIPE = 2 // Named pipe + MCP_TRANSPORT_HTTP_SSE = 0, // HTTP with Server-Sent Events + MCP_TRANSPORT_STDIO = 1, // Standard input/output + MCP_TRANSPORT_PIPE = 2, // Named pipe } // Library configuration for different platforms and architectures @@ -235,12 +235,12 @@ try { { name: "mcp_chain_assemble_from_json", signature: "int", - args: ["void*", "void*", "void*"], // dispatcher is 
mcp_dispatcher_t (pointer) + args: ["void*", "void*", "void*"], // dispatcher is mcp_dispatcher_t (pointer) }, { name: "mcp_chain_assemble_from_config", signature: "int", - args: ["void*", "void*", "void*"], // dispatcher is mcp_dispatcher_t (pointer) + args: ["void*", "void*", "void*"], // dispatcher is mcp_dispatcher_t (pointer) }, { name: "mcp_chain_assembly_result_free", @@ -250,17 +250,17 @@ try { { name: "mcp_chain_create_from_json", signature: "uint64_t", - args: ["void*", "void*"], // dispatcher is mcp_dispatcher_t (pointer), not uint64_t + args: ["void*", "void*"], // dispatcher is mcp_dispatcher_t (pointer), not uint64_t }, { name: "mcp_chain_create_from_json_async", signature: "void", args: [ - "void*", // mcp_dispatcher_t - "void*", // mcp_json_value_t - "void*", // callback function pointer - "void*" // user_data - ] + "void*", // mcp_dispatcher_t + "void*", // mcp_json_value_t + "void*", // callback function pointer + "void*", // user_data + ], }, { name: "mcp_filter_chain_retain", signature: "void", args: ["uint64_t"] }, { name: "mcp_filter_chain_release", signature: "void", args: ["uint64_t"] }, @@ -273,17 +273,17 @@ try { { name: "mcp_chain_set_filter_enabled", signature: "int", - args: ["uint64_t", "string", "int"] // chain, filter_name, enabled + args: ["uint64_t", "string", "int"], // chain, filter_name, enabled }, { name: "mcp_chain_get_stats", signature: "int", - args: ["uint64_t", "void*"] // chain, stats pointer + args: ["uint64_t", "void*"], // chain, stats pointer }, { name: "mcp_chain_export_to_json", - signature: "void*", // returns mcp_json_value_t - args: ["uint64_t"] // chain + signature: "void*", // returns mcp_json_value_t + args: ["uint64_t"], // chain }, { name: "mcp_filter_chain_set_metrics_callbacks", @@ -471,7 +471,7 @@ try { { name: "mcp_filter_manager_create", signature: "uint64_t", - args: ["void*", "void*"], // Changed from uint64_t to void* to accept opaque handles + args: ["void*", "void*"], // Changed from uint64_t to 
void* to accept opaque handles }, { name: "mcp_filter_manager_add_filter", @@ -579,62 +579,62 @@ try { // Async Filter Processing { name: "mcp_filter_chain_initialize", - signature: "int", // mcp_result_t - args: ["uint64_t"] // chain handle + signature: "int", // mcp_result_t + args: ["uint64_t"], // chain handle }, { name: "mcp_filter_chain_shutdown", - signature: "int", // mcp_result_t - args: ["uint64_t"] // chain handle + signature: "int", // mcp_result_t + args: ["uint64_t"], // chain handle }, { name: "mcp_chain_submit_incoming", - signature: "int", // mcp_status_t return + signature: "int", // mcp_status_t return args: [ - "uint64_t", // chain handle - "string", // message_json - "void*", // user_data (Buffer with callback ID) - "void*", // callback function pointer - "void*" // error pointer - ] + "uint64_t", // chain handle + "string", // message_json + "void*", // user_data (Buffer with callback ID) + "void*", // callback function pointer + "void*", // error pointer + ], }, { name: "mcp_chain_submit_outgoing", - signature: "int", // mcp_status_t return + signature: "int", // mcp_status_t return args: [ - "uint64_t", // chain handle - "string", // message_json - "void*", // user_data - "void*", // callback function pointer - "void*" // error pointer - ] + "uint64_t", // chain handle + "string", // message_json + "void*", // user_data + "void*", // callback function pointer + "void*", // error pointer + ], }, // Error handling functions (from mcp_c_types_api.h) { name: "mcp_error_create", - signature: "void*", // returns mcp_error_t - args: ["int32_t", "string"] // code, message + signature: "void*", // returns mcp_error_t + args: ["int32_t", "string"], // code, message }, { name: "mcp_error_free", signature: "void", - args: ["void*"] // error handle + args: ["void*"], // error handle }, { name: "mcp_error_get_code", signature: "int32_t", - args: ["void*"] // error handle + args: ["void*"], // error handle }, { name: "mcp_error_get_message", signature: 
"string", - args: ["void*"] // error handle + args: ["void*"], // error handle }, { name: "mcp_error_get_data", signature: "string", - args: ["void*"] // error handle + args: ["void*"], // error handle }, // Core MCP functions (from mcp_c_api.h) @@ -656,29 +656,33 @@ try { // Connection lifecycle functions { name: "mcp_connection_create_client", signature: "void*", args: ["void*", "int"] }, - { name: "mcp_connection_configure", signature: "int", args: ["void*", "void*", "void*", "void*"] }, + { + name: "mcp_connection_configure", + signature: "int", + args: ["void*", "void*", "void*", "void*"], + }, { name: "mcp_connection_destroy", signature: "void", args: ["void*"] }, // Chain-level filter event callbacks (from mcp_c_filter_events.h) { name: "mcp_filter_chain_set_event_callback", signature: "int", - args: ["void*", "void*", "void*"] // chain handle, callback function pointer, user_data + args: ["void*", "void*", "void*"], // chain handle, callback function pointer, user_data }, { name: "mcp_filter_chain_clear_event_callback", signature: "int", - args: ["void*"] // chain handle + args: ["void*"], // chain handle }, { name: "mcp_filter_event_type_to_string", signature: "string", - args: ["int"] // mcp_filter_event_type_t + args: ["int"], // mcp_filter_event_type_t }, { name: "mcp_filter_event_severity_to_string", signature: "string", - args: ["int"] // mcp_filter_event_severity_t + args: ["int"], // mcp_filter_event_severity_t }, ]; diff --git a/sdk/typescript/src/mcp-ffi-dispatcher.ts b/sdk/typescript/src/mcp-ffi-dispatcher.ts index 3e7b4bfa..1af89a5b 100644 --- a/sdk/typescript/src/mcp-ffi-dispatcher.ts +++ b/sdk/typescript/src/mcp-ffi-dispatcher.ts @@ -6,7 +6,7 @@ * handle for easier use in tests and examples. 
*/ -import { createHybridDispatcher, destroyHybridDispatcher } from './filter-dispatcher'; +import { createHybridDispatcher, destroyHybridDispatcher } from "./filter-dispatcher"; /** * Dispatcher class for managing C++ filter chain dispatchers @@ -44,7 +44,7 @@ export class Dispatcher { */ get handle(): number { if (this._destroyed) { - throw new Error('Cannot access handle of destroyed dispatcher'); + throw new Error("Cannot access handle of destroyed dispatcher"); } return this._handle; } diff --git a/sdk/typescript/src/mcp-filter-api.ts b/sdk/typescript/src/mcp-filter-api.ts index 2cc12c29..af802afd 100644 --- a/sdk/typescript/src/mcp-filter-api.ts +++ b/sdk/typescript/src/mcp-filter-api.ts @@ -27,7 +27,7 @@ function startDispatcherPump(dispatcher: pointer): void { // Run dispatcher loop with zero timeout to process pending events mcpFilterLib.mcp_dispatcher_run_timeout(dispatcher, 0); } catch (error) { - console.error('Dispatcher pump error:', error); + console.error("Dispatcher pump error:", error); state.active = false; return; } @@ -57,7 +57,7 @@ let globalCallbackStore: Set | null = null; */ export function ensureMcpInitialized(): void { if (!mcpFilterLib) { - throw new Error('MCP native library is not loaded'); + throw new Error("MCP native library is not loaded"); } // console.log('🔍 [ensureMcpInitialized] Checking if library is initialized...'); @@ -72,7 +72,7 @@ export function ensureMcpInitialized(): void { } if (!mcpFilterLib.mcp_init) { - throw new Error('mcp_init symbol not available in native library'); + throw new Error("mcp_init symbol not available in native library"); } // console.log('🔍 [ensureMcpInitialized] Calling mcp_init(null)...'); @@ -277,7 +277,9 @@ export function createBuiltinFilter( export function createDispatcher(): number { // Legacy function kept for backward compatibility // Will be removed in future version - console.warn('createDispatcher() is deprecated. 
Use createRealDispatcher() or createStubDispatcher()'); + console.warn( + "createDispatcher() is deprecated. Use createRealDispatcher() or createStubDispatcher()" + ); return 1; // Stub value for backward compatibility } @@ -837,7 +839,7 @@ export function resetFilterStats(filter: number): number { * Detect if running in test environment */ export const isTestEnvironment = (): boolean => { - return process.env['NODE_ENV'] === 'test' && !process.env['USE_REAL_HANDLES']; + return process.env["NODE_ENV"] === "test" && !process.env["USE_REAL_HANDLES"]; }; /** @@ -850,17 +852,17 @@ export const isProductionMode = (): boolean => { /** * Get current handle mode */ -export const getHandleMode = (): 'real' | 'stub' => { - return isProductionMode() ? 'real' : 'stub'; +export const getHandleMode = (): "real" | "stub" => { + return isProductionMode() ? "real" : "stub"; }; /** * Feature flags for gradual rollout */ export const FEATURE_FLAGS = { - USE_REAL_HANDLES: process.env['USE_REAL_HANDLES'] !== 'false', - ENABLE_HANDLE_VALIDATION: process.env['VALIDATE_HANDLES'] === 'true', - VERBOSE_HANDLE_LOGGING: process.env['DEBUG_HANDLES'] === 'true', + USE_REAL_HANDLES: process.env["USE_REAL_HANDLES"] !== "false", + ENABLE_HANDLE_VALIDATION: process.env["VALIDATE_HANDLES"] === "true", + VERBOSE_HANDLE_LOGGING: process.env["DEBUG_HANDLES"] === "true", }; // ============================================================================ @@ -889,10 +891,10 @@ export function createRealDispatcher(): pointer { // Check if native library is available if (!mcpFilterLib || !mcpFilterLib.mcp_dispatcher_create) { throw new Error( - 'Native library not available. Either:\n' + - '1. Build the C++ library with "make build"\n' + - '2. Use stubHandleFactory for testing\n' + - 'DO NOT return stub handles from this function!' + "Native library not available. Either:\n" + + '1. Build the C++ library with "make build"\n' + + "2. 
Use stubHandleFactory for testing\n" + + "DO NOT return stub handles from this function!" ); } @@ -914,12 +916,13 @@ export function createRealDispatcher(): pointer { // // console.log(`🔍 [createRealDispatcher] Error pointer: ${errorPtr}`); // } - throw new Error('Failed to create dispatcher - native library returned null'); + throw new Error("Failed to create dispatcher - native library returned null"); } // Dispatcher is valid - now we can log it safely // console.log('🔍 [createRealDispatcher] mcp_dispatcher_create succeeded'); - if (FEATURE_FLAGS.VERBOSE_HANDLE_LOGGING || true) { // Always log for debugging + if (FEATURE_FLAGS.VERBOSE_HANDLE_LOGGING || true) { + // Always log for debugging try { // console.log(`Created real dispatcher handle (pointer object): ${typeof dispatcher}`); } catch (e) { @@ -942,8 +945,8 @@ export function createRealDispatcher(): pointer { // console.log('🔍 [createRealDispatcher] dispatcher buffer property:', dispatcher && (dispatcher as any).buffer ? 'present' : 'missing'); try { // eslint-disable-next-line @typescript-eslint/no-var-requires - const koffiModule = require('koffi'); - if (typeof koffiModule.addressOf === 'function') { + const koffiModule = require("koffi"); + if (typeof koffiModule.addressOf === "function") { // console.log('🔍 [createRealDispatcher] dispatcher addressOf:', koffiModule.addressOf(dispatcher)); } else { // console.log('🔍 [createRealDispatcher] koffi.addressOf not available'); @@ -971,7 +974,7 @@ export function destroyDispatcher(dispatcher: pointer): void { } if (dispatcher === 1 || dispatcher === 2) { - console.warn('Warning: Attempted to destroy stub handle - ignoring'); + console.warn("Warning: Attempted to destroy stub handle - ignoring"); return; } @@ -1000,26 +1003,29 @@ export function destroyDispatcher(dispatcher: pointer): void { * Create a real connection handle using the native C++ library * This function ALWAYS creates real handles, never stubs */ -export function createConnection(dispatcher: 
pointer, transportType: TransportType = TransportType.MCP_TRANSPORT_HTTP_SSE): pointer { +export function createConnection( + dispatcher: pointer, + transportType: TransportType = TransportType.MCP_TRANSPORT_HTTP_SSE +): pointer { // Validate dispatcher handle if (!dispatcher || dispatcher === 0) { - throw new Error('Invalid dispatcher handle'); + throw new Error("Invalid dispatcher handle"); } // Check if native library is available if (!mcpFilterLib || !mcpFilterLib.mcp_connection_create_client) { throw new Error( - 'Native library not available for connection creation. Either:\n' + - '1. Build the C++ library with "make build"\n' + - '2. Use stubHandleFactory for testing\n' + - 'DO NOT return stub handles from this function!' + "Native library not available for connection creation. Either:\n" + + '1. Build the C++ library with "make build"\n' + + "2. Use stubHandleFactory for testing\n" + + "DO NOT return stub handles from this function!" ); } // Create client connection with specified transport type const connection = mcpFilterLib.mcp_connection_create_client(dispatcher, transportType); if (!connection || connection === 0) { - throw new Error('Failed to create connection - native library returned null'); + throw new Error("Failed to create connection - native library returned null"); } if (FEATURE_FLAGS.VERBOSE_HANDLE_LOGGING) { @@ -1038,7 +1044,7 @@ export function destroyConnection(connection: pointer): void { // Check if this is a stub handle (should never be passed here) if (connection === 1 || connection === 2) { - console.warn('Warning: Attempted to destroy stub connection handle - ignoring'); + console.warn("Warning: Attempted to destroy stub connection handle - ignoring"); return; } @@ -1063,7 +1069,7 @@ export function configureConnection( sslConfig: pointer = null ): number { if (!connection || connection === 0) { - throw new Error('Invalid connection handle'); + throw new Error("Invalid connection handle"); } return 
mcpFilterLib.mcp_connection_configure(connection, address, options, sslConfig) as number; @@ -1076,7 +1082,7 @@ export function configureConnection( */ export function runDispatcher(dispatcher: pointer): number { if (!dispatcher || dispatcher === 0) { - throw new Error('Invalid dispatcher handle'); + throw new Error("Invalid dispatcher handle"); } return mcpFilterLib.mcp_dispatcher_run(dispatcher) as number; @@ -1090,7 +1096,7 @@ export function runDispatcher(dispatcher: pointer): number { */ export function runDispatcherWithTimeout(dispatcher: pointer, timeoutMs: number): number { if (!dispatcher || dispatcher === 0) { - throw new Error('Invalid dispatcher handle'); + throw new Error("Invalid dispatcher handle"); } return mcpFilterLib.mcp_dispatcher_run_timeout(dispatcher, timeoutMs) as number; @@ -1148,20 +1154,20 @@ export const defaultHandleFactory: HandleFactory = { export const stubHandleFactory: HandleFactory = { createDispatcher: () => { throw new Error( - 'Stub dispatcher handles cannot be used with native C++ code.\n' + - 'Either:\n' + - '1. Build the native library (make build) and use real handles\n' + - '2. Use Jest mocks to intercept FFI calls in tests\n' + - 'Stub handles returning literal integers will crash when passed to C++.' + "Stub dispatcher handles cannot be used with native C++ code.\n" + + "Either:\n" + + "1. Build the native library (make build) and use real handles\n" + + "2. Use Jest mocks to intercept FFI calls in tests\n" + + "Stub handles returning literal integers will crash when passed to C++." ); }, createConnection: (_dispatcher: pointer, _transportType?: TransportType) => { throw new Error( - 'Stub connection handles cannot be used with native C++ code.\n' + - 'Either:\n' + - '1. Build the native library (make build) and use real handles\n' + - '2. Use Jest mocks to intercept FFI calls in tests\n' + - 'Stub handles returning literal integers will crash when passed to C++.' 
+ "Stub connection handles cannot be used with native C++ code.\n" + + "Either:\n" + + "1. Build the native library (make build) and use real handles\n" + + "2. Use Jest mocks to intercept FFI calls in tests\n" + + "Stub handles returning literal integers will crash when passed to C++." ); }, destroyDispatcher: (_dispatcher: pointer) => { diff --git a/sdk/typescript/src/mcp-filter-buffer.ts b/sdk/typescript/src/mcp-filter-buffer.ts index f382e0fe..16a075c0 100644 --- a/sdk/typescript/src/mcp-filter-buffer.ts +++ b/sdk/typescript/src/mcp-filter-buffer.ts @@ -678,12 +678,16 @@ export function destroyBufferPool(_pool: any): void { * Create buffer pool with simple parameters * This overloads the existing createBufferPoolEx with simple numeric parameters */ -export function createBufferPoolSimple(bufferSize: number, maxBuffers: number, preallocCount: number): any { +export function createBufferPoolSimple( + bufferSize: number, + maxBuffers: number, + preallocCount: number +): any { return createBufferPoolEx({ bufferSize, maxBuffers, preallocCount, useThreadLocal: false, - zeroOnAlloc: false + zeroOnAlloc: false, }); } diff --git a/sdk/typescript/src/mcp-filter-chain.ts b/sdk/typescript/src/mcp-filter-chain.ts index 2610c2c6..23fa42b5 100644 --- a/sdk/typescript/src/mcp-filter-chain.ts +++ b/sdk/typescript/src/mcp-filter-chain.ts @@ -289,10 +289,9 @@ function canonicalToAssemblerConfig(config: CanonicalConfig): FilterChainConfig * Check if configuration is in canonical format */ function isCanonicalConfig(config: any): config is CanonicalConfig { - return config && - typeof config === 'object' && - 'listeners' in config && - Array.isArray(config.listeners); + return ( + config && typeof config === "object" && "listeners" in config && Array.isArray(config.listeners) + ); } function createJsonHandleFromConfig(config: FilterChainConfig): JsonHandle { @@ -355,7 +354,8 @@ export function assembleFilterChain(dispatcher: any, config: FilterChainConfig): let errorMessage: string 
| undefined; if (!success) { - errorMessage = status !== MCP_OK ? `Assembly failed with code: ${status}` : "Assembler reported failure"; + errorMessage = + status !== MCP_OK ? `Assembly failed with code: ${status}` : "Assembler reported failure"; } const assemblyResult: AssemblyResult = { @@ -390,7 +390,7 @@ export function canonicalConfigToNormalizedJson(config: CanonicalConfig): string } export function createFilterChainFromConfig( - dispatcher: any, // mcp_dispatcher_t (pointer type) + dispatcher: any, // mcp_dispatcher_t (pointer type) config: CanonicalConfig ): number { if (!isCanonicalConfig(config)) { @@ -402,7 +402,7 @@ export function createFilterChainFromConfig( try { const rawHandle = mcpFilterLib.mcp_chain_create_from_json(dispatcher, jsonHandle); // Convert koffi pointer/uint64_t to number - const handle = typeof rawHandle === 'number' ? rawHandle : Number(rawHandle); + const handle = typeof rawHandle === "number" ? rawHandle : Number(rawHandle); if (!handle) { throw new Error("Failed to create filter chain from configuration"); } @@ -412,7 +412,6 @@ export function createFilterChainFromConfig( } } - // ============================================================================ // Chain Management // ============================================================================ @@ -650,7 +649,7 @@ export function validateChain(chain: number, errors: any): number { * Create a simple sequential chain using canonical configuration */ export function createSimpleChain( - dispatcher: any, // mcp_dispatcher_t (pointer type) + dispatcher: any, // mcp_dispatcher_t (pointer type) filterTypes: string[], name: string = "simple-chain", port: number = 8080 @@ -686,7 +685,7 @@ export function createSimpleChain( * Note: Parallel execution is handled by the filter chain implementation */ export function createParallelChain( - dispatcher: any, // mcp_dispatcher_t (pointer type) + dispatcher: any, // mcp_dispatcher_t (pointer type) filterTypes: string[], maxParallel: 
number = 4, name: string = "parallel-chain", @@ -728,7 +727,7 @@ export function createParallelChain( * Note: Conditional routing would be implemented via filter configuration */ export function createConditionalChain( - dispatcher: any, // mcp_dispatcher_t (pointer type) + dispatcher: any, // mcp_dispatcher_t (pointer type) filterConfigs: Array<{ type: string; condition?: any }>, name: string = "conditional-chain", port: number = 8080 diff --git a/sdk/typescript/src/mcp-filter-manager.ts b/sdk/typescript/src/mcp-filter-manager.ts index 301290f2..8481f2fc 100644 --- a/sdk/typescript/src/mcp-filter-manager.ts +++ b/sdk/typescript/src/mcp-filter-manager.ts @@ -17,7 +17,6 @@ import { releaseFilterManager, } from "./mcp-filter-api"; - // Import the three filter modules as requested import { ChainExecutionMode, @@ -28,7 +27,6 @@ import { import * as BufferModule from "./mcp-filter-buffer"; - /** * JSON-RPC Message interface (compatible with MCP) */ @@ -51,84 +49,102 @@ export interface JSONRPCMessage { export interface FilterManagerConfig { // Network filters network?: { - tcpProxy?: boolean | { - enabled: boolean; - upstreamHost?: string; - upstreamPort?: number; - bindAddress?: string; - bindPort?: number; - }; + tcpProxy?: + | boolean + | { + enabled: boolean; + upstreamHost?: string; + upstreamPort?: number; + bindAddress?: string; + bindPort?: number; + }; udpProxy?: boolean; }; // HTTP filters http?: { - codec?: boolean | { - enabled: boolean; - compressionLevel?: number; - maxRequestSize?: number; - maxResponseSize?: number; - }; + codec?: + | boolean + | { + enabled: boolean; + compressionLevel?: number; + maxRequestSize?: number; + maxResponseSize?: number; + }; routing?: boolean; }; // Security filters security?: { - authentication?: boolean | { - method?: string; - secret?: string; - issuer?: string; - audience?: string; - }; - authorization?: boolean | { - enabled: boolean; - policy?: string; - rules?: Array<{ - resource: string; - action: string; - 
conditions?: any; - }>; - }; + authentication?: + | boolean + | { + method?: string; + secret?: string; + issuer?: string; + audience?: string; + }; + authorization?: + | boolean + | { + enabled: boolean; + policy?: string; + rules?: Array<{ + resource: string; + action: string; + conditions?: any; + }>; + }; rateLimiting?: boolean; }; // Observability filters observability?: { - accessLog?: boolean | { - enabled: boolean; - format?: string; - fields?: string[]; - output?: string; - }; - metrics?: boolean | { - enabled: boolean; - endpoint?: string; - interval?: number; - labels?: Record; - }; - tracing?: boolean | { - enabled: boolean; - serviceName?: string; - endpoint?: string; - samplingRate?: number; - }; + accessLog?: + | boolean + | { + enabled: boolean; + format?: string; + fields?: string[]; + output?: string; + }; + metrics?: + | boolean + | { + enabled: boolean; + endpoint?: string; + interval?: number; + labels?: Record; + }; + tracing?: + | boolean + | { + enabled: boolean; + serviceName?: string; + endpoint?: string; + samplingRate?: number; + }; }; // Traffic management trafficManagement?: { - circuitBreaker?: boolean | { - enabled: boolean; - failureThreshold?: number; - timeout?: number; - resetTimeout?: number; - }; - retry?: boolean | { - enabled: boolean; - maxAttempts?: number; - backoffStrategy?: string; - baseDelay?: number; - maxDelay?: number; - }; + circuitBreaker?: + | boolean + | { + enabled: boolean; + failureThreshold?: number; + timeout?: number; + resetTimeout?: number; + }; + retry?: + | boolean + | { + enabled: boolean; + maxAttempts?: number; + backoffStrategy?: string; + baseDelay?: number; + maxDelay?: number; + }; timeout?: boolean; }; @@ -152,7 +168,7 @@ export interface FilterManagerConfig { stopOnError?: boolean; retryAttempts?: number; retryDelayMs?: number; - fallbackBehavior?: 'passthrough' | 'default' | 'error'; + fallbackBehavior?: "passthrough" | "default" | "error"; }; // Custom filters @@ -163,32 +179,40 @@ export 
interface FilterManagerConfig { }>; // Top-level shortcuts for common filters (for backward compatibility) - auth?: boolean | { - method?: string; - secret?: string; - issuer?: string; - audience?: string; - }; - rateLimit?: boolean | { - requestsPerSecond?: number; - burstSize?: number; - }; - logging?: boolean | { - level?: string; - format?: string; - }; - metrics?: boolean | { - enabled?: boolean; - endpoint?: string; - }; + auth?: + | boolean + | { + method?: string; + secret?: string; + issuer?: string; + audience?: string; + }; + rateLimit?: + | boolean + | { + requestsPerSecond?: number; + burstSize?: number; + }; + logging?: + | boolean + | { + level?: string; + format?: string; + }; + metrics?: + | boolean + | { + enabled?: boolean; + endpoint?: string; + }; } /** * Options for FilterManager initialization */ export interface FilterManagerOptions { - dispatcherHandle?: any; // Opaque dispatcher handle - connectionHandle?: any; // Opaque connection handle + dispatcherHandle?: any; // Opaque dispatcher handle + connectionHandle?: any; // Opaque connection handle } /** diff --git a/sdk/typescript/src/message-queue.ts b/sdk/typescript/src/message-queue.ts index 123dfb8b..760203a3 100644 --- a/sdk/typescript/src/message-queue.ts +++ b/sdk/typescript/src/message-queue.ts @@ -52,7 +52,7 @@ export class MessageQueue { this.queue.push({ message, - timestamp: Date.now() + timestamp: Date.now(), }); } diff --git a/sdk/typescript/src/metrics-callbacks.ts b/sdk/typescript/src/metrics-callbacks.ts index 6b31ae81..8b964979 100644 --- a/sdk/typescript/src/metrics-callbacks.ts +++ b/sdk/typescript/src/metrics-callbacks.ts @@ -3,9 +3,9 @@ * @brief Koffi bridge for metrics filter callbacks. 
*/ -import * as koffi from 'koffi'; -import { mcpFilterLib } from './mcp-ffi-bindings'; -import type { MetricsCallbacks, MetricsSnapshot, MetricsThresholdEvent } from './types/metrics'; +import * as koffi from "koffi"; +import { mcpFilterLib } from "./mcp-ffi-bindings"; +import type { MetricsCallbacks, MetricsSnapshot, MetricsThresholdEvent } from "./types/metrics"; function clampBigIntToNumber(value: bigint): number { const maxSafe = BigInt(Number.MAX_SAFE_INTEGER); @@ -22,10 +22,10 @@ function normalizeNumber(value: unknown): number { if (value === null || value === undefined) { return 0; } - if (typeof value === 'number') { + if (typeof value === "number") { return value; } - if (typeof value === 'bigint') { + if (typeof value === "bigint") { return clampBigIntToNumber(value); } return Number(value ?? 0); @@ -103,48 +103,46 @@ export class MetricsCallbackHandle { register(): any { if (this.destroyed) { - throw new Error('MetricsCallbackHandle has been destroyed'); + throw new Error("MetricsCallbackHandle has been destroyed"); } const suffix = `${Date.now().toString(36)}_${Math.random().toString(36).slice(2)}`; - const MetricsUpdateProto = koffi.proto( - `void metrics_update_cb_${suffix}(void*, void*)` - ); + const MetricsUpdateProto = koffi.proto(`void metrics_update_cb_${suffix}(void*, void*)`); const MetricsThresholdProto = koffi.proto( `void metrics_threshold_cb_${suffix}(const char*, uint64_t, uint64_t, void*)` ); const MetricsStruct = koffi.struct(`mcp_connection_metrics_${suffix}`, { - bytes_received: 'uint64_t', - bytes_sent: 'uint64_t', - messages_received: 'uint64_t', - messages_sent: 'uint64_t', - requests_received: 'uint64_t', - requests_sent: 'uint64_t', - responses_received: 'uint64_t', - responses_sent: 'uint64_t', - notifications_received: 'uint64_t', - notifications_sent: 'uint64_t', - errors_received: 'uint64_t', - errors_sent: 'uint64_t', - protocol_errors: 'uint64_t', - total_latency_ms: 'uint64_t', - min_latency_ms: 'uint64_t', - 
max_latency_ms: 'uint64_t', - latency_samples: 'uint64_t', - current_receive_rate_bps: 'double', - current_send_rate_bps: 'double', - peak_receive_rate_bps: 'double', - peak_send_rate_bps: 'double', - connection_uptime_ms: 'uint64_t', - idle_time_ms: 'uint64_t', + bytes_received: "uint64_t", + bytes_sent: "uint64_t", + messages_received: "uint64_t", + messages_sent: "uint64_t", + requests_received: "uint64_t", + requests_sent: "uint64_t", + responses_received: "uint64_t", + responses_sent: "uint64_t", + notifications_received: "uint64_t", + notifications_sent: "uint64_t", + errors_received: "uint64_t", + errors_sent: "uint64_t", + protocol_errors: "uint64_t", + total_latency_ms: "uint64_t", + min_latency_ms: "uint64_t", + max_latency_ms: "uint64_t", + latency_samples: "uint64_t", + current_receive_rate_bps: "double", + current_send_rate_bps: "double", + peak_receive_rate_bps: "double", + peak_send_rate_bps: "double", + connection_uptime_ms: "uint64_t", + idle_time_ms: "uint64_t", }); const CallbackStruct = koffi.struct(`mcp_metrics_callbacks_${suffix}`, { on_metrics_update: `metrics_update_cb_${suffix} *`, on_threshold_exceeded: `metrics_threshold_cb_${suffix} *`, - user_data: 'void *', + user_data: "void *", }); const registered: RegisteredCallbacks = { @@ -153,21 +151,18 @@ export class MetricsCallbackHandle { }; if (this.jsCallbacks.onMetricsUpdate) { - const cb = koffi.register( - (metricsPtr: Buffer, _userData: Buffer | null) => { - try { - if (!metricsPtr) { - throw new Error('Received null metrics pointer'); - } - const decoded = koffi.decode(metricsPtr, MetricsStruct) as Record; - this.jsCallbacks.onMetricsUpdate?.(toMetricsSnapshot(decoded)); - } catch (err) { - this.jsCallbacks.onError?.(err instanceof Error ? 
err : new Error(String(err))); + const cb = koffi.register((metricsPtr: Buffer, _userData: Buffer | null) => { + try { + if (!metricsPtr) { + throw new Error("Received null metrics pointer"); } - }, - koffi.pointer(MetricsUpdateProto) - ); - this.callbacks.set('on_metrics_update', cb); + const decoded = koffi.decode(metricsPtr, MetricsStruct) as Record; + this.jsCallbacks.onMetricsUpdate?.(toMetricsSnapshot(decoded)); + } catch (err) { + this.jsCallbacks.onError?.(err instanceof Error ? err : new Error(String(err))); + } + }, koffi.pointer(MetricsUpdateProto)); + this.callbacks.set("on_metrics_update", cb); registered.on_metrics_update = cb; } @@ -176,7 +171,7 @@ export class MetricsCallbackHandle { (metricName: string | null, value: bigint, threshold: bigint, _userData: Buffer | null) => { try { const event: MetricsThresholdEvent = { - metric: metricName ?? '', + metric: metricName ?? "", value: clampBigIntToNumber(value), threshold: clampBigIntToNumber(threshold), }; @@ -187,7 +182,7 @@ export class MetricsCallbackHandle { }, koffi.pointer(MetricsThresholdProto) ); - this.callbacks.set('on_threshold_exceeded', cb); + this.callbacks.set("on_threshold_exceeded", cb); registered.on_threshold_exceeded = cb; } @@ -211,7 +206,7 @@ export class MetricsCallbackHandle { koffi.unregister(cb); } catch (err) { // eslint-disable-next-line no-console - console.error('Failed to unregister metrics callback', err); + console.error("Failed to unregister metrics callback", err); } } this.callbacks.clear(); @@ -221,7 +216,7 @@ export class MetricsCallbackHandle { koffi.free(this.structPtr); } catch (err) { // eslint-disable-next-line no-console - console.error('Failed to free metrics callback struct', err); + console.error("Failed to free metrics callback struct", err); } this.structPtr = null; } @@ -239,7 +234,7 @@ export function registerMetricsCallbacks( const result = mcpFilterLib.mcp_filter_chain_set_metrics_callbacks( BigInt(chainHandle), - koffi.as(structPtr, 'void*') + 
koffi.as(structPtr, "void*") ) as number; if (result === 0) { diff --git a/sdk/typescript/src/types/circuit-breaker.ts b/sdk/typescript/src/types/circuit-breaker.ts index 3f5b86da..3e1dd371 100644 --- a/sdk/typescript/src/types/circuit-breaker.ts +++ b/sdk/typescript/src/types/circuit-breaker.ts @@ -11,11 +11,11 @@ */ export enum CircuitBreakerState { /** Circuit is operating normally */ - CLOSED = 'CLOSED', + CLOSED = "CLOSED", /** Circuit has opened due to failures */ - OPEN = 'OPEN', + OPEN = "OPEN", /** Circuit is testing if service has recovered */ - HALF_OPEN = 'HALF_OPEN', + HALF_OPEN = "HALF_OPEN", } /** diff --git a/src/event/libevent_dispatcher.cc b/src/event/libevent_dispatcher.cc index eeee5e0d..a65e6442 100644 --- a/src/event/libevent_dispatcher.cc +++ b/src/event/libevent_dispatcher.cc @@ -77,7 +77,8 @@ uint32_t fromLibeventEvents(short events) { // Early initialization for libevent threading support // CRITICAL: This must run BEFORE any other libevent functions or libraries // that might use libevent (like curl with libevent support). 
-// evthread_use_pthreads() is safe to call multiple times - it returns 0 after first call +// evthread_use_pthreads() is safe to call multiple times - it returns 0 after +// first call struct LibeventEarlyInit { LibeventEarlyInit() { #ifdef _WIN32 diff --git a/src/filter/enhanced_filter_chain_factory.cc b/src/filter/enhanced_filter_chain_factory.cc index 8002537e..3cf30583 100644 --- a/src/filter/enhanced_filter_chain_factory.cc +++ b/src/filter/enhanced_filter_chain_factory.cc @@ -351,8 +351,8 @@ class EnhancedProtocolFilter : public network::Filter, void onRequestThrottled(const std::string& method, size_t current_rate, size_t max_rate) override { - GOPHER_LOG_WARN("Rate Limiter: Request throttled: {} (rate: {}/{})", - method, current_rate, max_rate); + GOPHER_LOG_WARN("Rate Limiter: Request throttled: {} (rate: {}/{})", method, + current_rate, max_rate); } void onRateLimitExceeded(const std::string& bucket_name) override { diff --git a/src/filter/filter_chain_assembler.cc b/src/filter/filter_chain_assembler.cc index e3c4a915..99899a6c 100644 --- a/src/filter/filter_chain_assembler.cc +++ b/src/filter/filter_chain_assembler.cc @@ -130,8 +130,8 @@ network::FilterSharedPtr FilterChainAssembler::createSingleFilter( std::shared_ptr(emitter, emitter.get()); } } catch (const std::exception& ex) { - GOPHER_LOG_ERROR("FilterChainAssembler failed to create event emitter: {}", - ex.what()); + GOPHER_LOG_ERROR( + "FilterChainAssembler failed to create event emitter: {}", ex.what()); } } diff --git a/src/filter/sse_codec_filter.cc b/src/filter/sse_codec_filter.cc index 2242375a..30f8da81 100644 --- a/src/filter/sse_codec_filter.cc +++ b/src/filter/sse_codec_filter.cc @@ -184,7 +184,8 @@ network::FilterStatus SseCodecFilter::onData(Buffer& data, bool end_stream) { // For SSE, end_stream doesn't mean immediate close - it means no more data // We should keep the connection open for future events // Only close if explicitly requested or on error - GOPHER_LOG_DEBUG("SSE 
end_stream received - keeping connection open for SSE events"); + GOPHER_LOG_DEBUG( + "SSE end_stream received - keeping connection open for SSE events"); // Don't trigger CloseStream here - let the connection manager handle it } diff --git a/src/mcp_connection_manager.cc b/src/mcp_connection_manager.cc index d0456e07..41775d2b 100644 --- a/src/mcp_connection_manager.cc +++ b/src/mcp_connection_manager.cc @@ -876,12 +876,15 @@ void McpConnectionManager::onConnectionEvent(network::ConnectionEvent event) { } // Forward event to upper layer callbacks - GOPHER_LOG_DEBUG("McpConnectionManager forwarding event to protocol_callbacks_={}", - (protocol_callbacks_ ? "set" : "NULL")); + GOPHER_LOG_DEBUG( + "McpConnectionManager forwarding event to protocol_callbacks_={}", + (protocol_callbacks_ ? "set" : "NULL")); if (protocol_callbacks_) { - GOPHER_LOG_DEBUG("McpConnectionManager calling protocol_callbacks_->onConnectionEvent"); + GOPHER_LOG_DEBUG( + "McpConnectionManager calling protocol_callbacks_->onConnectionEvent"); protocol_callbacks_->onConnectionEvent(event); - GOPHER_LOG_DEBUG("McpConnectionManager protocol_callbacks_->onConnectionEvent returned"); + GOPHER_LOG_DEBUG( + "McpConnectionManager protocol_callbacks_->onConnectionEvent returned"); // Ensure protocol callbacks are processed before any requests if (event == network::ConnectionEvent::Connected) { diff --git a/src/network/connection_impl.cc b/src/network/connection_impl.cc index d0ed37f4..f9bf948a 100644 --- a/src/network/connection_impl.cc +++ b/src/network/connection_impl.cc @@ -1440,7 +1440,8 @@ void ConnectionImpl::doWrite() { if (socket_) { #ifdef _WIN32 u_long bytes_available = 0; - if (ioctlsocket(socket_->ioHandle().fd(), FIONREAD, &bytes_available) == 0) { + if (ioctlsocket(socket_->ioHandle().fd(), FIONREAD, &bytes_available) == + 0) { #else int bytes_available = 0; if (ioctl(socket_->ioHandle().fd(), FIONREAD, &bytes_available) == 0) { diff --git a/src/network/listener_impl.cc 
b/src/network/listener_impl.cc index 2eb05984..b2fe50ea 100644 --- a/src/network/listener_impl.cc +++ b/src/network/listener_impl.cc @@ -106,8 +106,9 @@ ActiveListener::ActiveListener(event::Dispatcher& dispatcher, ActiveListener::~ActiveListener() { disable(); } VoidResult ActiveListener::listen() { - GOPHER_LOG_DEBUG("ActiveListener::listen() called: bind_to_port={} address={}", - config_.bind_to_port, config_.address->asStringView()); + GOPHER_LOG_DEBUG( + "ActiveListener::listen() called: bind_to_port={} address={}", + config_.bind_to_port, config_.address->asStringView()); // Create socket if (config_.bind_to_port) { // Use the global createListenSocket function @@ -188,7 +189,8 @@ VoidResult ActiveListener::listen() { void ActiveListener::disable() { enabled_ = false; if (file_event_) { - GOPHER_LOG_DEBUG("ActiveListener::disable() fd={}", socket_->ioHandle().fd()); + GOPHER_LOG_DEBUG("ActiveListener::disable() fd={}", + socket_->ioHandle().fd()); file_event_->setEnabled(0); } } @@ -196,7 +198,8 @@ void ActiveListener::disable() { void ActiveListener::enable() { enabled_ = true; if (file_event_) { - GOPHER_LOG_DEBUG("ActiveListener::enable() fd={}", socket_->ioHandle().fd()); + GOPHER_LOG_DEBUG("ActiveListener::enable() fd={}", + socket_->ioHandle().fd()); file_event_->setEnabled(static_cast(event::FileReadyType::Read)); } } diff --git a/src/server/mcp_server_enhanced_filters.cc b/src/server/mcp_server_enhanced_filters.cc index 8540b070..ef4f8c55 100644 --- a/src/server/mcp_server_enhanced_filters.cc +++ b/src/server/mcp_server_enhanced_filters.cc @@ -193,8 +193,8 @@ void McpServer::setupEnhancedFilterChain( void onRequestRejected(const std::string& method, const std::string& reason) override { server_.server_stats_.requests_invalid++; - GOPHER_LOG_WARN("Validation: Request rejected: {} Reason: {}", - method, reason); + GOPHER_LOG_WARN("Validation: Request rejected: {} Reason: {}", method, + reason); } void onRateLimitExceeded(const std::string& method) 
override { diff --git a/src/transport/http_sse_transport_socket.cc b/src/transport/http_sse_transport_socket.cc index 915e560f..639b9b44 100644 --- a/src/transport/http_sse_transport_socket.cc +++ b/src/transport/http_sse_transport_socket.cc @@ -15,8 +15,8 @@ #include #include "mcp/filter/http_codec_filter.h" -#include "mcp/logging/log_macros.h" #include "mcp/filter/sse_codec_filter.h" +#include "mcp/logging/log_macros.h" #include "mcp/network/address_impl.h" #include "mcp/network/connection_impl.h" #include "mcp/transport/ssl_context.h" From 250b20ee8e4c8be05e60e7e38b2b4a24960effc1 Mon Sep 17 00:00:00 2001 From: RahulHere Date: Thu, 12 Feb 2026 13:58:56 +0800 Subject: [PATCH 38/39] Resolve linker error in test_filter_chain_assembler (#194) Add missing gopher-mcp-logging library to test_filter_chain_assembler target. The filter_chain_assembler.cc uses mcp::logging::LoggerRegistry methods which require linking against the logging library. --- tests/filter/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/filter/CMakeLists.txt b/tests/filter/CMakeLists.txt index 8a6b6bec..24777711 100644 --- a/tests/filter/CMakeLists.txt +++ b/tests/filter/CMakeLists.txt @@ -318,6 +318,7 @@ add_executable(test_filter_chain_assembler target_link_libraries(test_filter_chain_assembler PRIVATE gopher-mcp + gopher-mcp-logging gtest gtest_main gmock From 610a3907650aa2774828392d51f28fc5272327bb Mon Sep 17 00:00:00 2001 From: RahulHere Date: Thu, 12 Feb 2026 17:14:01 +0800 Subject: [PATCH 39/39] Resolve three failing unit tests (#194) Fixes FileConfigSourceEnhancedTest, test_logger_registry, and FilterChainEventHubTest that were failing due to various issues. 
Changes: - Fix LoggerRegistry constructor to use LogLevel::Info (was Warning) - Fix thread safety data race in FilterChainEventHubTest by adding mutex protection for shared vector access - Convert printf-style format strings (%s, %d, %zu) to fmt-style ({}) in file_config_source.cc to fix "invalid format string" exceptions --- src/config/file_config_source.cc | 116 ++++++++++---------- src/logging/logger_registry.cc | 2 +- tests/filter/test_filter_chain_event_hub.cc | 11 +- 3 files changed, 67 insertions(+), 62 deletions(-) diff --git a/src/config/file_config_source.cc b/src/config/file_config_source.cc index 4748394c..941e290d 100644 --- a/src/config/file_config_source.cc +++ b/src/config/file_config_source.cc @@ -268,8 +268,8 @@ class FileConfigSource : public ConfigSource { int priority, const Options& opts = Options{}) : name_(name), priority_(priority), options_(opts) { - GOPHER_LOG(Info, "FileConfigSource created: name=%s priority=%d", - name_.c_str(), static_cast(priority_)); + GOPHER_LOG(Info, "FileConfigSource created: name={} priority={}", + name_, priority_); } std::string getName() const override { return name_; } @@ -285,22 +285,22 @@ class FileConfigSource : public ConfigSource { mcp::json::JsonValue loadConfiguration() override { // Keep logs under config.file so tests that attach a sink to // "config.file" see discovery start/end messages. - GOPHER_LOG(Info, "Starting configuration discovery for source: %s%s", - name_.c_str(), + GOPHER_LOG(Info, "Starting configuration discovery for source: {}{}", + name_, (options_.trace_id.empty() ? 
"" - : (" trace_id=" + options_.trace_id).c_str())); + : (" trace_id=" + options_.trace_id))); // Determine the config file path using deterministic search order std::string config_path = findConfigFile(); if (config_path.empty()) { - GOPHER_LOG(Warning, "No configuration file found for source: %s", - name_.c_str()); + GOPHER_LOG(Warning, "No configuration file found for source: {}", + name_); return mcp::json::JsonValue::object(); } - GOPHER_LOG(Info, "Base configuration file chosen: %s", config_path.c_str()); + GOPHER_LOG(Info, "Base configuration file chosen: {}", config_path); // Load and parse the main configuration file ParseContext context; @@ -323,8 +323,8 @@ class FileConfigSource : public ConfigSource { // Emit a brief summary and also dump top-level keys/types to aid debugging GOPHER_LOG( Info, - "Configuration discovery completed: files_parsed=%zu " - "includes_processed=%zu env_vars_expanded=%zu overlays_applied=%zu", + "Configuration discovery completed: files_parsed={} " + "includes_processed={} env_vars_expanded={} overlays_applied={}", context.files_parsed_count, context.includes_processed_count, context.env_vars_expanded_count, context.overlays_applied.size()); @@ -346,11 +346,11 @@ class FileConfigSource : public ConfigSource { t = "array"; else if (v.isObject()) t = "object"; - GOPHER_LOG(Debug, " key='%s' type=%s", key.c_str(), t); + GOPHER_LOG(Debug, " key='{}' type={}", key, t); } // Emit compact JSON for quick inspection - GOPHER_LOG(Info, "Top-level configuration JSON: %s", - config.toString(false).c_str()); + GOPHER_LOG(Info, "Top-level configuration JSON: {}", + config.toString(false)); // Also print to stderr for test visibility when sinks are not attached fprintf(stderr, "[config.file] Top-level JSON: %s\n", config.toString(false).c_str()); @@ -364,7 +364,7 @@ class FileConfigSource : public ConfigSource { if (!context.overlays_applied.empty()) { GOPHER_LOG(Debug, "Overlays applied in order:"); for (const auto& overlay : 
context.overlays_applied) { - GOPHER_LOG(Debug, " - %s", overlay.c_str()); + GOPHER_LOG(Debug, " - {}", overlay); } } @@ -443,7 +443,7 @@ class FileConfigSource : public ConfigSource { search_paths.push_back("/etc/gopher-mcp/config.yaml"); search_paths.push_back("/etc/gopher-mcp/config.json"); - GOPHER_LOG(Debug, "Configuration search order: %zu paths to check", + GOPHER_LOG(Debug, "Configuration search order: {} paths to check", search_paths.size()); for (size_t i = 0; i < search_paths.size(); ++i) { @@ -451,19 +451,19 @@ class FileConfigSource : public ConfigSource { if (exists(path)) { // Determine which source won if (i == 0 && !explicit_config_path_.empty()) { - GOPHER_LOG(Info, "Configuration source won: CLI --config=%s", - path.c_str()); + GOPHER_LOG(Info, "Configuration source won: CLI --config={}", + path); } else if ((i == 0 || i == 1) && env_config) { GOPHER_LOG( Info, "Configuration source won: MCP_CONFIG environment variable"); } else if (path.find("./config") != std::string::npos || path.find("./config.") != std::string::npos) { - GOPHER_LOG(Info, "Configuration source won: local directory at %s", - path.c_str()); + GOPHER_LOG(Info, "Configuration source won: local directory at {}", + path); } else { - GOPHER_LOG(Info, "Configuration source won: system directory at %s", - path.c_str()); + GOPHER_LOG(Info, "Configuration source won: system directory at {}", + path); } return path; } @@ -484,8 +484,8 @@ class FileConfigSource : public ConfigSource { size_t file_size = st.st_size; if (file_size > options_.max_file_size) { GOPHER_LOG(Error, - "File exceeds maximum size limit: %s size=%zu limit=%zu", - filepath.c_str(), file_size, options_.max_file_size); + "File exceeds maximum size limit: {} size={} limit={}", + filepath, file_size, options_.max_file_size); throw std::runtime_error("File too large: " + filepath + " (" + std::to_string(file_size) + " bytes)"); } @@ -500,13 +500,13 @@ class FileConfigSource : public ConfigSource { } GOPHER_LOG(Debug, - 
"Loading configuration file: %s size=%zu last_modified=%ld", - filepath.c_str(), file_size, static_cast(last_modified)); + "Loading configuration file: {} size={} last_modified={}", + filepath, file_size, last_modified); std::ifstream file(filepath); if (!file.is_open()) { - GOPHER_LOG(Error, "Failed to open configuration file: %s", - filepath.c_str()); + GOPHER_LOG(Error, "Failed to open configuration file: {}", + filepath); throw std::runtime_error("Cannot open config file: " + filepath); } @@ -543,8 +543,8 @@ class FileConfigSource : public ConfigSource { } } } catch (const std::exception& e) { - GOPHER_LOG(Error, "Failed to parse configuration file: %s reason=%s", - filepath.c_str(), e.what()); + GOPHER_LOG(Error, "Failed to parse configuration file: {} reason={}", + filepath, e.what()); throw; } @@ -696,8 +696,8 @@ class FileConfigSource : public ConfigSource { if (!env_value && !has_default) { GOPHER_LOG(Error, - "Undefined environment variable without default: ${%s}", - var_name.c_str()); + "Undefined environment variable without default: ${{{}}}", + var_name); throw std::runtime_error("Undefined environment variable: " + var_name); } @@ -725,7 +725,7 @@ class FileConfigSource : public ConfigSource { context.env_vars_expanded_count += vars_expanded; if (vars_expanded > 0) { - GOPHER_LOG(Debug, "Expanded %zu environment variables", vars_expanded); + GOPHER_LOG(Debug, "Expanded {} environment variables", vars_expanded); } return result; @@ -734,7 +734,7 @@ class FileConfigSource : public ConfigSource { mcp::json::JsonValue processIncludes(const mcp::json::JsonValue& config, ParseContext& context) { if (++context.include_depth > context.max_include_depth) { - GOPHER_LOG(Error, "Maximum include depth exceeded: %d at depth %d", + GOPHER_LOG(Error, "Maximum include depth exceeded: {} at depth {}", context.max_include_depth, context.include_depth); throw std::runtime_error("Maximum include depth (" + std::to_string(context.max_include_depth) + @@ -756,22 +756,22 @@ 
class FileConfigSource : public ConfigSource { const auto& include = includes[i]; if (include.isString()) { std::string include_path = include.getString(); - GOPHER_LOG(Debug, "Processing include: %s from base_dir=%s", - include_path.c_str(), context.base_dir.string().c_str()); + GOPHER_LOG(Debug, "Processing include: {} from base_dir={}", + include_path, context.base_dir.string()); path resolved_path = resolveIncludePath(include_path, context); if (context.processed_files.count(resolved_path.string()) > 0) { - GOPHER_LOG(Warning, "Circular include detected, skipping: %s", - resolved_path.string().c_str()); + GOPHER_LOG(Warning, "Circular include detected, skipping: {}", + resolved_path.string()); continue; } context.processed_files.insert(resolved_path.string()); context.includes_processed_count++; - GOPHER_LOG(Info, "Including configuration from: %s", - resolved_path.string().c_str()); + GOPHER_LOG(Info, "Including configuration from: {}", + resolved_path.string()); ParseContext include_context = context; include_context.base_dir = resolved_path.parent_path(); @@ -809,8 +809,8 @@ class FileConfigSource : public ConfigSource { std::string dir_pattern = config["include_dir"].getString(); path dir_path = resolveIncludePath(dir_pattern, context); - GOPHER_LOG(Info, "Scanning directory for configurations: %s", - dir_path.string().c_str()); + GOPHER_LOG(Info, "Scanning directory for configurations: {}", + dir_path.string()); if (exists(dir_path.string()) && is_directory(dir_path.string())) { std::vector config_files; @@ -830,7 +830,7 @@ class FileConfigSource : public ConfigSource { // Sort for deterministic order std::sort(config_files.begin(), config_files.end()); - GOPHER_LOG(Debug, "Found %zu configuration files in directory", + GOPHER_LOG(Debug, "Found {} configuration files in directory", config_files.size()); for (const auto& file : config_files) { @@ -841,8 +841,8 @@ class FileConfigSource : public ConfigSource { 
context.processed_files.insert(canonical(file.string())); context.includes_processed_count++; - GOPHER_LOG(Info, "Including configuration from directory: %s", - file.string().c_str()); + GOPHER_LOG(Info, "Including configuration from directory: {}", + file.string()); ParseContext include_context = context; include_context.base_dir = file.parent_path(); @@ -860,8 +860,8 @@ class FileConfigSource : public ConfigSource { } } else { GOPHER_LOG(Warning, - "Include directory does not exist or is not a directory: %s", - dir_path.string().c_str()); + "Include directory does not exist or is not a directory: {}", + dir_path.string()); } result.erase("include_dir"); @@ -886,8 +886,8 @@ class FileConfigSource : public ConfigSource { } } if (!allowed) { - GOPHER_LOG(Error, "Absolute include path not under allowed roots: %s", - filepath.c_str()); + GOPHER_LOG(Error, "Absolute include path not under allowed roots: {}", + filepath); throw std::runtime_error("Include path not allowed: " + filepath); } } @@ -932,8 +932,8 @@ class FileConfigSource : public ConfigSource { const mcp::json::JsonValue& base_config, const path& overlay_dir, ParseContext& context) { - GOPHER_LOG(Info, "Scanning config.d directory: %s", - overlay_dir.string().c_str()); + GOPHER_LOG(Info, "Scanning config.d directory: {}", + overlay_dir.string()); mcp::json::JsonValue result = base_config; std::vector overlay_files; @@ -954,28 +954,28 @@ class FileConfigSource : public ConfigSource { std::sort(overlay_files.begin(), overlay_files.end()); GOPHER_LOG(Info, - "Directory scan results: found %zu configuration overlay files", + "Directory scan results: found {} configuration overlay files", overlay_files.size()); // Log overlay list in order if (!overlay_files.empty()) { GOPHER_LOG(Info, "Overlay files in lexicographic order:"); for (const auto& file : overlay_files) { - GOPHER_LOG(Info, " - %s", file.filename().string().c_str()); + GOPHER_LOG(Info, " - {}", file.filename().string()); } } for (const auto& 
overlay_file : overlay_files) { if (context.processed_files.count(canonical(overlay_file.string())) > 0) { - GOPHER_LOG(Debug, "Skipping already processed overlay: %s", - overlay_file.string().c_str()); + GOPHER_LOG(Debug, "Skipping already processed overlay: {}", + overlay_file.string()); continue; } context.processed_files.insert(canonical(overlay_file.string())); - GOPHER_LOG(Debug, "Applying overlay: %s", - overlay_file.filename().string().c_str()); + GOPHER_LOG(Debug, "Applying overlay: {}", + overlay_file.filename().string()); ParseContext overlay_context = context; overlay_context.base_dir = overlay_file.parent_path(); @@ -995,8 +995,8 @@ class FileConfigSource : public ConfigSource { context.latest_mtime = overlay_context.latest_mtime; } } catch (const std::exception& e) { - GOPHER_LOG(Error, "Failed to process overlay %s: %s", - overlay_file.string().c_str(), e.what()); + GOPHER_LOG(Error, "Failed to process overlay {}: {}", + overlay_file.string(), e.what()); // Continue with other overlays } } diff --git a/src/logging/logger_registry.cc b/src/logging/logger_registry.cc index d244e9c1..4896d3b5 100644 --- a/src/logging/logger_registry.cc +++ b/src/logging/logger_registry.cc @@ -11,7 +11,7 @@ LoggerRegistry& LoggerRegistry::instance() { return instance; } -LoggerRegistry::LoggerRegistry() : global_level_(LogLevel::Warning) { +LoggerRegistry::LoggerRegistry() : global_level_(LogLevel::Info) { initializeDefaults(); } diff --git a/tests/filter/test_filter_chain_event_hub.cc b/tests/filter/test_filter_chain_event_hub.cc index 79699750..03bc737b 100644 --- a/tests/filter/test_filter_chain_event_hub.cc +++ b/tests/filter/test_filter_chain_event_hub.cc @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -295,6 +296,7 @@ TEST_F(FilterChainEventHubTest, ThreadSafety) { callbacks_list; std::vector handles; std::atomic total_events_received{0}; + std::mutex list_mutex; // Register observers from multiple threads std::vector registration_threads; @@ -309,9 
+311,12 @@ TEST_F(FilterChainEventHubTest, ThreadSafety) { auto handle = hub_->registerObserver(callbacks); - // Store safely (in test main thread context after join) - callbacks_list.push_back(callbacks); - handles.push_back(std::move(handle)); + // Store safely with mutex protection + { + std::lock_guard lock(list_mutex); + callbacks_list.push_back(callbacks); + handles.push_back(std::move(handle)); + } }); }