diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..5ea80ef --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,27 @@ +name: CI +on: [push, pull_request] + +permissions: + contents: read + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: "go.mod" + + - name: Download dependencies + run: go mod download + + - name: Run unit tests + run: go test -race ./... + + - name: Run e2e test suite + run: go test ./cmd/snmcp-e2e/... -v diff --git a/.gitignore b/.gitignore index 9a5e603..5688d17 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ bin/ dist/ -.DS_Store +.DS_Store tmp/ *.log @@ -12,3 +12,4 @@ vendor agents/ .serena/ .envrc +.agents/ diff --git a/CLAUDE.md b/CLAUDE.md index 6f37d9b..3784c8e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -19,16 +19,16 @@ go test -v -run TestName ./pkg/... # Run a single test ## Architecture Overview -The StreamNative MCP Server implements the Model Context Protocol using the `mark3labs/mcp-go` library to enable AI agents to interact with Apache Kafka, Apache Pulsar, and StreamNative Cloud resources. +The StreamNative MCP Server implements the Model Context Protocol using the `modelcontextprotocol/go-sdk` library to enable AI agents to interact with Apache Kafka, Apache Pulsar, and StreamNative Cloud resources. ### Request Flow ``` -Client Request → MCP Server (pkg/mcp/server.go) +Client Request → Transport (stdio/SSE in pkg/cmd/mcp/) ↓ - SSE/stdio transport layer (pkg/cmd/mcp/) + MCP Server (go-sdk, pkg/mcp/server_new.go) ↓ - Tool Handler (from builders) + Tool Handler (typed input/output) ↓ Context Functions (pkg/mcp/ctx.go) ↓ @@ -37,13 +37,16 @@ Client Request → MCP Server (pkg/mcp/server.go) ### Core Components -1. **Server & Sessions** (`pkg/mcp/server.go`) - - `Server` struct holds `MCPServer`, `KafkaSession`, `PulsarSession`, and `SNCloudSession` - - Sessions provide lazy-initialized clients for each service +1. **Server & Sessions** (`pkg/mcp/server_new.go`) + - `Server` wraps go-sdk `*mcp.Server` as `MCPServer` and holds `KafkaSession`, `PulsarSession`, and `SNCloudSession` + - `NewServer` configures server capabilities plus logging/recovery middleware - Context functions (`pkg/mcp/ctx.go`) inject/retrieve sessions from request context 2. **Tool Builders Framework** (`pkg/mcp/builders/`) - - `ToolBuilder` interface: `GetName()`, `GetRequiredFeatures()`, `BuildTools()`, `Validate()` + - `ToolBuilder` interface: `GetName()`, `GetRequiredFeatures()`, `BuildTools(ctx, config)`, `Validate(config)` + - `ToolDefinition` provides `Definition()` and `Register(*mcp.Server)` for typed tool installs + - `ServerTool[In, Out]` pairs a `*mcp.Tool` with `mcp.ToolHandlerFor[In, Out]` + - Tool schemas are generated via `jsonschema-go` helpers in `pkg/mcp/schema.go` - `BaseToolBuilder` provides common feature validation logic - `ToolRegistry` manages all tool builders with concurrent-safe registration - `ToolBuildConfig` specifies build parameters (ReadOnly, Features, Options) @@ -57,6 +60,7 @@ Client Request → MCP Server (pkg/mcp/server.go) 4. **Tool Registration** (`pkg/mcp/*_tools.go`) - Each `*_tools.go` file creates a builder, builds tools, and adds them to the server - Tools are conditionally registered based on `--features` flag + - Registration uses `tool.Register(server)` to install typed handlers on the go-sdk server - Feature constants defined in `pkg/mcp/features.go` 5. 
**PFTools - Functions as Tools** (`pkg/mcp/pftools/`) @@ -69,8 +73,9 @@ Client Request → MCP Server (pkg/mcp/server.go) - `pulsar_session_manager.go` - LRU session cache with TTL cleanup for multi-session mode 7. **Transport Layer** (`pkg/cmd/mcp/`) - - `sse.go` - SSE transport with health endpoints (`/healthz`, `/readyz`) and auth middleware - - `server.go` - Stdio transport and common server initialization + - `stdio.go` - Stdio transport via `mcp.StdioTransport` (optional `mcp.LoggingTransport`) + - `sse.go` - SSE transport via `mcp.SSEServerTransport` with message endpoint, health endpoints, and auth middleware + - `server.go` - Common server initialization and tool registration ### Key Design Patterns @@ -101,11 +106,35 @@ Client Request → MCP Server (pkg/mcp/server.go) } } - func (b *MyToolBuilder) BuildTools(ctx context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + func (b *MyToolBuilder) BuildTools(ctx context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { if !b.HasAnyRequiredFeature(config.Features) { return nil, nil } - // Build and return tools + if err := b.Validate(config); err != nil { + return nil, err + } + + inputSchema, err := mcp.InputSchema[MyInput]() + if err != nil { + return nil, err + } + + tool := &sdk.Tool{ + Name: "my_tool", + Description: "Tool description", + InputSchema: inputSchema, + } + + handler := func(ctx context.Context, _ *sdk.CallToolRequest, input MyInput) (*sdk.CallToolResult, MyOutput, error) { + // Handler logic here + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: "ok"}}, + }, MyOutput{}, nil + } + + return []builders.ToolDefinition{ + builders.ServerTool[MyInput, MyOutput]{Tool: tool, Handler: handler}, + }, nil } ``` @@ -113,12 +142,12 @@ Client Request → MCP Server (pkg/mcp/server.go) 3. **Create Registration File** `pkg/mcp/my_tools.go`: ```go - func AddMyTools(s *server.MCPServer, readOnly bool, features []string) { + func AddMyTools(s *sdk.Server, readOnly bool, features []string) { builder := kafkabuilders.NewMyToolBuilder() config := builders.ToolBuildConfig{ReadOnly: readOnly, Features: features} tools, _ := builder.BuildTools(context.Background(), config) for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } ``` @@ -127,7 +156,10 @@ Client Request → MCP Server (pkg/mcp/server.go) ```go session := mcp.GetKafkaSession(ctx) // or GetPulsarSession if session == nil { - return mcp.NewToolResultError("session not found"), nil + return &sdk.CallToolResult{ + IsError: true, + Content: []sdk.Content{&sdk.TextContent{Text: "session not found"}}, + }, nil, nil } admin, err := session.GetAdminClient() ``` @@ -242,6 +274,6 @@ The project includes generated SDK packages: ## Error Handling - Wrap errors: `fmt.Errorf("failed to X: %w", err)` -- Return tool errors: `mcp.NewToolResultError("message")` +- Return tool errors by setting `IsError: true` on `sdk.CallToolResult` - Check session nil before operations - For PFTools, use circuit breaker to handle repeated failures diff --git a/cmd/snmcp-e2e/kafka_test.go b/cmd/snmcp-e2e/kafka_test.go new file mode 100644 index 0000000..322334e --- /dev/null +++ b/cmd/snmcp-e2e/kafka_test.go @@ -0,0 +1,350 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/require" +) + +func TestKafkaE2E(t *testing.T) { + if testing.Short() { + t.Skip("skipping e2e test in short mode") + } + if !getenvBool("E2E_USE_TESTCONTAINERS", false) { + t.Skip("set E2E_USE_TESTCONTAINERS=true to run e2e tests") + } + + cfg := loadTestcontainersConfig() + overallTimeout := cfg.StartupTimeout + 3*time.Minute + ctx, cancel := context.WithTimeout(context.Background(), overallTimeout) + defer cancel() + + kafkaContainer, kafkaBrokers, err := startKafkaContainer(ctx, cfg) + require.NoError(t, err) + env := &testcontainersEnv{ + Kafka: kafkaContainer, + KafkaBrokers: kafkaBrokers, + } + t.Cleanup(func() { + cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cleanupCancel() + _ = env.Terminate(cleanupCtx) + }) + + snmcpBaseURL, stopServer, err := startSNMCPServerWithKafka(t, kafkaBrokers) + require.NoError(t, err) + t.Cleanup(stopServer) + + sseURL := snmcpBaseURL + "/sse" + client, err := newAuthedClient(ctx, sseURL, "", "snmcp-e2e-kafka") + require.NoError(t, err) + t.Cleanup(func() { + _ = client.Close() + }) + + suffix := time.Now().UnixNano() + topic := fmt.Sprintf("e2e-topic-%d", suffix) + group := fmt.Sprintf("e2e-group-%d", suffix) + message := fmt.Sprintf("message-%d", suffix) + + result, err := callTool(ctx, client, "kafka_admin_topics", map[string]any{ + "resource": "topic", + "operation": "create", + "name": topic, + "partitions": float64(1), + "replicationFactor": float64(1), + }) + require.NoError(t, requireToolOK(result, err, "kafka_admin_topics create")) + + result, err = callTool(ctx, client, "kafka_admin_topics", map[string]any{ + "resource": "topics", + "operation": "list", + }) + require.NoError(t, requireToolOK(result, err, "kafka_admin_topics list")) + topics, err := parseKafkaTopicNames(firstText(result)) + require.NoError(t, err) + require.Contains(t, topics, topic) + + result, err = callTool(ctx, client, "kafka_client_produce", map[string]any{ + "topic": topic, + "value": message, + }) + require.NoError(t, requireToolOK(result, err, "kafka_client_produce")) + + result, err = callTool(ctx, client, "kafka_client_consume", map[string]any{ + "topic": topic, + "group": group, + "offset": "atstart", + "max-messages": float64(1), + "timeout": float64(20), + }) + require.NoError(t, requireToolOK(result, err, "kafka_client_consume")) + messages, err := decodeKafkaConsumeValues(firstText(result)) + require.NoError(t, err) + require.Contains(t, messages, message) + + require.NoError(t, waitForKafkaGroup(ctx, client, group)) + + result, err = callTool(ctx, client, "kafka_admin_groups", map[string]any{ + "resource": "group", + "operation": "describe", + "group": group, + }) + require.NoError(t, requireToolOK(result, err, "kafka_admin_groups describe")) + + result, err = callTool(ctx, client, "kafka_admin_groups", map[string]any{ + "resource": "group", 
+ "operation": "offsets", + "group": group, + }) + require.NoError(t, requireToolOK(result, err, "kafka_admin_groups offsets")) + + result, err = callTool(ctx, client, "kafka_admin_partitions", map[string]any{ + "resource": "partition", + "operation": "update", + "topic": topic, + "new-total": float64(2), + }) + require.NoError(t, requireToolOK(result, err, "kafka_admin_partitions update")) + require.NoError(t, waitForKafkaPartitions(ctx, client, topic, 2)) +} + +func startSNMCPServerWithKafka(t *testing.T, kafkaBrokers string) (string, func(), error) { + t.Helper() + + if strings.TrimSpace(kafkaBrokers) == "" { + return "", nil, errors.New("kafka brokers are required") + } + + repoRoot := findRepoRoot(t) + binaryPath := buildSNMCPBinary(t, repoRoot) + + addr := reserveLocalAddr(t) + baseURL := fmt.Sprintf("http://%s/mcp", addr) + + //nolint:gosec // test binary path and arguments are controlled + cmd := exec.Command(binaryPath, + "sse", + "--http-addr", addr, + "--http-path", "/mcp", + "--use-external-kafka", + "--kafka-bootstrap-servers", kafkaBrokers, + ) + cmd.Dir = repoRoot + cmd.Env = append(os.Environ(), "SNMCP_CONFIG_DIR="+t.TempDir()) + + var output bytes.Buffer + cmd.Stdout = &output + cmd.Stderr = &output + + if err := cmd.Start(); err != nil { + return "", nil, fmt.Errorf("start snmcp: %w", err) + } + + errCh := make(chan error, 1) + go func() { + errCh <- cmd.Wait() + }() + + readyCtx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + if err := waitForHTTPStatus(readyCtx, baseURL+"/healthz", 200, errCh); err != nil { + _ = cmd.Process.Signal(os.Interrupt) + <-errCh + return "", nil, fmt.Errorf("snmcp not ready: %w\n%s", err, strings.TrimSpace(output.String())) + } + + cleanup := func() { + if cmd.Process == nil { + return + } + _ = cmd.Process.Signal(os.Interrupt) + select { + case <-errCh: + case <-time.After(10 * time.Second): + _ = cmd.Process.Kill() + <-errCh + } + } + + return baseURL, cleanup, nil +} + +func parseKafkaTopicNames(raw string) ([]string, error) { + if raw == "" { + return nil, errors.New("empty topics response") + } + var payload map[string]any + if err := json.Unmarshal([]byte(raw), &payload); err != nil { + return nil, fmt.Errorf("parse topics response: %w", err) + } + names := make([]string, 0, len(payload)) + for name := range payload { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +func parseKafkaGroupNames(raw string) ([]string, error) { + if raw == "" { + return nil, errors.New("empty groups response") + } + var payload map[string]any + if err := json.Unmarshal([]byte(raw), &payload); err != nil { + return nil, fmt.Errorf("parse groups response: %w", err) + } + names := make([]string, 0, len(payload)) + for name := range payload { + names = append(names, name) + } + sort.Strings(names) + return names, nil +} + +func decodeKafkaConsumeValues(raw string) ([]string, error) { + if raw == "" { + return nil, errors.New("empty consume response") + } + var payload []any + if err := json.Unmarshal([]byte(raw), &payload); err != nil { + return nil, fmt.Errorf("parse consume response: %w", err) + } + values := make([]string, 0, len(payload)) + for _, entry := range payload { + switch value := entry.(type) { + case string: + decoded, err := base64.StdEncoding.DecodeString(value) + if err != nil { + values = append(values, value) + continue + } + values = append(values, string(decoded)) + default: + encoded, err := json.Marshal(value) + if err != nil { + values = append(values, 
fmt.Sprintf("%v", value)) + continue + } + values = append(values, string(encoded)) + } + } + return values, nil +} + +func waitForKafkaGroup(ctx context.Context, client *mcp.ClientSession, group string) error { + waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + for { + result, err := callTool(waitCtx, client, "kafka_admin_groups", map[string]any{ + "resource": "groups", + "operation": "list", + }) + if err == nil && result != nil && !result.IsError { + names, parseErr := parseKafkaGroupNames(firstText(result)) + if parseErr == nil && slices.Contains(names, group) { + return nil + } + } + + select { + case <-waitCtx.Done(): + return fmt.Errorf("timeout waiting for kafka group %s", group) + case <-ticker.C: + } + } +} + +func waitForKafkaPartitions(ctx context.Context, client *mcp.ClientSession, topic string, expected int) error { + waitCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + + for { + result, err := callTool(waitCtx, client, "kafka_admin_topics", map[string]any{ + "resource": "topic", + "operation": "metadata", + "name": topic, + }) + if err == nil && result != nil && !result.IsError { + count, parseErr := extractKafkaPartitionCount(firstText(result), topic) + if parseErr == nil && count >= expected { + return nil + } + } + + select { + case <-waitCtx.Done(): + return fmt.Errorf("timeout waiting for kafka partitions on %s", topic) + case <-ticker.C: + } + } +} + +func extractKafkaPartitionCount(raw, topic string) (int, error) { + if raw == "" { + return 0, errors.New("empty metadata response") + } + var payload map[string]any + if err := json.Unmarshal([]byte(raw), &payload); err != nil { + return 0, fmt.Errorf("parse metadata response: %w", err) + } + topicsValue, ok := payload["Topics"] + if !ok { + topicsValue = payload["topics"] + } + topics, ok := topicsValue.(map[string]any) + if !ok { + return 0, errors.New("unexpected topics payload") + } + topicValue, ok := topics[topic] + if !ok { + return 0, fmt.Errorf("topic %s not found in metadata", topic) + } + topicMap, ok := topicValue.(map[string]any) + if !ok { + return 0, errors.New("unexpected topic metadata payload") + } + partitionsValue, ok := topicMap["Partitions"] + if !ok { + partitionsValue = topicMap["partitions"] + } + partitions, ok := partitionsValue.(map[string]any) + if !ok { + return 0, errors.New("unexpected partitions payload") + } + return len(partitions), nil +} diff --git a/cmd/snmcp-e2e/main.go b/cmd/snmcp-e2e/main.go index 43d2f08..b7a4136 100644 --- a/cmd/snmcp-e2e/main.go +++ b/cmd/snmcp-e2e/main.go @@ -28,8 +28,7 @@ import ( "sync" "time" - "github.com/mark3labs/mcp-go/client" - "github.com/mark3labs/mcp-go/mcp" + "github.com/modelcontextprotocol/go-sdk/mcp" ) type config struct { @@ -149,6 +148,17 @@ func run(ctx context.Context, cfg config) error { return err } + result, err = callTool(ctx, testClient, "pulsar_admin_tenant", map[string]any{ + "resource": "tenant", + "operation": "create", + "tenant": tenant + "-unauthorized", + "adminRoles": []string{"test-user"}, + "allowedClusters": []string{cluster}, + }) + if err := requireToolError(result, err, "pulsar_admin_tenant unauthorized create"); err != nil { + return err + } + result, err = callTool(ctx, adminClient, "pulsar_admin_namespace", map[string]any{ "operation": "create", "namespace": namespace, @@ -272,34 +282,19 @@ func 
expectUnauthorized(ctx context.Context, sseURL, token string, verbose bool) return fmt.Errorf("expected unauthorized status for %s, got %d", sseURL, status) } - headers := map[string]string{ - "Authorization": "Bearer " + token, - } - c, err := client.NewSSEMCPClient(sseURL, client.WithHeaders(headers)) + session, err := newAuthedClient(ctx, sseURL, token, "snmcp-e2e-unauthorized") if err != nil { - return err - } - defer func() { - _ = c.Close() - }() - - if err := c.Start(ctx); err != nil { - logf(verbose, "sse start error: %v", err) + logf(verbose, "sse connect error: %v", err) if isAuthError(err) { return nil } return fmt.Errorf("expected auth error for %s, got %v", sseURL, err) } + defer func() { + _ = session.Close() + }() - if err := initializeClient(ctx, c, "snmcp-e2e-unauthorized"); err != nil { - logf(verbose, "initialize error: %v", err) - if isAuthError(err) { - return nil - } - return fmt.Errorf("expected auth error during initialize for %s, got %v", sseURL, err) - } - - result, err := callTool(ctx, c, "pulsar_admin_cluster", map[string]any{ + result, err := callTool(ctx, session, "pulsar_admin_cluster", map[string]any{ "resource": "cluster", "operation": "list", }) @@ -321,39 +316,49 @@ func expectUnauthorized(ctx context.Context, sseURL, token string, verbose bool) return nil } -func newAuthedClient(ctx context.Context, sseURL, token, clientName string) (*client.Client, error) { - headers := map[string]string{ - "Authorization": "Bearer " + token, +func newAuthedClient(ctx context.Context, sseURL, token, clientName string) (*mcp.ClientSession, error) { + transport := &mcp.SSEClientTransport{ + Endpoint: sseURL, + HTTPClient: newAuthHTTPClient(token), } - c, err := client.NewSSEMCPClient(sseURL, client.WithHeaders(headers)) - if err != nil { - return nil, err - } - if err := c.Start(ctx); err != nil { - return nil, err + c := mcp.NewClient(&mcp.Implementation{ + Name: clientName, + Version: "1.0.0", + }, nil) + return c.Connect(ctx, transport, nil) +} + +type authRoundTripper struct { + base http.RoundTripper + token string +} + +func (rt *authRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + base := rt.base + if base == nil { + base = http.DefaultTransport } - if err := initializeClient(ctx, c, clientName); err != nil { - return nil, err + cloned := req.Clone(req.Context()) + if rt.token != "" { + cloned.Header.Set("Authorization", "Bearer "+rt.token) } - return c, nil + return base.RoundTrip(cloned) } -func initializeClient(ctx context.Context, c *client.Client, name string) error { - req := mcp.InitializeRequest{} - req.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION - req.Params.ClientInfo = mcp.Implementation{ - Name: name, - Version: "1.0.0", +func newAuthHTTPClient(token string) *http.Client { + return &http.Client{ + Transport: &authRoundTripper{ + base: http.DefaultTransport, + token: token, + }, } - _, err := c.Initialize(ctx, req) - return err } -func callTool(ctx context.Context, c *client.Client, name string, args map[string]any) (*mcp.CallToolResult, error) { - request := mcp.CallToolRequest{} - request.Params.Name = name - request.Params.Arguments = args - return c.CallTool(ctx, request) +func callTool(ctx context.Context, session *mcp.ClientSession, name string, args map[string]any) (*mcp.CallToolResult, error) { + return session.CallTool(ctx, &mcp.CallToolParams{ + Name: name, + Arguments: args, + }) } func requireToolOK(result *mcp.CallToolResult, err error, label string) error { @@ -380,7 +385,7 @@ func firstText(result 
*mcp.CallToolResult) string { if result == nil || len(result.Content) == 0 { return "" } - if text, ok := result.Content[0].(mcp.TextContent); ok { + if text, ok := result.Content[0].(*mcp.TextContent); ok { return text.Text } return "" @@ -443,7 +448,7 @@ func isAuthText(text string) bool { return false } -func listClusters(ctx context.Context, c *client.Client) ([]string, error) { +func listClusters(ctx context.Context, c *mcp.ClientSession) ([]string, error) { result, err := callTool(ctx, c, "pulsar_admin_cluster", map[string]any{ "resource": "cluster", "operation": "list", @@ -462,7 +467,7 @@ func listClusters(ctx context.Context, c *client.Client) ([]string, error) { return clusters, nil } -func runConcurrent(ctx context.Context, adminClient, testClient *client.Client, topic, subscription string) error { +func runConcurrent(ctx context.Context, adminClient, testClient *mcp.ClientSession, topic, subscription string) error { var wg sync.WaitGroup errCh := make(chan error, 2) diff --git a/cmd/snmcp-e2e/pulsar_test.go b/cmd/snmcp-e2e/pulsar_test.go new file mode 100644 index 0000000..456be16 --- /dev/null +++ b/cmd/snmcp-e2e/pulsar_test.go @@ -0,0 +1,391 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "net" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + defaultAdminRole = "admin" + defaultTestUser = "test-user" + authSecretFilePath = "/pulsarctl/test/auth/token/secret.key" //nolint:gosec // static test container path +) + +type authTokens struct { + AdminToken string + TestUserToken string +} + +func TestPulsarAdminE2E(t *testing.T) { + if testing.Short() { + t.Skip("skipping e2e test in short mode") + } + if !getenvBool("E2E_USE_TESTCONTAINERS", false) { + t.Skip("set E2E_USE_TESTCONTAINERS=true to run e2e tests") + } + + tokens := loadAuthTokens(t) + secretKeyPath := loadSecretKeyPath(t) + cfg := loadTestcontainersConfig() + + overallTimeout := cfg.StartupTimeout + 3*time.Minute + ctx, cancel := context.WithTimeout(context.Background(), overallTimeout) + defer cancel() + + env, err := startPulsarContainerWithAuth(ctx, cfg, tokens.AdminToken, secretKeyPath) + require.NoError(t, err) + t.Cleanup(func() { + cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cleanupCancel() + _ = env.Terminate(cleanupCtx) + }) + + snmcpBaseURL, stopServer, err := startSNMCPServer(t, env.PulsarWebServiceURL, env.PulsarBrokerURL) + require.NoError(t, err) + t.Cleanup(stopServer) + + sseURL := snmcpBaseURL + "/sse" + require.NoError(t, expectUnauthorized(ctx, sseURL, "", false)) + require.NoError(t, expectUnauthorized(ctx, sseURL, "invalid-token", false)) + + adminClient, err := newAuthedClient(ctx, sseURL, tokens.AdminToken, "snmcp-e2e-admin") + require.NoError(t, 
err) + t.Cleanup(func() { + _ = adminClient.Close() + }) + + testClient, err := newAuthedClient(ctx, sseURL, tokens.TestUserToken, "snmcp-e2e-test-user") + require.NoError(t, err) + t.Cleanup(func() { + _ = testClient.Close() + }) + + clusters, err := listClusters(ctx, adminClient) + require.NoError(t, err) + require.NotEmpty(t, clusters) + + suffix := time.Now().UnixNano() + tenant := fmt.Sprintf("e2e-%d", suffix) + namespace := fmt.Sprintf("%s/ns-%d", tenant, suffix) + topic := fmt.Sprintf("persistent://%s/topic-%d", namespace, suffix) + + result, err := callTool(ctx, adminClient, "pulsar_admin_tenant", map[string]any{ + "resource": "tenant", + "operation": "create", + "tenant": tenant, + "adminRoles": []string{defaultAdminRole}, + "allowedClusters": []string{clusters[0]}, + }) + require.NoError(t, requireToolOK(result, err, "pulsar_admin_tenant create")) + + result, err = callTool(ctx, adminClient, "pulsar_admin_namespace", map[string]any{ + "operation": "create", + "namespace": namespace, + "clusters": []string{clusters[0]}, + }) + require.NoError(t, requireToolOK(result, err, "pulsar_admin_namespace create")) + + result, err = callTool(ctx, adminClient, "pulsar_admin_namespace_policy_set", map[string]any{ + "namespace": namespace, + "policy": "permission", + "role": defaultTestUser, + "actions": []string{"consume"}, + }) + require.NoError(t, requireToolOK(result, err, "pulsar_admin_namespace_policy_set permission")) + + result, err = callTool(ctx, adminClient, "pulsar_admin_topic", map[string]any{ + "resource": "topic", + "operation": "create", + "topic": topic, + "partitions": float64(0), + }) + require.NoError(t, requireToolOK(result, err, "pulsar_admin_topic create")) + + result, err = callTool(ctx, testClient, "pulsar_admin_tenant", map[string]any{ + "resource": "tenant", + "operation": "create", + "tenant": tenant + "-unauthorized", + "adminRoles": []string{defaultTestUser}, + "allowedClusters": []string{clusters[0]}, + }) + require.NoError(t, requireToolError(result, err, "pulsar_admin_tenant unauthorized create")) +} + +func startPulsarContainerWithAuth(ctx context.Context, cfg testcontainersConfig, adminToken, secretKeyPath string) (*testcontainersEnv, error) { + if !cfg.Enabled { + return nil, errTestcontainersDisabled + } + if strings.TrimSpace(adminToken) == "" { + return nil, fmt.Errorf("admin token is required") + } + if strings.TrimSpace(secretKeyPath) == "" { + return nil, fmt.Errorf("secret key path is required") + } + + request := testcontainers.ContainerRequest{ + Image: cfg.PulsarImage, + ExposedPorts: []string{pulsarBrokerPort, pulsarWebServicePort}, + Env: map[string]string{ + "PULSAR_PREFIX_authenticationEnabled": "true", + "PULSAR_PREFIX_authenticationProviders": "org.apache.pulsar.broker.authentication.AuthenticationProviderToken", + "PULSAR_PREFIX_authorizationEnabled": "true", + "PULSAR_PREFIX_superUserRoles": defaultAdminRole, + "PULSAR_PREFIX_tokenSecretKey": "file://" + authSecretFilePath, + "PULSAR_PREFIX_brokerClientAuthenticationPlugin": "org.apache.pulsar.client.impl.auth.AuthenticationToken", + "PULSAR_PREFIX_brokerClientAuthenticationParameters": "token:" + adminToken, + }, + Cmd: []string{"bash", "-lc", "set -- $(hostname -i); export PULSAR_PREFIX_advertisedAddress=$1; bin/apply-config-from-env.py /pulsar/conf/standalone.conf; exec bin/pulsar standalone"}, + Files: []testcontainers.ContainerFile{ + { + HostFilePath: secretKeyPath, + ContainerFilePath: authSecretFilePath, + FileMode: 0o600, + }, + }, + WaitingFor: wait.ForAll( + 
wait.ForHTTP("/admin/v2/clusters"). + WithPort(pulsarWebServicePort). + WithHeaders(map[string]string{"Authorization": "Bearer " + adminToken}), + wait.ForListeningPort(pulsarBrokerPort), + ).WithDeadline(cfg.StartupTimeout), + } + + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: request, + Started: true, + }) + if err != nil { + return nil, fmt.Errorf("start pulsar container: %w", err) + } + + pulsarWebURL, pulsarBrokerURL, err := resolvePulsarEndpoints(ctx, container) + if err != nil { + _ = container.Terminate(ctx) + return nil, err + } + + return &testcontainersEnv{ + Pulsar: container, + PulsarWebServiceURL: pulsarWebURL, + PulsarBrokerURL: pulsarBrokerURL, + }, nil +} + +func startSNMCPServer(t *testing.T, pulsarWebURL, pulsarBrokerURL string) (string, func(), error) { + t.Helper() + + repoRoot := findRepoRoot(t) + binaryPath := buildSNMCPBinary(t, repoRoot) + + addr := reserveLocalAddr(t) + baseURL := fmt.Sprintf("http://%s/mcp", addr) + + //nolint:gosec // test binary path and arguments are controlled + cmd := exec.Command(binaryPath, + "sse", + "--http-addr", addr, + "--http-path", "/mcp", + "--use-external-pulsar", + "--pulsar-web-service-url", pulsarWebURL, + "--pulsar-service-url", pulsarBrokerURL, + "--multi-session-pulsar", + ) + cmd.Dir = repoRoot + cmd.Env = append(os.Environ(), "SNMCP_CONFIG_DIR="+t.TempDir()) + + var output bytes.Buffer + cmd.Stdout = &output + cmd.Stderr = &output + + if err := cmd.Start(); err != nil { + return "", nil, fmt.Errorf("start snmcp: %w", err) + } + + errCh := make(chan error, 1) + go func() { + errCh <- cmd.Wait() + }() + + readyCtx, cancel := context.WithTimeout(context.Background(), 45*time.Second) + defer cancel() + if err := waitForHTTPStatus(readyCtx, baseURL+"/healthz", http.StatusOK, errCh); err != nil { + _ = cmd.Process.Signal(os.Interrupt) + <-errCh + return "", nil, fmt.Errorf("snmcp not ready: %w\n%s", err, strings.TrimSpace(output.String())) + } + + cleanup := func() { + if cmd.Process == nil { + return + } + _ = cmd.Process.Signal(os.Interrupt) + select { + case <-errCh: + case <-time.After(10 * time.Second): + _ = cmd.Process.Kill() + <-errCh + } + } + + return baseURL, cleanup, nil +} + +func waitForHTTPStatus(ctx context.Context, target string, status int, errCh <-chan error) error { + for { + select { + case err := <-errCh: + return fmt.Errorf("snmcp exited early: %w", err) + default: + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target, nil) + if err != nil { + return err + } + resp, err := http.DefaultClient.Do(req) + if err == nil { + _ = resp.Body.Close() + if resp.StatusCode == status { + return nil + } + } + + select { + case <-ctx.Done(): + return fmt.Errorf("timeout waiting for %s", target) + case <-time.After(250 * time.Millisecond): + } + } +} + +func buildSNMCPBinary(t *testing.T, repoRoot string) string { + t.Helper() + outputPath := filepath.Join(t.TempDir(), "snmcp") + + //nolint:gosec // go build command is static and controlled in tests + cmd := exec.Command("go", "build", "-o", outputPath, "./cmd/streamnative-mcp-server") + cmd.Dir = repoRoot + cmd.Env = os.Environ() + + var output bytes.Buffer + cmd.Stdout = &output + cmd.Stderr = &output + + require.NoErrorf(t, cmd.Run(), "failed to build snmcp: %s", strings.TrimSpace(output.String())) + return outputPath +} + +func reserveLocalAddr(t *testing.T) string { + t.Helper() + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + addr := 
listener.Addr().String() + require.NoError(t, listener.Close()) + return addr +} + +func loadAuthTokens(t *testing.T) authTokens { + t.Helper() + + tokens := authTokens{ + AdminToken: strings.TrimSpace(os.Getenv("ADMIN_TOKEN")), + TestUserToken: strings.TrimSpace(os.Getenv("TEST_USER_TOKEN")), + } + + if tokens.AdminToken != "" && tokens.TestUserToken != "" { + return tokens + } + + repoRoot := findRepoRoot(t) + path := filepath.Join(repoRoot, "charts", "snmcp", "e2e", "test-tokens.env") + //nolint:gosec // path is resolved from repo root in tests + file, err := os.Open(path) + require.NoError(t, err) + defer func() { + _ = file.Close() + }() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + key := strings.TrimSpace(parts[0]) + value := strings.TrimSpace(parts[1]) + switch key { + case "ADMIN_TOKEN": + if tokens.AdminToken == "" { + tokens.AdminToken = value + } + case "TEST_USER_TOKEN": + if tokens.TestUserToken == "" { + tokens.TestUserToken = value + } + } + } + require.NoError(t, scanner.Err()) + require.NotEmpty(t, tokens.AdminToken) + require.NotEmpty(t, tokens.TestUserToken) + return tokens +} + +func loadSecretKeyPath(t *testing.T) string { + t.Helper() + repoRoot := findRepoRoot(t) + path := filepath.Join(repoRoot, "charts", "snmcp", "e2e", "test-secret.key") + _, err := os.Stat(path) + require.NoError(t, err) + return path +} + +func findRepoRoot(t *testing.T) string { + t.Helper() + dir, err := os.Getwd() + require.NoError(t, err) + + for { + if _, statErr := os.Stat(filepath.Join(dir, "go.mod")); statErr == nil { + return dir + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + require.Fail(t, "go.mod not found in parent directories") + return "" +} diff --git a/cmd/snmcp-e2e/testcontainers.go b/cmd/snmcp-e2e/testcontainers.go new file mode 100644 index 0000000..27ae118 --- /dev/null +++ b/cmd/snmcp-e2e/testcontainers.go @@ -0,0 +1,193 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
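+
+// This file holds the shared Testcontainers helpers for the e2e suite:
+// loading configuration from the E2E_* environment variables, starting and
+// terminating the Pulsar and Kafka containers, and resolving the endpoints
+// of the started containers.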
+ +package main + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "time" + + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/kafka" + "github.com/testcontainers/testcontainers-go/wait" +) + +const ( + pulsarBrokerPort = "6650/tcp" + pulsarWebServicePort = "8080/tcp" + defaultPulsarImage = "apachepulsar/pulsar:latest" + defaultKafkaImage = "confluentinc/confluent-local:7.5.0" + defaultStartupTimeout = 5 * time.Minute +) + +var errTestcontainersDisabled = errors.New("testcontainers disabled") + +type testcontainersConfig struct { + Enabled bool + PulsarImage string + KafkaImage string + StartupTimeout time.Duration +} + +type testcontainersEnv struct { + Pulsar testcontainers.Container + Kafka *kafka.KafkaContainer + PulsarWebServiceURL string + PulsarBrokerURL string + KafkaBrokers string +} + +func loadTestcontainersConfig() testcontainersConfig { + return testcontainersConfig{ + Enabled: getenvBool("E2E_USE_TESTCONTAINERS", false), + PulsarImage: getenv("E2E_PULSAR_IMAGE", defaultPulsarImage), + KafkaImage: getenv("E2E_KAFKA_IMAGE", defaultKafkaImage), + StartupTimeout: getenvDuration("E2E_TESTCONTAINERS_TIMEOUT", defaultStartupTimeout), + } +} + +func startTestcontainers(ctx context.Context, cfg testcontainersConfig) (*testcontainersEnv, error) { + if !cfg.Enabled { + return nil, errTestcontainersDisabled + } + if err := validateTestcontainersConfig(cfg); err != nil { + return nil, err + } + + startupCtx, cancel := context.WithTimeout(ctx, cfg.StartupTimeout) + defer cancel() + + pulsarContainer, err := startPulsarContainer(startupCtx, cfg) + if err != nil { + return nil, err + } + + pulsarWebURL, pulsarBrokerURL, err := resolvePulsarEndpoints(startupCtx, pulsarContainer) + if err != nil { + _ = pulsarContainer.Terminate(startupCtx) + return nil, err + } + + kafkaContainer, kafkaBrokers, err := startKafkaContainer(startupCtx, cfg) + if err != nil { + _ = pulsarContainer.Terminate(startupCtx) + return nil, err + } + + env := &testcontainersEnv{ + Pulsar: pulsarContainer, + Kafka: kafkaContainer, + PulsarWebServiceURL: pulsarWebURL, + PulsarBrokerURL: pulsarBrokerURL, + KafkaBrokers: kafkaBrokers, + } + return env, nil +} + +func (env *testcontainersEnv) Terminate(ctx context.Context) error { + if env == nil { + return nil + } + + var err error + if env.Kafka != nil { + err = errors.Join(err, env.Kafka.Terminate(ctx)) + } + if env.Pulsar != nil { + err = errors.Join(err, env.Pulsar.Terminate(ctx)) + } + return err +} + +func validateTestcontainersConfig(cfg testcontainersConfig) error { + if strings.TrimSpace(cfg.PulsarImage) == "" { + return errors.New("pulsar image is required") + } + if strings.TrimSpace(cfg.KafkaImage) == "" { + return errors.New("kafka image is required") + } + if cfg.StartupTimeout <= 0 { + return errors.New("startup timeout must be positive") + } + return nil +} + +func startPulsarContainer(ctx context.Context, cfg testcontainersConfig) (testcontainers.Container, error) { + request := testcontainers.ContainerRequest{ + Image: cfg.PulsarImage, + ExposedPorts: []string{pulsarBrokerPort, pulsarWebServicePort}, + Cmd: []string{"standalone"}, + WaitingFor: wait.ForAll( + wait.ForListeningPort(pulsarBrokerPort), + wait.ForListeningPort(pulsarWebServicePort), + ).WithDeadline(cfg.StartupTimeout), + } + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: request, + Started: true, + }) + if err != nil { + return nil, fmt.Errorf("start pulsar 
container: %w", err) + } + return container, nil +} + +func resolvePulsarEndpoints(ctx context.Context, container testcontainers.Container) (string, string, error) { + host, err := container.Host(ctx) + if err != nil { + return "", "", fmt.Errorf("pulsar host: %w", err) + } + webPort, err := container.MappedPort(ctx, pulsarWebServicePort) + if err != nil { + return "", "", fmt.Errorf("pulsar web service port: %w", err) + } + brokerPort, err := container.MappedPort(ctx, pulsarBrokerPort) + if err != nil { + return "", "", fmt.Errorf("pulsar broker port: %w", err) + } + + webURL := fmt.Sprintf("http://%s:%s", host, webPort.Port()) + brokerURL := fmt.Sprintf("pulsar://%s:%s", host, brokerPort.Port()) + return webURL, brokerURL, nil +} + +func startKafkaContainer(ctx context.Context, cfg testcontainersConfig) (*kafka.KafkaContainer, string, error) { + container, err := kafka.Run(ctx, cfg.KafkaImage) + if err != nil { + return nil, "", fmt.Errorf("start kafka container: %w", err) + } + + brokers, err := container.Brokers(ctx) + if err != nil { + _ = container.Terminate(ctx) + return nil, "", fmt.Errorf("kafka brokers: %w", err) + } + return container, strings.Join(brokers, ","), nil +} + +func getenvDuration(key string, fallback time.Duration) time.Duration { + raw := os.Getenv(key) + if strings.TrimSpace(raw) == "" { + return fallback + } + value, err := time.ParseDuration(raw) + if err != nil { + return fallback + } + return value +} diff --git a/cmd/snmcp-e2e/testcontainers_test.go b/cmd/snmcp-e2e/testcontainers_test.go new file mode 100644 index 0000000..f5376b1 --- /dev/null +++ b/cmd/snmcp-e2e/testcontainers_test.go @@ -0,0 +1,83 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestLoadTestcontainersConfigDefaults(t *testing.T) { + cfg := loadTestcontainersConfig() + require.False(t, cfg.Enabled) + require.Equal(t, defaultPulsarImage, cfg.PulsarImage) + require.Equal(t, defaultKafkaImage, cfg.KafkaImage) + require.Equal(t, defaultStartupTimeout, cfg.StartupTimeout) +} + +func TestLoadTestcontainersConfigOverrides(t *testing.T) { + t.Setenv("E2E_USE_TESTCONTAINERS", "true") + t.Setenv("E2E_PULSAR_IMAGE", "apachepulsar/pulsar:3.2.2") + t.Setenv("E2E_KAFKA_IMAGE", "confluentinc/confluent-local:7.6.0") + t.Setenv("E2E_TESTCONTAINERS_TIMEOUT", "2m") + + cfg := loadTestcontainersConfig() + require.True(t, cfg.Enabled) + require.Equal(t, "apachepulsar/pulsar:3.2.2", cfg.PulsarImage) + require.Equal(t, "confluentinc/confluent-local:7.6.0", cfg.KafkaImage) + require.Equal(t, 2*time.Minute, cfg.StartupTimeout) +} + +func TestLoadTestcontainersConfigInvalidTimeout(t *testing.T) { + t.Setenv("E2E_TESTCONTAINERS_TIMEOUT", "not-a-duration") + cfg := loadTestcontainersConfig() + require.Equal(t, defaultStartupTimeout, cfg.StartupTimeout) +} + +func TestStartTestcontainersDisabled(t *testing.T) { + cfg := testcontainersConfig{Enabled: false} + env, err := startTestcontainers(context.Background(), cfg) + require.ErrorIs(t, err, errTestcontainersDisabled) + require.Nil(t, env) +} + +func TestTestcontainersEnvTerminateNil(t *testing.T) { + var env *testcontainersEnv + require.NoError(t, env.Terminate(context.Background())) +} + +func TestValidateTestcontainersConfig(t *testing.T) { + cfg := testcontainersConfig{ + Enabled: true, + PulsarImage: "apachepulsar/pulsar:latest", + KafkaImage: "confluentinc/confluent-local:7.5.0", + StartupTimeout: time.Minute, + } + require.NoError(t, validateTestcontainersConfig(cfg)) + + cfg.PulsarImage = "" + require.Error(t, validateTestcontainersConfig(cfg)) + + cfg.PulsarImage = "apachepulsar/pulsar:latest" + cfg.KafkaImage = "" + require.Error(t, validateTestcontainersConfig(cfg)) + + cfg.KafkaImage = "confluentinc/confluent-local:7.5.0" + cfg.StartupTimeout = 0 + require.Error(t, validateTestcontainersConfig(cfg)) +} diff --git a/go.mod b/go.mod index 3d63a7b..978b947 100644 --- a/go.mod +++ b/go.mod @@ -7,9 +7,11 @@ require ( github.com/apache/pulsar-client-go v0.13.1 github.com/golang-jwt/jwt v3.2.1+incompatible github.com/google/go-cmp v0.7.0 + github.com/google/jsonschema-go v0.3.0 github.com/hamba/avro/v2 v2.28.0 github.com/mark3labs/mcp-go v0.43.2 github.com/mitchellh/go-homedir v1.1.0 + github.com/modelcontextprotocol/go-sdk v1.2.0 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 @@ -17,68 +19,100 @@ require ( github.com/streamnative/pulsarctl v0.4.3-0.20250312214758-e472faec284b github.com/streamnative/streamnative-mcp-server/sdk/sdk-apiserver v0.0.0-20250506174209-b67ea08ddd82 github.com/streamnative/streamnative-mcp-server/sdk/sdk-kafkaconnect v0.0.0-00010101000000-000000000000 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 + github.com/testcontainers/testcontainers-go v0.40.0 + github.com/testcontainers/testcontainers-go/modules/kafka v0.40.0 github.com/twmb/franz-go v1.18.1 github.com/twmb/franz-go/pkg/kadm v1.16.0 github.com/twmb/franz-go/pkg/sr v1.3.0 github.com/twmb/tlscfg v1.2.1 - golang.org/x/oauth2 v0.27.0 + golang.org/x/oauth2 v0.34.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 ) require ( + dario.cat/mergo v1.0.2 // 
indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/AthenZ/athenz v1.10.39 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/DataDog/zstd v1.5.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ardielle/ardielle-go v1.5.2 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.4.0 // indirect github.com/buger/jsonparser v1.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/danieljoos/wincred v1.1.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v28.5.1+incompatible // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect + github.com/ebitengine/purego v0.8.4 // indirect github.com/fatih/color v1.7.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/ghodss/yaml v1.0.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/jsonschema v0.13.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kris-nova/logger v0.0.0-20181127235838-fd0d87064b06 // indirect github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b // indirect - github.com/magiconair/properties v1.8.7 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.2 // indirect github.com/mattn/go-isatty v0.0.8 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect 
github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/onsi/gomega v1.35.1 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pierrec/lz4 v2.0.5+incompatible // indirect github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.12.0 // indirect @@ -86,17 +120,27 @@ require ( github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/twmb/franz-go/pkg/kmsg v1.9.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/yosida95/uritemplate/v3 v3.0.2 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/crypto v0.45.0 // indirect - golang.org/x/mod v0.29.0 // indirect + golang.org/x/mod v0.30.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - google.golang.org/protobuf v1.36.1 // indirect + golang.org/x/text v0.32.0 // indirect + google.golang.org/grpc v1.78.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 1156150..2a91f3a 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,11 @@ -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AthenZ/athenz v1.10.39 
h1:mtwHTF/v62ewY2Z5KWhuZgVXftBej1/Tn80zx4DcawY= github.com/AthenZ/athenz v1.10.39/go.mod h1:3Tg8HLsiQZp81BJY58JBeU2BR6B/H4/0MQGfCwhHNEA= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= @@ -11,10 +13,10 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= +github.com/IBM/sarama v1.42.1/go.mod h1:Xxho9HkHd4K/MDUo/T/sOqwtX/17D33++E9Wib6hUdQ= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.11.5 h1:haEcLNpj9Ka1gd3B3tAEs9CpE0c+1IhoL59w/exYU38= -github.com/Microsoft/hcsshim v0.11.5/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= github.com/apache/pulsar-client-go v0.13.1 h1:XAAKXjF99du7LP6qu/nBII1HC2nS483/vQoQIWmm5Yg= github.com/apache/pulsar-client-go v0.13.1/go.mod h1:0X5UCs+Cv5w6Ds38EZebUMfyVUFIh+URF2BeipEVhIU= github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4= @@ -33,15 +35,19 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= -github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= -github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= -github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= -github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/danieljoos/wincred v1.1.2 
h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -52,14 +58,22 @@ github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4w github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dvsekhvalnov/jose2go v1.7.0 h1:bnQc8+GMnidJZA8zc6lLEAb4xNrIqHwO+9TzqvtQZPo= github.com/dvsekhvalnov/jose2go v1.7.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.4.0 h1:3OK9bWpPk5q6pbFAaYSEwD9CLUSHG8bnZuqX2yMt3B0= +github.com/eapache/go-resiliency v1.4.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -70,8 +84,9 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/ github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= @@ -81,8 +96,6 @@ github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9L github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= @@ -92,13 +105,20 @@ github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVI github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= +github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hamba/avro/v2 v2.28.0 h1:E8J5D27biyAulWKNiEBhV85QPc9xRMCUCGJewS0KYCE= @@ -108,18 +128,30 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk= github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -133,8 +165,8 @@ github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b h1:xYEM2oBUhBE github.com/kris-nova/lolgopher v0.0.0-20180921204813-313b3abb0d9b/go.mod h1:V0HF/ZBlN86HqewcDC/cVxMmYDiRukWjSrgKLUAn9Js= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mark3labs/mcp-go v0.43.2 h1:21PUSlWWiSbUPQwXIJ5WKlETixpFpq+WBpbMGDSVy/I= @@ -149,14 +181,22 
@@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modelcontextprotocol/go-sdk v1.2.0 h1:Y23co09300CEk8iZ/tMxIX1dVmKZkzoSBZOpJwUnc/s= +github.com/modelcontextprotocol/go-sdk v1.2.0/go.mod h1:6fM3LCm3yV7pAs8isnKLn07oKtB0MP9LHd3DfAcKw10= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -177,8 +217,8 @@ github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= @@ -200,15 +240,15 @@ 
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= @@ -235,12 +275,14 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/testcontainers/testcontainers-go v0.32.0 h1:ug1aK08L3gCHdhknlTTwWjPHPS+/alvLJU/DRxTD/ME= -github.com/testcontainers/testcontainers-go v0.32.0/go.mod h1:CRHrzHLQhlXUsa5gXjTOfqIEJcrK5+xMDmBr/WMI88E= +github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= +github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= +github.com/testcontainers/testcontainers-go/modules/kafka v0.40.0 h1:BW4CMO6rYLvJRC7UF4l0rudnwm7IX/kJPvGd9MCJM6I= +github.com/testcontainers/testcontainers-go/modules/kafka v0.40.0/go.mod 
h1:O4U0SUR8blhkRLLfIFHQqNRKzee7fOxzya2H+rnl4OY= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -259,16 +301,26 @@ github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/ github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr 
v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ -277,25 +329,29 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -304,18 +360,24 @@ golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= -google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= -google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= +google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -333,6 +395,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/go.work.sum b/go.work.sum index 84e6fa8..937ea2f 100644 --- a/go.work.sum +++ b/go.work.sum @@ -1,5 +1,6 @@ cel.dev/expr v0.16.1 h1:NR0+oFYzR1CqLFhTAqg3ql59G9VfN8fKq1TCHJ6gq1g= cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= @@ -12,6 +13,8 @@ cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2Qx cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= @@ -21,21 +24,26 @@ cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw= cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= +github.com/Microsoft/hcsshim v0.11.5/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= 
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/pulsar-client-go/oauth2 v0.0.0-20200715083626-b9f8c5cedefb h1:E1P0FudxDdj2RhbveZC9i3PwukLCA/4XQSkBS/dw6/I= github.com/ardielle/ardielle-tools v1.5.4 h1:2uL/7wZRUF4LGV7r2eTaaeyhkBoqdiqEitSXMd6k8F8= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= @@ -53,22 +61,38 @@ github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= +github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= +github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= 
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= @@ -76,29 +100,32 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSY github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-redis/redis v6.15.6+incompatible h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg= github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v1.2.5/go.mod 
h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -135,6 +162,8 @@ github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4d github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d h1:Z+RDyXzjKE0i2sTjZ/b1uxiGtPhFy34Ou/Tk0qwN0kM= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -144,6 +173,8 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mark3labs/mcp-go v0.23.1 h1:RzTzZ5kJ+HxwnutKA4rll8N/pKV6Wh5dhCmiJUu5S9I= github.com/mark3labs/mcp-go v0.23.1/go.mod h1:rXqOudj/djTORU/ThxYx8fqEVj/5pvTuuebQ2RC7uk4= github.com/mark3labs/mcp-go v0.34.0 h1:eWy7WBGvhk6EyAAyVzivTCprE52iXJwNtvHV6Cv3bR0= @@ -152,6 +183,13 @@ github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/moby/sys/mount v0.3.4/go.mod h1:KcQJMbQdJHPlq5lcYT+/CjatWM4PuxKe+XLSVS4J6Os= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= +github.com/moby/sys/reexec v0.1.0/go.mod h1:EqjBg8F3X7iZe5pU6nRZnYCMUTXoxsjiIfHup5wYIN8= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= 
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= @@ -159,6 +197,8 @@ github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8u github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo= github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -169,15 +209,29 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgm github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/testcontainers/testcontainers-go v0.32.0 h1:ug1aK08L3gCHdhknlTTwWjPHPS+/alvLJU/DRxTD/ME= +github.com/testcontainers/testcontainers-go v0.32.0/go.mod h1:CRHrzHLQhlXUsa5gXjTOfqIEJcrK5+xMDmBr/WMI88E= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYyQWRjXWJEA9sS/c/pjvH++55Gr648= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= @@ -186,18 +240,35 @@ github.com/yuin/goldmark v1.1.32 h1:5tjfNdR2ki3yYQ842+eX2sQHeiwpKJ0RnHO4IYOc4V8= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/detectors/gcp v1.29.0 h1:TiaiXB4DpGD3sdzNlYQxruQngn5Apwzi1X0DRhuGvDQ= go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace 
v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= @@ -209,8 +280,12 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= @@ -220,20 +295,22 @@ golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time 
v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d h1:W07d4xkoAUSNOkOzdzXCdFGxT7o2rW4q8M34tB2i//k=
@@ -244,6 +321,7 @@ golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY
 golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
 golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
 google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w=
 google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0=
 google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY=
@@ -258,7 +336,13 @@ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.
 google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
 google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
 google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI=
+google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
+google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
diff --git a/pkg/cmd/mcp/server.go b/pkg/cmd/mcp/server.go
index 1e37132..8363b2e 100644
--- a/pkg/cmd/mcp/server.go
+++ b/pkg/cmd/mcp/server.go
@@ -28,10 +28,10 @@ import (
 	"github.com/streamnative/streamnative-mcp-server/pkg/pulsar"
 )
 
-func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *logrus.Logger) (*mcp.Server, error) {
+func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *logrus.Logger) (*mcp.LegacyServer, error) {
 	snConfig := configOpts.LoadConfigOrDie()
 	var s *server.MCPServer
-	var mcpServer *mcp.Server
+	var mcpServer *mcp.LegacyServer
 	switch {
 	case snConfig.KeyFile != "":
 		{
@@ -46,7 +46,7 @@ func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *lo
 			if err != nil {
 				return nil, errors.Wrap(err, "failed to create StreamNative Cloud session")
 			}
-			mcpServer = mcp.NewServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetStreamNativeCloudServerInstructions(userName, snConfig)))
+			mcpServer = mcp.NewLegacyServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetStreamNativeCloudServerInstructions(userName, snConfig)))
 			mcpServer.SNCloudSession = session
 			s = mcpServer.MCPServer
 
@@ -77,13 +77,13 @@ func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *lo
 			if err != nil {
 				return nil, errors.Wrap(err, "failed to set external Kafka context")
 			}
-			mcpServer = mcp.NewServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetExternalKafkaServerInstructions(snConfig.ExternalKafka.BootstrapServers)))
+			mcpServer = mcp.NewLegacyServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetExternalKafkaServerInstructions(snConfig.ExternalKafka.BootstrapServers)))
 			mcpServer.KafkaSession = ksession
 			s = mcpServer.MCPServer
 		}
 	case snConfig.ExternalPulsar != nil:
 		{
-			mcpServer = mcp.NewServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetExternalPulsarServerInstructions(snConfig.ExternalPulsar.WebServiceURL)))
+			mcpServer = mcp.NewLegacyServer("streamnative-mcp-server", "0.0.1", logrusLogger, server.WithInstructions(mcp.GetExternalPulsarServerInstructions(snConfig.ExternalPulsar.WebServiceURL)))
 			s = mcpServer.MCPServer
 
 			// Only create global PulsarSession if not in multi-session mode
@@ -114,32 +114,32 @@ func newMcpServer(_ context.Context, configOpts *ServerOptions, logrusLogger *lo
 		}
 	}
 
-	mcp.PulsarAdminAddBrokersTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddBrokerStatsTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddClusterTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddFunctionsWorkerTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddNamespaceTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddNamespacePolicyTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddNsIsolationPolicyTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddPackagesTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddResourceQuotasTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddSchemasTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddSubscriptionTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddTenantTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddTopicTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddSinksTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddFunctionsTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddSourcesTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarAdminAddTopicPolicyTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarClientAddConsumerTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.PulsarClientAddProducerTools(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddBrokersToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddBrokerStatsToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddClusterToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddFunctionsWorkerToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddNamespaceToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddNamespacePolicyToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddNsIsolationPolicyToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddPackagesToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddResourceQuotasToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddSchemasToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddSubscriptionToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddTenantToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddTopicToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddSinksToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddFunctionsToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddSourcesToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarAdminAddTopicPolicyToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarClientAddConsumerToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.PulsarClientAddProducerToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
 
-	mcp.KafkaAdminAddTopicTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.KafkaAdminAddPartitionsTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.KafkaAdminAddGroupsTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.KafkaAdminAddSchemaRegistryTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.KafkaAdminAddKafkaConnectTools(s, configOpts.ReadOnly, configOpts.Features)
-	mcp.KafkaClientAddConsumeTools(s, configOpts.ReadOnly, logrusLogger, configOpts.Features)
-	mcp.KafkaClientAddProduceTools(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaAdminAddTopicToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaAdminAddPartitionsToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaAdminAddGroupsToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaAdminAddSchemaRegistryToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaAdminAddKafkaConnectToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
+	mcp.KafkaClientAddConsumeToolsLegacy(s, configOpts.ReadOnly, logrusLogger, configOpts.Features)
+	mcp.KafkaClientAddProduceToolsLegacy(s, configOpts.ReadOnly, configOpts.Features)
 	return mcpServer, nil
 }
diff --git a/pkg/cmd/mcp/stdio.go b/pkg/cmd/mcp/stdio.go
index e13232e..b520f40 100644
--- a/pkg/cmd/mcp/stdio.go
+++ b/pkg/cmd/mcp/stdio.go
@@ -17,7 +17,6 @@ package mcp
 import (
 	"context"
 	"fmt"
-	"io"
 	"os"
 	"os/signal"
 	"path/filepath"
@@ -25,11 +24,10 @@ import (
 
 	stdlog "log"
 
-	"github.com/mark3labs/mcp-go/server"
+	sdk "github.com/modelcontextprotocol/go-sdk/mcp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/streamnative/streamnative-mcp-server/pkg/common"
-	"github.com/streamnative/streamnative-mcp-server/pkg/log"
 )
 
 // NewCmdMcpStdioServer builds the stdio server command.
@@ -69,21 +67,18 @@ func runStdioServer(configOpts *ServerOptions) error { return fmt.Errorf("failed to create MCP server: %w", err) } - stdioServer := server.NewStdioServer(mcpServer.MCPServer) - stdioServer.SetErrorLogger(stdLogger) + var transport sdk.Transport = &sdk.StdioTransport{} + if configOpts.LogCommands { + transport = &sdk.LoggingTransport{ + Transport: transport, + Writer: logger.Writer(), + } + } // Start listening for messages errC := make(chan error, 1) go func() { - in, out := io.Reader(os.Stdin), io.Writer(os.Stdout) - - if configOpts.LogCommands { - // If command logging is enabled, wrap the IO with a logger - loggedIO := log.NewIOLogger(in, out, logger) - in, out = loggedIO, loggedIO - } - - errC <- stdioServer.Listen(ctx, in, out) + errC <- mcpServer.Run(ctx, transport, stdLogger) }() _, _ = fmt.Fprintf(os.Stderr, "StreamNative Cloud MCP Server running on stdio\n") diff --git a/pkg/mcp/builders/base.go b/pkg/mcp/builders/base.go index 61411a8..4c93cab 100644 --- a/pkg/mcp/builders/base.go +++ b/pkg/mcp/builders/base.go @@ -20,8 +20,7 @@ import ( "fmt" "slices" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/modelcontextprotocol/go-sdk/mcp" ) // FeatureChecker defines the interface for checking feature requirements @@ -41,7 +40,7 @@ type ToolBuilder interface { GetRequiredFeatures() []string // BuildTools builds and returns a list of server tools - BuildTools(ctx context.Context, config ToolBuildConfig) ([]server.ServerTool, error) + BuildTools(ctx context.Context, config ToolBuildConfig) ([]ToolDefinition, error) // Validate validates the builder configuration Validate(config ToolBuildConfig) error @@ -50,6 +49,31 @@ type ToolBuilder interface { FeatureChecker } +// ToolDefinition describes a tool and its handler registration. +type ToolDefinition interface { + Definition() *mcp.Tool + Register(server *mcp.Server) +} + +// ServerTool combines a tool with a typed handler. +type ServerTool[In, Out any] struct { + Tool *mcp.Tool + Handler ToolHandlerFunc[In, Out] +} + +// Definition returns the tool definition. +func (t ServerTool[In, Out]) Definition() *mcp.Tool { + return t.Tool +} + +// Register installs the tool on the provided server. +func (t ServerTool[In, Out]) Register(server *mcp.Server) { + if server == nil || t.Tool == nil { + return + } + mcp.AddTool(server, t.Tool, t.Handler) +} + // ToolBuildConfig contains all configuration information needed to build tools // It specifies build parameters such as read-only mode, features, and options type ToolBuildConfig struct { @@ -130,6 +154,6 @@ func (b *BaseToolBuilder) HasAnyRequiredFeature(features []string) bool { return false } -// ToolHandlerFunc defines the tool handler function type -// It maintains consistency with server.ToolHandlerFunc -type ToolHandlerFunc func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) +// ToolHandlerFunc defines the tool handler function type. +// It aliases mcp.ToolHandlerFor to preserve SDK behavior. 
+type ToolHandlerFunc[In, Out any] = mcp.ToolHandlerFor[In, Out] diff --git a/pkg/mcp/builders/kafka/consume.go b/pkg/mcp/builders/kafka/consume.go index 9b8fae2..8083042 100644 --- a/pkg/mcp/builders/kafka/consume.go +++ b/pkg/mcp/builders/kafka/consume.go @@ -20,9 +20,9 @@ import ( "fmt" "time" + "github.com/google/jsonschema-go/jsonschema" "github.com/hamba/avro/v2" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/sirupsen/logrus" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" @@ -30,6 +30,39 @@ import ( "github.com/twmb/franz-go/pkg/sr" ) +type kafkaConsumeInput struct { + Topic string `json:"topic"` + Group *string `json:"group,omitempty"` + Offset *string `json:"offset,omitempty"` + MaxMessages *int `json:"max-messages,omitempty"` + Timeout *int `json:"timeout,omitempty"` +} + +const ( + kafkaConsumeTopicDesc = "The name of the Kafka topic to consume messages from. " + + "Must be an existing topic that the user has read permissions for. " + + "For partitioned topics, this will consume from all partitions unless a specific partition is specified." + kafkaConsumeGroupDesc = "The consumer group ID to use for consumption. " + + "Optional. If provided, the consumer will join this consumer group and track offsets with Kafka. " + + "If not provided, a random group ID will be generated, and offsets won't be committed back to Kafka. " + + "Using a meaningful group ID is important when you want to resume consumption later or coordinate multiple consumers." + kafkaConsumeOffsetDesc = "The offset position to start consuming from. " + + "Optional. Must be one of these values:\n" + + "- 'atstart': Begin from the earliest available message in the topic/partition\n" + + "- 'atend': Begin from the next message that arrives after the consumer starts\n" + + "- 'atcommitted': Begin from the last committed offset (only works with specified 'group')\n" + + "Default: 'atstart'" + kafkaConsumeMaxMessagesDesc = "Maximum number of messages to consume in this request. " + + "Optional. Limits the total number of messages returned, across all partitions if no specific partition is specified. " + + "Higher values retrieve more data but may increase response time and size. " + + "Default: 10" + kafkaConsumeTimeoutDesc = "Maximum time in seconds to wait for messages. " + + "Optional. The consumer will wait up to this long to collect the requested number of messages. " + + "If fewer than 'max-messages' are available within this time, the available messages are returned. " + + "Longer timeouts are useful for low-volume topics or when consuming with 'atend'. 
" + + "Default: 10 seconds" +) + // KafkaConsumeToolBuilder implements the ToolBuilder interface for Kafka client consume operations // It provides functionality to build Kafka consumer tools // /nolint:revive @@ -61,7 +94,7 @@ func NewKafkaConsumeToolBuilder() *KafkaConsumeToolBuilder { // BuildTools builds the Kafka consume tool list // This is the core method implementing the ToolBuilder interface -func (b *KafkaConsumeToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *KafkaConsumeToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -73,18 +106,23 @@ func (b *KafkaConsumeToolBuilder) BuildTools(_ context.Context, config builders. } // Extract logger from options if provided - if loggerOpt, ok := config.Options["logger"]; ok { - if logger, ok := loggerOpt.(*logrus.Logger); ok { - b.logger = logger + if config.Options != nil { + if loggerOpt, ok := config.Options["logger"]; ok { + if logger, ok := loggerOpt.(*logrus.Logger); ok { + b.logger = logger + } } } // Build tools - tool := b.buildKafkaConsumeTool() + tool, err := b.buildKafkaConsumeTool() + if err != nil { + return nil, err + } handler := b.buildKafkaConsumeHandler() - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[kafkaConsumeInput, any]{ Tool: tool, Handler: handler, }, @@ -93,7 +131,12 @@ func (b *KafkaConsumeToolBuilder) BuildTools(_ context.Context, config builders. // buildKafkaConsumeTool builds the Kafka consume MCP tool definition // Migrated from the original tool definition logic -func (b *KafkaConsumeToolBuilder) buildKafkaConsumeTool() mcp.Tool { +func (b *KafkaConsumeToolBuilder) buildKafkaConsumeTool() (*sdk.Tool, error) { + inputSchema, err := buildKafkaConsumeInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Consume messages from a Kafka topic.\n" + "This tool allows you to read messages from Kafka topics, specifying various consumption parameters.\n\n" + "Kafka Consumer Concepts:\n" + @@ -121,53 +164,20 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeTool() mcp.Tool { " timeout: 30\n\n" + "This tool requires Kafka consumer permissions on the specified topic." - return mcp.NewTool("kafka_client_consume", - mcp.WithDescription(toolDesc), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The name of the Kafka topic to consume messages from. "+ - "Must be an existing topic that the user has read permissions for. "+ - "For partitioned topics, this will consume from all partitions unless a specific partition is specified."), - ), - mcp.WithString("group", - mcp.Description("The consumer group ID to use for consumption. "+ - "Optional. If provided, the consumer will join this consumer group and track offsets with Kafka. "+ - "If not provided, a random group ID will be generated, and offsets won't be committed back to Kafka. "+ - "Using a meaningful group ID is important when you want to resume consumption later or coordinate multiple consumers."), - ), - mcp.WithString("offset", - mcp.Description("The offset position to start consuming from. "+ - "Optional. 
Must be one of these values:\n"+ - "- 'atstart': Begin from the earliest available message in the topic/partition\n"+ - "- 'atend': Begin from the next message that arrives after the consumer starts\n"+ - "- 'atcommitted': Begin from the last committed offset (only works with specified 'group')\n"+ - "Default: 'atstart'"), - ), - mcp.WithNumber("max-messages", - mcp.Description("Maximum number of messages to consume in this request. "+ - "Optional. Limits the total number of messages returned, across all partitions if no specific partition is specified. "+ - "Higher values retrieve more data but may increase response time and size. "+ - "Default: 10"), - ), - mcp.WithNumber("timeout", - mcp.Description("Maximum time in seconds to wait for messages. "+ - "Optional. The consumer will wait up to this long to collect the requested number of messages. "+ - "If fewer than 'max-messages' are available within this time, the available messages are returned. "+ - "Longer timeouts are useful for low-volume topics or when consuming with 'atend'. "+ - "Default: 10 seconds"), - ), - ) + return &sdk.Tool{ + Name: "kafka_client_consume", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildKafkaConsumeHandler builds the Kafka consume handler function // Migrated from the original handler logic -func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() builders.ToolHandlerFunc[kafkaConsumeInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input kafkaConsumeInput) (*sdk.CallToolResult, any, error) { opts := []kgo.Opt{} // Get required parameters - topicName, err := request.RequireString("topic") - if err != nil { - return b.handleError("get topic name", err), nil - } + topicName := input.Topic opts = append(opts, kgo.ConsumeTopics(topicName)) opts = append(opts, kgo.FetchIsolationLevel(kgo.ReadUncommitted())) @@ -176,16 +186,28 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex w := b.logger.Writer() opts = append(opts, kgo.WithLogger(kgo.BasicLogger(w, kgo.LogLevelInfo, nil))) } - maxMessages := request.GetFloat("max-messages", 10) + maxMessages := 10 + if input.MaxMessages != nil { + maxMessages = *input.MaxMessages + } - timeoutSec := request.GetFloat("timeout", 10) + timeoutSec := 10 + if input.Timeout != nil { + timeoutSec = *input.Timeout + } - group := request.GetString("group", "") + group := "" + if input.Group != nil { + group = *input.Group + } if group != "" { opts = append(opts, kgo.ConsumerGroup(group)) } - offsetStr := request.GetString("offset", "atstart") + offsetStr := "atstart" + if input.Offset != nil && *input.Offset != "" { + offsetStr = *input.Offset + } var offset kgo.Offset switch offsetStr { @@ -200,19 +222,19 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex } opts = append(opts, kgo.ConsumeResetOffset(offset)) if b.logger != nil { - b.logger.Infof("Consuming from topic: %s, group: %s, max-messages: %d, timeout: %d", topicName, group, int(maxMessages), int(timeoutSec)) + b.logger.Infof("Consuming from topic: %s, group: %s, max-messages: %d, timeout: %d", topicName, group, maxMessages, timeoutSec) } // Get Kafka session from context session := mcpCtx.GetKafkaSession(ctx) if session == nil { - return b.handleError("get Kafka session not found in 
context", nil), nil + return nil, nil, b.handleError("get Kafka session not found in context", nil) } // Create Kafka client using the session kafkaClient, err := session.GetClient(opts...) if err != nil { - return b.handleError("create Kafka client", err), nil + return nil, nil, b.handleError("create Kafka client", err) } defer kafkaClient.Close() @@ -228,13 +250,13 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex defer cancel() if err = kafkaClient.Ping(timeoutCtx); err != nil { // check connectivity to cluster - return b.handleError("ping Kafka cluster", err), nil + return nil, nil, b.handleError("ping Kafka cluster", err) } if schemaReady { subjSchema, err := srClient.SchemaByVersion(timeoutCtx, topicName+"-value", -1) if err != nil { - return b.handleError("get schema", err), nil + return nil, nil, b.handleError("get schema", err) } if b.logger != nil { b.logger.Infof("Schema ID: %d", subjSchema.ID) @@ -243,7 +265,7 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex case sr.TypeAvro: avroSchema, err := avro.Parse(subjSchema.Schema.Schema) if err != nil { - return b.handleError("parse avro schema", err), nil + return nil, nil, b.handleError("parse avro schema", err) } serde.Register( subjSchema.ID, @@ -275,7 +297,7 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex results := make([]any, 0) consumerLoop: for { - fetches := kafkaClient.PollRecords(timeoutCtx, int(maxMessages)) + fetches := kafkaClient.PollRecords(timeoutCtx, maxMessages) iter := fetches.RecordIter() if b.logger != nil { b.logger.Infof("NumRecords: %d\n", fetches.NumRecords()) @@ -305,7 +327,7 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex } else { results = append(results, record.Value) } - if len(results) >= int(maxMessages) { + if len(results) >= maxMessages { break consumerLoop } } @@ -318,22 +340,47 @@ func (b *KafkaConsumeToolBuilder) buildKafkaConsumeHandler() func(context.Contex } } - return b.marshalResponse(results) + result, err := b.marshalResponse(results) + return result, nil, err } } +func buildKafkaConsumeInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[kafkaConsumeInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "topic", kafkaConsumeTopicDesc) + setSchemaDescription(schema, "group", kafkaConsumeGroupDesc) + setSchemaDescription(schema, "offset", kafkaConsumeOffsetDesc) + setSchemaDescription(schema, "max-messages", kafkaConsumeMaxMessagesDesc) + setSchemaDescription(schema, "timeout", kafkaConsumeTimeoutDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + // Unified error handling and utility functions // handleError provides unified error handling -func (b *KafkaConsumeToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *KafkaConsumeToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *KafkaConsumeToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *KafkaConsumeToolBuilder) 
marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } diff --git a/pkg/mcp/builders/kafka/consume_legacy.go b/pkg/mcp/builders/kafka/consume_legacy.go new file mode 100644 index 0000000..ab9ef5d --- /dev/null +++ b/pkg/mcp/builders/kafka/consume_legacy.go @@ -0,0 +1,340 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/hamba/avro/v2" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/sirupsen/logrus" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/sr" +) + +// KafkaConsumeLegacyToolBuilder implements the legacy ToolBuilder interface for Kafka client consume operations. +// It provides functionality to build Kafka consumer tools. +// /nolint:revive +type KafkaConsumeLegacyToolBuilder struct { + *builders.BaseToolBuilder + logger *logrus.Logger +} + +// NewKafkaConsumeLegacyToolBuilder creates a new legacy Kafka consume tool builder instance. +func NewKafkaConsumeLegacyToolBuilder() *KafkaConsumeLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "kafka_consume", + Version: "1.0.0", + Description: "Kafka client consume tools", + Category: "kafka_client", + Tags: []string{"kafka", "client", "consume"}, + } + + features := []string{ + "kafka-client", + "all", + "all-kafka", + } + + return &KafkaConsumeLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Kafka consume tool list for the legacy server. +func (b *KafkaConsumeLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Extract logger from options if provided + if config.Options != nil { + if loggerOpt, ok := config.Options["logger"]; ok { + if logger, ok := loggerOpt.(*logrus.Logger); ok { + b.logger = logger + } + } + } + + // Build tools + tool := b.buildKafkaConsumeTool() + handler := b.buildKafkaConsumeHandler() + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildKafkaConsumeTool builds the Kafka consume MCP tool definition. +// Migrated from the original tool definition logic. 
+func (b *KafkaConsumeLegacyToolBuilder) buildKafkaConsumeTool() mcp.Tool { + toolDesc := "Consume messages from a Kafka topic.\n" + + "This tool allows you to read messages from Kafka topics, specifying various consumption parameters.\n\n" + + "Kafka Consumer Concepts:\n" + + "- Consumers read data from Kafka topics, which can be spread across multiple partitions\n" + + "- Consumer Groups enable multiple consumers to cooperatively process messages from the same topic\n" + + "- Offsets track the position of consumers in each partition, allowing resumption after failures\n" + + "- Partitions are independent ordered sequences of messages that enable parallel processing\n\n" + + "This tool provides a temporary consumer instance for diagnostic and testing purposes. " + + "It does not commit offsets back to Kafka unless the 'group' parameter is explicitly specified. Do not use this tool for Pulsar protocol operations. Use 'pulsar_client_consume' instead.\n\n" + + "Usage Examples:\n\n" + + "1. Basic consumption - Get 10 earliest messages from a topic:\n" + + " topic: \"my-topic\"\n" + + " max-messages: 10\n\n" + + "2. Explicit start offset - Read up to 20 messages from the beginning of the topic:\n" + + " topic: \"my-topic\"\n" + + " offset: \"atstart\"\n" + + " max-messages: 20\n\n" + + "3. Consumer group - Join an existing consumer group and resume from committed offset:\n" + + " topic: \"my-topic\"\n" + + " group: \"my-consumer-group\"\n" + + " offset: \"atcommitted\"\n\n" + + "4. Time-limited consumption - Set a longer timeout for slow topics:\n" + + " topic: \"my-topic\"\n" + + " max-messages: 100\n" + + " timeout: 30\n\n" + + "This tool requires Kafka consumer permissions on the specified topic." + + return mcp.NewTool("kafka_client_consume", + mcp.WithDescription(toolDesc), + mcp.WithString("topic", mcp.Required(), + mcp.Description("The name of the Kafka topic to consume messages from. "+ + "Must be an existing topic that the user has read permissions for. "+ + "For partitioned topics, this will consume from all partitions unless a specific partition is specified."), + ), + mcp.WithString("group", + mcp.Description("The consumer group ID to use for consumption. "+ + "Optional. If provided, the consumer will join this consumer group and track offsets with Kafka. "+ + "If not provided, a random group ID will be generated, and offsets won't be committed back to Kafka. "+ + "Using a meaningful group ID is important when you want to resume consumption later or coordinate multiple consumers."), + ), + mcp.WithString("offset", + mcp.Description("The offset position to start consuming from. "+ + "Optional. Must be one of these values:\n"+ + "- 'atstart': Begin from the earliest available message in the topic/partition\n"+ + "- 'atend': Begin from the next message that arrives after the consumer starts\n"+ + "- 'atcommitted': Begin from the last committed offset (only works with specified 'group')\n"+ + "Default: 'atstart'"), + ), + mcp.WithNumber("max-messages", + mcp.Description("Maximum number of messages to consume in this request. "+ + "Optional. Limits the total number of messages returned, across all partitions if no specific partition is specified. "+ + "Higher values retrieve more data but may increase response time and size. "+ + "Default: 10"), + ), + mcp.WithNumber("timeout", + mcp.Description("Maximum time in seconds to wait for messages. "+ + "Optional. The consumer will wait up to this long to collect the requested number of messages. 
"+ + "If fewer than 'max-messages' are available within this time, the available messages are returned. "+ + "Longer timeouts are useful for low-volume topics or when consuming with 'atend'. "+ + "Default: 10 seconds"), + ), + ) +} + +// buildKafkaConsumeHandler builds the Kafka consume handler function. +// Migrated from the original handler logic. +func (b *KafkaConsumeLegacyToolBuilder) buildKafkaConsumeHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + opts := []kgo.Opt{} + // Get required parameters + topicName, err := request.RequireString("topic") + if err != nil { + return b.handleError("get topic name", err), nil + } + opts = append(opts, kgo.ConsumeTopics(topicName)) + + opts = append(opts, kgo.FetchIsolationLevel(kgo.ReadUncommitted())) + opts = append(opts, kgo.KeepRetryableFetchErrors()) + if b.logger != nil { + w := b.logger.Writer() + opts = append(opts, kgo.WithLogger(kgo.BasicLogger(w, kgo.LogLevelInfo, nil))) + } + maxMessages := request.GetFloat("max-messages", 10) + + timeoutSec := request.GetFloat("timeout", 10) + + group := request.GetString("group", "") + if group != "" { + opts = append(opts, kgo.ConsumerGroup(group)) + } + + offsetStr := request.GetString("offset", "atstart") + + var offset kgo.Offset + switch offsetStr { + case "atstart": + offset = kgo.NewOffset().AtStart() + case "atend": + offset = kgo.NewOffset().AtEnd() + case "atcommitted": + offset = kgo.NewOffset().AtCommitted() + default: + offset = kgo.NewOffset().AtStart() + } + opts = append(opts, kgo.ConsumeResetOffset(offset)) + if b.logger != nil { + b.logger.Infof("Consuming from topic: %s, group: %s, max-messages: %d, timeout: %d", topicName, group, int(maxMessages), int(timeoutSec)) + } + + // Get Kafka session from context + session := mcpCtx.GetKafkaSession(ctx) + if session == nil { + return b.handleError("get Kafka session not found in context", nil), nil + } + + // Create Kafka client using the session + kafkaClient, err := session.GetClient(opts...) 
+ if err != nil { + return b.handleError("create Kafka client", err), nil + } + defer kafkaClient.Close() + + srClient, err := session.GetSchemaRegistryClient() + schemaReady := false + var serde sr.Serde + if err == nil && srClient != nil { + schemaReady = true + } + + // Set timeout + timeoutCtx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSec)*time.Second) + defer cancel() + + if err = kafkaClient.Ping(timeoutCtx); err != nil { // check connectivity to cluster + return b.handleError("ping Kafka cluster", err), nil + } + + if schemaReady { + subjSchema, err := srClient.SchemaByVersion(timeoutCtx, topicName+"-value", -1) + if err != nil { + return b.handleError("get schema", err), nil + } + if b.logger != nil { + b.logger.Infof("Schema ID: %d", subjSchema.ID) + } + switch subjSchema.Type { + case sr.TypeAvro: + avroSchema, err := avro.Parse(subjSchema.Schema.Schema) + if err != nil { + return b.handleError("parse avro schema", err), nil + } + serde.Register( + subjSchema.ID, + map[string]any{}, + sr.EncodeFn(func(v any) ([]byte, error) { + return avro.Marshal(avroSchema, v) + }), + sr.DecodeFn(func(data []byte, v any) error { + return avro.Unmarshal(avroSchema, data, v) + }), + ) + case sr.TypeJSON: + serde.Register( + subjSchema.ID, + map[string]any{}, + sr.EncodeFn(json.Marshal), + sr.DecodeFn(json.Unmarshal), + ) + case sr.TypeProtobuf: + default: + // TODO: support other schema types + if b.logger != nil { + b.logger.Infof("Unsupported schema type: %s", subjSchema.Type) + } + schemaReady = false + } + } + + results := make([]any, 0) + consumerLoop: + for { + fetches := kafkaClient.PollRecords(timeoutCtx, int(maxMessages)) + iter := fetches.RecordIter() + if b.logger != nil { + b.logger.Infof("NumRecords: %d\n", fetches.NumRecords()) + } + + for _, fetchErr := range fetches.Errors() { + if b.logger != nil { + b.logger.Infof("error consuming from topic: topic=%s, partition=%d, err=%v\n", + fetchErr.Topic, fetchErr.Partition, fetchErr.Err) + } + break consumerLoop + } + + for !iter.Done() { + record := iter.Next() + if schemaReady { + var value map[string]any + err := serde.Decode(record.Value, &value) + if err != nil { + if b.logger != nil { + b.logger.Infof("Failed to decode value: %v", err) + } + results = append(results, record.Value) + } else { + results = append(results, value) + } + } else { + results = append(results, record.Value) + } + if len(results) >= int(maxMessages) { + break consumerLoop + } + } + } + + err = kafkaClient.CommitUncommittedOffsets(timeoutCtx) + if err != nil { + if err != context.Canceled && b.logger != nil { + b.logger.Infof("Failed to commit offsets: %v", err) + } + } + + return b.marshalResponse(results) + } +} + +// Unified error handling and utility functions + +// handleError provides unified error handling. +func (b *KafkaConsumeLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses. 
+func (b *KafkaConsumeLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} diff --git a/pkg/mcp/builders/kafka/consume_test.go b/pkg/mcp/builders/kafka/consume_test.go index f6e4a3e..18323b1 100644 --- a/pkg/mcp/builders/kafka/consume_test.go +++ b/pkg/mcp/builders/kafka/consume_test.go @@ -18,6 +18,7 @@ import ( "context" "testing" + "github.com/google/jsonschema-go/jsonschema" "github.com/sirupsen/logrus" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" "github.com/stretchr/testify/assert" @@ -46,8 +47,8 @@ func TestKafkaConsumeToolBuilder(t *testing.T) { tools, err := builder.BuildTools(context.Background(), config) require.NoError(t, err) assert.Len(t, tools, 1) - assert.Equal(t, "kafka_client_consume", tools[0].Tool.Name) - assert.NotNil(t, tools[0].Handler) + assert.Equal(t, "kafka_client_consume", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) }) t.Run("BuildTools_WithLogger", func(t *testing.T) { @@ -95,3 +96,30 @@ func TestKafkaConsumeToolBuilder(t *testing.T) { assert.Error(t, err) }) } + +func TestKafkaConsumeToolSchema(t *testing.T) { + builder := NewKafkaConsumeToolBuilder() + tool, err := builder.buildKafkaConsumeTool() + require.NoError(t, err) + assert.Equal(t, "kafka_client_consume", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"topic"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "topic", + "group", + "offset", + "max-messages", + "timeout", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + topicSchema := schema.Properties["topic"] + require.NotNil(t, topicSchema) + assert.Equal(t, kafkaConsumeTopicDesc, topicSchema.Description) +} diff --git a/pkg/mcp/builders/kafka/groups.go b/pkg/mcp/builders/kafka/groups.go index ba4c80c..9ac52d9 100644 --- a/pkg/mcp/builders/kafka/groups.go +++ b/pkg/mcp/builders/kafka/groups.go @@ -20,13 +20,46 @@ import ( "fmt" "strings" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" "github.com/twmb/franz-go/pkg/kadm" ) +type kafkaGroupsInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Group *string `json:"group,omitempty"` + Members *string `json:"members,omitempty"` + Topic *string `json:"topic,omitempty"` + Partition *int `json:"partition,omitempty"` + Offset *int64 `json:"offset,omitempty"` +} + +const ( + kafkaGroupsResourceDesc = "Resource to operate on. Available resources:\n" + + "- group: A single Kafka Consumer Group for operations on individual groups (describe, remove-members, set-offset, delete-offset)\n" + + "- groups: Collection of Kafka Consumer Groups for bulk operations (list)" + kafkaGroupsOperationDesc = "Operation to perform. 
Available operations:\n" + + "- list: List all Kafka Consumer Groups in the cluster\n" + + "- describe: Get detailed information about a specific Consumer Group, including members, offsets, and lag\n" + + "- remove-members: Remove specific members from a Consumer Group to force rebalancing or troubleshoot issues\n" + + "- offsets: Get offsets for a specific consumer group\n" + + "- delete-offset: Delete a specific offset for a consumer group of a topic\n" + + "- set-offset: Set a specific offset for a consumer group's topic-partition" + kafkaGroupsGroupDesc = "The name of the Kafka Consumer Group to operate on. " + + "Required for the 'describe' and 'remove-members' operations. " + + "Must be an existing consumer group name in the Kafka cluster. " + + "Consumer Group names are case-sensitive and typically follow a naming convention like 'application-name'." + kafkaGroupsMembersDesc = "Comma-separated list of consumer instance IDs to remove from the group. " + + "Required for the 'remove-members' operation. " + + "Consumer instance IDs can be found using the 'describe' operation." + kafkaGroupsTopicDesc = "The topic name. Required for 'delete-offset' and 'set-offset' operations." + kafkaGroupsPartitionDesc = "The partition number. Required for 'set-offset' operation." + kafkaGroupsOffsetDesc = "The offset value to set. Required for 'set-offset' operation." +) + // KafkaGroupsToolBuilder implements the ToolBuilder interface for Kafka Consumer Groups // /nolint:revive type KafkaGroupsToolBuilder struct { @@ -55,7 +88,7 @@ func NewKafkaGroupsToolBuilder() *KafkaGroupsToolBuilder { } // BuildTools builds the Kafka Groups tool list -func (b *KafkaGroupsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *KafkaGroupsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -67,11 +100,14 @@ func (b *KafkaGroupsToolBuilder) BuildTools(_ context.Context, config builders.T } // Build tools - tool := b.buildKafkaGroupsTool() + tool, err := b.buildKafkaGroupsTool() + if err != nil { + return nil, err + } handler := b.buildKafkaGroupsHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[kafkaGroupsInput, any]{ Tool: tool, Handler: handler, }, @@ -79,18 +115,11 @@ func (b *KafkaGroupsToolBuilder) BuildTools(_ context.Context, config builders.T } // buildKafkaGroupsTool builds the Kafka Groups MCP tool definition -func (b *KafkaGroupsToolBuilder) buildKafkaGroupsTool() mcp.Tool { - resourceDesc := "Resource to operate on. Available resources:\n" + - "- group: A single Kafka Consumer Group for operations on individual groups (describe, remove-members, set-offset, delete-offset)\n" + - "- groups: Collection of Kafka Consumer Groups for bulk operations (list)" - - operationDesc := "Operation to perform. 
Available operations:\n" + - "- list: List all Kafka Consumer Groups in the cluster\n" + - "- describe: Get detailed information about a specific Consumer Group, including members, offsets, and lag\n" + - "- remove-members: Remove specific members from a Consumer Group to force rebalancing or troubleshoot issues\n" + - "- offsets: Get offsets for a specific consumer group\n" + - "- delete-offset: Delete a specific offset for a consumer group of a topic\n" + - "- set-offset: Set a specific offset for a consumer group's topic-partition" +func (b *KafkaGroupsToolBuilder) buildKafkaGroupsTool() (*sdk.Tool, error) { + inputSchema, err := buildKafkaGroupsInputSchema() + if err != nil { + return nil, err + } toolDesc := "Unified tool for managing Apache Kafka Consumer Groups.\n" + "This tool provides access to Kafka consumer group operations including listing, describing, and managing group membership.\n" + @@ -130,63 +159,33 @@ func (b *KafkaGroupsToolBuilder) buildKafkaGroupsTool() mcp.Tool { " offset: 1000\n\n" + "This tool requires Kafka super-user permissions." - return mcp.NewTool("kafka_admin_groups", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("group", - mcp.Description("The name of the Kafka Consumer Group to operate on. "+ - "Required for the 'describe' and 'remove-members' operations. "+ - "Must be an existing consumer group name in the Kafka cluster. "+ - "Consumer Group names are case-sensitive and typically follow a naming convention like 'application-name'.")), - mcp.WithString("members", - mcp.Description("Comma-separated list of consumer instance IDs to remove from the group. "+ - "Required for the 'remove-members' operation. "+ - "Consumer instance IDs can be found using the 'describe' operation.")), - mcp.WithString("topic", - mcp.Description("The topic name. Required for 'delete-offset' and 'set-offset' operations.")), - mcp.WithNumber("partition", - mcp.Description("The partition number. Required for 'set-offset' operation.")), - mcp.WithNumber("offset", - mcp.Description("The offset value to set. 
Required for 'set-offset' operation.")), - ) + return &sdk.Tool{ + Name: "kafka_admin_groups", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildKafkaGroupsHandler builds the Kafka Groups handler function -func (b *KafkaGroupsToolBuilder) buildKafkaGroupsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return b.handleError("get resource", err), nil - } - - operation, err := request.RequireString("operation") - if err != nil { - return b.handleError("get operation", err), nil - } - +func (b *KafkaGroupsToolBuilder) buildKafkaGroupsHandler(readOnly bool) builders.ToolHandlerFunc[kafkaGroupsInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input kafkaGroupsInput) (*sdk.CallToolResult, any, error) { // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) + resource := strings.ToLower(input.Resource) + operation := strings.ToLower(input.Operation) // Validate write operations in read-only mode if readOnly && (operation == "remove-members" || operation == "delete-offset" || operation == "set-offset") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Kafka admin client session := mcpCtx.GetKafkaSession(ctx) if session == nil { - return b.handleError("get Kafka session not found in context", nil), nil + return nil, nil, b.handleError("get Kafka session not found in context", nil) } admin, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } // Dispatch based on resource and operation @@ -194,27 +193,33 @@ func (b *KafkaGroupsToolBuilder) buildKafkaGroupsHandler(readOnly bool) func(con case "groups": switch operation { case "list": - return b.handleKafkaGroupsList(ctx, admin, request) + result, err := b.handleKafkaGroupsList(ctx, admin) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'groups': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'groups': %s", operation) } case "group": switch operation { case "describe": - return b.handleKafkaGroupDescribe(ctx, admin, request) + result, err := b.handleKafkaGroupDescribe(ctx, admin, input) + return result, nil, err case "remove-members": - return b.handleKafkaGroupRemoveMembers(ctx, admin, request) + result, err := b.handleKafkaGroupRemoveMembers(ctx, admin, input) + return result, nil, err case "offsets": - return b.handleKafkaGroupOffsets(ctx, admin, request) + result, err := b.handleKafkaGroupOffsets(ctx, admin, input) + return result, nil, err case "delete-offset": - return b.handleKafkaGroupDeleteOffset(ctx, admin, request) + result, err := b.handleKafkaGroupDeleteOffset(ctx, admin, input) + return result, nil, err case "set-offset": - return b.handleKafkaGroupSetOffset(ctx, admin, request) + result, err := b.handleKafkaGroupSetOffset(ctx, admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'group': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'group': %s", operation) } 
default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: groups, group", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. available resources: groups, group", resource) } } } @@ -222,54 +227,63 @@ func (b *KafkaGroupsToolBuilder) buildKafkaGroupsHandler(readOnly bool) func(con // Utility functions // handleError provides unified error handling -func (b *KafkaGroupsToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *KafkaGroupsToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *KafkaGroupsToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *KafkaGroupsToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil +} + +func requireInt64(value *int64, key string) (int64, error) { + if value == nil { + return 0, fmt.Errorf("required argument %q not found", key) + } + return *value, nil } // Specific operation handler functions // handleKafkaGroupsList handles listing all consumer groups -func (b *KafkaGroupsToolBuilder) handleKafkaGroupsList(ctx context.Context, admin *kadm.Client, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *KafkaGroupsToolBuilder) handleKafkaGroupsList(ctx context.Context, admin *kadm.Client) (*sdk.CallToolResult, error) { groups, err := admin.ListGroups(ctx) if err != nil { - return b.handleError("list Kafka consumer groups", err), nil + return nil, b.handleError("list Kafka consumer groups", err) } return b.marshalResponse(groups) } // handleKafkaGroupDescribe handles describing a specific consumer group -func (b *KafkaGroupsToolBuilder) handleKafkaGroupDescribe(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - groupName, err := request.RequireString("group") +func (b *KafkaGroupsToolBuilder) handleKafkaGroupDescribe(ctx context.Context, admin *kadm.Client, input kafkaGroupsInput) (*sdk.CallToolResult, error) { + groupName, err := requireString(input.Group, "group") if err != nil { - return b.handleError("get group name", err), nil + return nil, b.handleError("get group name", err) } groups, err := admin.DescribeGroups(ctx, groupName) if err != nil { - return b.handleError("describe Kafka consumer group", err), nil + return nil, b.handleError("describe Kafka consumer group", err) } return b.marshalResponse(groups) } // handleKafkaGroupRemoveMembers handles removing members from a consumer group -func (b *KafkaGroupsToolBuilder) handleKafkaGroupRemoveMembers(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - groupName, err := request.RequireString("group") +func (b *KafkaGroupsToolBuilder) handleKafkaGroupRemoveMembers(ctx context.Context, admin *kadm.Client, input kafkaGroupsInput) (*sdk.CallToolResult, error) { + groupName, err := requireString(input.Group, "group") if err != nil { - return b.handleError("get group name", err), nil + return nil, b.handleError("get 
group name", err) } - membersStr, err := request.RequireString("members") + membersStr, err := requireString(input.Members, "members") if err != nil { - return b.handleError("get members", err), nil + return nil, b.handleError("get members", err) } memberIDs := strings.Split(membersStr, ",") @@ -280,36 +294,36 @@ func (b *KafkaGroupsToolBuilder) handleKafkaGroupRemoveMembers(ctx context.Conte builder := kadm.LeaveGroup(groupName).InstanceIDs(memberIDs...) response, err := admin.LeaveGroup(ctx, builder) if err != nil { - return b.handleError("remove members from Kafka consumer group", err), nil + return nil, b.handleError("remove members from Kafka consumer group", err) } return b.marshalResponse(response) } // handleKafkaGroupOffsets handles getting offsets for a consumer group -func (b *KafkaGroupsToolBuilder) handleKafkaGroupOffsets(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - groupName, err := request.RequireString("group") +func (b *KafkaGroupsToolBuilder) handleKafkaGroupOffsets(ctx context.Context, admin *kadm.Client, input kafkaGroupsInput) (*sdk.CallToolResult, error) { + groupName, err := requireString(input.Group, "group") if err != nil { - return b.handleError("get group name", err), nil + return nil, b.handleError("get group name", err) } response, err := admin.FetchOffsets(ctx, groupName) if err != nil { - return b.handleError("get offsets for Kafka consumer group", err), nil + return nil, b.handleError("get offsets for Kafka consumer group", err) } return b.marshalResponse(response) } // handleKafkaGroupDeleteOffset handles deleting a specific offset for a consumer group -func (b *KafkaGroupsToolBuilder) handleKafkaGroupDeleteOffset(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - groupName, err := request.RequireString("group") +func (b *KafkaGroupsToolBuilder) handleKafkaGroupDeleteOffset(ctx context.Context, admin *kadm.Client, input kafkaGroupsInput) (*sdk.CallToolResult, error) { + groupName, err := requireString(input.Group, "group") if err != nil { - return b.handleError("get group name", err), nil + return nil, b.handleError("get group name", err) } - topicName, err := request.RequireString("topic") + topicName, err := requireString(input.Topic, "topic") if err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } // Create a TopicsSet with the specified topic @@ -320,55 +334,79 @@ func (b *KafkaGroupsToolBuilder) handleKafkaGroupDeleteOffset(ctx context.Contex // Call DeleteOffsets to delete the offsets for the specified topic responses, err := admin.DeleteOffsets(ctx, groupName, topicsSet) if err != nil { - return b.handleError("delete offset for Kafka consumer group", err), nil + return nil, b.handleError("delete offset for Kafka consumer group", err) } // Check for errors in the response if err := responses.Error(); err != nil { - return b.handleError("delete offset for Kafka consumer group", err), nil + return nil, b.handleError("delete offset for Kafka consumer group", err) } return b.marshalResponse(responses) } // handleKafkaGroupSetOffset handles setting a specific offset for a consumer group -func (b *KafkaGroupsToolBuilder) handleKafkaGroupSetOffset(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - groupName, err := request.RequireString("group") +func (b *KafkaGroupsToolBuilder) handleKafkaGroupSetOffset(ctx context.Context, admin 
*kadm.Client, input kafkaGroupsInput) (*sdk.CallToolResult, error) { + groupName, err := requireString(input.Group, "group") if err != nil { - return b.handleError("get group name", err), nil + return nil, b.handleError("get group name", err) } - topicName, err := request.RequireString("topic") + topicName, err := requireString(input.Topic, "topic") if err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } - partitionNum, err := request.RequireFloat("partition") + partitionNum, err := requireInt(input.Partition, "partition") if err != nil { - return b.handleError("get partition number", err), nil + return nil, b.handleError("get partition number", err) } + //nolint:gosec partitionInt := int32(partitionNum) - offsetNum, err := request.RequireFloat("offset") + offsetNum, err := requireInt64(input.Offset, "offset") if err != nil { - return b.handleError("get offset", err), nil + return nil, b.handleError("get offset", err) } - offsetInt := int64(offsetNum) // Create Offsets object with the specified topic, partition, and offset offsets := make(kadm.Offsets) - offsets.AddOffset(topicName, partitionInt, offsetInt, -1) // Using -1 for leaderEpoch as it's optional + offsets.AddOffset(topicName, partitionInt, offsetNum, -1) // Using -1 for leaderEpoch as it's optional // Commit the offsets responses, err := admin.CommitOffsets(ctx, groupName, offsets) if err != nil { - return b.handleError("set offset for Kafka consumer group", err), nil + return nil, b.handleError("set offset for Kafka consumer group", err) } // Check for errors in the response if err := responses.Error(); err != nil { - return b.handleError("set offset for Kafka consumer group", err), nil + return nil, b.handleError("set offset for Kafka consumer group", err) } return b.marshalResponse(responses) } + +func buildKafkaGroupsInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[kafkaGroupsInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "resource", kafkaGroupsResourceDesc) + setSchemaDescription(schema, "operation", kafkaGroupsOperationDesc) + setSchemaDescription(schema, "group", kafkaGroupsGroupDesc) + setSchemaDescription(schema, "members", kafkaGroupsMembersDesc) + setSchemaDescription(schema, "topic", kafkaGroupsTopicDesc) + setSchemaDescription(schema, "partition", kafkaGroupsPartitionDesc) + setSchemaDescription(schema, "offset", kafkaGroupsOffsetDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/kafka/groups_legacy.go b/pkg/mcp/builders/kafka/groups_legacy.go new file mode 100644 index 0000000..0207956 --- /dev/null +++ b/pkg/mcp/builders/kafka/groups_legacy.go @@ -0,0 +1,373 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" + "github.com/twmb/franz-go/pkg/kadm" +) + +// KafkaGroupsLegacyToolBuilder implements the ToolBuilder interface for Kafka Consumer Groups. +type KafkaGroupsLegacyToolBuilder struct { //nolint:revive + *builders.BaseToolBuilder +} + +// NewKafkaGroupsLegacyToolBuilder creates a new legacy Kafka Groups tool builder instance. +func NewKafkaGroupsLegacyToolBuilder() *KafkaGroupsLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "kafka_groups", + Version: "1.0.0", + Description: "Kafka Consumer Groups administration tools", + Category: "kafka_admin", + Tags: []string{"kafka", "groups", "admin", "consumer"}, + } + + features := []string{ + "kafka-admin", + "all", + "all-kafka", + } + + return &KafkaGroupsLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Kafka Groups tool list for the legacy server. +func (b *KafkaGroupsLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildKafkaGroupsTool() + handler := b.buildKafkaGroupsHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildKafkaGroupsTool builds the Kafka Groups MCP tool definition. +func (b *KafkaGroupsLegacyToolBuilder) buildKafkaGroupsTool() mcp.Tool { + resourceDesc := "Resource to operate on. Available resources:\n" + + "- group: A single Kafka Consumer Group for operations on individual groups (describe, remove-members, set-offset, delete-offset)\n" + + "- groups: Collection of Kafka Consumer Groups for bulk operations (list)" + + operationDesc := "Operation to perform. Available operations:\n" + + "- list: List all Kafka Consumer Groups in the cluster\n" + + "- describe: Get detailed information about a specific Consumer Group, including members, offsets, and lag\n" + + "- remove-members: Remove specific members from a Consumer Group to force rebalancing or troubleshoot issues\n" + + "- offsets: Get offsets for a specific consumer group\n" + + "- delete-offset: Delete a specific offset for a consumer group of a topic\n" + + "- set-offset: Set a specific offset for a consumer group's topic-partition" + + toolDesc := "Unified tool for managing Apache Kafka Consumer Groups.\n" + + "This tool provides access to Kafka consumer group operations including listing, describing, and managing group membership.\n" + + "Kafka Consumer Groups are a key concept for scalable consumption of Kafka topics. A consumer group consists of multiple consumer instances\n" + + "that collaborate to consume data from topic partitions. 
Kafka ensures that:\n" + + "- Each partition is consumed by exactly one consumer in the group\n" + + "- When consumers join or leave, Kafka triggers a 'rebalance' to redistribute partitions\n" + + "- Consumer groups track consumption progress through committed offsets\n\n" + + "Usage Examples:\n\n" + + "1. List all Kafka Consumer Groups in the cluster:\n" + + " resource: \"groups\"\n" + + " operation: \"list\"\n\n" + + "2. Describe a specific Kafka Consumer Group to see its members and consumption details:\n" + + " resource: \"group\"\n" + + " operation: \"describe\"\n" + + " group: \"my-consumer-group\"\n\n" + + "3. Remove specific members from a Kafka Consumer Group to trigger rebalancing:\n" + + " resource: \"group\"\n" + + " operation: \"remove-members\"\n" + + " group: \"my-consumer-group\"\n" + + " members: \"consumer-instance-1,consumer-instance-2\"\n\n" + + "4. Get offsets for a specific consumer group:\n" + + " resource: \"group\"\n" + + " operation: \"offsets\"\n" + + " group: \"my-consumer-group\"\n\n" + + "5. Delete a specific offset for a consumer group of a topic:\n" + + " resource: \"group\"\n" + + " operation: \"delete-offset\"\n" + + " group: \"my-consumer-group\"\n" + + " topic: \"my-topic\"\n\n" + + "6. Set a specific offset for a consumer group's topic-partition:\n" + + " resource: \"group\"\n" + + " operation: \"set-offset\"\n" + + " group: \"my-consumer-group\"\n" + + " topic: \"my-topic\"\n" + + " partition: 0\n" + + " offset: 1000\n\n" + + "This tool requires Kafka super-user permissions." + + return mcp.NewTool("kafka_admin_groups", + mcp.WithDescription(toolDesc), + mcp.WithString("resource", mcp.Required(), + mcp.Description(resourceDesc), + ), + mcp.WithString("operation", mcp.Required(), + mcp.Description(operationDesc), + ), + mcp.WithString("group", + mcp.Description("The name of the Kafka Consumer Group to operate on. "+ + "Required for the 'describe' and 'remove-members' operations. "+ + "Must be an existing consumer group name in the Kafka cluster. "+ + "Consumer Group names are case-sensitive and typically follow a naming convention like 'application-name'.")), + mcp.WithString("members", + mcp.Description("Comma-separated list of consumer instance IDs to remove from the group. "+ + "Required for the 'remove-members' operation. "+ + "Consumer instance IDs can be found using the 'describe' operation.")), + mcp.WithString("topic", + mcp.Description("The topic name. Required for 'delete-offset' and 'set-offset' operations.")), + mcp.WithNumber("partition", + mcp.Description("The partition number. Required for 'set-offset' operation.")), + mcp.WithNumber("offset", + mcp.Description("The offset value to set. Required for 'set-offset' operation.")), + ) +} + +// buildKafkaGroupsHandler builds the Kafka Groups handler function. 
+func (b *KafkaGroupsLegacyToolBuilder) buildKafkaGroupsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + resource, err := request.RequireString("resource") + if err != nil { + return b.handleError("get resource", err), nil + } + + operation, err := request.RequireString("operation") + if err != nil { + return b.handleError("get operation", err), nil + } + + // Normalize parameters + resource = strings.ToLower(resource) + operation = strings.ToLower(operation) + + // Validate write operations in read-only mode + if readOnly && (operation == "remove-members" || operation == "delete-offset" || operation == "set-offset") { + return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + } + + // Get Kafka admin client + session := mcpCtx.GetKafkaSession(ctx) + if session == nil { + return b.handleError("get Kafka session not found in context", nil), nil + } + admin, err := session.GetAdminClient() + if err != nil { + return b.handleError("get admin client", err), nil + } + + // Dispatch based on resource and operation + switch resource { + case "groups": + switch operation { + case "list": + return b.handleKafkaGroupsList(ctx, admin, request) + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'groups': %s", operation)), nil + } + case "group": + switch operation { + case "describe": + return b.handleKafkaGroupDescribe(ctx, admin, request) + case "remove-members": + return b.handleKafkaGroupRemoveMembers(ctx, admin, request) + case "offsets": + return b.handleKafkaGroupOffsets(ctx, admin, request) + case "delete-offset": + return b.handleKafkaGroupDeleteOffset(ctx, admin, request) + case "set-offset": + return b.handleKafkaGroupSetOffset(ctx, admin, request) + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'group': %s", operation)), nil + } + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: groups, group", resource)), nil + } + } +} + +// Utility functions + +// handleError provides unified error handling. +func (b *KafkaGroupsLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses. +func (b *KafkaGroupsLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} + +// Specific operation handler functions + +// handleKafkaGroupsList handles listing all consumer groups. +func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupsList(ctx context.Context, admin *kadm.Client, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groups, err := admin.ListGroups(ctx) + if err != nil { + return b.handleError("list Kafka consumer groups", err), nil + } + return b.marshalResponse(groups) +} + +// handleKafkaGroupDescribe handles describing a specific consumer group. 
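+// It requires the 'group' argument and returns the kadm DescribeGroups
+// result for that group serialized as JSON.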
+func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupDescribe(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groupName, err := request.RequireString("group") + if err != nil { + return b.handleError("get group name", err), nil + } + + groups, err := admin.DescribeGroups(ctx, groupName) + if err != nil { + return b.handleError("describe Kafka consumer group", err), nil + } + return b.marshalResponse(groups) +} + +// handleKafkaGroupRemoveMembers handles removing members from a consumer group. +func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupRemoveMembers(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groupName, err := request.RequireString("group") + if err != nil { + return b.handleError("get group name", err), nil + } + + membersStr, err := request.RequireString("members") + if err != nil { + return b.handleError("get members", err), nil + } + + memberIDs := strings.Split(membersStr, ",") + for i, member := range memberIDs { + memberIDs[i] = strings.TrimSpace(member) + } + + builder := kadm.LeaveGroup(groupName).InstanceIDs(memberIDs...) + response, err := admin.LeaveGroup(ctx, builder) + if err != nil { + return b.handleError("remove members from Kafka consumer group", err), nil + } + + return b.marshalResponse(response) +} + +// handleKafkaGroupOffsets handles getting offsets for a consumer group. +func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupOffsets(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groupName, err := request.RequireString("group") + if err != nil { + return b.handleError("get group name", err), nil + } + + response, err := admin.FetchOffsets(ctx, groupName) + if err != nil { + return b.handleError("get offsets for Kafka consumer group", err), nil + } + return b.marshalResponse(response) +} + +// handleKafkaGroupDeleteOffset handles deleting a specific offset for a consumer group. +func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupDeleteOffset(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groupName, err := request.RequireString("group") + if err != nil { + return b.handleError("get group name", err), nil + } + + topicName, err := request.RequireString("topic") + if err != nil { + return b.handleError("get topic name", err), nil + } + + // Create a TopicsSet with the specified topic + // This will target all partitions for the given topic + topicsSet := make(kadm.TopicsSet) + topicsSet.Add(topicName) + + // Call DeleteOffsets to delete the offsets for the specified topic + responses, err := admin.DeleteOffsets(ctx, groupName, topicsSet) + if err != nil { + return b.handleError("delete offset for Kafka consumer group", err), nil + } + + // Check for errors in the response + if err := responses.Error(); err != nil { + return b.handleError("delete offset for Kafka consumer group", err), nil + } + + return b.marshalResponse(responses) +} + +// handleKafkaGroupSetOffset handles setting a specific offset for a consumer group. 
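+// It requires the 'group', 'topic', 'partition', and 'offset' arguments,
+// builds a kadm.Offsets entry for the single topic-partition (leader epoch
+// -1), commits it via CommitOffsets, and checks the per-partition errors in
+// the response.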
+func (b *KafkaGroupsLegacyToolBuilder) handleKafkaGroupSetOffset(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + groupName, err := request.RequireString("group") + if err != nil { + return b.handleError("get group name", err), nil + } + + topicName, err := request.RequireString("topic") + if err != nil { + return b.handleError("get topic name", err), nil + } + + partitionNum, err := request.RequireFloat("partition") + if err != nil { + return b.handleError("get partition number", err), nil + } + partitionInt := int32(partitionNum) + + offsetNum, err := request.RequireFloat("offset") + if err != nil { + return b.handleError("get offset", err), nil + } + offsetInt := int64(offsetNum) + + // Create Offsets object with the specified topic, partition, and offset + offsets := make(kadm.Offsets) + offsets.AddOffset(topicName, partitionInt, offsetInt, -1) // Using -1 for leaderEpoch as it's optional + + // Commit the offsets + responses, err := admin.CommitOffsets(ctx, groupName, offsets) + if err != nil { + return b.handleError("set offset for Kafka consumer group", err), nil + } + + // Check for errors in the response + if err := responses.Error(); err != nil { + return b.handleError("set offset for Kafka consumer group", err), nil + } + + return b.marshalResponse(responses) +} diff --git a/pkg/mcp/builders/kafka/groups_test.go b/pkg/mcp/builders/kafka/groups_test.go new file mode 100644 index 0000000..9ceef8c --- /dev/null +++ b/pkg/mcp/builders/kafka/groups_test.go @@ -0,0 +1,139 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafka + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKafkaGroupsToolBuilder(t *testing.T) { + builder := NewKafkaGroupsToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "kafka_groups", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "kafka-admin") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"kafka-admin"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_groups", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"kafka-admin"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_groups", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"kafka-admin"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestKafkaGroupsToolSchema(t *testing.T) { + builder := NewKafkaGroupsToolBuilder() + tool, err := builder.buildKafkaGroupsTool() + require.NoError(t, err) + assert.Equal(t, "kafka_admin_groups", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "group", + "members", + "topic", + "partition", + "offset", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, kafkaGroupsResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, kafkaGroupsOperationDesc, operationSchema.Description) +} + +func TestKafkaGroupsToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewKafkaGroupsToolBuilder() + handler := builder.buildKafkaGroupsHandler(true) + + _, _, err := handler(context.Background(), nil, kafkaGroupsInput{ + Resource: "group", + Operation: "remove-members", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/kafka/produce.go b/pkg/mcp/builders/kafka/produce.go index 0c7596c..6b97d70 100644 --- a/pkg/mcp/builders/kafka/produce.go +++ 
b/pkg/mcp/builders/kafka/produce.go @@ -21,15 +21,59 @@ import ( "strings" "time" + "github.com/google/jsonschema-go/jsonschema" "github.com/hamba/avro/v2" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" "github.com/twmb/franz-go/pkg/kgo" "github.com/twmb/franz-go/pkg/sr" ) +type kafkaProduceMessageInput struct { + Key *string `json:"key,omitempty"` + Value string `json:"value"` + Headers []string `json:"headers,omitempty"` + Partition *int `json:"partition,omitempty"` +} + +type kafkaProduceInput struct { + Topic string `json:"topic"` + Key *string `json:"key,omitempty"` + Value string `json:"value"` + Headers []string `json:"headers,omitempty"` + Partition *int `json:"partition,omitempty"` + Messages []kafkaProduceMessageInput `json:"messages,omitempty"` + Sync *bool `json:"sync,omitempty"` +} + +const ( + kafkaProduceTopicDesc = "The name of the Kafka topic to produce messages to. " + + "Must be an existing topic that the user has write permissions for." + kafkaProduceKeyDesc = "The key for the message. " + + "Optional. Keys are used for partition assignment and maintaining order for related messages. " + + "Messages with the same key will be sent to the same partition." + kafkaProduceValueDesc = "The value/content of the message to send. " + + "This is the actual payload that will be delivered to consumers. It can be a JSON string, and the system will automatically serialize it to the appropriate format based on the schema registry if it is available." + kafkaProduceHeadersDesc = "Message headers in the format of [\"key=value\"]. " + + "Optional. Headers allow you to attach metadata to messages without modifying the payload. " + + "They are passed along with the message to consumers." + kafkaProducePartitionDesc = "The specific partition to send the message to. " + + "Optional. If not specified, Kafka will automatically assign a partition based on the message key (if provided) or round-robin assignment. " + + "Specifying a partition can be useful for testing or when you need guaranteed partition assignment." + kafkaProduceMessagesDesc = "An array of messages to send in batch. " + + "Optional. Alternative to the single message parameters (key, value, headers, partition). " + + "Each message object can contain 'key', 'value', 'headers', and 'partition' properties. " + + "Batch sending is more efficient for multiple messages." + kafkaProduceSyncDesc = "Whether to wait for server acknowledgment before returning. " + + "Optional. Default is true. When true, ensures the message was successfully written to the topic before the tool returns a success response." 
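+
+	// Taken together, the top-level field descriptions above correspond to an
+	// input payload such as (mirroring the usage examples in the tool
+	// description):
+	//   {"topic": "my-topic", "key": "user-123", "value": "User activity data",
+	//    "headers": ["source=mcp-tool"], "sync": true}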
+ + kafkaProduceMessageKeyDesc = "Message key" + kafkaProduceMessageValueDesc = "Message value (required)" + kafkaProduceMessageHeadersDesc = "Message headers as array of \"key=value\" strings" + kafkaProduceMessagePartitionDesc = "Target partition number" +) + // KafkaProduceToolBuilder implements the ToolBuilder interface for Kafka client produce operations // It provides functionality to build Kafka producer tools // /nolint:revive @@ -60,7 +104,7 @@ func NewKafkaProduceToolBuilder() *KafkaProduceToolBuilder { // BuildTools builds the Kafka produce tool list // This is the core method implementing the ToolBuilder interface -func (b *KafkaProduceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *KafkaProduceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Skip registration if in read-only mode if config.ReadOnly { return nil, nil @@ -77,11 +121,14 @@ func (b *KafkaProduceToolBuilder) BuildTools(_ context.Context, config builders. } // Build tools - tool := b.buildKafkaProduceTool() + tool, err := b.buildKafkaProduceTool() + if err != nil { + return nil, err + } handler := b.buildKafkaProduceHandler() - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[kafkaProduceInput, any]{ Tool: tool, Handler: handler, }, @@ -90,7 +137,12 @@ func (b *KafkaProduceToolBuilder) BuildTools(_ context.Context, config builders. // buildKafkaProduceTool builds the Kafka produce MCP tool definition // Migrated from the original tool definition logic -func (b *KafkaProduceToolBuilder) buildKafkaProduceTool() mcp.Tool { +func (b *KafkaProduceToolBuilder) buildKafkaProduceTool() (*sdk.Tool, error) { + inputSchema, err := buildKafkaProduceInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Produce messages to a Kafka topic.\n" + "This tool allows you to send messages to Kafka topics with various options for message creation.\n\n" + "Kafka Producer Concepts:\n" + @@ -120,95 +172,29 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceTool() mcp.Tool { " partition: 2\n\n" + "This tool requires Kafka producer permissions on the specified topic." - return mcp.NewTool("kafka_client_produce", - mcp.WithDescription(toolDesc), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The name of the Kafka topic to produce messages to. "+ - "Must be an existing topic that the user has write permissions for."), - ), - mcp.WithString("key", - mcp.Description("The key for the message. "+ - "Optional. Keys are used for partition assignment and maintaining order for related messages. "+ - "Messages with the same key will be sent to the same partition."), - ), - mcp.WithString("value", - mcp.Required(), - mcp.Description("The value/content of the message to send. "+ - "This is the actual payload that will be delivered to consumers. It can be a JSON string, and the system will automatically serialize it to the appropriate format based on the schema registry if it is available."), - ), - mcp.WithArray("headers", - mcp.Description("Message headers in the format of [\"key=value\"]. "+ - "Optional. Headers allow you to attach metadata to messages without modifying the payload. 
"+ - "They are passed along with the message to consumers."), - mcp.Items(map[string]interface{}{ - "type": "string", - "description": "key value pair in the format of \"key=value\"", - }), - ), - mcp.WithNumber("partition", - mcp.Description("The specific partition to send the message to. "+ - "Optional. If not specified, Kafka will automatically assign a partition based on the message key (if provided) or round-robin assignment. "+ - "Specifying a partition can be useful for testing or when you need guaranteed partition assignment."), - ), - mcp.WithArray("messages", - mcp.Description("An array of messages to send in batch. "+ - "Optional. Alternative to the single message parameters (key, value, headers, partition). "+ - "Each message object can contain 'key', 'value', 'headers', and 'partition' properties. "+ - "Batch sending is more efficient for multiple messages."), - mcp.Items(map[string]interface{}{ - "type": "object", - "properties": map[string]interface{}{ - "key": map[string]interface{}{ - "type": "string", - "description": "Message key", - }, - "value": map[string]interface{}{ - "type": "string", - "description": "Message value (required)", - }, - "headers": map[string]interface{}{ - "type": "array", - "description": "Message headers as array of \"key=value\" strings", - "items": map[string]interface{}{ - "type": "string", - }, - }, - "partition": map[string]interface{}{ - "type": "number", - "description": "Target partition number", - }, - }, - "required": []string{"value"}, - }), - ), - mcp.WithBoolean("sync", - mcp.Description("Whether to wait for server acknowledgment before returning. "+ - "Optional. Default is true. When true, ensures the message was successfully written "+ - "to the topic before the tool returns a success response."), - ), - ) + return &sdk.Tool{ + Name: "kafka_client_produce", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildKafkaProduceHandler builds the Kafka produce handler function // Migrated from the original handler logic -func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required topic parameter - topicName, err := request.RequireString("topic") - if err != nil { - return b.handleError("get topic name", err), nil - } +func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() builders.ToolHandlerFunc[kafkaProduceInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input kafkaProduceInput) (*sdk.CallToolResult, any, error) { + topicName := input.Topic // Get Kafka session from context session := mcpCtx.GetKafkaSession(ctx) if session == nil { - return b.handleError("get Kafka session not found in context", nil), nil + return nil, nil, b.handleError("get Kafka session not found in context", nil) } // Create Kafka client using the session kafkaClient, err := session.GetClient() if err != nil { - return b.handleError("create Kafka client", err), nil + return nil, nil, b.handleError("create Kafka client", err) } defer kafkaClient.Close() @@ -224,19 +210,19 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex defer cancel() if err = kafkaClient.Ping(timeoutCtx); err != nil { // check connectivity to cluster - return b.handleError("ping Kafka cluster", err), nil + return nil, nil, b.handleError("ping Kafka cluster", err) } if schemaReady { subjSchema, err := 
srClient.SchemaByVersion(timeoutCtx, topicName+"-value", -1) if err != nil { - return b.handleError("get schema", err), nil + return nil, nil, b.handleError("get schema", err) } switch subjSchema.Type { case sr.TypeAvro: avroSchema, err := avro.Parse(subjSchema.Schema.Schema) if err != nil { - return b.handleError("parse avro schema", err), nil + return nil, nil, b.handleError("parse avro schema", err) } serde.Register( subjSchema.ID, @@ -263,14 +249,16 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex } // Single message mode (simplified version) - value, err := request.RequireString("value") - if err != nil { - return b.handleError("get value", err), nil + value := input.Value + key := "" + if input.Key != nil { + key = *input.Key + } + headers := input.Headers + sync := true + if input.Sync != nil { + sync = *input.Sync } - - key := request.GetString("key", "") - headers := request.GetStringSlice("headers", []string{}) - sync := request.GetBool("sync", true) // Prepare record record := &kgo.Record{ @@ -302,7 +290,7 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex if err := json.Unmarshal([]byte(value), &jsonValue); err == nil { encodedValue, err := serde.Encode(jsonValue) if err != nil { - return b.handleError("encode value with schema", err), nil + return nil, nil, b.handleError("encode value with schema", err) } record.Value = encodedValue } @@ -312,7 +300,7 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex if sync { results := kafkaClient.ProduceSync(timeoutCtx, record) if len(results) > 0 && results[0].Err != nil { - return b.handleError("produce message", results[0].Err), nil + return nil, nil, b.handleError("produce message", results[0].Err) } } else { kafkaClient.Produce(timeoutCtx, record, func(_ *kgo.Record, _ error) { @@ -336,8 +324,41 @@ func (b *KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex response["partition"] = record.Partition } - return b.marshalResponse(response) + result, err := b.marshalResponse(response) + return result, nil, err + } +} + +func buildKafkaProduceInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[kafkaProduceInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} } + setSchemaDescription(schema, "topic", kafkaProduceTopicDesc) + setSchemaDescription(schema, "key", kafkaProduceKeyDesc) + setSchemaDescription(schema, "value", kafkaProduceValueDesc) + setSchemaDescription(schema, "headers", kafkaProduceHeadersDesc) + setSchemaDescription(schema, "partition", kafkaProducePartitionDesc) + setSchemaDescription(schema, "messages", kafkaProduceMessagesDesc) + setSchemaDescription(schema, "sync", kafkaProduceSyncDesc) + + messagesSchema := schema.Properties["messages"] + if messagesSchema != nil && messagesSchema.Items != nil { + setSchemaDescription(messagesSchema.Items, "key", kafkaProduceMessageKeyDesc) + setSchemaDescription(messagesSchema.Items, "value", kafkaProduceMessageValueDesc) + setSchemaDescription(messagesSchema.Items, "headers", kafkaProduceMessageHeadersDesc) + setSchemaDescription(messagesSchema.Items, "partition", kafkaProduceMessagePartitionDesc) + } + + normalizeAdditionalProperties(schema) + return schema, nil } // Helper functions @@ -345,15 +366,17 @@ func (b 
*KafkaProduceToolBuilder) buildKafkaProduceHandler() func(context.Contex // Unified error handling and utility functions // handleError provides unified error handling -func (b *KafkaProduceToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *KafkaProduceToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *KafkaProduceToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *KafkaProduceToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } diff --git a/pkg/mcp/builders/kafka/produce_legacy.go b/pkg/mcp/builders/kafka/produce_legacy.go new file mode 100644 index 0000000..267a898 --- /dev/null +++ b/pkg/mcp/builders/kafka/produce_legacy.go @@ -0,0 +1,351 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/hamba/avro/v2" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" + "github.com/twmb/franz-go/pkg/kgo" + "github.com/twmb/franz-go/pkg/sr" +) + +// KafkaProduceLegacyToolBuilder implements the legacy ToolBuilder interface for Kafka client produce operations. +// /nolint:revive +type KafkaProduceLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewKafkaProduceLegacyToolBuilder creates a new legacy Kafka produce tool builder instance. +func NewKafkaProduceLegacyToolBuilder() *KafkaProduceLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "kafka_produce", + Version: "1.0.0", + Description: "Kafka client produce tools", + Category: "kafka_client", + Tags: []string{"kafka", "client", "produce"}, + } + + features := []string{ + "kafka-client", + "all", + "all-kafka", + } + + return &KafkaProduceLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Kafka produce tool list for the legacy server. 
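+// Like the typed builder in produce.go, it skips building entirely in
+// read-only mode and validates the configuration only when a matching
+// feature is enabled; the difference is that it returns mcp-go
+// server.ServerTool values rather than builders.ToolDefinition.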
+func (b *KafkaProduceLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Skip registration if in read-only mode + if config.ReadOnly { + return nil, nil + } + + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildKafkaProduceTool() + handler := b.buildKafkaProduceHandler() + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildKafkaProduceTool builds the Kafka produce MCP tool definition. +func (b *KafkaProduceLegacyToolBuilder) buildKafkaProduceTool() mcp.Tool { + toolDesc := "Produce messages to a Kafka topic.\n" + + "This tool allows you to send messages to Kafka topics with various options for message creation.\n\n" + + "Kafka Producer Concepts:\n" + + "- Producers write data to Kafka topics, which can be spread across multiple partitions\n" + + "- Messages can include a key, which determines the partition assignment (consistent hashing)\n" + + "- Headers can be added to messages to include metadata without affecting the message payload\n" + + "- Partitions enable parallel processing and ordered delivery within a single partition\n\n" + + "This tool provides a simple producer instance for diagnostic and testing purposes. Do not use this tool for Pulsar protocol operations. Use 'pulsar_client_produce' instead.\n\n" + + "Usage Examples:\n\n" + + "1. Basic message production - Send a simple message to a topic:\n" + + " topic: \"my-topic\"\n" + + " value: \"Hello, Kafka!\"\n\n" + + "2. Keyed message - Send a message with a key for consistent partition routing:\n" + + " topic: \"my-topic\"\n" + + " key: \"user-123\"\n" + + " value: \"User activity data\"\n\n" + + "3. Multiple messages - Send several messages in one request:\n" + + " topic: \"my-topic\"\n" + + " messages: [{\"key\": \"key1\", \"value\": \"value1\"}, {\"key\": \"key2\", \"value\": \"value2\"}]\n\n" + + "4. Message with headers - Include metadata with your message:\n" + + " topic: \"my-topic\"\n" + + " value: \"Message with headers\"\n" + + " headers: [\"source=mcp-tool\", \"timestamp=2023-06-01\"]\n\n" + + "5. Specific partition - Send to a particular partition:\n" + + " topic: \"my-topic\"\n" + + " value: \"Targeted message\"\n" + + " partition: 2\n\n" + + "This tool requires Kafka producer permissions on the specified topic." + + return mcp.NewTool("kafka_client_produce", + mcp.WithDescription(toolDesc), + mcp.WithString("topic", mcp.Required(), + mcp.Description("The name of the Kafka topic to produce messages to. "+ + "Must be an existing topic that the user has write permissions for."), + ), + mcp.WithString("key", + mcp.Description("The key for the message. "+ + "Optional. Keys are used for partition assignment and maintaining order for related messages. "+ + "Messages with the same key will be sent to the same partition."), + ), + mcp.WithString("value", + mcp.Required(), + mcp.Description("The value/content of the message to send. "+ + "This is the actual payload that will be delivered to consumers. 
It can be a JSON string, and the system will automatically serialize it to the appropriate format based on the schema registry if it is available."), + ), + mcp.WithArray("headers", + mcp.Description("Message headers in the format of [\"key=value\"]. "+ + "Optional. Headers allow you to attach metadata to messages without modifying the payload. "+ + "They are passed along with the message to consumers."), + mcp.Items(map[string]interface{}{ + "type": "string", + "description": "key value pair in the format of \"key=value\"", + }), + ), + mcp.WithNumber("partition", + mcp.Description("The specific partition to send the message to. "+ + "Optional. If not specified, Kafka will automatically assign a partition based on the message key (if provided) or round-robin assignment. "+ + "Specifying a partition can be useful for testing or when you need guaranteed partition assignment."), + ), + mcp.WithArray("messages", + mcp.Description("An array of messages to send in batch. "+ + "Optional. Alternative to the single message parameters (key, value, headers, partition). "+ + "Each message object can contain 'key', 'value', 'headers', and 'partition' properties. "+ + "Batch sending is more efficient for multiple messages."), + mcp.Items(map[string]interface{}{ + "type": "object", + "properties": map[string]interface{}{ + "key": map[string]interface{}{ + "type": "string", + "description": "Message key", + }, + "value": map[string]interface{}{ + "type": "string", + "description": "Message value (required)", + }, + "headers": map[string]interface{}{ + "type": "array", + "description": "Message headers as array of \"key=value\" strings", + "items": map[string]interface{}{ + "type": "string", + }, + }, + "partition": map[string]interface{}{ + "type": "number", + "description": "Target partition number", + }, + }, + "required": []string{"value"}, + }), + ), + mcp.WithBoolean("sync", + mcp.Description("Whether to wait for server acknowledgment before returning. "+ + "Optional. Default is true. When true, ensures the message was successfully written "+ + "to the topic before the tool returns a success response."), + ), + ) +} + +// buildKafkaProduceHandler builds the Kafka produce handler function. 
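+// The handler reads parameters through the mcp-go request accessors
+// (RequireString/GetString/GetStringSlice/GetBool), pings the cluster with a
+// 30-second timeout, encodes the value with an Avro or JSON schema when the
+// Schema Registry provides one for "<topic>-value", and produces either
+// synchronously or asynchronously depending on 'sync'.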
+func (b *KafkaProduceLegacyToolBuilder) buildKafkaProduceHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required topic parameter + topicName, err := request.RequireString("topic") + if err != nil { + return b.handleError("get topic name", err), nil + } + + // Get Kafka session from context + session := mcpCtx.GetKafkaSession(ctx) + if session == nil { + return b.handleError("get Kafka session not found in context", nil), nil + } + + // Create Kafka client using the session + kafkaClient, err := session.GetClient() + if err != nil { + return b.handleError("create Kafka client", err), nil + } + defer kafkaClient.Close() + + srClient, err := session.GetSchemaRegistryClient() + schemaReady := false + var serde sr.Serde + if err == nil && srClient != nil { + schemaReady = true + } + + // Set timeout + timeoutCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + if err = kafkaClient.Ping(timeoutCtx); err != nil { // check connectivity to cluster + return b.handleError("ping Kafka cluster", err), nil + } + + if schemaReady { + subjSchema, err := srClient.SchemaByVersion(timeoutCtx, topicName+"-value", -1) + if err != nil { + return b.handleError("get schema", err), nil + } + switch subjSchema.Type { + case sr.TypeAvro: + avroSchema, err := avro.Parse(subjSchema.Schema.Schema) + if err != nil { + return b.handleError("parse avro schema", err), nil + } + serde.Register( + subjSchema.ID, + map[string]any{}, + sr.EncodeFn(func(v any) ([]byte, error) { + return avro.Marshal(avroSchema, v) + }), + sr.DecodeFn(func(data []byte, v any) error { + return avro.Unmarshal(avroSchema, data, v) + }), + ) + case sr.TypeJSON: + serde.Register( + subjSchema.ID, + map[string]any{}, + sr.EncodeFn(json.Marshal), + sr.DecodeFn(json.Unmarshal), + ) + case sr.TypeProtobuf: + default: + // TODO: support other schema types + schemaReady = false + } + } + + // Single message mode (simplified version) + value, err := request.RequireString("value") + if err != nil { + return b.handleError("get value", err), nil + } + + key := request.GetString("key", "") + headers := request.GetStringSlice("headers", []string{}) + sync := request.GetBool("sync", true) + + // Prepare record + record := &kgo.Record{ + Topic: topicName, + Value: []byte(value), + } + + // Add key if provided + if key != "" { + record.Key = []byte(key) + } + + // Add headers if provided + if len(headers) > 0 { + for _, headerStr := range headers { + parts := strings.SplitN(headerStr, "=", 2) + if len(parts) == 2 { + record.Headers = append(record.Headers, kgo.RecordHeader{ + Key: parts[0], + Value: []byte(parts[1]), + }) + } + } + } + + // Handle schema encoding if available + if schemaReady { + var jsonValue interface{} + if err := json.Unmarshal([]byte(value), &jsonValue); err == nil { + encodedValue, err := serde.Encode(jsonValue) + if err != nil { + return b.handleError("encode value with schema", err), nil + } + record.Value = encodedValue + } + } + + // Produce the message based on sync parameter + if sync { + results := kafkaClient.ProduceSync(timeoutCtx, record) + if len(results) > 0 && results[0].Err != nil { + return b.handleError("produce message", results[0].Err), nil + } + } else { + kafkaClient.Produce(timeoutCtx, record, func(_ *kgo.Record, _ error) { + // Log async errors but don't fail since we're async + // In the future, this could be enhanced with proper async result handling + }) 
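+			// In async mode the tool reports success without waiting for an
+			// acknowledgment; delivery failures surface only in the callback
+			// above, so callers that need a delivery guarantee should keep
+			// 'sync' at its default of true.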
+ } + + // Create result + response := map[string]interface{}{ + "status": "success", + "topic": record.Topic, + "timestamp": time.Now().Format(time.RFC3339), + } + + if len(record.Key) > 0 { + response["key"] = string(record.Key) + } + + if record.Partition != -1 { + response["partition"] = record.Partition + } + + return b.marshalResponse(response) + } +} + +// handleError provides unified error handling. +func (b *KafkaProduceLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses. +func (b *KafkaProduceLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} diff --git a/pkg/mcp/builders/kafka/produce_test.go b/pkg/mcp/builders/kafka/produce_test.go index 6a88f5f..8fbe5a3 100644 --- a/pkg/mcp/builders/kafka/produce_test.go +++ b/pkg/mcp/builders/kafka/produce_test.go @@ -18,6 +18,7 @@ import ( "context" "testing" + "github.com/google/jsonschema-go/jsonschema" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -45,8 +46,8 @@ func TestKafkaProduceToolBuilder(t *testing.T) { tools, err := builder.BuildTools(context.Background(), config) require.NoError(t, err) assert.Len(t, tools, 1) - assert.Equal(t, "kafka_client_produce", tools[0].Tool.Name) - assert.NotNil(t, tools[0].Handler) + assert.Equal(t, "kafka_client_produce", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) }) t.Run("BuildTools_ReadOnlyMode", func(t *testing.T) { @@ -89,3 +90,32 @@ func TestKafkaProduceToolBuilder(t *testing.T) { assert.Error(t, err) }) } + +func TestKafkaProduceToolSchema(t *testing.T) { + builder := NewKafkaProduceToolBuilder() + tool, err := builder.buildKafkaProduceTool() + require.NoError(t, err) + assert.Equal(t, "kafka_client_produce", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"topic", "value"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "topic", + "key", + "value", + "headers", + "partition", + "messages", + "sync", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + topicSchema := schema.Properties["topic"] + require.NotNil(t, topicSchema) + assert.Equal(t, kafkaProduceTopicDesc, topicSchema.Description) +} diff --git a/pkg/mcp/builders/kafka/schema_registry.go b/pkg/mcp/builders/kafka/schema_registry.go index a4a0eae..651eec3 100644 --- a/pkg/mcp/builders/kafka/schema_registry.go +++ b/pkg/mcp/builders/kafka/schema_registry.go @@ -21,13 +21,53 @@ import ( "strconv" "strings" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" "github.com/twmb/franz-go/pkg/sr" ) +type kafkaSchemaRegistryInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Subject *string `json:"subject,omitempty"` + Version *string `json:"version,omitempty"` + 
Compatibility *string `json:"compatibility,omitempty"` + SchemaType *string `json:"schemaType,omitempty"` + Schema *string `json:"schema,omitempty"` +} + +const ( + kafkaSchemaRegistryResourceDesc = "Resource to operate on. Available resources:\n" + + "- subjects: Collection of all schema subjects in the Schema Registry\n" + + "- subject: A specific schema subject (a named schema that can have multiple versions)\n" + + "- versions: Collection of all versions for a specific subject\n" + + "- version: A specific version of a subject's schema\n" + + "- compatibility: Compatibility settings that control schema evolution rules\n" + + "- types: Supported schema format types (like AVRO, JSON, PROTOBUF)" + kafkaSchemaRegistryOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all subjects, versions for a subject, or supported schema types\n" + + "- get: Get a subject's latest schema, a specific version, or compatibility setting\n" + + "- set: Set compatibility level for global or subject-specific schema evolution\n" + + "- create: Register a new schema for a subject\n" + + "- delete: Delete a schema subject or a specific version" + kafkaSchemaRegistrySubjectDesc = "The name of the schema subject. " + + "Required for operations on 'subject', 'versions', 'version', and subject-specific 'compatibility' resources. " + + "Subject names typically follow the pattern '-key' or '-value'." + kafkaSchemaRegistryVersionDesc = "The version number or 'latest' for the most recent version. " + + "Required for 'version' resource operations." + kafkaSchemaRegistryCompatibilityDesc = "The compatibility level to set. " + + "Valid values: BACKWARD, FORWARD, FULL, NONE. " + + "Required for 'set' operation on 'compatibility' resource." + kafkaSchemaRegistrySchemaTypeDesc = "The schema format type. " + + "Valid values: AVRO, JSON, PROTOBUF. " + + "Required for 'create' operation on 'subject' resource." + kafkaSchemaRegistrySchemaDesc = "The schema definition as a JSON string. " + + "Required for 'create' operation on 'subject' resource. " + + "The structure depends on the schema type (AVRO, JSON Schema, or Protocol Buffers)." 
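+
+	// A typical call pairs a resource with an operation; for instance, the
+	// schema-registration example from the tool description maps to:
+	//   {"resource": "subject", "operation": "create", "subject": "user-events-value",
+	//    "schemaType": "AVRO", "schema": "{...}"}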
+) + // KafkaSchemaRegistryToolBuilder implements the ToolBuilder interface for Kafka Schema Registry // /nolint:revive type KafkaSchemaRegistryToolBuilder struct { @@ -57,7 +97,7 @@ func NewKafkaSchemaRegistryToolBuilder() *KafkaSchemaRegistryToolBuilder { } // BuildTools builds the Kafka Schema Registry tool list -func (b *KafkaSchemaRegistryToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *KafkaSchemaRegistryToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,11 +109,14 @@ func (b *KafkaSchemaRegistryToolBuilder) BuildTools(_ context.Context, config bu } // Build tools - tool := b.buildKafkaSchemaRegistryTool() + tool, err := b.buildKafkaSchemaRegistryTool() + if err != nil { + return nil, err + } handler := b.buildKafkaSchemaRegistryHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[kafkaSchemaRegistryInput, any]{ Tool: tool, Handler: handler, }, @@ -81,21 +124,11 @@ func (b *KafkaSchemaRegistryToolBuilder) BuildTools(_ context.Context, config bu } // buildKafkaSchemaRegistryTool builds the Kafka Schema Registry MCP tool definition -func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryTool() mcp.Tool { - resourceDesc := "Resource to operate on. Available resources:\n" + - "- subjects: Collection of all schema subjects in the Schema Registry\n" + - "- subject: A specific schema subject (a named schema that can have multiple versions)\n" + - "- versions: Collection of all versions for a specific subject\n" + - "- version: A specific version of a subject's schema\n" + - "- compatibility: Compatibility settings that control schema evolution rules\n" + - "- types: Supported schema format types (like AVRO, JSON, PROTOBUF)" - - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all subjects, versions for a subject, or supported schema types\n" + - "- get: Get a subject's latest schema, a specific version, or compatibility setting\n" + - "- set: Set compatibility level for global or subject-specific schema evolution\n" + - "- create: Register a new schema for a subject\n" + - "- delete: Delete a schema subject or a specific version" +func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryTool() (*sdk.Tool, error) { + inputSchema, err := buildKafkaSchemaRegistryInputSchema() + if err != nil { + return nil, err + } toolDesc := "Unified tool for managing Apache Kafka Schema Registry.\n" + "Schema Registry provides a centralized repository for managing and validating schemas for Kafka data.\n" + @@ -122,7 +155,7 @@ func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryTool() mcp.Tool " resource: \"subject\"\n" + " operation: \"create\"\n" + " subject: \"user-events-value\"\n" + - " schema: {...}\n" + + " schema: \"{...}\"\n" + " schemaType: \"AVRO\"\n\n" + "4. Set compatibility level:\n" + " resource: \"compatibility\"\n" + @@ -131,67 +164,33 @@ func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryTool() mcp.Tool " compatibility: \"BACKWARD\"\n\n" + "This tool requires appropriate Schema Registry permissions." 
- return mcp.NewTool("kafka_admin_sr", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("subject", - mcp.Description("The name of the schema subject. "+ - "Required for operations on 'subject', 'versions', 'version', and subject-specific 'compatibility' resources. "+ - "Subject names typically follow the pattern '-key' or '-value'.")), - mcp.WithString("version", - mcp.Description("The version number or 'latest' for the most recent version. "+ - "Required for 'version' resource operations.")), - mcp.WithString("compatibility", - mcp.Description("The compatibility level to set. "+ - "Valid values: BACKWARD, FORWARD, FULL, NONE. "+ - "Required for 'set' operation on 'compatibility' resource.")), - mcp.WithString("schemaType", - mcp.Description("The schema format type. "+ - "Valid values: AVRO, JSON, PROTOBUF. "+ - "Required for 'create' operation on 'subject' resource.")), - mcp.WithObject("schema", - mcp.Description("The schema definition as a JSON object. "+ - "Required for 'create' operation on 'subject' resource. "+ - "The structure depends on the schema type (AVRO, JSON Schema, or Protocol Buffers).")), - ) + return &sdk.Tool{ + Name: "kafka_admin_sr", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildKafkaSchemaRegistryHandler builds the Kafka Schema Registry handler function -func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return b.handleError("get resource", err), nil - } - - operation, err := request.RequireString("operation") - if err != nil { - return b.handleError("get operation", err), nil - } - +func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryHandler(readOnly bool) builders.ToolHandlerFunc[kafkaSchemaRegistryInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, any, error) { // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) + resource := strings.ToLower(input.Resource) + operation := strings.ToLower(input.Operation) // Validate write operations in read-only mode if readOnly && (operation == "create" || operation == "delete" || operation == "set") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Schema Registry client session := mcpCtx.GetKafkaSession(ctx) if session == nil { - return b.handleError("get Kafka session not found in context", nil), nil + return nil, nil, b.handleError("get Kafka session not found in context", nil) } client, err := session.GetSchemaRegistryClient() if err != nil { - return b.handleError("get Schema Registry client", err), nil + return nil, nil, b.handleError("get Schema Registry client", err) } // Dispatch based on resource and operation @@ -199,55 +198,65 @@ func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryHandler(readOnl case "subjects": switch operation { case "list": - return b.handleSchemaSubjectsList(ctx, client, request) + result, err := 
b.handleSchemaSubjectsList(ctx, client) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'subjects': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'subjects': %s", operation) } case "subject": switch operation { case "get": - return b.handleSchemaSubjectGet(ctx, client, request) + result, err := b.handleSchemaSubjectGet(ctx, client, input) + return result, nil, err case "create": - return b.handleSchemaSubjectCreate(ctx, client, request) + result, err := b.handleSchemaSubjectCreate(ctx, client, input) + return result, nil, err case "delete": - return b.handleSchemaSubjectDelete(ctx, client, request) + result, err := b.handleSchemaSubjectDelete(ctx, client, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'subject': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'subject': %s", operation) } case "versions": switch operation { case "list": - return b.handleSchemaVersionsList(ctx, client, request) + result, err := b.handleSchemaVersionsList(ctx, client, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'versions': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'versions': %s", operation) } case "version": switch operation { case "get": - return b.handleSchemaVersionGet(ctx, client, request) + result, err := b.handleSchemaVersionGet(ctx, client, input) + return result, nil, err case "delete": - return b.handleSchemaVersionDelete(ctx, client, request) + result, err := b.handleSchemaVersionDelete(ctx, client, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'version': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'version': %s", operation) } case "compatibility": switch operation { case "get": - return b.handleSchemaCompatibilityGet(ctx, client, request) + result, err := b.handleSchemaCompatibilityGet(ctx, client, input) + return result, nil, err case "set": - return b.handleSchemaCompatibilitySet(ctx, client, request) + result, err := b.handleSchemaCompatibilitySet(ctx, client, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'compatibility': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'compatibility': %s", operation) } case "types": switch operation { case "list": - return b.handleSchemaTypesList(ctx, client, request) + result, err := b.handleSchemaTypesList() + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'types': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'types': %s", operation) } default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: subjects, subject, versions, version, compatibility, types", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. 
available resources: subjects, subject, versions, version, compatibility, types", resource) } } } @@ -255,66 +264,68 @@ func (b *KafkaSchemaRegistryToolBuilder) buildKafkaSchemaRegistryHandler(readOnl // Utility functions // handleError provides unified error handling -func (b *KafkaSchemaRegistryToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *KafkaSchemaRegistryToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *KafkaSchemaRegistryToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *KafkaSchemaRegistryToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Specific operation handler functions // handleSchemaSubjectsList handles listing all schema subjects -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectsList(ctx context.Context, client *sr.Client, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectsList(ctx context.Context, client *sr.Client) (*sdk.CallToolResult, error) { subjects, err := client.Subjects(ctx) if err != nil { - return b.handleError("list schema subjects", err), nil + return nil, b.handleError("list schema subjects", err) } return b.marshalResponse(subjects) } // handleSchemaSubjectGet handles getting the latest schema for a subject -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectGet(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectGet(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } schema, err := client.SchemaByVersion(ctx, subject, -1) // -1 for latest if err != nil { - return b.handleError("get schema for subject", err), nil + return nil, b.handleError("get schema for subject", err) } return b.marshalResponse(schema) } // handleSchemaSubjectCreate handles registering a new schema for a subject -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectCreate(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectCreate(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } - schemaTypeStr, err := request.RequireString("schemaType") + schemaTypeStr, err := requireString(input.SchemaType, "schemaType") if err != nil { - return b.handleError("get schema type", err), nil + 
return nil, b.handleError("get schema type", err) } - schema, err := request.RequireString("schema") + schema, err := requireString(input.Schema, "schema") if err != nil { - return b.handleError("get schema object", err), nil + return nil, b.handleError("get schema object", err) } // Parse schema type var schemaType sr.SchemaType err = schemaType.UnmarshalText([]byte(schemaTypeStr)) if err != nil { - return b.handleError("unmarshal schema type", err), nil + return nil, b.handleError("unmarshal schema type", err) } // Create schema @@ -325,50 +336,50 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectCreate(ctx context.C result, err := client.CreateSchema(ctx, subject, schemaObj) if err != nil { - return b.handleError("create schema", err), nil + return nil, b.handleError("create schema", err) } return b.marshalResponse(result) } // handleSchemaSubjectDelete handles deleting a schema subject -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectDelete(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaSubjectDelete(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } // Delete subject using correct API signature (soft delete by default) versions, err := client.DeleteSubject(ctx, subject, sr.SoftDelete) if err != nil { - return b.handleError("delete schema subject", err), nil + return nil, b.handleError("delete schema subject", err) } return b.marshalResponse(versions) } // handleSchemaVersionsList handles listing all versions for a subject -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionsList(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionsList(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } versions, err := client.SubjectVersions(ctx, subject) if err != nil { - return b.handleError("list schema versions", err), nil + return nil, b.handleError("list schema versions", err) } return b.marshalResponse(versions) } // handleSchemaVersionGet handles getting a specific version of a schema -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionGet(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionGet(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } - versionStr, err := request.RequireString("version") + versionStr, err := requireString(input.Version, "version") if err != nil { - return b.handleError("get version", err), nil + return nil, b.handleError("get version", err) } var version int @@ 
-378,46 +389,51 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionGet(ctx context.Cont var parseErr error version, parseErr = strconv.Atoi(versionStr) if parseErr != nil { - return b.handleError("parse version number", parseErr), nil + return nil, b.handleError("parse version number", parseErr) } } schema, err := client.SchemaByVersion(ctx, subject, version) if err != nil { - return b.handleError("get schema version", err), nil + return nil, b.handleError("get schema version", err) } return b.marshalResponse(schema) } // handleSchemaVersionDelete handles deleting a specific version of a schema -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionDelete(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject, err := request.RequireString("subject") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaVersionDelete(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject, err := requireString(input.Subject, "subject") if err != nil { - return b.handleError("get subject name", err), nil + return nil, b.handleError("get subject name", err) } - versionStr, err := request.RequireString("version") + versionStr, err := requireString(input.Version, "version") if err != nil { - return b.handleError("get version", err), nil + return nil, b.handleError("get version", err) } version, err := strconv.Atoi(versionStr) if err != nil { - return b.handleError("parse version number", err), nil + return nil, b.handleError("parse version number", err) } // Delete schema version using correct API signature (soft delete by default) err = client.DeleteSchema(ctx, subject, version, sr.SoftDelete) if err != nil { - return b.handleError("delete schema version", err), nil + return nil, b.handleError("delete schema version", err) } - return mcp.NewToolResultText(fmt.Sprintf("Schema version %d for subject %s deleted successfully", version, subject)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: fmt.Sprintf("Schema version %d for subject %s deleted successfully", version, subject)}}, + }, nil } // handleSchemaCompatibilityGet handles getting compatibility setting -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilityGet(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - subject := request.GetString("subject", "") // Optional for global compatibility +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilityGet(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + subject := "" + if input.Subject != nil { + subject = *input.Subject + } var results []sr.CompatibilityResult if subject != "" { @@ -431,7 +447,7 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilityGet(ctx contex // Check for errors in results for _, result := range results { if result.Err != nil { - return b.handleError("get compatibility setting", result.Err), nil + return nil, b.handleError("get compatibility setting", result.Err) } } @@ -440,17 +456,20 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilityGet(ctx contex return b.marshalResponse(map[string]string{"compatibility": results[0].Level.String()}) } - return mcp.NewToolResultError("No compatibility result returned"), nil + return nil, fmt.Errorf("no compatibility result returned") } // handleSchemaCompatibilitySet handles setting compatibility level -func (b 
*KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilitySet(ctx context.Context, client *sr.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - compatibilityStr, err := request.RequireString("compatibility") +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilitySet(ctx context.Context, client *sr.Client, input kafkaSchemaRegistryInput) (*sdk.CallToolResult, error) { + compatibilityStr, err := requireString(input.Compatibility, "compatibility") if err != nil { - return b.handleError("get compatibility level", err), nil + return nil, b.handleError("get compatibility level", err) } - subject := request.GetString("subject", "") // Optional for global compatibility + subject := "" + if input.Subject != nil { + subject = *input.Subject + } // Parse compatibility level var compatibility sr.CompatibilityLevel @@ -464,7 +483,7 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilitySet(ctx contex case "NONE": compatibility = sr.CompatNone default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid compatibility level: %s. Valid levels: BACKWARD, FORWARD, FULL, NONE", compatibilityStr)), nil + return nil, fmt.Errorf("invalid compatibility level: %s. valid levels: BACKWARD, FORWARD, FULL, NONE", compatibilityStr) } // Create SetCompatibility request @@ -484,15 +503,41 @@ func (b *KafkaSchemaRegistryToolBuilder) handleSchemaCompatibilitySet(ctx contex // Check for errors in results for _, result := range results { if result.Err != nil { - return b.handleError("set compatibility level", result.Err), nil + return nil, b.handleError("set compatibility level", result.Err) } } - return mcp.NewToolResultText(fmt.Sprintf("Compatibility level set to %s", compatibilityStr)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: fmt.Sprintf("Compatibility level set to %s", compatibilityStr)}}, + }, nil } // handleSchemaTypesList handles listing supported schema types -func (b *KafkaSchemaRegistryToolBuilder) handleSchemaTypesList(_ context.Context, _ *sr.Client, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *KafkaSchemaRegistryToolBuilder) handleSchemaTypesList() (*sdk.CallToolResult, error) { types := []string{"AVRO", "JSON", "PROTOBUF"} return b.marshalResponse(types) } + +func buildKafkaSchemaRegistryInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[kafkaSchemaRegistryInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "resource", kafkaSchemaRegistryResourceDesc) + setSchemaDescription(schema, "operation", kafkaSchemaRegistryOperationDesc) + setSchemaDescription(schema, "subject", kafkaSchemaRegistrySubjectDesc) + setSchemaDescription(schema, "version", kafkaSchemaRegistryVersionDesc) + setSchemaDescription(schema, "compatibility", kafkaSchemaRegistryCompatibilityDesc) + setSchemaDescription(schema, "schemaType", kafkaSchemaRegistrySchemaTypeDesc) + setSchemaDescription(schema, "schema", kafkaSchemaRegistrySchemaDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/kafka/schema_registry_test.go b/pkg/mcp/builders/kafka/schema_registry_test.go new file mode 100644 index 0000000..594a43f --- /dev/null +++ b/pkg/mcp/builders/kafka/schema_registry_test.go @@ -0,0 +1,144 @@ +// Copyright 2025 
StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kafka + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKafkaSchemaRegistryToolBuilder(t *testing.T) { + builder := NewKafkaSchemaRegistryToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "kafka_schema_registry", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "kafka-admin-schema-registry") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"kafka-admin-schema-registry"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_sr", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"kafka-admin-schema-registry"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_sr", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"kafka-admin-schema-registry"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestKafkaSchemaRegistryToolSchema(t *testing.T) { + builder := NewKafkaSchemaRegistryToolBuilder() + tool, err := builder.buildKafkaSchemaRegistryTool() + require.NoError(t, err) + assert.Equal(t, "kafka_admin_sr", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "subject", + "version", + "compatibility", + "schemaType", + "schema", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, kafkaSchemaRegistryResourceDesc, resourceSchema.Description) + + 
operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, kafkaSchemaRegistryOperationDesc, operationSchema.Description) + + schemaSchema := schema.Properties["schema"] + require.NotNil(t, schemaSchema) + assert.Equal(t, kafkaSchemaRegistrySchemaDesc, schemaSchema.Description) + assert.Contains(t, schemaSchema.Types, "string") +} + +func TestKafkaSchemaRegistryToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewKafkaSchemaRegistryToolBuilder() + handler := builder.buildKafkaSchemaRegistryHandler(true) + + _, _, err := handler(context.Background(), nil, kafkaSchemaRegistryInput{ + Resource: "subject", + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/kafka/topics.go b/pkg/mcp/builders/kafka/topics.go index 3960d54..d57ba97 100644 --- a/pkg/mcp/builders/kafka/topics.go +++ b/pkg/mcp/builders/kafka/topics.go @@ -18,18 +18,59 @@ import ( "context" "encoding/json" "fmt" + "reflect" "strings" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" "github.com/twmb/franz-go/pkg/kadm" ) -// KafkaTopicsToolBuilder implements the ToolBuilder interface for Kafka Topics -// /nolint:revive -type KafkaTopicsToolBuilder struct { +type kafkaTopicsInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Name *string `json:"name,omitempty"` + Partitions *int `json:"partitions,omitempty"` + ReplicationFactor *int `json:"replicationFactor,omitempty"` + Configs map[string]any `json:"configs,omitempty"` + IncludeInternal bool `json:"includeInternal,omitempty"` +} + +const ( + kafkaTopicsResourceDesc = "Resource to operate on. Available resources:\n" + + "- topic: A single Kafka topic for operations on individual topics (create, get, delete)\n" + + "- topics: Collection of Kafka topics for bulk operations (list)" + kafkaTopicsOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all topics in the Kafka cluster, optionally including internal topics\n" + + "- get: Get detailed configuration for a specific topic\n" + + "- create: Create a new topic with specified partitions, replication factor, and optional configs\n" + + "- delete: Delete an existing topic\n" + + "- metadata: Get metadata for a specific topic" + kafkaTopicsNameDesc = "The name of the Kafka topic to operate on. " + + "Required for 'get', 'create', 'delete', and 'metadata' operations on the 'topic' resource. " + + "Topic names should follow Kafka naming conventions (alphanumeric, dots, underscores, and hyphens)." + kafkaTopicsPartitionsDesc = "The number of partitions for the topic. Required for 'create' operation. " + + "Partitions determine the parallelism and scalability of the topic. " + + "More partitions allow more concurrent consumers and higher throughput." + kafkaTopicsReplicationFactorDesc = "The replication factor for the topic. Required for 'create' operation. " + + "Replication factor determines fault tolerance - it should be at least 2 for production use. " + + "Cannot exceed the number of available brokers in the cluster." + kafkaTopicsConfigsDesc = "Optional configuration parameters for the topic during 'create' operation. 
" + + "Common configurations include:\n" + + "- retention.ms: How long to retain messages (milliseconds)\n" + + "- compression.type: Compression algorithm (none, gzip, snappy, lz4, zstd)\n" + + "- cleanup.policy: Log cleanup policy (delete, compact, compact,delete)\n" + + "- segment.ms: Time before a new log segment is rolled out\n" + + "- max.message.bytes: Maximum size of a message batch" + kafkaTopicsIncludeInternalDesc = "Whether to include internal Kafka topics in the 'list' operation. " + + "Internal topics are used by Kafka itself (e.g., __consumer_offsets, __transaction_state). " + + "Default: false" +) + +// KafkaTopicsToolBuilder implements the ToolBuilder interface for Kafka topics. +type KafkaTopicsToolBuilder struct { //nolint:revive *builders.BaseToolBuilder } @@ -55,7 +96,7 @@ func NewKafkaTopicsToolBuilder() *KafkaTopicsToolBuilder { } // BuildTools builds the Kafka Topics tool list -func (b *KafkaTopicsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *KafkaTopicsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -67,11 +108,14 @@ func (b *KafkaTopicsToolBuilder) BuildTools(_ context.Context, config builders.T } // Build tools - tool := b.buildKafkaTopicsTool() + tool, err := b.buildKafkaTopicsTool() + if err != nil { + return nil, err + } handler := b.buildKafkaTopicsHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[kafkaTopicsInput, any]{ Tool: tool, Handler: handler, }, @@ -79,17 +123,11 @@ func (b *KafkaTopicsToolBuilder) BuildTools(_ context.Context, config builders.T } // buildKafkaTopicsTool builds the Kafka Topics MCP tool definition -func (b *KafkaTopicsToolBuilder) buildKafkaTopicsTool() mcp.Tool { - resourceDesc := "Resource to operate on. Available resources:\n" + - "- topic: A single Kafka topic for operations on individual topics (create, get, delete)\n" + - "- topics: Collection of Kafka topics for bulk operations (list)" - - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all topics in the Kafka cluster, optionally including internal topics\n" + - "- get: Get detailed configuration for a specific topic\n" + - "- create: Create a new topic with specified partitions, replication factor, and optional configs\n" + - "- delete: Delete an existing topic\n" + - "- metadata: Get metadata for a specific topic\n" +func (b *KafkaTopicsToolBuilder) buildKafkaTopicsTool() (*sdk.Tool, error) { + inputSchema, err := buildKafkaTopicsInputSchema() + if err != nil { + return nil, err + } toolDesc := "Unified tool for managing Apache Kafka topics.\n" + "This tool provides access to various Kafka topic operations, including creation, deletion, listing, and configuration retrieval.\n" + @@ -137,72 +175,33 @@ func (b *KafkaTopicsToolBuilder) buildKafkaTopicsTool() mcp.Tool { " name: \"old-topic\"\n\n" + "This tool requires appropriate Kafka permissions for topic management." - return mcp.NewTool("kafka_admin_topics", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("name", - mcp.Description("The name of the Kafka topic to operate on. 
"+ - "Required for 'get', 'create', 'delete', and 'metadata' operations on the 'topic' resource. "+ - "Topic names should follow Kafka naming conventions (alphanumeric, dots, underscores, and hyphens).")), - mcp.WithNumber("partitions", - mcp.Description("The number of partitions for the topic. Required for 'create' operation. "+ - "Partitions determine the parallelism and scalability of the topic. "+ - "More partitions allow more concurrent consumers and higher throughput.")), - mcp.WithNumber("replicationFactor", - mcp.Description("The replication factor for the topic. Required for 'create' operation. "+ - "Replication factor determines fault tolerance - it should be at least 2 for production use. "+ - "Cannot exceed the number of available brokers in the cluster.")), - mcp.WithObject("configs", - mcp.Description("Optional configuration parameters for the topic during 'create' operation. "+ - "Common configurations include:\n"+ - "- retention.ms: How long to retain messages (milliseconds)\n"+ - "- compression.type: Compression algorithm (none, gzip, snappy, lz4, zstd)\n"+ - "- cleanup.policy: Log cleanup policy (delete, compact, compact,delete)\n"+ - "- segment.ms: Time before a new log segment is rolled out\n"+ - "- max.message.bytes: Maximum size of a message batch")), - mcp.WithBoolean("includeInternal", - mcp.Description("Whether to include internal Kafka topics in the 'list' operation. "+ - "Internal topics are used by Kafka itself (e.g., __consumer_offsets, __transaction_state). "+ - "Default: false")), - ) + return &sdk.Tool{ + Name: "kafka_admin_topics", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildKafkaTopicsHandler builds the Kafka Topics handler function -func (b *KafkaTopicsToolBuilder) buildKafkaTopicsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return b.handleError("get resource", err), nil - } - - operation, err := request.RequireString("operation") - if err != nil { - return b.handleError("get operation", err), nil - } - +func (b *KafkaTopicsToolBuilder) buildKafkaTopicsHandler(readOnly bool) builders.ToolHandlerFunc[kafkaTopicsInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input kafkaTopicsInput) (*sdk.CallToolResult, any, error) { // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) + resource := strings.ToLower(input.Resource) + operation := strings.ToLower(input.Operation) // Validate write operations in read-only mode if readOnly && (operation == "create" || operation == "delete") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Kafka admin client session := mcpCtx.GetKafkaSession(ctx) if session == nil { - return b.handleError("get Kafka session not found in context", nil), nil + return nil, nil, b.handleError("get Kafka session not found in context", nil) } admin, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } // Dispatch based on resource and operation @@ -210,25 +209,30 @@ func (b *KafkaTopicsToolBuilder) buildKafkaTopicsHandler(readOnly bool) func(con case "topics": 
switch operation { case "list": - return b.handleKafkaTopicsList(ctx, admin, request) + result, err := b.handleKafkaTopicsList(ctx, admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'topics': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'topics': %s", operation) } case "topic": switch operation { case "get": - return b.handleKafkaTopicGet(ctx, admin, request) + result, err := b.handleKafkaTopicGet(ctx, admin, input) + return result, nil, err case "create": - return b.handleKafkaTopicCreate(ctx, admin, request) + result, err := b.handleKafkaTopicCreate(ctx, admin, input) + return result, nil, err case "delete": - return b.handleKafkaTopicDelete(ctx, admin, request) + result, err := b.handleKafkaTopicDelete(ctx, admin, input) + return result, nil, err case "metadata": - return b.handleKafkaTopicMetadata(ctx, admin, request) + result, err := b.handleKafkaTopicMetadata(ctx, admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'topic': %s", operation)), nil + return nil, nil, fmt.Errorf("invalid operation for resource 'topic': %s", operation) } default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: topics, topic", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. available resources: topics, topic", resource) } } } @@ -236,26 +240,42 @@ func (b *KafkaTopicsToolBuilder) buildKafkaTopicsHandler(readOnly bool) func(con // Utility functions // handleError provides unified error handling -func (b *KafkaTopicsToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *KafkaTopicsToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *KafkaTopicsToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *KafkaTopicsToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) + } + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil +} + +func requireString(value *string, key string) (string, error) { + if value == nil { + return "", fmt.Errorf("required argument %q not found", key) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return *value, nil +} + +func requireInt(value *int, key string) (int, error) { + if value == nil { + return 0, fmt.Errorf("required argument %q not found", key) + } + return *value, nil } // handleKafkaTopicsList handles listing all topics -func (b *KafkaTopicsToolBuilder) handleKafkaTopicsList(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - includeInternal := request.GetBool("includeInternal", false) +func (b *KafkaTopicsToolBuilder) handleKafkaTopicsList(ctx context.Context, admin *kadm.Client, input kafkaTopicsInput) (*sdk.CallToolResult, error) { + includeInternal := input.IncludeInternal topics, err := admin.ListTopics(ctx) if err != nil { - return b.handleError("list Kafka topics", err), nil + return nil, b.handleError("list Kafka topics", err) } // Filter out 
internal topics if not requested @@ -273,95 +293,220 @@ func (b *KafkaTopicsToolBuilder) handleKafkaTopicsList(ctx context.Context, admi } // handleKafkaTopicGet handles getting detailed information about a specific topic -func (b *KafkaTopicsToolBuilder) handleKafkaTopicGet(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - topicName, err := request.RequireString("name") +func (b *KafkaTopicsToolBuilder) handleKafkaTopicGet(ctx context.Context, admin *kadm.Client, input kafkaTopicsInput) (*sdk.CallToolResult, error) { + topicName, err := requireString(input.Name, "name") if err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } topics, err := admin.ListTopics(ctx, topicName) if err != nil { - return b.handleError("get Kafka topic", err), nil + return nil, b.handleError("get Kafka topic", err) } return b.marshalResponse(topics) } // handleKafkaTopicCreate handles creating a new topic -func (b *KafkaTopicsToolBuilder) handleKafkaTopicCreate(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - topicName, err := request.RequireString("name") +func (b *KafkaTopicsToolBuilder) handleKafkaTopicCreate(ctx context.Context, admin *kadm.Client, input kafkaTopicsInput) (*sdk.CallToolResult, error) { + topicName, err := requireString(input.Name, "name") if err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } - partitionsNum, err := request.RequireInt("partitions") + partitionsNum, err := requireInt(input.Partitions, "partitions") if err != nil { - return b.handleError("get partitions", err), nil + return nil, b.handleError("get partitions", err) } - replicationFactorNum, err := request.RequireInt("replicationFactor") + replicationFactorNum, err := requireInt(input.ReplicationFactor, "replicationFactor") if err != nil { - return b.handleError("get replication factor", err), nil + return nil, b.handleError("get replication factor", err) } - ///nolint:gosec + //nolint:gosec partitions := int32(partitionsNum) - ///nolint:gosec + //nolint:gosec replicationFactor := int16(replicationFactorNum) - // Parse optional configs - var configs map[string]*string - arguments := request.GetArguments() - if configsParam, exists := arguments["configs"]; exists { - if configsMap, ok := configsParam.(map[string]interface{}); ok { - configs = make(map[string]*string) - for key, value := range configsMap { - if strValue, ok := value.(string); ok { - configs[key] = &strValue - } else { - // Convert non-string values to strings - strValue := fmt.Sprintf("%v", value) - configs[key] = &strValue - } - } - } - } + configs := b.buildConfigs(input) // Create topic using the correct CreateTopics API results, err := admin.CreateTopics(ctx, partitions, replicationFactor, configs, topicName) if err != nil { - return b.handleError("create Kafka topic", err), nil + return nil, b.handleError("create Kafka topic", err) } return b.marshalResponse(results) } // handleKafkaTopicDelete handles deleting a topic -func (b *KafkaTopicsToolBuilder) handleKafkaTopicDelete(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - topicName, err := request.RequireString("name") +func (b *KafkaTopicsToolBuilder) handleKafkaTopicDelete(ctx context.Context, admin *kadm.Client, input kafkaTopicsInput) (*sdk.CallToolResult, error) { + topicName, err := requireString(input.Name, "name") if 
err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } results, err := admin.DeleteTopics(ctx, topicName) if err != nil { - return b.handleError("delete Kafka topic", err), nil + return nil, b.handleError("delete Kafka topic", err) } return b.marshalResponse(results) } // handleKafkaTopicMetadata handles getting metadata for a topic -func (b *KafkaTopicsToolBuilder) handleKafkaTopicMetadata(ctx context.Context, admin *kadm.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - topicName, err := request.RequireString("name") +func (b *KafkaTopicsToolBuilder) handleKafkaTopicMetadata(ctx context.Context, admin *kadm.Client, input kafkaTopicsInput) (*sdk.CallToolResult, error) { + topicName, err := requireString(input.Name, "name") if err != nil { - return b.handleError("get topic name", err), nil + return nil, b.handleError("get topic name", err) } metadata, err := admin.Metadata(ctx, topicName) if err != nil { - return b.handleError("get Kafka topic metadata", err), nil + return nil, b.handleError("get Kafka topic metadata", err) } return b.marshalResponse(metadata) } + +func (b *KafkaTopicsToolBuilder) buildConfigs(input kafkaTopicsInput) map[string]*string { + if len(input.Configs) == 0 { + return nil + } + + configs := make(map[string]*string, len(input.Configs)) + for key, value := range input.Configs { + strValue, ok := value.(string) + if !ok { + strValue = fmt.Sprintf("%v", value) + } + configs[key] = &strValue + } + + return configs +} + +func buildKafkaTopicsInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[kafkaTopicsInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "resource", kafkaTopicsResourceDesc) + setSchemaDescription(schema, "operation", kafkaTopicsOperationDesc) + setSchemaDescription(schema, "name", kafkaTopicsNameDesc) + setSchemaDescription(schema, "partitions", kafkaTopicsPartitionsDesc) + setSchemaDescription(schema, "replicationFactor", kafkaTopicsReplicationFactorDesc) + setSchemaDescription(schema, "configs", kafkaTopicsConfigsDesc) + setSchemaDescription(schema, "includeInternal", kafkaTopicsIncludeInternalDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + +func setSchemaDescription(schema *jsonschema.Schema, name, desc string) { + if schema == nil { + return + } + prop, ok := schema.Properties[name] + if !ok || prop == nil { + return + } + prop.Description = desc +} + +func normalizeAdditionalProperties(schema *jsonschema.Schema) { + visited := map[*jsonschema.Schema]bool{} + var walk func(*jsonschema.Schema) + walk = func(s *jsonschema.Schema) { + if s == nil || visited[s] { + return + } + visited[s] = true + + if s.Type == "object" && s.Properties != nil && isFalseSchema(s.AdditionalProperties) { + s.AdditionalProperties = nil + } + + for _, prop := range s.Properties { + walk(prop) + } + for _, prop := range s.PatternProperties { + walk(prop) + } + for _, def := range s.Defs { + walk(def) + } + for _, def := range s.Definitions { + walk(def) + } + if s.AdditionalProperties != nil && !isFalseSchema(s.AdditionalProperties) { + walk(s.AdditionalProperties) + } + if s.Items != nil { + walk(s.Items) + } + for _, item := range s.PrefixItems { + walk(item) + } + if s.AdditionalItems != 
nil { + walk(s.AdditionalItems) + } + if s.UnevaluatedItems != nil { + walk(s.UnevaluatedItems) + } + if s.UnevaluatedProperties != nil { + walk(s.UnevaluatedProperties) + } + if s.PropertyNames != nil { + walk(s.PropertyNames) + } + if s.Contains != nil { + walk(s.Contains) + } + for _, subschema := range s.AllOf { + walk(subschema) + } + for _, subschema := range s.AnyOf { + walk(subschema) + } + for _, subschema := range s.OneOf { + walk(subschema) + } + if s.Not != nil { + walk(s.Not) + } + if s.If != nil { + walk(s.If) + } + if s.Then != nil { + walk(s.Then) + } + if s.Else != nil { + walk(s.Else) + } + for _, subschema := range s.DependentSchemas { + walk(subschema) + } + } + walk(schema) +} + +func isFalseSchema(schema *jsonschema.Schema) bool { + if schema == nil || schema.Not == nil { + return false + } + if !reflect.ValueOf(*schema.Not).IsZero() { + return false + } + clone := *schema + clone.Not = nil + return reflect.ValueOf(clone).IsZero() +} diff --git a/pkg/mcp/builders/kafka/topics_test.go b/pkg/mcp/builders/kafka/topics_test.go new file mode 100644 index 0000000..312ec98 --- /dev/null +++ b/pkg/mcp/builders/kafka/topics_test.go @@ -0,0 +1,147 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package kafka + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestKafkaTopicsToolBuilder(t *testing.T) { + builder := NewKafkaTopicsToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "kafka_topics", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "kafka-admin") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"kafka-admin"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_topics", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"kafka-admin"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "kafka_admin_topics", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"kafka-admin"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestKafkaTopicsToolSchema(t *testing.T) { + builder := NewKafkaTopicsToolBuilder() + tool, err := builder.buildKafkaTopicsTool() + require.NoError(t, err) + assert.Equal(t, "kafka_admin_topics", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "name", + "partitions", + "replicationFactor", + "configs", + "includeInternal", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, kafkaTopicsResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, kafkaTopicsOperationDesc, operationSchema.Description) +} + +func TestKafkaTopicsToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewKafkaTopicsToolBuilder() + handler := builder.buildKafkaTopicsHandler(true) + + _, _, err := handler(context.Background(), nil, kafkaTopicsInput{ + Resource: "topic", + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} + +func mapStringKeys[V any](m map[string]V) []string { + keys := make([]string, 0, len(m)) + for key := range m { + keys = append(keys, key) + } + return keys +} 
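A note on the required-argument pattern introduced above: optional JSON arguments now decode into pointer fields of `kafkaTopicsInput`, and `requireString`/`requireInt` turn a nil pointer into a "required argument ... not found" error, replacing the old `request.RequireString`/`request.RequireInt` lookups. The following is a minimal sketch of that behavior, assuming only the `kafkaTopicsInput` type and the helpers defined in `topics.go` above; the sample payload and function name are invented for illustration and are not part of the diff.

```go
package kafka

import (
	"encoding/json"
	"fmt"
)

// sketchRequireHelpers is illustrative only: it shows how a tool-call payload
// decodes into kafkaTopicsInput and how the nil-pointer helpers report a
// missing required argument.
func sketchRequireHelpers() {
	payload := []byte(`{"resource":"topic","operation":"create","name":"orders","partitions":3}`)

	var input kafkaTopicsInput
	if err := json.Unmarshal(payload, &input); err != nil {
		panic(err)
	}

	// Arguments present in the payload decode into non-nil pointers.
	name, _ := requireString(input.Name, "name")
	partitions, _ := requireInt(input.Partitions, "partitions")
	fmt.Println(name, partitions) // orders 3

	// Omitted arguments stay nil and surface as errors.
	if _, err := requireInt(input.ReplicationFactor, "replicationFactor"); err != nil {
		fmt.Println(err) // required argument "replicationFactor" not found
	}
}
```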
diff --git a/pkg/mcp/builders/pulsar/admin_tools_test.go b/pkg/mcp/builders/pulsar/admin_tools_test.go new file mode 100644 index 0000000..83051a7 --- /dev/null +++ b/pkg/mcp/builders/pulsar/admin_tools_test.go @@ -0,0 +1,370 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminBrokersToolBuilder(t *testing.T) { + builder := NewPulsarAdminBrokersToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-brokers"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_brokers", tools[0].Definition().Name) + + config.Features = []string{"unrelated-feature"} + tools, err = builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Empty(t, tools) +} + +func TestPulsarAdminBrokersToolSchema(t *testing.T) { + builder := NewPulsarAdminBrokersToolBuilder() + tool, err := builder.buildPulsarAdminBrokersTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_brokers", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "clusterName", + "brokerUrl", + "configType", + "configName", + "configValue", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminBrokersResourceDesc, resourceSchema.Description) +} + +func TestPulsarAdminBrokersToolBuilder_RequiresSession(t *testing.T) { + builder := NewPulsarAdminBrokersToolBuilder() + handler := builder.buildPulsarAdminBrokersHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminBrokersInput{ + Resource: "config", + Operation: "update", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "pulsar session") +} + +func TestPulsarAdminClusterToolBuilder(t *testing.T) { + builder := NewPulsarAdminClusterToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-clusters"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_cluster", tools[0].Definition().Name) +} + +func TestPulsarAdminClusterToolSchema(t *testing.T) { + builder := NewPulsarAdminClusterToolBuilder() + tool, err := builder.buildClusterTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_cluster", tool.Name) + + schema, ok 
:= tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "cluster_name", + "domain_name", + "service_url", + "service_url_tls", + "broker_service_url", + "broker_service_url_tls", + "peer_cluster_names", + "brokers", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) +} + +func TestPulsarAdminClusterToolBuilder_RequiresSession(t *testing.T) { + builder := NewPulsarAdminClusterToolBuilder() + handler := builder.buildClusterHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminClusterInput{ + Resource: "cluster", + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "pulsar session") +} + +func TestPulsarAdminSourcesToolBuilder(t *testing.T) { + builder := NewPulsarAdminSourcesToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-sources"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_sources", tools[0].Definition().Name) +} + +func TestPulsarAdminSourcesToolSchema(t *testing.T) { + builder := NewPulsarAdminSourcesToolBuilder() + tool, err := builder.buildSourcesTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_sources", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "operation", + "tenant", + "namespace", + "name", + "archive", + "source-type", + "destination-topic-name", + "deserialization-classname", + "schema-type", + "classname", + "processing-guarantees", + "parallelism", + "source-config", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) +} + +func TestPulsarAdminSourcesToolBuilder_ReadOnlyRejectsCreate(t *testing.T) { + builder := NewPulsarAdminSourcesToolBuilder() + handler := builder.buildSourcesHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminSourcesInput{ + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} + +func TestPulsarAdminSinksToolBuilder(t *testing.T) { + builder := NewPulsarAdminSinksToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-sinks"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_sinks", tools[0].Definition().Name) +} + +func TestPulsarAdminSinksToolSchema(t *testing.T) { + builder := NewPulsarAdminSinksToolBuilder() + tool, err := builder.buildSinksTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_sinks", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "operation", + "tenant", + "namespace", + "name", + "archive", + "sink-type", + "inputs", + "topics-pattern", + "subs-name", + "parallelism", + "sink-config", + } + assert.ElementsMatch(t, expectedProps, 
mapStringKeys(schema.Properties)) +} + +func TestPulsarAdminSinksToolBuilder_ReadOnlyRejectsCreate(t *testing.T) { + builder := NewPulsarAdminSinksToolBuilder() + handler := builder.buildSinksHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminSinksInput{ + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} + +func TestPulsarAdminPackagesToolBuilder(t *testing.T) { + builder := NewPulsarAdminPackagesToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-packages"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_package", tools[0].Definition().Name) + + config.Features = []string{"unrelated-feature"} + tools, err = builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Empty(t, tools) +} + +func TestPulsarAdminPackagesToolSchema(t *testing.T) { + builder := NewPulsarAdminPackagesToolBuilder() + tool, err := builder.buildPackagesTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_package", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "packageName", + "namespace", + "type", + "description", + "contact", + "path", + "properties", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminPackagesResourceDesc, resourceSchema.Description) +} + +func TestPulsarAdminPackagesToolBuilder_ReadOnlyRejectsUpload(t *testing.T) { + builder := NewPulsarAdminPackagesToolBuilder() + handler := builder.buildPackagesHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminPackagesInput{ + Resource: "package", + Operation: "upload", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} + +func TestPulsarAdminSubscriptionToolBuilder(t *testing.T) { + builder := NewPulsarAdminSubscriptionToolBuilder() + + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-subscriptions"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_subscription", tools[0].Definition().Name) +} + +func TestPulsarAdminSubscriptionToolSchema(t *testing.T) { + builder := NewPulsarAdminSubscriptionToolBuilder() + tool, err := builder.buildSubscriptionTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_subscription", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation", "topic"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "topic", + "subscription", + "messageId", + "count", + "expireTimeInSeconds", + "force", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) +} + +func TestPulsarAdminSubscriptionToolBuilder_ReadOnlyRejectsDelete(t *testing.T) { + builder := NewPulsarAdminSubscriptionToolBuilder() + handler := 
builder.buildSubscriptionHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminSubscriptionInput{ + Resource: "subscription", + Operation: "delete", + Topic: "persistent://public/default/test", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/pulsar/brokers.go b/pkg/mcp/builders/pulsar/brokers.go index 6a81fd3..6de8205 100644 --- a/pkg/mcp/builders/pulsar/brokers.go +++ b/pkg/mcp/builders/pulsar/brokers.go @@ -20,13 +20,51 @@ import ( "encoding/json" "fmt" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminBrokersInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + ClusterName *string `json:"clusterName,omitempty"` + BrokerURL *string `json:"brokerUrl,omitempty"` + ConfigType *string `json:"configType,omitempty"` + ConfigName *string `json:"configName,omitempty"` + ConfigValue *string `json:"configValue,omitempty"` +} + +const ( + pulsarAdminBrokersResourceDesc = "Type of resource to access, available options:\n" + + "- brokers: Manage broker listings\n" + + "- health: Check broker health status\n" + + "- config: Manage broker configurations\n" + + "- namespaces: Manage namespaces owned by a broker" + pulsarAdminBrokersOperationDesc = "Operation to perform, available options:\n" + + "- list: List resources (used with brokers)\n" + + "- get: Retrieve resource information (used with health, config, namespaces)\n" + + "- update: Update a resource (used with config)\n" + + "- delete: Delete a resource (used with config)" + pulsarAdminBrokersClusterNameDesc = "Pulsar cluster name, required for these operations:\n" + + "- When resource=brokers, operation=list\n" + + "- When resource=namespaces, operation=get" + pulsarAdminBrokersBrokerURLDesc = "Broker URL, such as '127.0.0.1:8080', required for these operations:\n" + + "- When resource=namespaces, operation=get" + pulsarAdminBrokersConfigTypeDesc = "Configuration type, required when resource=config, operation=get, available options:\n" + + "- dynamic: Get list of dynamically modifiable configuration names\n" + + "- runtime: Get all runtime configurations (including static and dynamic configs)\n" + + "- internal: Get internal configuration information\n" + + "- all_dynamic: Get all dynamic configurations and their current values" + pulsarAdminBrokersConfigNameDesc = "Configuration parameter name, required for these operations:\n" + + "- When resource=config, operation=update\n" + + "- When resource=config, operation=delete" + pulsarAdminBrokersConfigValueDesc = "Configuration parameter value, required for these operations:\n" + + "- When resource=config, operation=update" +) + // PulsarAdminBrokersToolBuilder implements the ToolBuilder interface for Pulsar admin brokers // /nolint:revive type PulsarAdminBrokersToolBuilder struct { @@ -56,7 +94,7 @@ func NewPulsarAdminBrokersToolBuilder() *PulsarAdminBrokersToolBuilder { } // BuildTools builds the Pulsar admin brokers tool list -func (b *PulsarAdminBrokersToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminBrokersToolBuilder) BuildTools(_ context.Context, config 
builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -68,11 +106,14 @@ func (b *PulsarAdminBrokersToolBuilder) BuildTools(_ context.Context, config bui } // Build tools - tool := b.buildPulsarAdminBrokersTool() + tool, err := b.buildPulsarAdminBrokersTool() + if err != nil { + return nil, err + } handler := b.buildPulsarAdminBrokersHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminBrokersInput, any]{ Tool: tool, Handler: handler, }, @@ -80,110 +121,80 @@ func (b *PulsarAdminBrokersToolBuilder) BuildTools(_ context.Context, config bui } // buildPulsarAdminBrokersTool builds the Pulsar admin brokers MCP tool definition -func (b *PulsarAdminBrokersToolBuilder) buildPulsarAdminBrokersTool() mcp.Tool { - return mcp.NewTool("pulsar_admin_brokers", - mcp.WithDescription("Unified tool for managing Apache Pulsar broker resources. This tool integrates multiple broker management functions, including:\n"+ - "1. List active brokers in a cluster (resource=brokers, operation=list)\n"+ - "2. Check broker health status (resource=health, operation=get)\n"+ - "3. Manage broker configurations (resource=config, operation=get/update/delete)\n"+ - "4. View namespaces owned by a broker (resource=namespaces, operation=get)\n\n"+ - "Different functions are accessed by combining resource and operation parameters, with other parameters used selectively based on operation type.\n"+ - "Example: {\"resource\": \"config\", \"operation\": \"get\", \"configType\": \"dynamic\"} retrieves all dynamic configuration names.\n"+ - "This tool requires Pulsar super-user permissions."), - mcp.WithString("resource", mcp.Required(), - mcp.Description("Type of resource to access, available options:\n"+ - "- brokers: Manage broker listings\n"+ - "- health: Check broker health status\n"+ - "- config: Manage broker configurations\n"+ - "- namespaces: Manage namespaces owned by a broker"), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description("Operation to perform, available options:\n"+ - "- list: List resources (used with brokers)\n"+ - "- get: Retrieve resource information (used with health, config, namespaces)\n"+ - "- update: Update a resource (used with config)\n"+ - "- delete: Delete a resource (used with config)"), - ), - mcp.WithString("clusterName", - mcp.Description("Pulsar cluster name, required for these operations:\n"+ - "- When resource=brokers, operation=list\n"+ - "- When resource=namespaces, operation=get"), - ), - mcp.WithString("brokerUrl", - mcp.Description("Broker URL, such as '127.0.0.1:8080', required for these operations:\n"+ - "- When resource=namespaces, operation=get"), - ), - mcp.WithString("configType", - mcp.Description("Configuration type, required when resource=config, operation=get, available options:\n"+ - "- dynamic: Get list of dynamically modifiable configuration names\n"+ - "- runtime: Get all runtime configurations (including static and dynamic configs)\n"+ - "- internal: Get internal configuration information\n"+ - "- all_dynamic: Get all dynamic configurations and their current values"), - ), - mcp.WithString("configName", - mcp.Description("Configuration parameter name, required for these operations:\n"+ - "- When resource=config, operation=update\n"+ - "- When resource=config, operation=delete"), - ), - mcp.WithString("configValue", - 
mcp.Description("Configuration parameter value, required for these operations:\n"+ - "- When resource=config, operation=update"), - ), - ) +func (b *PulsarAdminBrokersToolBuilder) buildPulsarAdminBrokersTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminBrokersInputSchema() + if err != nil { + return nil, err + } + + toolDesc := "Unified tool for managing Apache Pulsar broker resources. This tool integrates multiple broker management functions, including:\n" + + "1. List active brokers in a cluster (resource=brokers, operation=list)\n" + + "2. Check broker health status (resource=health, operation=get)\n" + + "3. Manage broker configurations (resource=config, operation=get/update/delete)\n" + + "4. View namespaces owned by a broker (resource=namespaces, operation=get)\n\n" + + "Different functions are accessed by combining resource and operation parameters, with other parameters used selectively based on operation type.\n" + + "Example: {\"resource\": \"config\", \"operation\": \"get\", \"configType\": \"dynamic\"} retrieves all dynamic configuration names.\n" + + "This tool requires Pulsar super-user permissions." + + return &sdk.Tool{ + Name: "pulsar_admin_brokers", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildPulsarAdminBrokersHandler builds the Pulsar admin brokers handler function -func (b *PulsarAdminBrokersToolBuilder) buildPulsarAdminBrokersHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokersToolBuilder) buildPulsarAdminBrokersHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminBrokersInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminBrokersInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Get admin client client, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get admin client: %v", err) } // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError("Missing required resource parameter. " + - "Please specify one of: brokers, health, config, namespaces."), nil + resource := input.Resource + if resource == "" { + return nil, nil, fmt.Errorf("missing required resource parameter. please specify one of: brokers, health, config, namespaces") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError("Missing required operation parameter. " + - "Please specify one of: list, get, update, delete based on the resource type."), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required operation parameter. 
please specify one of: list, get, update, delete based on the resource type") } // Validate if the parameter combination is valid validCombination, errMsg := b.validateResourceOperation(resource, operation) if !validCombination { - return mcp.NewToolResultError(errMsg), nil + return nil, nil, fmt.Errorf("%s", errMsg) } // Process request based on resource type switch resource { case "brokers": - return b.handleBrokersResource(client, operation, request) + result, err := b.handleBrokersResource(client, operation, input) + return result, nil, err case "health": - return b.handleHealthResource(client, operation, request) + result, err := b.handleHealthResource(client, operation) + return result, nil, err case "config": // Check write operation permissions if (operation == "update" || operation == "delete") && readOnly { - return mcp.NewToolResultError("Configuration update/delete operations not allowed in read-only mode. " + - "Please contact your administrator if you need to modify broker configurations."), nil + return nil, nil, fmt.Errorf("configuration update/delete operations not allowed in read-only mode. please contact your administrator if you need to modify broker configurations") } - return b.handleConfigResource(client, operation, request) + result, err := b.handleConfigResource(client, operation, input) + return result, nil, err case "namespaces": - return b.handleNamespacesResource(client, operation, request) + result, err := b.handleNamespacesResource(client, operation, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported resource: %s. "+ - "Please use one of: brokers, health, config, namespaces.", resource)), nil + return nil, nil, fmt.Errorf("unsupported resource: %s. please use one of: brokers, health, config, namespaces", resource) } } } @@ -213,58 +224,47 @@ func (b *PulsarAdminBrokersToolBuilder) validateResourceOperation(resource, oper } // handleBrokersResource handles brokers resource -func (b *PulsarAdminBrokersToolBuilder) handleBrokersResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokersToolBuilder) handleBrokersResource(client cmdutils.Client, operation string, input pulsarAdminBrokersInput) (*sdk.CallToolResult, error) { switch operation { case "list": - clusterName, err := request.RequireString("clusterName") + clusterName, err := requireString(input.ClusterName, "clusterName") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'clusterName'. " + - "Please provide the name of the Pulsar cluster to list brokers for."), nil + return nil, fmt.Errorf("missing required parameter 'clusterName'. please provide the name of the Pulsar cluster to list brokers for") } brokers, err := client.Brokers().GetActiveBrokers(clusterName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get active brokers: %v. "+ - "Please verify the cluster name and ensure the Pulsar service is running.", err)), nil + return nil, fmt.Errorf("failed to get active brokers: %v. please verify the cluster name and ensure the Pulsar service is running", err) } - brokersJSON, err := json.Marshal(brokers) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize brokers list: %v", err)), nil - } - - return mcp.NewToolResultText(string(brokersJSON)), nil + return b.marshalResponse(brokers) default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation '%s' for brokers resource. 
"+ - "The only supported operation is 'list'.", operation)), nil + return nil, fmt.Errorf("unsupported operation '%s' for brokers resource. the only supported operation is 'list'", operation) } } // handleHealthResource handles health resource -func (b *PulsarAdminBrokersToolBuilder) handleHealthResource(client cmdutils.Client, operation string, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokersToolBuilder) handleHealthResource(client cmdutils.Client, operation string) (*sdk.CallToolResult, error) { switch operation { case "get": //nolint:staticcheck err := client.Brokers().HealthCheck() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Broker health check failed: %v. "+ - "The broker might be down or experiencing issues.", err)), nil + return nil, fmt.Errorf("broker health check failed: %v. the broker might be down or experiencing issues", err) } - return mcp.NewToolResultText("ok"), nil + return textResult("ok"), nil default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation '%s' for health resource. "+ - "The only supported operation is 'get'.", operation)), nil + return nil, fmt.Errorf("unsupported operation '%s' for health resource. the only supported operation is 'get'", operation) } } // handleConfigResource handles config resource -func (b *PulsarAdminBrokersToolBuilder) handleConfigResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokersToolBuilder) handleConfigResource(client cmdutils.Client, operation string, input pulsarAdminBrokersInput) (*sdk.CallToolResult, error) { switch operation { case "get": - configType, err := request.RequireString("configType") + configType, err := requireString(input.ConfigType, "configType") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'configType'. " + - "Please specify one of: dynamic, runtime, internal, all_dynamic."), nil + return nil, fmt.Errorf("missing required parameter 'configType'. please specify one of: dynamic, runtime, internal, all_dynamic") } var result interface{} @@ -280,95 +280,108 @@ func (b *PulsarAdminBrokersToolBuilder) handleConfigResource(client cmdutils.Cli case "all_dynamic": result, fetchErr = client.Brokers().GetAllDynamicConfigurations() default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid config type: '%s'. "+ - "Valid types are: dynamic, runtime, internal, all_dynamic.", configType)), nil + return nil, fmt.Errorf("invalid config type: '%s'. valid types are: dynamic, runtime, internal, all_dynamic", configType) } if fetchErr != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get %s configuration: %v", configType, fetchErr)), nil + return nil, fmt.Errorf("failed to get %s configuration: %v", configType, fetchErr) } - resultJSON, err := json.Marshal(result) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize configuration: %v", err)), nil - } - - return mcp.NewToolResultText(string(resultJSON)), nil + return b.marshalResponse(result) case "update": - configName, err := request.RequireString("configName") + configName, err := requireString(input.ConfigName, "configName") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'configName'. " + - "Please provide the name of the configuration parameter to update."), nil + return nil, fmt.Errorf("missing required parameter 'configName'. 
please provide the name of the configuration parameter to update") } - configValue, err := request.RequireString("configValue") + configValue, err := requireString(input.ConfigValue, "configValue") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'configValue'. " + - "Please provide the new value for the configuration parameter."), nil + return nil, fmt.Errorf("missing required parameter 'configValue'. please provide the new value for the configuration parameter") } err = client.Brokers().UpdateDynamicConfiguration(configName, configValue) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update configuration: %v. "+ - "Please verify the configuration name is valid and the value is of the correct type.", err)), nil + return nil, fmt.Errorf("failed to update configuration: %v. please verify the configuration name is valid and the value is of the correct type", err) } - return mcp.NewToolResultText(fmt.Sprintf("Dynamic configuration '%s' updated successfully to '%s'", - configName, configValue)), nil + return textResult(fmt.Sprintf("Dynamic configuration '%s' updated successfully to '%s'", configName, configValue)), nil case "delete": - configName, err := request.RequireString("configName") + configName, err := requireString(input.ConfigName, "configName") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'configName'. " + - "Please provide the name of the configuration parameter to delete."), nil + return nil, fmt.Errorf("missing required parameter 'configName'. please provide the name of the configuration parameter to delete") } err = client.Brokers().DeleteDynamicConfiguration(configName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete configuration: %v. "+ - "Please verify the configuration name is valid and exists.", err)), nil + return nil, fmt.Errorf("failed to delete configuration: %v. please verify the configuration name is valid and exists", err) } - return mcp.NewToolResultText(fmt.Sprintf("Dynamic configuration '%s' deleted successfully", configName)), nil + return textResult(fmt.Sprintf("Dynamic configuration '%s' deleted successfully", configName)), nil default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation '%s' for config resource. "+ - "Supported operations are: get, update, delete.", operation)), nil + return nil, fmt.Errorf("unsupported operation '%s' for config resource. supported operations are: get, update, delete", operation) } } // handleNamespacesResource handles namespaces resource -func (b *PulsarAdminBrokersToolBuilder) handleNamespacesResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokersToolBuilder) handleNamespacesResource(client cmdutils.Client, operation string, input pulsarAdminBrokersInput) (*sdk.CallToolResult, error) { switch operation { case "get": - clusterName, err := request.RequireString("clusterName") + clusterName, err := requireString(input.ClusterName, "clusterName") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'clusterName'. " + - "Please provide the name of the Pulsar cluster."), nil + return nil, fmt.Errorf("missing required parameter 'clusterName'. please provide the name of the Pulsar cluster") } - brokerURL, err := request.RequireString("brokerUrl") + brokerURL, err := requireString(input.BrokerURL, "brokerUrl") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'brokerUrl'. 
" + - "Please provide the URL of the broker (e.g., '127.0.0.1:8080')."), nil + return nil, fmt.Errorf("missing required parameter 'brokerUrl'. please provide the URL of the broker (e.g., '127.0.0.1:8080')") } namespaces, err := client.Brokers().GetOwnedNamespaces(clusterName, brokerURL) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get owned namespaces: %v. "+ - "Please verify the cluster name and broker URL are correct.", err)), nil - } - - namespacesJSON, err := json.Marshal(namespaces) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize namespaces: %v", err)), nil + return nil, fmt.Errorf("failed to get owned namespaces: %v. please verify the cluster name and broker URL are correct", err) } - return mcp.NewToolResultText(string(namespacesJSON)), nil + return b.marshalResponse(namespaces) default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation '%s' for namespaces resource. "+ - "The only supported operation is 'get'.", operation)), nil + return nil, fmt.Errorf("unsupported operation '%s' for namespaces resource. the only supported operation is 'get'", operation) } } + +func (b *PulsarAdminBrokersToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("failed to marshal response: %v", err) + } + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil +} + +func buildPulsarAdminBrokersInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminBrokersInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminBrokersResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminBrokersOperationDesc) + setSchemaDescription(schema, "clusterName", pulsarAdminBrokersClusterNameDesc) + setSchemaDescription(schema, "brokerUrl", pulsarAdminBrokersBrokerURLDesc) + setSchemaDescription(schema, "configType", pulsarAdminBrokersConfigTypeDesc) + setSchemaDescription(schema, "configName", pulsarAdminBrokersConfigNameDesc) + setSchemaDescription(schema, "configValue", pulsarAdminBrokersConfigValueDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/brokers_legacy.go b/pkg/mcp/builders/pulsar/brokers_legacy.go new file mode 100644 index 0000000..7837ace --- /dev/null +++ b/pkg/mcp/builders/pulsar/brokers_legacy.go @@ -0,0 +1,122 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminBrokersLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin brokers. +// /nolint:revive +type PulsarAdminBrokersLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminBrokersLegacyToolBuilder creates a new Pulsar admin brokers legacy tool builder instance. +func NewPulsarAdminBrokersLegacyToolBuilder() *PulsarAdminBrokersLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_brokers", + Version: "1.0.0", + Description: "Pulsar admin brokers management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "brokers"}, + } + + features := []string{ + "pulsar-admin-brokers", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminBrokersLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin brokers legacy tool list. +func (b *PulsarAdminBrokersLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildPulsarAdminBrokersTool() + if err != nil { + return nil, err + } + handler := b.buildPulsarAdminBrokersHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminBrokersLegacyToolBuilder) buildPulsarAdminBrokersTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminBrokersInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Unified tool for managing Apache Pulsar broker resources. This tool integrates multiple broker management functions, including:\n" + + "1. List active brokers in a cluster (resource=brokers, operation=list)\n" + + "2. Check broker health status (resource=health, operation=get)\n" + + "3. Manage broker configurations (resource=config, operation=get/update/delete)\n" + + "4. View namespaces owned by a broker (resource=namespaces, operation=get)\n\n" + + "Different functions are accessed by combining resource and operation parameters, with other parameters used selectively based on operation type.\n" + + "Example: {\"resource\": \"config\", \"operation\": \"get\", \"configType\": \"dynamic\"} retrieves all dynamic configuration names.\n" + + "This tool requires Pulsar super-user permissions." 
+ + return mcp.Tool{ + Name: "pulsar_admin_brokers", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminBrokersLegacyToolBuilder) buildPulsarAdminBrokersHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminBrokersToolBuilder() + sdkHandler := sdkBuilder.buildPulsarAdminBrokersHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminBrokersInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/brokers_stats.go b/pkg/mcp/builders/pulsar/brokers_stats.go index b0da1df..7912b5c 100644 --- a/pkg/mcp/builders/pulsar/brokers_stats.go +++ b/pkg/mcp/builders/pulsar/brokers_stats.go @@ -19,13 +19,28 @@ import ( "encoding/json" "fmt" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminBrokerStatsInput struct { + Resource string `json:"resource"` + AllocatorName *string `json:"allocator_name,omitempty"` +} + +const ( + pulsarAdminBrokerStatsResourceDesc = "Type of broker stats resource to access, available options:\n" + + "- monitoring_metrics: Metrics for the broker's monitoring system\n" + + "- mbeans: JVM MBeans statistics\n" + + "- topics: Statistics about all topics managed by the broker\n" + + "- allocator_stats: Memory allocator statistics (requires allocator_name parameter)\n" + + "- load_report: Broker load information" + pulsarAdminBrokerStatsAllocatorNameDesc = "The name of the allocator to get statistics for. 
Required only when resource=allocator_stats" +) + // PulsarAdminBrokerStatsToolBuilder implements the ToolBuilder interface for Pulsar Broker Statistics // /nolint:revive type PulsarAdminBrokerStatsToolBuilder struct { @@ -55,7 +70,7 @@ func NewPulsarAdminBrokerStatsToolBuilder() *PulsarAdminBrokerStatsToolBuilder { } // BuildTools builds the Pulsar Admin Broker Stats tool list -func (b *PulsarAdminBrokerStatsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -67,11 +82,14 @@ func (b *PulsarAdminBrokerStatsToolBuilder) BuildTools(_ context.Context, config } // Build tools - tool := b.buildBrokerStatsTool() + tool, err := b.buildBrokerStatsTool() + if err != nil { + return nil, err + } handler := b.buildBrokerStatsHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminBrokerStatsInput, any]{ Tool: tool, Handler: handler, }, @@ -79,13 +97,11 @@ func (b *PulsarAdminBrokerStatsToolBuilder) BuildTools(_ context.Context, config } // buildBrokerStatsTool builds the Pulsar Admin Broker Stats MCP tool definition -func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsTool() mcp.Tool { - resourceDesc := "Type of broker stats resource to access, available options:\n" + - "- monitoring_metrics: Metrics for the broker's monitoring system\n" + - "- mbeans: JVM MBeans statistics\n" + - "- topics: Statistics about all topics managed by the broker\n" + - "- allocator_stats: Memory allocator statistics (requires allocator_name parameter)\n" + - "- load_report: Broker load information" +func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminBrokerStatsInputSchema() + if err != nil { + return nil, err + } toolDesc := "Unified tool for retrieving Apache Pulsar broker statistics.\n" + "This tool provides access to various broker stats resources, including:\n" + @@ -98,57 +114,58 @@ func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsTool() mcp.Tool { "Example: {\"resource\": \"allocator_stats\", \"allocator_name\": \"default\"} retrieves stats for the default allocator\n" + "This tool requires Pulsar super-user permissions." - return mcp.NewTool("pulsar_admin_broker_stats", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("allocator_name", - mcp.Description("The name of the allocator to get statistics for. 
Required only when resource=allocator_stats"), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_broker_stats", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildBrokerStatsHandler builds the Pulsar Admin Broker Stats handler function -func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsHandler(_ bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsHandler(_ bool) builders.ToolHandlerFunc[pulsarAdminBrokerStatsInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminBrokerStatsInput) (*sdk.CallToolResult, any, error) { // Get Pulsar admin client session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } // Get required resource parameter - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'resource'. " + - "Please specify one of: monitoring_metrics, mbeans, topics, allocator_stats, load_report."), nil + resource := input.Resource + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'; please specify one of: monitoring_metrics, mbeans, topics, allocator_stats, load_report") } // Process request based on resource type switch resource { case "monitoring_metrics": - return b.handleMonitoringMetrics(client) + result, handlerErr := b.handleMonitoringMetrics(client) + return result, nil, handlerErr case "mbeans": - return b.handleMBeans(client) + result, handlerErr := b.handleMBeans(client) + return result, nil, handlerErr case "topics": - return b.handleTopics(client) + result, handlerErr := b.handleTopics(client) + return result, nil, handlerErr case "allocator_stats": - allocatorName, err := request.RequireString("allocator_name") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'allocator_name' for allocator_stats resource. " + - "Please provide the name of the allocator to get statistics for."), nil + allocatorName := "" + if input.AllocatorName != nil { + allocatorName = *input.AllocatorName + } + if allocatorName == "" { + return nil, nil, fmt.Errorf("missing required parameter 'allocator_name' for allocator_stats resource; please provide the name of the allocator to get statistics for") } - return b.handleAllocatorStats(client, allocatorName) + result, handlerErr := b.handleAllocatorStats(client, allocatorName) + return result, nil, handlerErr case "load_report": - return b.handleLoadReport(client) + result, handlerErr := b.handleLoadReport(client) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported resource: %s. "+ - "Please use one of: monitoring_metrics, mbeans, topics, allocator_stats, load_report.", resource)), nil + return nil, nil, fmt.Errorf("unsupported resource: %s. 
please use one of: monitoring_metrics, mbeans, topics, allocator_stats, load_report", resource) } } } @@ -156,62 +173,84 @@ func (b *PulsarAdminBrokerStatsToolBuilder) buildBrokerStatsHandler(_ bool) func // Utility functions // handleError provides unified error handling -func (b *PulsarAdminBrokerStatsToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminBrokerStatsToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminBrokerStatsToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Specific operation handler functions // handleMonitoringMetrics handles retrieving monitoring metrics -func (b *PulsarAdminBrokerStatsToolBuilder) handleMonitoringMetrics(client cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) handleMonitoringMetrics(client cmdutils.Client) (*sdk.CallToolResult, error) { stats, err := client.BrokerStats().GetMetrics() if err != nil { - return b.handleError("get monitoring metrics", err), nil + return nil, b.handleError("get monitoring metrics", err) } return b.marshalResponse(stats) } // handleMBeans handles retrieving MBeans statistics -func (b *PulsarAdminBrokerStatsToolBuilder) handleMBeans(client cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) handleMBeans(client cmdutils.Client) (*sdk.CallToolResult, error) { stats, err := client.BrokerStats().GetMBeans() if err != nil { - return b.handleError("get MBeans", err), nil + return nil, b.handleError("get MBeans", err) } return b.marshalResponse(stats) } // handleTopics handles retrieving topics statistics -func (b *PulsarAdminBrokerStatsToolBuilder) handleTopics(client cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) handleTopics(client cmdutils.Client) (*sdk.CallToolResult, error) { stats, err := client.BrokerStats().GetTopics() if err != nil { - return b.handleError("get topics stats", err), nil + return nil, b.handleError("get topics stats", err) } return b.marshalResponse(stats) } // handleAllocatorStats handles retrieving allocator statistics -func (b *PulsarAdminBrokerStatsToolBuilder) handleAllocatorStats(client cmdutils.Client, allocatorName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) handleAllocatorStats(client cmdutils.Client, allocatorName string) (*sdk.CallToolResult, error) { stats, err := client.BrokerStats().GetAllocatorStats(allocatorName) if err != nil { - return b.handleError("get allocator stats", err), nil + return nil, b.handleError("get allocator stats", err) } return b.marshalResponse(stats) } // handleLoadReport handles retrieving load report -func (b *PulsarAdminBrokerStatsToolBuilder) handleLoadReport(client cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminBrokerStatsToolBuilder) 
handleLoadReport(client cmdutils.Client) (*sdk.CallToolResult, error) { stats, err := client.BrokerStats().GetLoadReport() if err != nil { - return b.handleError("get load report", err), nil + return nil, b.handleError("get load report", err) } return b.marshalResponse(stats) } + +func buildPulsarAdminBrokerStatsInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminBrokerStatsInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminBrokerStatsResourceDesc) + setSchemaDescription(schema, "allocator_name", pulsarAdminBrokerStatsAllocatorNameDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/brokers_stats_legacy.go b/pkg/mcp/builders/pulsar/brokers_stats_legacy.go new file mode 100644 index 0000000..e12bdd3 --- /dev/null +++ b/pkg/mcp/builders/pulsar/brokers_stats_legacy.go @@ -0,0 +1,124 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminBrokerStatsLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar broker stats. +// /nolint:revive +type PulsarAdminBrokerStatsLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminBrokerStatsLegacyToolBuilder creates a new Pulsar admin broker stats legacy tool builder instance. +func NewPulsarAdminBrokerStatsLegacyToolBuilder() *PulsarAdminBrokerStatsLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_broker_stats", + Version: "1.0.0", + Description: "Pulsar Broker Statistics administration tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "broker", "stats", "admin", "monitoring"}, + } + + features := []string{ + "pulsar-admin-brokers-status", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminBrokerStatsLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin broker stats legacy tool list. 
+func (b *PulsarAdminBrokerStatsLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildBrokerStatsTool() + if err != nil { + return nil, err + } + handler := b.buildBrokerStatsHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminBrokerStatsLegacyToolBuilder) buildBrokerStatsTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminBrokerStatsInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Unified tool for retrieving Apache Pulsar broker statistics.\n" + + "This tool provides access to various broker stats resources, including:\n" + + "1. Monitoring metrics (resource=monitoring_metrics): Metrics for the broker's monitoring system\n" + + "2. MBean stats (resource=mbeans): JVM MBeans statistics\n" + + "3. Topics stats (resource=topics): Statistics about all topics managed by the broker\n" + + "4. Allocator stats (resource=allocator_stats): Memory allocator statistics for specific allocator\n" + + "5. Load report (resource=load_report): Broker load information, sometimes the load report is not available, so suggest to use other resources to get the broker metrics\n\n" + + "Example: {\"resource\": \"monitoring_metrics\"} retrieves all monitoring metrics\n" + + "Example: {\"resource\": \"allocator_stats\", \"allocator_name\": \"default\"} retrieves stats for the default allocator\n" + + "This tool requires Pulsar super-user permissions." + + return mcp.Tool{ + Name: "pulsar_admin_broker_stats", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminBrokerStatsLegacyToolBuilder) buildBrokerStatsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminBrokerStatsToolBuilder() + sdkHandler := sdkBuilder.buildBrokerStatsHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminBrokerStatsInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/brokers_stats_test.go b/pkg/mcp/builders/pulsar/brokers_stats_test.go new file mode 100644 index 0000000..36a4ca7 --- /dev/null +++ b/pkg/mcp/builders/pulsar/brokers_stats_test.go @@ -0,0 +1,116 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
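Note: both legacy builders above delegate to the go-sdk handler and convert its result with `legacyToolResultFromSDK`, which is referenced but not defined in this part of the diff. A minimal sketch of that bridge, assuming only text content and the `IsError` flag need to be carried back to mcp-go (and assuming imports of both SDKs — `sdk "github.com/modelcontextprotocol/go-sdk/mcp"` and `"github.com/mark3labs/mcp-go/mcp"` — plus "strings"); the real helper may handle additional content types.

// legacyToolResultFromSDK is assumed to translate a go-sdk CallToolResult
// into the mcp-go result type returned by the legacy handlers.
func legacyToolResultFromSDK(result *sdk.CallToolResult) *mcp.CallToolResult {
	if result == nil {
		return mcp.NewToolResultText("")
	}
	var texts []string
	for _, content := range result.Content {
		if tc, ok := content.(*sdk.TextContent); ok {
			texts = append(texts, tc.Text)
		}
	}
	joined := strings.Join(texts, "\n")
	if result.IsError {
		return mcp.NewToolResultError(joined)
	}
	return mcp.NewToolResultText(joined)
}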
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminBrokerStatsToolBuilder(t *testing.T) { + builder := NewPulsarAdminBrokerStatsToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_broker_stats", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-brokers-status") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-brokers-status"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_broker_stats", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-brokers-status"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-brokers-status"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminBrokerStatsToolSchema(t *testing.T) { + builder := NewPulsarAdminBrokerStatsToolBuilder() + tool, err := builder.buildBrokerStatsTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_broker_stats", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{"resource", "allocator_name"} + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminBrokerStatsResourceDesc, resourceSchema.Description) + + allocatorSchema := schema.Properties["allocator_name"] + require.NotNil(t, allocatorSchema) + assert.Equal(t, pulsarAdminBrokerStatsAllocatorNameDesc, allocatorSchema.Description) +} diff --git a/pkg/mcp/builders/pulsar/cluster.go b/pkg/mcp/builders/pulsar/cluster.go index 02ce2e6..58b15bb 100644 --- a/pkg/mcp/builders/pulsar/cluster.go +++ b/pkg/mcp/builders/pulsar/cluster.go @@ -20,13 +20,49 @@ import ( "fmt" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" 
"github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminClusterInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + ClusterName *string `json:"cluster_name,omitempty"` + DomainName *string `json:"domain_name,omitempty"` + ServiceURL *string `json:"service_url,omitempty"` + ServiceURLTLS *string `json:"service_url_tls,omitempty"` + BrokerServiceURL *string `json:"broker_service_url,omitempty"` + BrokerServiceURLTLS *string `json:"broker_service_url_tls,omitempty"` + PeerClusterNames []string `json:"peer_cluster_names,omitempty"` + Brokers []string `json:"brokers,omitempty"` +} + +const ( + pulsarAdminClusterResourceDesc = "Type of cluster resource to access, available options:\n" + + "- cluster: Pulsar cluster configuration\n" + + "- peer_clusters: Peer clusters for geo-replication\n" + + "- failure_domain: Failure domains for fault tolerance" + pulsarAdminClusterOperationDesc = "Operation to perform, available options (depend on resource):\n" + + "- list: List resources (used with cluster, failure_domain)\n" + + "- get: Retrieve resource information (used with cluster, peer_clusters, failure_domain)\n" + + "- create: Create a new resource (used with cluster, failure_domain)\n" + + "- update: Update an existing resource (used with cluster, peer_clusters, failure_domain)\n" + + "- delete: Delete a resource (used with cluster, failure_domain)" + pulsarAdminClusterNameDesc = "Name of the Pulsar cluster, required for all operations except 'list' with resource=cluster" + pulsarAdminClusterDomainNameDesc = "Name of the failure domain, required when resource=failure_domain and operation is get, create, update, or delete" + pulsarAdminClusterServiceURLDesc = "Pulsar cluster web service URL (e.g., http://example.pulsar.io:8080), used when resource=cluster and operation is create or update" + pulsarAdminClusterServiceURLTLSDesc = "Pulsar cluster TLS secured web service URL (e.g., https://example.pulsar.io:8443), used when resource=cluster and operation is create or update" + pulsarAdminClusterBrokerServiceURLDesc = "Pulsar cluster broker service URL (e.g., pulsar://example.pulsar.io:6650), used when resource=cluster and operation is create or update" + pulsarAdminClusterBrokerServiceURLTLSDesc = "Pulsar cluster TLS secured broker service URL (e.g., pulsar+ssl://example.pulsar.io:6651), used when resource=cluster and operation is create or update" + pulsarAdminClusterPeerClusterNamesDesc = "List of clusters to be registered as peer-clusters, used when:\n" + + "- resource=cluster and operation is create or update\n" + + "- resource=peer_clusters and operation is update" + pulsarAdminClusterBrokersDesc = "List of broker names to include in a failure domain, required when resource=failure_domain and operation is create or update" +) + // PulsarAdminClusterToolBuilder implements the ToolBuilder interface for Pulsar Admin Cluster tools // It provides functionality to build Pulsar cluster management tools // /nolint:revive @@ -58,7 +94,7 @@ func NewPulsarAdminClusterToolBuilder() *PulsarAdminClusterToolBuilder { // BuildTools builds the Pulsar Admin Cluster tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminClusterToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminClusterToolBuilder) BuildTools(_ context.Context, config 
builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -70,11 +106,14 @@ func (b *PulsarAdminClusterToolBuilder) BuildTools(_ context.Context, config bui } // Build tools - tool := b.buildClusterTool() + tool, err := b.buildClusterTool() + if err != nil { + return nil, err + } handler := b.buildClusterHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminClusterInput, any]{ Tool: tool, Handler: handler, }, @@ -83,7 +122,12 @@ func (b *PulsarAdminClusterToolBuilder) BuildTools(_ context.Context, config bui // buildClusterTool builds the Pulsar Admin Cluster MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminClusterToolBuilder) buildClusterTool() mcp.Tool { +func (b *PulsarAdminClusterToolBuilder) buildClusterTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminClusterInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Unified tool for managing Apache Pulsar clusters.\n" + "This tool provides access to various cluster resources and operations, including:\n" + "1. Manage clusters (resource=cluster): List, get, create, update, delete clusters\n" + @@ -96,118 +140,62 @@ func (b *PulsarAdminClusterToolBuilder) buildClusterTool() mcp.Tool { "- {\"resource\": \"failure_domain\", \"operation\": \"list\", \"cluster_name\": \"my-cluster\"} lists failure domains\n" + "This tool requires Pulsar super-user permissions." - resourceDesc := "Type of cluster resource to access, available options:\n" + - "- cluster: Pulsar cluster configuration\n" + - "- peer_clusters: Peer clusters for geo-replication\n" + - "- failure_domain: Failure domains for fault tolerance" - - operationDesc := "Operation to perform, available options (depend on resource):\n" + - "- list: List resources (used with cluster, failure_domain)\n" + - "- get: Retrieve resource information (used with cluster, peer_clusters, failure_domain)\n" + - "- create: Create a new resource (used with cluster, failure_domain)\n" + - "- update: Update an existing resource (used with cluster, peer_clusters, failure_domain)\n" + - "- delete: Delete a resource (used with cluster, failure_domain)" - - return mcp.NewTool("pulsar_admin_cluster", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("cluster_name", - mcp.Description("Name of the Pulsar cluster, required for all operations except 'list' with resource=cluster"), - ), - mcp.WithString("domain_name", - mcp.Description("Name of the failure domain, required when resource=failure_domain and operation is get, create, update, or delete"), - ), - mcp.WithString("service_url", - mcp.Description("Pulsar cluster web service URL (e.g., http://example.pulsar.io:8080), used when resource=cluster and operation is create or update"), - ), - mcp.WithString("service_url_tls", - mcp.Description("Pulsar cluster TLS secured web service URL (e.g., https://example.pulsar.io:8443), used when resource=cluster and operation is create or update"), - ), - mcp.WithString("broker_service_url", - mcp.Description("Pulsar cluster broker service URL (e.g., pulsar://example.pulsar.io:6650), used when resource=cluster and operation is create or update"), - ), - 
mcp.WithString("broker_service_url_tls", - mcp.Description("Pulsar cluster TLS secured broker service URL (e.g., pulsar+ssl://example.pulsar.io:6651), used when resource=cluster and operation is create or update"), - ), - mcp.WithArray("peer_cluster_names", - mcp.Description("List of clusters to be registered as peer-clusters, used when:\n"+ - "- resource=cluster and operation is create or update\n"+ - "- resource=peer_clusters and operation is update"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "peer cluster name", - }, - ), - ), - mcp.WithArray("brokers", - mcp.Description("List of broker names to include in a failure domain, required when resource=failure_domain and operation is create or update"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "broker", - }, - ), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_cluster", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildClusterHandler builds the Pulsar Admin Cluster handler function // Migrated from the original handler logic -func (b *PulsarAdminClusterToolBuilder) buildClusterHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) buildClusterHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminClusterInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminClusterInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError("Missing required resource parameter. " + - "Please specify one of: cluster, peer_clusters, failure_domain."), nil + resource := input.Resource + if resource == "" { + return nil, nil, fmt.Errorf("missing required resource parameter. please specify one of: cluster, peer_clusters, failure_domain") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError("Missing required operation parameter. " + - "Please specify one of: list, get, create, update, delete based on the resource type."), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required operation parameter. please specify one of: list, get, create, update, delete based on the resource type") } // Validate if the parameter combination is valid validCombination, errMsg := b.validateClusterResourceOperation(resource, operation) if !validCombination { - return mcp.NewToolResultError(errMsg), nil + return nil, nil, fmt.Errorf("%s", errMsg) } // Check write operation permissions if (operation == "create" || operation == "update" || operation == "delete") && readOnly { - return mcp.NewToolResultError("Create/update/delete operations not allowed in read-only mode. " + - "Please contact your administrator if you need to modify cluster resources."), nil + return nil, nil, fmt.Errorf("create/update/delete operations not allowed in read-only mode. 
please contact your administrator if you need to modify cluster resources") } // Process request based on resource type switch resource { case "cluster": - return b.handleClusterResource(client, operation, request) + result, err := b.handleClusterResource(client, operation, input) + return result, nil, err case "peer_clusters": - return b.handlePeerClustersResource(client, operation, request) + result, err := b.handlePeerClustersResource(client, operation, input) + return result, nil, err case "failure_domain": - return b.handleFailureDomainResource(client, operation, request) + result, err := b.handleFailureDomainResource(client, operation, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported resource: %s. "+ - "Please use one of: cluster, peer_clusters, failure_domain.", resource)), nil + return nil, nil, fmt.Errorf("unsupported resource: %s. please use one of: cluster, peer_clusters, failure_domain", resource) } } } @@ -215,17 +203,19 @@ func (b *PulsarAdminClusterToolBuilder) buildClusterHandler(readOnly bool) func( // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminClusterToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminClusterToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminClusterToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Validate if the resource and operation combination is valid @@ -250,110 +240,103 @@ func (b *PulsarAdminClusterToolBuilder) validateClusterResourceOperation(resourc } // Handle cluster resource operations -func (b *PulsarAdminClusterToolBuilder) handleClusterResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) handleClusterResource(client cmdutils.Client, operation string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { switch operation { case "list": return b.handleClusterList(client) case "get": - clusterName, err := request.RequireString("cluster_name") + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster to get information for."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. 
please provide the name of the cluster to get information for") } return b.getClusterData(client, clusterName) case "create": - return b.createCluster(client, request) + return b.createCluster(client, input) case "update": - return b.updateCluster(client, request) + return b.updateCluster(client, input) case "delete": - clusterName, err := request.RequireString("cluster_name") + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster to delete."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. please provide the name of the cluster to delete") } return b.deleteCluster(client, clusterName) default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported cluster operation: %s", operation)), nil + return nil, fmt.Errorf("unsupported cluster operation: %s", operation) } } // Handle peer clusters resource operations -func (b *PulsarAdminClusterToolBuilder) handlePeerClustersResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - clusterName, err := request.RequireString("cluster_name") +func (b *PulsarAdminClusterToolBuilder) handlePeerClustersResource(client cmdutils.Client, operation string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster for peer clusters operation."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. please provide the name of the cluster for peer clusters operation") } switch operation { case "get": return b.getPeerClusters(client, clusterName) case "update": - return b.updatePeerClusters(client, clusterName, request) + return b.updatePeerClusters(client, clusterName, input) default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported peer_clusters operation: %s", operation)), nil + return nil, fmt.Errorf("unsupported peer_clusters operation: %s", operation) } } // Handle failure domain resource operations -func (b *PulsarAdminClusterToolBuilder) handleFailureDomainResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - clusterName, err := request.RequireString("cluster_name") +func (b *PulsarAdminClusterToolBuilder) handleFailureDomainResource(client cmdutils.Client, operation string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster for failure domain operation."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. please provide the name of the cluster for failure domain operation") } switch operation { case "list": return b.listFailureDomains(client, clusterName) case "get": - domainName, err := request.RequireString("domain_name") + domainName, err := requireString(input.DomainName, "domain_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'domain_name'. " + - "Please provide the name of the failure domain."), nil + return nil, fmt.Errorf("missing required parameter 'domain_name'. 
please provide the name of the failure domain") } return b.getFailureDomain(client, clusterName, domainName) case "create": - return b.createFailureDomain(client, clusterName, request) + return b.createFailureDomain(client, clusterName, input) case "update": - return b.updateFailureDomain(client, clusterName, request) + return b.updateFailureDomain(client, clusterName, input) case "delete": - domainName, err := request.RequireString("domain_name") + domainName, err := requireString(input.DomainName, "domain_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'domain_name'. " + - "Please provide the name of the failure domain to delete."), nil + return nil, fmt.Errorf("missing required parameter 'domain_name'. please provide the name of the failure domain to delete") } return b.deleteFailureDomain(client, clusterName, domainName) default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported failure_domain operation: %s", operation)), nil + return nil, fmt.Errorf("unsupported failure_domain operation: %s", operation) } } -func (b *PulsarAdminClusterToolBuilder) handleClusterList(client cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) handleClusterList(client cmdutils.Client) (*sdk.CallToolResult, error) { // Get cluster list clusters, err := client.Clusters().List() if err != nil { - return b.handleError("get cluster list", err), nil + return nil, b.handleError("get cluster list", err) } return b.marshalResponse(clusters) } -func (b *PulsarAdminClusterToolBuilder) getClusterData(client cmdutils.Client, clusterName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) getClusterData(client cmdutils.Client, clusterName string) (*sdk.CallToolResult, error) { // Get cluster data clusterData, err := client.Clusters().Get(clusterName) if err != nil { - return b.handleError("get cluster data", err), nil + return nil, b.handleError("get cluster data", err) } return b.marshalResponse(clusterData) } -func (b *PulsarAdminClusterToolBuilder) createCluster(client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - clusterName, err := request.RequireString("cluster_name") +func (b *PulsarAdminClusterToolBuilder) createCluster(client cmdutils.Client, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster to create."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. 
please provide the name of the cluster to create") } // Initialize cluster data @@ -362,36 +345,35 @@ func (b *PulsarAdminClusterToolBuilder) createCluster(client cmdutils.Client, re } // Set optional parameters if provided - if serviceURL := request.GetString("service_url", ""); serviceURL != "" { + if serviceURL := stringValue(input.ServiceURL); serviceURL != "" { clusterData.ServiceURL = serviceURL } - if serviceURLTls := request.GetString("service_url_tls", ""); serviceURLTls != "" { - clusterData.ServiceURLTls = serviceURLTls + if serviceURLTLS := stringValue(input.ServiceURLTLS); serviceURLTLS != "" { + clusterData.ServiceURLTls = serviceURLTLS } - if brokerServiceURL := request.GetString("broker_service_url", ""); brokerServiceURL != "" { + if brokerServiceURL := stringValue(input.BrokerServiceURL); brokerServiceURL != "" { clusterData.BrokerServiceURL = brokerServiceURL } - if brokerServiceURLTls := request.GetString("broker_service_url_tls", ""); brokerServiceURLTls != "" { - clusterData.BrokerServiceURLTls = brokerServiceURLTls + if brokerServiceURLTLS := stringValue(input.BrokerServiceURLTLS); brokerServiceURLTLS != "" { + clusterData.BrokerServiceURLTls = brokerServiceURLTLS } - if peerClusters := request.GetStringSlice("peer_cluster_names", []string{}); len(peerClusters) > 0 { - clusterData.PeerClusterNames = peerClusters + if len(input.PeerClusterNames) > 0 { + clusterData.PeerClusterNames = input.PeerClusterNames } // Create cluster err = client.Clusters().Create(clusterData) if err != nil { - return b.handleError("create cluster", err), nil + return nil, b.handleError("create cluster", err) } - return mcp.NewToolResultText(fmt.Sprintf("Cluster %s created successfully", clusterName)), nil + return textResult(fmt.Sprintf("Cluster %s created successfully", clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) updateCluster(client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - clusterName, err := request.RequireString("cluster_name") +func (b *PulsarAdminClusterToolBuilder) updateCluster(client cmdutils.Client, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + clusterName, err := requireString(input.ClusterName, "cluster_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'cluster_name'. " + - "Please provide the name of the cluster to update."), nil + return nil, fmt.Errorf("missing required parameter 'cluster_name'. 
please provide the name of the cluster to update") } // Initialize cluster data @@ -400,98 +382,95 @@ func (b *PulsarAdminClusterToolBuilder) updateCluster(client cmdutils.Client, re } // Set optional parameters if provided - if serviceURL := request.GetString("service_url", ""); serviceURL != "" { + if serviceURL := stringValue(input.ServiceURL); serviceURL != "" { clusterData.ServiceURL = serviceURL } - if serviceURLTls := request.GetString("service_url_tls", ""); serviceURLTls != "" { - clusterData.ServiceURLTls = serviceURLTls + if serviceURLTLS := stringValue(input.ServiceURLTLS); serviceURLTLS != "" { + clusterData.ServiceURLTls = serviceURLTLS } - if brokerServiceURL := request.GetString("broker_service_url", ""); brokerServiceURL != "" { + if brokerServiceURL := stringValue(input.BrokerServiceURL); brokerServiceURL != "" { clusterData.BrokerServiceURL = brokerServiceURL } - if brokerServiceURLTls := request.GetString("broker_service_url_tls", ""); brokerServiceURLTls != "" { - clusterData.BrokerServiceURLTls = brokerServiceURLTls + if brokerServiceURLTLS := stringValue(input.BrokerServiceURLTLS); brokerServiceURLTLS != "" { + clusterData.BrokerServiceURLTls = brokerServiceURLTLS } - if peerClusters := request.GetStringSlice("peer_cluster_names", []string{}); len(peerClusters) > 0 { - clusterData.PeerClusterNames = peerClusters + if len(input.PeerClusterNames) > 0 { + clusterData.PeerClusterNames = input.PeerClusterNames } // Update cluster err = client.Clusters().Update(clusterData) if err != nil { - return b.handleError("update cluster", err), nil + return nil, b.handleError("update cluster", err) } - return mcp.NewToolResultText(fmt.Sprintf("Cluster %s updated successfully", clusterName)), nil + return textResult(fmt.Sprintf("Cluster %s updated successfully", clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) deleteCluster(client cmdutils.Client, clusterName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) deleteCluster(client cmdutils.Client, clusterName string) (*sdk.CallToolResult, error) { // Delete cluster err := client.Clusters().Delete(clusterName) if err != nil { - return b.handleError("delete cluster", err), nil + return nil, b.handleError("delete cluster", err) } - return mcp.NewToolResultText(fmt.Sprintf("Cluster %s deleted successfully", clusterName)), nil + return textResult(fmt.Sprintf("Cluster %s deleted successfully", clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) getPeerClusters(client cmdutils.Client, clusterName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) getPeerClusters(client cmdutils.Client, clusterName string) (*sdk.CallToolResult, error) { // Get peer clusters peerClusters, err := client.Clusters().GetPeerClusters(clusterName) if err != nil { - return b.handleError("get peer clusters", err), nil + return nil, b.handleError("get peer clusters", err) } return b.marshalResponse(peerClusters) } -func (b *PulsarAdminClusterToolBuilder) updatePeerClusters(client cmdutils.Client, clusterName string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - peerClusters, err := request.RequireStringSlice("peer_cluster_names") +func (b *PulsarAdminClusterToolBuilder) updatePeerClusters(client cmdutils.Client, clusterName string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + peerClusters, err := requireStringSlice(input.PeerClusterNames, "peer_cluster_names") if err != nil { - return mcp.NewToolResultError("Missing required parameter 
'peer_cluster_names'. " + - "Please provide an array of peer cluster names to set."), nil + return nil, fmt.Errorf("missing required parameter 'peer_cluster_names'. please provide an array of peer cluster names to set") } // Update peer clusters err = client.Clusters().UpdatePeerClusters(clusterName, peerClusters) if err != nil { - return b.handleError("update peer clusters", err), nil + return nil, b.handleError("update peer clusters", err) } - return mcp.NewToolResultText(fmt.Sprintf("Peer clusters for %s updated successfully", clusterName)), nil + return textResult(fmt.Sprintf("Peer clusters for %s updated successfully", clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) listFailureDomains(client cmdutils.Client, clusterName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) listFailureDomains(client cmdutils.Client, clusterName string) (*sdk.CallToolResult, error) { // Get failure domains list failureDomains, err := client.Clusters().ListFailureDomains(clusterName) if err != nil { - return b.handleError("list failure domains", err), nil + return nil, b.handleError("list failure domains", err) } return b.marshalResponse(failureDomains) } -func (b *PulsarAdminClusterToolBuilder) getFailureDomain(client cmdutils.Client, clusterName, domainName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) getFailureDomain(client cmdutils.Client, clusterName, domainName string) (*sdk.CallToolResult, error) { // Get failure domain failureDomain, err := client.Clusters().GetFailureDomain(clusterName, domainName) if err != nil { - return b.handleError("get failure domain", err), nil + return nil, b.handleError("get failure domain", err) } return b.marshalResponse(failureDomain) } -func (b *PulsarAdminClusterToolBuilder) createFailureDomain(client cmdutils.Client, clusterName string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - domainName, err := request.RequireString("domain_name") +func (b *PulsarAdminClusterToolBuilder) createFailureDomain(client cmdutils.Client, clusterName string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + domainName, err := requireString(input.DomainName, "domain_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'domain_name'. " + - "Please provide the name of the failure domain to create."), nil + return nil, fmt.Errorf("missing required parameter 'domain_name'. please provide the name of the failure domain to create") } - brokers, err := request.RequireStringSlice("brokers") + brokers, err := requireStringSlice(input.Brokers, "brokers") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'brokers'. " + - "Please provide an array of broker names to include in this failure domain."), nil + return nil, fmt.Errorf("missing required parameter 'brokers'. 
please provide an array of broker names to include in this failure domain") } // Create failure domain data @@ -504,23 +483,21 @@ func (b *PulsarAdminClusterToolBuilder) createFailureDomain(client cmdutils.Clie // Create failure domain err = client.Clusters().CreateFailureDomain(failureDomainData) if err != nil { - return b.handleError("create failure domain", err), nil + return nil, b.handleError("create failure domain", err) } - return mcp.NewToolResultText(fmt.Sprintf("Failure domain %s created successfully in cluster %s", domainName, clusterName)), nil + return textResult(fmt.Sprintf("Failure domain %s created successfully in cluster %s", domainName, clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) updateFailureDomain(client cmdutils.Client, clusterName string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - domainName, err := request.RequireString("domain_name") +func (b *PulsarAdminClusterToolBuilder) updateFailureDomain(client cmdutils.Client, clusterName string, input pulsarAdminClusterInput) (*sdk.CallToolResult, error) { + domainName, err := requireString(input.DomainName, "domain_name") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'domain_name'. " + - "Please provide the name of the failure domain to update."), nil + return nil, fmt.Errorf("missing required parameter 'domain_name'. please provide the name of the failure domain to update") } - brokers, err := request.RequireStringSlice("brokers") + brokers, err := requireStringSlice(input.Brokers, "brokers") if err != nil { - return mcp.NewToolResultError("Missing required parameter 'brokers'. " + - "Please provide an array of broker names to include in this failure domain."), nil + return nil, fmt.Errorf("missing required parameter 'brokers'. 
please provide an array of broker names to include in this failure domain") } // Create failure domain data @@ -533,14 +510,13 @@ func (b *PulsarAdminClusterToolBuilder) updateFailureDomain(client cmdutils.Clie // Update failure domain err = client.Clusters().UpdateFailureDomain(failureDomainData) if err != nil { - return b.handleError("update failure domain", err), nil + return nil, b.handleError("update failure domain", err) } - return mcp.NewToolResultText( - fmt.Sprintf("Failure domain %s updated successfully in cluster %s", domainName, clusterName)), nil + return textResult(fmt.Sprintf("Failure domain %s updated successfully in cluster %s", domainName, clusterName)), nil } -func (b *PulsarAdminClusterToolBuilder) deleteFailureDomain(client cmdutils.Client, clusterName, domainName string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminClusterToolBuilder) deleteFailureDomain(client cmdutils.Client, clusterName, domainName string) (*sdk.CallToolResult, error) { // Create failure domain data for deletion failureDomainData := utils.FailureDomainData{ ClusterName: clusterName, @@ -550,9 +526,43 @@ func (b *PulsarAdminClusterToolBuilder) deleteFailureDomain(client cmdutils.Clie // Delete failure domain err := client.Clusters().DeleteFailureDomain(failureDomainData) if err != nil { - return b.handleError("delete failure domain", err), nil + return nil, b.handleError("delete failure domain", err) + } + + return textResult(fmt.Sprintf("Failure domain %s deleted successfully from cluster %s", domainName, clusterName)), nil +} + +func buildPulsarAdminClusterInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminClusterInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminClusterResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminClusterOperationDesc) + setSchemaDescription(schema, "cluster_name", pulsarAdminClusterNameDesc) + setSchemaDescription(schema, "domain_name", pulsarAdminClusterDomainNameDesc) + setSchemaDescription(schema, "service_url", pulsarAdminClusterServiceURLDesc) + setSchemaDescription(schema, "service_url_tls", pulsarAdminClusterServiceURLTLSDesc) + setSchemaDescription(schema, "broker_service_url", pulsarAdminClusterBrokerServiceURLDesc) + setSchemaDescription(schema, "broker_service_url_tls", pulsarAdminClusterBrokerServiceURLTLSDesc) + setSchemaDescription(schema, "peer_cluster_names", pulsarAdminClusterPeerClusterNamesDesc) + setSchemaDescription(schema, "brokers", pulsarAdminClusterBrokersDesc) + + if peersSchema := schema.Properties["peer_cluster_names"]; peersSchema != nil && peersSchema.Items != nil { + peersSchema.Items.Description = "peer cluster name" + } + if brokersSchema := schema.Properties["brokers"]; brokersSchema != nil && brokersSchema.Items != nil { + brokersSchema.Items.Description = "broker" } - return mcp.NewToolResultText( - fmt.Sprintf("Failure domain %s deleted successfully from cluster %s", domainName, clusterName)), nil + normalizeAdditionalProperties(schema) + return schema, nil } diff --git a/pkg/mcp/builders/pulsar/cluster_legacy.go b/pkg/mcp/builders/pulsar/cluster_legacy.go new file mode 100644 index 0000000..2793e7c --- /dev/null +++ b/pkg/mcp/builders/pulsar/cluster_legacy.go @@ -0,0 +1,125 @@ +// Copyright 2025 
StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminClusterLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin clusters. +// /nolint:revive +type PulsarAdminClusterLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminClusterLegacyToolBuilder creates a new Pulsar admin cluster legacy tool builder instance. +func NewPulsarAdminClusterLegacyToolBuilder() *PulsarAdminClusterLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_cluster", + Version: "1.0.0", + Description: "Pulsar Admin cluster management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "cluster", "admin"}, + } + + features := []string{ + "pulsar-admin-clusters", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminClusterLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin cluster legacy tool list. +func (b *PulsarAdminClusterLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildClusterTool() + if err != nil { + return nil, err + } + handler := b.buildClusterHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminClusterLegacyToolBuilder) buildClusterTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminClusterInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Unified tool for managing Apache Pulsar clusters.\n" + + "This tool provides access to various cluster resources and operations, including:\n" + + "1. Manage clusters (resource=cluster): List, get, create, update, delete clusters\n" + + "2. Manage peer clusters (resource=peer_clusters): Get, update peer clusters\n" + + "3. 
Manage failure domains (resource=failure_domain): List, get, create, update, delete failure domains\n\n" + + "Different functions are accessed by combining resource and operation parameters, with other parameters used selectively based on operation type.\n\n" + + "Examples:\n" + + "- {\"resource\": \"cluster\", \"operation\": \"list\"} lists all clusters\n" + + "- {\"resource\": \"cluster\", \"operation\": \"get\", \"cluster_name\": \"my-cluster\"} gets cluster configuration\n" + + "- {\"resource\": \"failure_domain\", \"operation\": \"list\", \"cluster_name\": \"my-cluster\"} lists failure domains\n" + + "This tool requires Pulsar super-user permissions." + + return mcp.Tool{ + Name: "pulsar_admin_cluster", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminClusterLegacyToolBuilder) buildClusterHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminClusterToolBuilder() + sdkHandler := sdkBuilder.buildClusterHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminClusterInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/consume.go b/pkg/mcp/builders/pulsar/consume.go index e625532..8368bae 100644 --- a/pkg/mcp/builders/pulsar/consume.go +++ b/pkg/mcp/builders/pulsar/consume.go @@ -22,12 +22,52 @@ import ( "time" "github.com/apache/pulsar-client-go/pulsar" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarClientConsumeInput struct { + Topic string `json:"topic"` + SubscriptionName string `json:"subscription-name"` + SubscriptionType *string `json:"subscription-type,omitempty"` + SubscriptionMode *string `json:"subscription-mode,omitempty"` + InitialPosition *string `json:"initial-position,omitempty"` + NumMessages *float64 `json:"num-messages,omitempty"` + Timeout *float64 `json:"timeout,omitempty"` + ShowProperties bool `json:"show-properties,omitempty"` + HidePayload bool `json:"hide-payload,omitempty"` +} + +const ( + pulsarClientConsumeTopicDesc = "The fully qualified topic name to consume from (format: [persistent|non-persistent]://tenant/namespace/topic). " + + "For partitioned topics, you can consume from all partitions by specifying the base topic name " + + "or from a specific partition by appending -partition-N to the topic name." + pulsarClientConsumeSubscriptionNameDesc = "The subscription name for this consumer. " + + "A subscription represents a named cursor for tracking message consumption progress. " + + "Multiple consumers can share the same subscription name to form a consumer group." 
+ pulsarClientConsumeSubscriptionTypeDesc = "Subscription type controlling message distribution among consumers:\n" + + "- exclusive: Only one consumer can consume from the subscription at a time\n" + + "- shared: Messages are distributed across all consumers in a round-robin fashion\n" + + "- failover: Only one active consumer, others act as backups\n" + + "- key_shared: Messages with the same key are delivered to the same consumer (default: exclusive)" + pulsarClientConsumeSubscriptionModeDesc = "Subscription durability mode:\n" + + "- durable: Subscription persists even when all consumers disconnect\n" + + "- non-durable: Subscription is deleted when all consumers disconnect (default: durable)" + pulsarClientConsumeInitialPositionDesc = "Initial cursor position for new subscriptions:\n" + + "- latest: Start consuming from the latest (most recent) message\n" + + "- earliest: Start consuming from the earliest (oldest available) message (default: latest)" + pulsarClientConsumeNumMessagesDesc = "Maximum number of messages to consume in this session. " + + "Set to 0 for unlimited consumption until timeout. (default: 10)" + pulsarClientConsumeTimeoutDesc = "Maximum time to wait for messages in seconds. " + + "The consumer will stop after this timeout even if fewer messages were received. (default: 30)" + pulsarClientConsumeShowPropertiesDesc = "Include message properties in the output. " + + "Message properties are key-value pairs attached to messages for metadata purposes. (default: false)" + pulsarClientConsumeHidePayloadDesc = "Exclude message payload from the output. " + + "Useful when you only need message metadata or are dealing with large payloads. (default: false)" +) + // PulsarClientConsumeToolBuilder implements the ToolBuilder interface for Pulsar Client Consumer tools // It provides functionality to build Pulsar message consumption tools // /nolint:revive @@ -58,7 +98,7 @@ func NewPulsarClientConsumeToolBuilder() *PulsarClientConsumeToolBuilder { // BuildTools builds the Pulsar Client Consumer tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarClientConsumeToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarClientConsumeToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -70,11 +110,14 @@ func (b *PulsarClientConsumeToolBuilder) BuildTools(_ context.Context, config bu } // Build tools - tool := b.buildConsumeTool() + tool, err := b.buildConsumeTool() + if err != nil { + return nil, err + } handler := b.buildConsumeHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarClientConsumeInput, any]{ Tool: tool, Handler: handler, }, @@ -83,7 +126,12 @@ func (b *PulsarClientConsumeToolBuilder) BuildTools(_ context.Context, config bu // buildConsumeTool builds the Pulsar Client Consumer MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarClientConsumeToolBuilder) buildConsumeTool() mcp.Tool { +func (b *PulsarClientConsumeToolBuilder) buildConsumeTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarClientConsumeInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Consume messages from a Pulsar topic. 
" + "This tool allows you to consume messages from a specified Pulsar topic with various options " + "to control the subscription behavior, message processing, and display format. " + @@ -93,88 +141,76 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeTool() mcp.Tool { "timeout settings, and message display options. " + "Do not use this tool for Kafka protocol operations. Use 'kafka_client_consume' instead." - return mcp.NewTool("pulsar_client_consume", - mcp.WithDescription(toolDesc), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The fully qualified topic name to consume from (format: [persistent|non-persistent]://tenant/namespace/topic). "+ - "For partitioned topics, you can consume from all partitions by specifying the base topic name "+ - "or from a specific partition by appending -partition-N to the topic name."), - ), - mcp.WithString("subscription-name", mcp.Required(), - mcp.Description("The subscription name for this consumer. "+ - "A subscription represents a named cursor for tracking message consumption progress. "+ - "Multiple consumers can share the same subscription name to form a consumer group."), - ), - mcp.WithString("subscription-type", - mcp.Description("Subscription type controlling message distribution among consumers:\\n"+ - "- exclusive: Only one consumer can consume from the subscription at a time\\n"+ - "- shared: Messages are distributed across all consumers in a round-robin fashion\\n"+ - "- failover: Only one active consumer, others act as backups\\n"+ - "- key_shared: Messages with the same key are delivered to the same consumer (default: exclusive)"), - ), - mcp.WithString("subscription-mode", - mcp.Description("Subscription durability mode:\\n"+ - "- durable: Subscription persists even when all consumers disconnect\\n"+ - "- non-durable: Subscription is deleted when all consumers disconnect (default: durable)"), - ), - mcp.WithString("initial-position", - mcp.Description("Initial cursor position for new subscriptions:\\n"+ - "- latest: Start consuming from the latest (most recent) message\\n"+ - "- earliest: Start consuming from the earliest (oldest available) message (default: latest)"), - ), - mcp.WithNumber("num-messages", - mcp.Description("Maximum number of messages to consume in this session. "+ - "Set to 0 for unlimited consumption until timeout. (default: 10)"), - ), - mcp.WithNumber("timeout", - mcp.Description("Maximum time to wait for messages in seconds. "+ - "The consumer will stop after this timeout even if fewer messages were received. (default: 30)"), - ), - mcp.WithBoolean("show-properties", - mcp.Description("Include message properties in the output. "+ - "Message properties are key-value pairs attached to messages for metadata purposes. (default: false)"), - ), - mcp.WithBoolean("hide-payload", - mcp.Description("Exclude message payload from the output. "+ - "Useful when you only need message metadata or are dealing with large payloads. 
(default: false)"), - ), - ) + return &sdk.Tool{ + Name: "pulsar_client_consume", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildConsumeHandler builds the Pulsar Client Consumer handler function // Migrated from the original handler logic -func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) builders.ToolHandlerFunc[pulsarClientConsumeInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarClientConsumeInput) (*sdk.CallToolResult, any, error) { // Extract required parameters with validation - topic, err := request.RequireString("topic") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get topic: %v", err)), nil + topic := strings.TrimSpace(input.Topic) + if topic == "" { + return nil, nil, fmt.Errorf("failed to get topic: topic is required") } - subscriptionName, err := request.RequireString("subscription-name") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get subscription name: %v", err)), nil + subscriptionName := strings.TrimSpace(input.SubscriptionName) + if subscriptionName == "" { + return nil, nil, fmt.Errorf("failed to get subscription name: subscription-name is required") } // Set default values and extract optional parameters - subscriptionType := request.GetString("subscription-type", "exclusive") - subscriptionMode := request.GetString("subscription-mode", "durable") - initialPosition := request.GetString("initial-position", "latest") - numMessages := int(request.GetFloat("num-messages", 10)) - timeout := int(request.GetFloat("timeout", 30)) - showProperties := request.GetBool("show-properties", false) - hidePayload := request.GetBool("hide-payload", false) + subscriptionType := "exclusive" + if input.SubscriptionType != nil { + value := strings.TrimSpace(*input.SubscriptionType) + if value != "" { + subscriptionType = value + } + } + + subscriptionMode := "durable" + if input.SubscriptionMode != nil { + value := strings.TrimSpace(*input.SubscriptionMode) + if value != "" { + subscriptionMode = value + } + } + + initialPosition := "latest" + if input.InitialPosition != nil { + value := strings.TrimSpace(*input.InitialPosition) + if value != "" { + initialPosition = value + } + } + + numMessages := 10 + if input.NumMessages != nil { + numMessages = int(*input.NumMessages) + } + + timeout := 30 + if input.Timeout != nil { + timeout = int(*input.Timeout) + } + + showProperties := input.ShowProperties + hidePayload := input.HidePayload // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Setup client client, err := session.GetPulsarClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create Pulsar client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to create Pulsar client: %v", err) } defer client.Close() @@ -196,7 +232,7 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(contex case "key_shared": consumerOpts.Type = pulsar.KeyShared default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid subscription type: %s. 
Valid types: exclusive, shared, failover, key_shared", subscriptionType)), nil + return nil, nil, fmt.Errorf("invalid subscription type: %s. Valid types: exclusive, shared, failover, key_shared", subscriptionType) } // Set subscription mode @@ -206,7 +242,7 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(contex case "non-durable": consumerOpts.SubscriptionMode = pulsar.NonDurable default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid subscription mode: %s. Valid modes: durable, non-durable", subscriptionMode)), nil + return nil, nil, fmt.Errorf("invalid subscription mode: %s. Valid modes: durable, non-durable", subscriptionMode) } // Set initial position @@ -216,13 +252,13 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(contex case "earliest": consumerOpts.SubscriptionInitialPosition = pulsar.SubscriptionPositionEarliest default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid initial position: %s. Valid positions: latest, earliest", initialPosition)), nil + return nil, nil, fmt.Errorf("invalid initial position: %s. Valid positions: latest, earliest", initialPosition) } // Create consumer consumer, err := client.Subscribe(consumerOpts) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create consumer: %v", err)), nil + return nil, nil, fmt.Errorf("failed to create consumer: %v", err) } defer consumer.Close() @@ -252,7 +288,7 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(contex if err == context.DeadlineExceeded || err == context.Canceled { break } - return mcp.NewToolResultError(fmt.Sprintf("Error receiving message: %v", err)), nil + return nil, nil, fmt.Errorf("error receiving message: %v", err) } // Process the message @@ -294,22 +330,51 @@ func (b *PulsarClientConsumeToolBuilder) buildConsumeHandler(_ bool) func(contex "messages": messages, } - return b.marshalResponse(response) + result, err := b.marshalResponse(response) + return result, nil, err } } +func buildPulsarClientConsumeInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarClientConsumeInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "topic", pulsarClientConsumeTopicDesc) + setSchemaDescription(schema, "subscription-name", pulsarClientConsumeSubscriptionNameDesc) + setSchemaDescription(schema, "subscription-type", pulsarClientConsumeSubscriptionTypeDesc) + setSchemaDescription(schema, "subscription-mode", pulsarClientConsumeSubscriptionModeDesc) + setSchemaDescription(schema, "initial-position", pulsarClientConsumeInitialPositionDesc) + setSchemaDescription(schema, "num-messages", pulsarClientConsumeNumMessagesDesc) + setSchemaDescription(schema, "timeout", pulsarClientConsumeTimeoutDesc) + setSchemaDescription(schema, "show-properties", pulsarClientConsumeShowPropertiesDesc) + setSchemaDescription(schema, "hide-payload", pulsarClientConsumeHidePayloadDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarClientConsumeToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b 
*PulsarClientConsumeToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarClientConsumeToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarClientConsumeToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } diff --git a/pkg/mcp/builders/pulsar/consume_legacy.go b/pkg/mcp/builders/pulsar/consume_legacy.go new file mode 100644 index 0000000..6880f6c --- /dev/null +++ b/pkg/mcp/builders/pulsar/consume_legacy.go @@ -0,0 +1,311 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/apache/pulsar-client-go/pulsar" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarClientConsumeLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar Client Consumer tools. +// /nolint:revive +type PulsarClientConsumeLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarClientConsumeLegacyToolBuilder creates a new legacy Pulsar Client Consumer tool builder instance. +func NewPulsarClientConsumeLegacyToolBuilder() *PulsarClientConsumeLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_client_consume", + Version: "1.0.0", + Description: "Pulsar Client message consumption tools", + Category: "pulsar_client", + Tags: []string{"pulsar", "consume", "client", "messaging"}, + } + + features := []string{ + "pulsar-client", + "all", + "all-pulsar", + } + + return &PulsarClientConsumeLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Client Consumer tool list for the legacy server. 
+func (b *PulsarClientConsumeLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildConsumeTool() + handler := b.buildConsumeHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildConsumeTool builds the Pulsar Client Consumer MCP tool definition. +func (b *PulsarClientConsumeLegacyToolBuilder) buildConsumeTool() mcp.Tool { + toolDesc := "Consume messages from a Pulsar topic. " + + "This tool allows you to consume messages from a specified Pulsar topic with various options " + + "to control the subscription behavior, message processing, and display format. " + + "Pulsar supports multiple subscription types (Exclusive, Shared, Failover, Key_Shared) and modes " + + "(Durable, Non-Durable) to accommodate different messaging patterns. " + + "The tool provides comprehensive control over consumption parameters including subscription position, " + + "timeout settings, and message display options. " + + "Do not use this tool for Kafka protocol operations. Use 'kafka_client_consume' instead." + + return mcp.NewTool("pulsar_client_consume", + mcp.WithDescription(toolDesc), + mcp.WithString("topic", mcp.Required(), + mcp.Description("The fully qualified topic name to consume from (format: [persistent|non-persistent]://tenant/namespace/topic). "+ + "For partitioned topics, you can consume from all partitions by specifying the base topic name "+ + "or from a specific partition by appending -partition-N to the topic name."), + ), + mcp.WithString("subscription-name", mcp.Required(), + mcp.Description("The subscription name for this consumer. "+ + "A subscription represents a named cursor for tracking message consumption progress. "+ + "Multiple consumers can share the same subscription name to form a consumer group."), + ), + mcp.WithString("subscription-type", + mcp.Description("Subscription type controlling message distribution among consumers:\n"+ + "- exclusive: Only one consumer can consume from the subscription at a time\n"+ + "- shared: Messages are distributed across all consumers in a round-robin fashion\n"+ + "- failover: Only one active consumer, others act as backups\n"+ + "- key_shared: Messages with the same key are delivered to the same consumer (default: exclusive)"), + ), + mcp.WithString("subscription-mode", + mcp.Description("Subscription durability mode:\n"+ + "- durable: Subscription persists even when all consumers disconnect\n"+ + "- non-durable: Subscription is deleted when all consumers disconnect (default: durable)"), + ), + mcp.WithString("initial-position", + mcp.Description("Initial cursor position for new subscriptions:\n"+ + "- latest: Start consuming from the latest (most recent) message\n"+ + "- earliest: Start consuming from the earliest (oldest available) message (default: latest)"), + ), + mcp.WithNumber("num-messages", + mcp.Description("Maximum number of messages to consume in this session. "+ + "Set to 0 for unlimited consumption until timeout. (default: 10)"), + ), + mcp.WithNumber("timeout", + mcp.Description("Maximum time to wait for messages in seconds. 
"+ + "The consumer will stop after this timeout even if fewer messages were received. (default: 30)"), + ), + mcp.WithBoolean("show-properties", + mcp.Description("Include message properties in the output. "+ + "Message properties are key-value pairs attached to messages for metadata purposes. (default: false)"), + ), + mcp.WithBoolean("hide-payload", + mcp.Description("Exclude message payload from the output. "+ + "Useful when you only need message metadata or are dealing with large payloads. (default: false)"), + ), + ) +} + +// buildConsumeHandler builds the Pulsar Client Consumer handler function. +func (b *PulsarClientConsumeLegacyToolBuilder) buildConsumeHandler(_ bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Extract required parameters with validation + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get topic: %v", err)), nil + } + + subscriptionName, err := request.RequireString("subscription-name") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get subscription name: %v", err)), nil + } + + // Set default values and extract optional parameters + subscriptionType := request.GetString("subscription-type", "exclusive") + subscriptionMode := request.GetString("subscription-mode", "durable") + initialPosition := request.GetString("initial-position", "latest") + numMessages := int(request.GetFloat("num-messages", 10)) + timeout := int(request.GetFloat("timeout", 30)) + showProperties := request.GetBool("show-properties", false) + hidePayload := request.GetBool("hide-payload", false) + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + // Setup client + client, err := session.GetPulsarClient() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create Pulsar client: %v", err)), nil + } + defer client.Close() + + // Prepare consumer options + consumerOpts := pulsar.ConsumerOptions{ + Name: "snmcp-consumer", + Topic: topic, + SubscriptionName: subscriptionName, + } + + // Set subscription type + switch strings.ToLower(subscriptionType) { + case "exclusive": + consumerOpts.Type = pulsar.Exclusive + case "shared": + consumerOpts.Type = pulsar.Shared + case "failover": + consumerOpts.Type = pulsar.Failover + case "key_shared": + consumerOpts.Type = pulsar.KeyShared + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid subscription type: %s. Valid types: exclusive, shared, failover, key_shared", subscriptionType)), nil + } + + // Set subscription mode + switch strings.ToLower(subscriptionMode) { + case "durable": + consumerOpts.SubscriptionMode = pulsar.Durable + case "non-durable": + consumerOpts.SubscriptionMode = pulsar.NonDurable + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid subscription mode: %s. Valid modes: durable, non-durable", subscriptionMode)), nil + } + + // Set initial position + switch strings.ToLower(initialPosition) { + case "latest": + consumerOpts.SubscriptionInitialPosition = pulsar.SubscriptionPositionLatest + case "earliest": + consumerOpts.SubscriptionInitialPosition = pulsar.SubscriptionPositionEarliest + default: + return mcp.NewToolResultError(fmt.Sprintf("Invalid initial position: %s. 
Valid positions: latest, earliest", initialPosition)), nil + } + + // Create consumer + consumer, err := client.Subscribe(consumerOpts) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create consumer: %v", err)), nil + } + defer consumer.Close() + + // Set up timeout context + timeoutDuration := time.Duration(timeout) * time.Second + consumeCtx, cancelConsume := context.WithTimeout(ctx, timeoutDuration) + defer cancelConsume() + + // Container for messages + type MessageData struct { + ID string `json:"id"` + PublishTime string `json:"publish_time"` + Properties map[string]string `json:"properties,omitempty"` + Key string `json:"key,omitempty"` + Data string `json:"data,omitempty"` + MessageCount int `json:"message_count"` + } + + messages := []MessageData{} + messageCount := 0 + + // Consume messages + for numMessages <= 0 || messageCount < numMessages { + // Receive message with timeout + msg, err := consumer.Receive(consumeCtx) + if err != nil { + if err == context.DeadlineExceeded || err == context.Canceled { + break + } + return mcp.NewToolResultError(fmt.Sprintf("Error receiving message: %v", err)), nil + } + + // Process the message + messageCount++ + + // Create message data + messageData := MessageData{ + ID: msg.ID().String(), + PublishTime: msg.PublishTime().Format(time.RFC3339), + MessageCount: messageCount, + } + + // Add properties if requested + if showProperties { + messageData.Properties = msg.Properties() + } + + // Add key if present + if msg.Key() != "" { + messageData.Key = msg.Key() + } + + // Add payload unless hidden + if !hidePayload { + messageData.Data = string(msg.Payload()) + } + + messages = append(messages, messageData) + + // Acknowledge the message + _ = consumer.Ack(msg) + } + + // Prepare response + response := map[string]interface{}{ + "topic": topic, + "subscription_name": subscriptionName, + "messages_consumed": messageCount, + "messages": messages, + } + + return b.marshalResponse(response) + } +} + +// Unified error handling and utility functions + +// handleError provides unified error handling +func (b *PulsarClientConsumeLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses +func (b *PulsarClientConsumeLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} diff --git a/pkg/mcp/builders/pulsar/consume_test.go b/pkg/mcp/builders/pulsar/consume_test.go new file mode 100644 index 0000000..23baaa2 --- /dev/null +++ b/pkg/mcp/builders/pulsar/consume_test.go @@ -0,0 +1,175 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarClientConsumeToolBuilder(t *testing.T) { + builder := NewPulsarClientConsumeToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_client_consume", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-client") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-client"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_client_consume", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnlyMode", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-client"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-client"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) + + t.Run("Handler_MissingTopic", func(t *testing.T) { + handler := builder.buildConsumeHandler(false) + + _, _, err := handler(context.Background(), nil, pulsarClientConsumeInput{ + SubscriptionName: "sub", + }) + require.Error(t, err) + assert.Equal(t, "failed to get topic: topic is required", err.Error()) + }) + + t.Run("Handler_MissingSubscriptionName", func(t *testing.T) { + handler := builder.buildConsumeHandler(false) + + _, _, err := handler(context.Background(), nil, pulsarClientConsumeInput{ + Topic: "persistent://tenant/ns/topic", + }) + require.Error(t, err) + assert.Equal(t, "failed to get subscription name: subscription-name is required", err.Error()) + }) + + t.Run("Handler_InvalidSubscriptionTypeMissingSession", func(t *testing.T) { + handler := builder.buildConsumeHandler(false) + invalidType := "unknown" + + _, _, err := handler(context.Background(), nil, pulsarClientConsumeInput{ + Topic: "persistent://tenant/ns/topic", + SubscriptionName: "sub", + SubscriptionType: &invalidType, + }) + require.Error(t, err) + assert.Equal(t, "pulsar session not found in context", err.Error()) + }) + + t.Run("Handler_SessionMissing", func(t *testing.T) { + handler := builder.buildConsumeHandler(false) + + _, _, err := handler(context.Background(), nil, pulsarClientConsumeInput{ + Topic: "persistent://tenant/ns/topic", + SubscriptionName: "sub", + }) + require.Error(t, err) + assert.Equal(t, "pulsar session not found in context", err.Error()) + }) +} + +func TestPulsarClientConsumeToolSchema(t *testing.T) { + builder := 
NewPulsarClientConsumeToolBuilder() + tool, err := builder.buildConsumeTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_client_consume", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"topic", "subscription-name"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "topic", + "subscription-name", + "subscription-type", + "subscription-mode", + "initial-position", + "num-messages", + "timeout", + "show-properties", + "hide-payload", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + topicSchema := schema.Properties["topic"] + require.NotNil(t, topicSchema) + assert.Equal(t, pulsarClientConsumeTopicDesc, topicSchema.Description) + + subscriptionNameSchema := schema.Properties["subscription-name"] + require.NotNil(t, subscriptionNameSchema) + assert.Equal(t, pulsarClientConsumeSubscriptionNameDesc, subscriptionNameSchema.Description) + + subscriptionTypeSchema := schema.Properties["subscription-type"] + require.NotNil(t, subscriptionTypeSchema) + assert.Equal(t, pulsarClientConsumeSubscriptionTypeDesc, subscriptionTypeSchema.Description) +} diff --git a/pkg/mcp/builders/pulsar/functions.go b/pkg/mcp/builders/pulsar/functions.go index 6bf8789..628204c 100644 --- a/pkg/mcp/builders/pulsar/functions.go +++ b/pkg/mcp/builders/pulsar/functions.go @@ -20,13 +20,114 @@ import ( "fmt" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminFunctionsInput struct { + Operation string `json:"operation"` + Tenant string `json:"tenant"` + Namespace string `json:"namespace"` + Name *string `json:"name,omitempty"` + ClassName *string `json:"classname,omitempty"` + Inputs []string `json:"inputs,omitempty"` + Output *string `json:"output,omitempty"` + Jar *string `json:"jar,omitempty"` + Py *string `json:"py,omitempty"` + GoFile *string `json:"go,omitempty"` + Parallelism *int `json:"parallelism,omitempty"` + UserConfig map[string]any `json:"userConfig,omitempty"` + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + Topic *string `json:"topic,omitempty"` + TriggerValue *string `json:"triggerValue,omitempty"` +} + +const ( + pulsarAdminFunctionsOperationDesc = "Operation to perform. 
Available operations:\n" + + "- list: List all functions under a specific tenant and namespace\n" + + "- get: Get the configuration of a function\n" + + "- status: Get the runtime status of a function (instances, metrics)\n" + + "- stats: Get detailed statistics of a function (throughput, processing latency)\n" + + "- querystate: Query state stored by a stateful function for a specific key\n" + + "- create: Deploy a new function with specified parameters\n" + + "- update: Update the configuration of an existing function\n" + + "- delete: Delete a function\n" + + "- start: Start a stopped function\n" + + "- stop: Stop a running function\n" + + "- restart: Restart a function\n" + + "- putstate: Store state in a function's state store\n" + + "- trigger: Manually trigger a function with a specific value" + pulsarAdminFunctionsTenantDesc = "The tenant name. Tenants are the primary organizational unit in Pulsar, " + + "providing multi-tenancy and resource isolation. Functions deployed within a tenant " + + "inherit its permissions and resource quotas." + pulsarAdminFunctionsNamespaceDesc = "The namespace name. Namespaces are logical groupings of topics and functions " + + "within a tenant. They encapsulate configuration policies and access control. " + + "Functions in a namespace typically process topics within the same namespace." + pulsarAdminFunctionsNameDesc = "The function name. Required for all operations except 'list'. " + + "Names should be descriptive of the function's purpose and must be unique within a namespace. " + + "Function names are used in metrics, logs, and when addressing the function via APIs." + pulsarAdminFunctionsClassNameDesc = "The fully qualified class name implementing the function. Required for 'create' operation, optional for 'update'. " + + "For Java functions, this should be the class that implements pulsar function interfaces. " + + "For Python, this MUST be in the format `<filename>.<classname>` - for example: " + + "if file is '/path/to/exclamation.py' with class 'ExclamationFunction', classname must be 'exclamation.ExclamationFunction'; " + + "if file is '/path/to/double_number.py' with class 'DoubleNumber', classname must be 'double_number.DoubleNumber'. " + + "Common error: using just the class name 'DoubleNumber' (without filename prefix) will cause function creation to fail. " + + "Go functions should specify the 'main' function of the binary." + pulsarAdminFunctionsInputsDesc = "The input topics for the function (array of strings). Optional for 'create' and 'update' operations. " + + "Topics must be specified in the format 'persistent://tenant/namespace/topic'. " + + "Functions can consume from multiple topics, each with potentially different serialization types. " + + "All input topics should exist before the function is created." + pulsarAdminFunctionsOutputDesc = "The output topic for the function results. Optional for 'create' and 'update' operations. " + + "Specified in the format 'persistent://tenant/namespace/topic'. " + + "If not set, the function will not produce any output to topics. " + + "The output topic will be automatically created if it doesn't exist." + pulsarAdminFunctionsJarDesc = "Path to the JAR file containing the function code. Optional for 'create' and 'update' operations. " + + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. " + + "Can be a local path or supported URL protocol accessible to the Pulsar broker. " + + "For Java functions, this should contain all dependencies for the function. 
" + + "The jar file must be compatible with the Pulsar Functions API." + pulsarAdminFunctionsPyDesc = "Path to the Python file containing the function code. Optional for 'create' and 'update' operations. " + + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. " + + "Can be a local path or supported URL protocol accessible to the Pulsar broker. " + + "For Python functions, this should be the file path to the Python file, in format of `.py`, `.zip`, or `.whl`. " + + "The Python file must be compatible with the Pulsar Functions API." + pulsarAdminFunctionsGoDesc = "Path to the Go file containing the function code. Optional for 'create' and 'update' operations. " + + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. " + + "Can be a local path or supported URL protocol accessible to the Pulsar broker. " + + "For Go functions, this should be the file path to the Go file, in format of executable binary. " + + "The Go file must be compatible with the Pulsar Functions API." + pulsarAdminFunctionsParallelismDesc = "The parallelism factor of the function. Optional for 'create' and 'update' operations. " + + "Determines how many instances of the function will run concurrently. " + + "Higher values improve throughput but require more resources. " + + "For stateful functions, consider how parallelism affects state consistency. " + + "Default is 1 (single instance)." + pulsarAdminFunctionsUserConfigDesc = "User-defined config key/values. Optional for 'create' and 'update' operations. " + + "Provides configuration parameters accessible to the function at runtime. " + + "Specify as a JSON object with string, number, or boolean values. " + + "Common configs include connection parameters, batch sizes, or feature toggles. " + + "Example: {\"maxBatchSize\": 100, \"connectionString\": \"host:port\", \"debugMode\": true}" + pulsarAdminFunctionsKeyDesc = "The state key. Required for 'querystate' and 'putstate' operations. " + + "Keys are used to identify values in the function's state store. " + + "They should be reasonable in length and follow a consistent pattern. " + + "State keys are typically limited to 128 characters." + pulsarAdminFunctionsValueDesc = "The state value. Required for 'putstate' operation. " + + "Values are stored in the function's state system. " + + "For simple values, specify as a string. For complex objects, use JSON-serialized strings. " + + "State values are typically limited to 1MB in size." + pulsarAdminFunctionsTopicDesc = "The specific topic name that the function should consume from. Optional for 'trigger' operation. " + + "Specified in the format 'persistent://tenant/namespace/topic'. " + + "Used when triggering a function that consumes from multiple topics. " + + "If not provided, the first input topic will be used." + pulsarAdminFunctionsTriggerValueDesc = "The value with which to trigger the function. Required for 'trigger' operation. " + + "This value will be passed to the function as if it were a message from the input topic. " + + "String values are sent as is; for typed values, ensure proper formatting based on function expectations. " + + "The function processes this value just like a normal message." 
+) + // PulsarAdminFunctionsToolBuilder implements the ToolBuilder interface for Pulsar admin functions operations // It provides functionality to build Pulsar functions management tools // /nolint:revive @@ -58,7 +159,7 @@ func NewPulsarAdminFunctionsToolBuilder() *PulsarAdminFunctionsToolBuilder { // BuildTools builds the Pulsar admin functions tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminFunctionsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminFunctionsToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -70,11 +171,14 @@ func (b *PulsarAdminFunctionsToolBuilder) BuildTools(_ context.Context, config b } // Build tools - tool := b.buildPulsarAdminFunctionsTool() + tool, err := b.buildPulsarAdminFunctionsTool() + if err != nil { + return nil, err + } handler := b.buildPulsarAdminFunctionsHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminFunctionsInput, any]{ Tool: tool, Handler: handler, }, @@ -83,7 +187,12 @@ func (b *PulsarAdminFunctionsToolBuilder) BuildTools(_ context.Context, config b // buildPulsarAdminFunctionsTool builds the Pulsar admin functions MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsTool() mcp.Tool { +func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminFunctionsInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar Functions for stream processing. " + "Pulsar Functions are lightweight compute processes that can consume messages from one or more Pulsar topics, " + "apply user-defined processing logic, and produce results to another topic. " + @@ -94,135 +203,21 @@ func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsTool() mcp.To "This tool provides complete lifecycle management including deployment, monitoring, scaling, " + "state management, and triggering. Functions require proper permissions to access their topics." - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all functions under a specific tenant and namespace\n" + - "- get: Get the configuration of a function\n" + - "- status: Get the runtime status of a function (instances, metrics)\n" + - "- stats: Get detailed statistics of a function (throughput, processing latency)\n" + - "- querystate: Query state stored by a stateful function for a specific key\n" + - "- create: Deploy a new function with specified parameters\n" + - "- update: Update the configuration of an existing function\n" + - "- delete: Delete a function\n" + - "- start: Start a stopped function\n" + - "- stop: Stop a running function\n" + - "- restart: Restart a function\n" + - "- putstate: Store state in a function's state store\n" + - "- trigger: Manually trigger a function with a specific value" - - return mcp.NewTool("pulsar_admin_functions", - mcp.WithDescription(toolDesc), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc)), - mcp.WithString("tenant", mcp.Required(), - mcp.Description("The tenant name. 
Tenants are the primary organizational unit in Pulsar, "+ - "providing multi-tenancy and resource isolation. Functions deployed within a tenant "+ - "inherit its permissions and resource quotas.")), - mcp.WithString("namespace", mcp.Required(), - mcp.Description("The namespace name. Namespaces are logical groupings of topics and functions "+ - "within a tenant. They encapsulate configuration policies and access control. "+ - "Functions in a namespace typically process topics within the same namespace.")), - mcp.WithString("name", - mcp.Description("The function name. Required for all operations except 'list'. "+ - "Names should be descriptive of the function's purpose and must be unique within a namespace. "+ - "Function names are used in metrics, logs, and when addressing the function via APIs.")), - // Additional parameters for specific operations - mcp.WithString("classname", - mcp.Description("The fully qualified class name implementing the function. Required for 'create' operation, optional for 'update'. "+ - "For Java functions, this should be the class that implements pulsar function interfaces. "+ - "For Python, this MUST be in format of `.` - for example: "+ - "if file is '/path/to/exclamation.py' with class 'ExclamationFunction', classname must be 'exclamation.ExclamationFunction'; "+ - "if file is '/path/to/double_number.py' with class 'DoubleNumber', classname must be 'double_number.DoubleNumber'. "+ - "Common error: using just the class name 'DoubleNumber' (without filename prefix) will cause function creation to fail. "+ - "Go functions should specify the 'main' function of the binary.")), - mcp.WithArray("inputs", - mcp.Description("The input topics for the function (array of strings). Optional for 'create' and 'update' operations. "+ - "Topics must be specified in the format 'persistent://tenant/namespace/topic'. "+ - "Functions can consume from multiple topics, each with potentially different serialization types. "+ - "All input topics should exist before the function is created."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "input topic", - }, - ), - ), - mcp.WithString("output", - mcp.Description("The output topic for the function results. Optional for 'create' and 'update' operations. "+ - "Specified in the format 'persistent://tenant/namespace/topic'. "+ - "If not set, the function will not produce any output to topics. "+ - "The output topic will be automatically created if it doesn't exist.")), - mcp.WithString("jar", - mcp.Description("Path to the JAR file containing the function code. Optional for 'create' and 'update' operations. "+ - "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ - "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ - "For Java functions, this should contain all dependencies for the function. "+ - "The jar file must be compatible with the Pulsar Functions API.")), - mcp.WithString("py", - mcp.Description("Path to the Python file containing the function code. Optional for 'create' and 'update' operations. "+ - "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ - "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ - "For Python functions, this should be the file path to the Python file, in format of `.py`, `.zip`, or `.whl`. 
"+ - "The Python file must be compatible with the Pulsar Functions API.")), - mcp.WithString("go", - mcp.Description("Path to the Go file containing the function code. Optional for 'create' and 'update' operations. "+ - "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ - "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ - "For Go functions, this should be the file path to the Go file, in format of executable binary. "+ - "The Go file must be compatible with the Pulsar Functions API.")), - mcp.WithNumber("parallelism", - mcp.Description("The parallelism factor of the function. Optional for 'create' and 'update' operations. "+ - "Determines how many instances of the function will run concurrently. "+ - "Higher values improve throughput but require more resources. "+ - "For stateful functions, consider how parallelism affects state consistency. "+ - "Default is 1 (single instance).")), - mcp.WithObject("userConfig", - mcp.Description("User-defined config key/values. Optional for 'create' and 'update' operations. "+ - "Provides configuration parameters accessible to the function at runtime. "+ - "Specify as a JSON object with string, number, or boolean values. "+ - "Common configs include connection parameters, batch sizes, or feature toggles. "+ - "Example: {\"maxBatchSize\": 100, \"connectionString\": \"host:port\", \"debugMode\": true}")), - mcp.WithString("key", - mcp.Description("The state key. Required for 'querystate' and 'putstate' operations. "+ - "Keys are used to identify values in the function's state store. "+ - "They should be reasonable in length and follow a consistent pattern. "+ - "State keys are typically limited to 128 characters.")), - mcp.WithString("value", - mcp.Description("The state value. Required for 'putstate' operation. "+ - "Values are stored in the function's state system. "+ - "For simple values, specify as a string. For complex objects, use JSON-serialized strings. "+ - "State values are typically limited to 1MB in size.")), - mcp.WithString("topic", - mcp.Description("The specific topic name that the function should consume from. Optional for 'trigger' operation. "+ - "Specified in the format 'persistent://tenant/namespace/topic'. "+ - "Used when triggering a function that consumes from multiple topics. "+ - "If not provided, the first input topic will be used.")), - mcp.WithString("triggerValue", - mcp.Description("The value with which to trigger the function. Required for 'trigger' operation. "+ - "This value will be passed to the function as if it were a message from the input topic. "+ - "String values are sent as is; for typed values, ensure proper formatting based on function expectations. 
"+ - "The function processes this value just like a normal message.")), - ) + return &sdk.Tool{ + Name: "pulsar_admin_functions", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildPulsarAdminFunctionsHandler builds the Pulsar admin functions handler function // Migrated from the original handler logic -func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get Pulsar session from context - session := mcpCtx.GetPulsarSession(ctx) - if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil - } - - client, err := session.GetAdminV3Client() - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get Pulsar client: %v", err)), nil - } - +func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminFunctionsInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminFunctionsInput) (*sdk.CallToolResult, any, error) { // Extract and validate operation parameter - operation, err := request.RequireString("operation") - if err != nil { - return b.handleError("get operation", err), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") } // Check if the operation is valid @@ -233,7 +228,7 @@ func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsHandler(readO } if !validOperations[operation] { - return b.handleError("validate operation", fmt.Errorf("invalid operation: '%s'. Supported operations: list, get, status, stats, querystate, create, update, delete, start, stop, restart, putstate, trigger", operation)), nil + return nil, nil, fmt.Errorf("invalid operation: '%s'. Supported operations: list, get, status, stats, querystate, create, update, delete, start, stop, restart, putstate, trigger", operation) } // Check write permissions for write operations @@ -243,76 +238,103 @@ func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsHandler(readO } if readOnly && writeOperations[operation] { - return b.handleError("check permissions", fmt.Errorf("operation '%s' not allowed in read-only mode. Read-only mode restricts modifications to Pulsar Functions", operation)), nil + return nil, nil, fmt.Errorf("operation '%s' not allowed in read-only mode. Read-only mode restricts modifications to Pulsar Functions", operation) + } + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return nil, nil, fmt.Errorf("pulsar session not found in context") + } + + client, err := session.GetAdminV3Client() + if err != nil { + return nil, nil, fmt.Errorf("failed to get pulsar client: %v", err) } // Extract common parameters - tenant, err := request.RequireString("tenant") + tenant, err := requireNonEmpty(input.Tenant, "tenant") if err != nil { - return b.handleError("get tenant", fmt.Errorf("missing required parameter 'tenant': %v. A tenant is required for all Pulsar Functions operations", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'tenant': %v. 
A tenant is required for all Pulsar Functions operations", err) } - namespace, err := request.RequireString("namespace") + namespace, err := requireNonEmpty(input.Namespace, "namespace") if err != nil { - return b.handleError("get namespace", fmt.Errorf("missing required parameter 'namespace': %v. A namespace is required for all Pulsar Functions operations", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'namespace': %v. A namespace is required for all Pulsar Functions operations", err) } // For all operations except 'list', name is required var name string if operation != "list" { - name, err = request.RequireString("name") + name, err = requireString(input.Name, "name") if err != nil { - return b.handleError("get name", fmt.Errorf("missing required parameter 'name' for operation '%s': %v. The function name must be specified for this operation", operation, err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'name' for operation '%s': %v. The function name must be specified for this operation", operation, err) } } // Handle operation using delegated handlers switch operation { case "list": - return b.handleFunctionList(ctx, client, tenant, namespace) + result, err := b.handleFunctionList(ctx, client, tenant, namespace) + return result, nil, err case "get": - return b.handleFunctionGet(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionGet(ctx, client, tenant, namespace, name) + return result, nil, err case "status": - return b.handleFunctionStatus(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionStatus(ctx, client, tenant, namespace, name) + return result, nil, err case "stats": - return b.handleFunctionStats(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionStats(ctx, client, tenant, namespace, name) + return result, nil, err case "querystate": - key, err := request.RequireString("key") + key, err := requireString(input.Key, "key") if err != nil { - return b.handleError("get key", fmt.Errorf("missing required parameter 'key' for operation 'querystate': %v. A key is required to look up state in the function's state store", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'key' for operation 'querystate': %v. 
A key is required to look up state in the function's state store", err) } - return b.handleFunctionQuerystate(ctx, client, tenant, namespace, name, key) + result, err := b.handleFunctionQuerystate(ctx, client, tenant, namespace, name, key) + return result, nil, err case "create": - return b.handleFunctionCreate(ctx, client, tenant, namespace, name, request) + result, err := b.handleFunctionCreate(ctx, client, tenant, namespace, name, input) + return result, nil, err case "update": - return b.handleFunctionUpdate(ctx, client, tenant, namespace, name, request) + result, err := b.handleFunctionUpdate(ctx, client, tenant, namespace, name, input) + return result, nil, err case "delete": - return b.handleFunctionDelete(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionDelete(ctx, client, tenant, namespace, name) + return result, nil, err case "start": - return b.handleFunctionStart(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionStart(ctx, client, tenant, namespace, name) + return result, nil, err case "stop": - return b.handleFunctionStop(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionStop(ctx, client, tenant, namespace, name) + return result, nil, err case "restart": - return b.handleFunctionRestart(ctx, client, tenant, namespace, name) + result, err := b.handleFunctionRestart(ctx, client, tenant, namespace, name) + return result, nil, err case "putstate": - key, err := request.RequireString("key") + key, err := requireString(input.Key, "key") if err != nil { - return b.handleError("get key", fmt.Errorf("missing required parameter 'key' for operation 'putstate': %v. A key is required to store state in the function's state store", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'key' for operation 'putstate': %v. A key is required to store state in the function's state store", err) } - value, err := request.RequireString("value") + value, err := requireString(input.Value, "value") if err != nil { - return b.handleError("get value", fmt.Errorf("missing required parameter 'value' for operation 'putstate': %v. A value is required to store state in the function's state store", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'value' for operation 'putstate': %v. A value is required to store state in the function's state store", err) } - return b.handleFunctionPutstate(ctx, client, tenant, namespace, name, key, value) + result, err := b.handleFunctionPutstate(ctx, client, tenant, namespace, name, key, value) + return result, nil, err case "trigger": - triggerValue, err := request.RequireString("triggerValue") + triggerValue, err := requireString(input.TriggerValue, "triggerValue") if err != nil { - return b.handleError("get triggerValue", fmt.Errorf("missing required parameter 'triggerValue' for operation 'trigger': %v. A trigger value is required to manually trigger the function", err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'triggerValue' for operation 'trigger': %v. 
A trigger value is required to manually trigger the function", err) + } + topic := "" + if input.Topic != nil { + topic = *input.Topic } - topic := request.GetString("topic", "") - return b.handleFunctionTrigger(ctx, client, tenant, namespace, name, triggerValue, topic) + result, err := b.handleFunctionTrigger(ctx, client, tenant, namespace, name, triggerValue, topic) + return result, nil, err default: - return b.handleError("handle operation", fmt.Errorf("unsupported operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unsupported operation: %s", operation) } } } @@ -320,12 +342,12 @@ func (b *PulsarAdminFunctionsToolBuilder) buildPulsarAdminFunctionsHandler(readO // Helper functions - delegated operation handlers // handleFunctionList handles the list operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionList(_ context.Context, client cmdutils.Client, tenant, namespace string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionList(_ context.Context, client cmdutils.Client, tenant, namespace string) (*sdk.CallToolResult, error) { admin := client.Functions() functions, err := admin.GetFunctions(tenant, namespace) if err != nil { - return b.handleError("list functions", err), nil + return nil, b.handleError("list functions", err) } return b.marshalResponse(map[string]interface{}{ @@ -336,50 +358,50 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionList(_ context.Context, } // handleFunctionGet handles the get operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionGet(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionGet(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() functionConfig, err := admin.GetFunction(tenant, namespace, name) if err != nil { - return b.handleError("get function config", err), nil + return nil, b.handleError("get function config", err) } return b.marshalResponse(functionConfig) } // handleFunctionStatus handles the status operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStatus(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStatus(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() status, err := admin.GetFunctionStatus(tenant, namespace, name) if err != nil { - return b.handleError("get function status", err), nil + return nil, b.handleError("get function status", err) } return b.marshalResponse(status) } // handleFunctionStats handles the stats operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStats(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStats(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() stats, err := admin.GetFunctionStats(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for function '%s' in tenant '%s' namespace '%s': %v. 
Verify the function exists and is running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to get stats for function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and is running", + name, tenant, namespace, err) } return b.marshalResponse(stats) } // handleFunctionQuerystate handles the querystate operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionQuerystate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionQuerystate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key string) (*sdk.CallToolResult, error) { admin := client.Functions() state, err := admin.GetFunctionState(tenant, namespace, name, key) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to query state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v. Verify the function exists and has state enabled.", - key, name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to query state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v; verify the function exists and has state enabled", + key, name, tenant, namespace, err) } return b.marshalResponse(map[string]interface{}{ @@ -394,12 +416,12 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionQuerystate(_ context.Con } // handleFunctionCreate handles the create operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionCreate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionCreate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, input pulsarAdminFunctionsInput) (*sdk.CallToolResult, error) { // Build function configuration from request parameters to validate - functionConfig, err := b.buildFunctionConfig(tenant, namespace, name, request, false) + functionConfig, err := b.buildFunctionConfig(tenant, namespace, name, input, false) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v. Please verify all required parameters are provided correctly.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v; please verify all required parameters are provided correctly", + name, tenant, namespace, err) } admin := client.Functions() @@ -415,23 +437,23 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionCreate(_ context.Context err = admin.CreateFuncWithURL(functionConfig, packagePath) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create function '%s' in tenant '%s' namespace '%s': %v. Verify the function configuration is valid.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to create function '%s' in tenant '%s' namespace '%s': %v; verify the function configuration is valid", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Created function '%s' successfully in tenant '%s' namespace '%s'. The function configuration has been created.", + return textResult(fmt.Sprintf("Created function '%s' successfully in tenant '%s' namespace '%s'. 
The function configuration has been created.", name, tenant, namespace)), nil } // handleFunctionUpdate handles the update operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionUpdate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionUpdate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, input pulsarAdminFunctionsInput) (*sdk.CallToolResult, error) { admin := client.Functions() // Build function configuration from request parameters - config, err := b.buildFunctionConfig(tenant, namespace, name, request, true) + config, err := b.buildFunctionConfig(tenant, namespace, name, input, true) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v. Please verify all parameters are provided correctly.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v; please verify all parameters are provided correctly", + name, tenant, namespace, err) } // Update the function @@ -440,72 +462,72 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionUpdate(_ context.Context } err = admin.UpdateFunction(config, "", updateOptions) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and the configuration is valid.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to update function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and the configuration is valid", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Updated function '%s' successfully in tenant '%s' namespace '%s'. The function configuration has been modified.", + return textResult(fmt.Sprintf("Updated function '%s' successfully in tenant '%s' namespace '%s'. The function configuration has been modified.", name, tenant, namespace)), nil } // handleFunctionDelete handles the delete operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionDelete(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionDelete(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() err := admin.DeleteFunction(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and you have deletion permissions.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to delete function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and you have deletion permissions", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Deleted function '%s' successfully from tenant '%s' namespace '%s'. All running instances have been terminated.", + return textResult(fmt.Sprintf("Deleted function '%s' successfully from tenant '%s' namespace '%s'. 
All running instances have been terminated.", name, tenant, namespace)), nil } // handleFunctionStart handles the start operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() err := admin.StartFunction(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to start function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is not already running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to start function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and is not already running", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Started function '%s' successfully in tenant '%s' namespace '%s'. The function instances are now processing messages.", + return textResult(fmt.Sprintf("Started function '%s' successfully in tenant '%s' namespace '%s'. The function instances are now processing messages.", name, tenant, namespace)), nil } // handleFunctionStop handles the stop operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStop(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionStop(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() err := admin.StopFunction(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to stop function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is currently running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to stop function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and is currently running", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Stopped function '%s' successfully in tenant '%s' namespace '%s'. The function will no longer process messages until restarted.", + return textResult(fmt.Sprintf("Stopped function '%s' successfully in tenant '%s' namespace '%s'. The function will no longer process messages until restarted.", name, tenant, namespace)), nil } // handleFunctionRestart handles the restart operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionRestart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionRestart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { admin := client.Functions() err := admin.RestartFunction(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to restart function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is properly deployed.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to restart function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and is properly deployed", + name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Restarted function '%s' successfully in tenant '%s' namespace '%s'. 
All function instances have been restarted.", + return textResult(fmt.Sprintf("Restarted function '%s' successfully in tenant '%s' namespace '%s'. All function instances have been restarted.", name, tenant, namespace)), nil } // handleFunctionPutstate handles the putstate operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionPutstate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key, value string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionPutstate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key, value string) (*sdk.CallToolResult, error) { admin := client.Functions() err := admin.PutFunctionState(tenant, namespace, name, utils.FunctionState{ @@ -513,16 +535,16 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionPutstate(_ context.Conte StringValue: value, }) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to put state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v. Verify the function exists and has state enabled.", - key, name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to put state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v; verify the function exists and has state enabled", + key, name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Successfully stored state for key '%s' in function '%s' (tenant '%s' namespace '%s'). State value has been updated.", + return textResult(fmt.Sprintf("Successfully stored state for key '%s' in function '%s' (tenant '%s' namespace '%s'). State value has been updated.", key, name, tenant, namespace)), nil } // handleFunctionTrigger handles the trigger operation -func (b *PulsarAdminFunctionsToolBuilder) handleFunctionTrigger(_ context.Context, client cmdutils.Client, tenant, namespace, name, triggerValue, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) handleFunctionTrigger(_ context.Context, client cmdutils.Client, tenant, namespace, name, triggerValue, topic string) (*sdk.CallToolResult, error) { admin := client.Functions() var err error @@ -536,8 +558,8 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionTrigger(_ context.Contex } if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger function '%s' in tenant '%s' namespace '%s': %v. 
Verify the function exists and is running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to trigger function '%s' in tenant '%s' namespace '%s': %v; verify the function exists and is running", + name, tenant, namespace, err) } var message string @@ -549,13 +571,13 @@ func (b *PulsarAdminFunctionsToolBuilder) handleFunctionTrigger(_ context.Contex name, tenant, namespace, result) } - return mcp.NewToolResultText(message), nil + return textResult(message), nil } // Helper functions // buildFunctionConfig builds a Pulsar Function configuration from MCP request parameters -func (b *PulsarAdminFunctionsToolBuilder) buildFunctionConfig(tenant, namespace, name string, request mcp.CallToolRequest, isUpdate bool) (*utils.FunctionConfig, error) { +func (b *PulsarAdminFunctionsToolBuilder) buildFunctionConfig(tenant, namespace, name string, input pulsarAdminFunctionsInput, isUpdate bool) (*utils.FunctionConfig, error) { config := &utils.FunctionConfig{ Tenant: tenant, Namespace: namespace, @@ -564,47 +586,38 @@ func (b *PulsarAdminFunctionsToolBuilder) buildFunctionConfig(tenant, namespace, // Get required classname parameter (for create operations) if !isUpdate { - classname, err := request.RequireString("classname") + classname, err := requireString(input.ClassName, "classname") if err != nil { return nil, fmt.Errorf("missing required parameter 'classname': %v", err) } config.ClassName = classname - } else { + } else if input.ClassName != nil && *input.ClassName != "" { // For update, classname is optional - if classname := request.GetString("classname", ""); classname != "" { - config.ClassName = classname - } + config.ClassName = *input.ClassName } // Get inputs parameter (array of strings) - args := request.GetArguments() - if inputsInterface, exists := args["inputs"]; exists && inputsInterface != nil { - if inputsArray, ok := inputsInterface.([]interface{}); ok { - inputSpecs := make(map[string]utils.ConsumerConfig) - for _, input := range inputsArray { - if inputStr, ok := input.(string); ok { - inputSpecs[inputStr] = utils.ConsumerConfig{ - SerdeClassName: "", - SchemaType: "", - } - } - } - if len(inputSpecs) > 0 { - config.InputSpecs = inputSpecs + if len(input.Inputs) > 0 { + inputSpecs := make(map[string]utils.ConsumerConfig) + for _, inputTopic := range input.Inputs { + inputSpecs[inputTopic] = utils.ConsumerConfig{ + SerdeClassName: "", + SchemaType: "", } } + if len(inputSpecs) > 0 { + config.InputSpecs = inputSpecs + } } // Get optional output parameter - if output := request.GetString("output", ""); output != "" { - config.Output = output + if input.Output != nil && *input.Output != "" { + config.Output = *input.Output } // Get optional parallelism parameter - if parallelismInterface, exists := args["parallelism"]; exists && parallelismInterface != nil { - if parallelismFloat, ok := parallelismInterface.(float64); ok { - config.Parallelism = int(parallelismFloat) - } + if input.Parallelism != nil { + config.Parallelism = *input.Parallelism } // Set default parallelism if not specified @@ -613,40 +626,83 @@ func (b *PulsarAdminFunctionsToolBuilder) buildFunctionConfig(tenant, namespace, } // Get optional jar parameter - if jar := request.GetString("jar", ""); jar != "" { + if input.Jar != nil && *input.Jar != "" { + jar := *input.Jar config.Jar = &jar } // Get optional py parameter - if py := request.GetString("py", ""); py != "" { + if input.Py != nil && *input.Py != "" { + py := *input.Py config.Py = &py } // Get optional go parameter - if goFile := 
request.GetString("go", ""); goFile != "" { + if input.GoFile != nil && *input.GoFile != "" { + goFile := *input.GoFile config.Go = &goFile } // Get optional userConfig parameter (JSON object) - if userConfigInterface, exists := args["userConfig"]; exists && userConfigInterface != nil { - if userConfigMap, ok := userConfigInterface.(map[string]interface{}); ok { - config.UserConfig = userConfigMap - } + if input.UserConfig != nil { + config.UserConfig = input.UserConfig } return config, nil } // handleError provides unified error handling -func (b *PulsarAdminFunctionsToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminFunctionsToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminFunctionsToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil +} + +func requireNonEmpty(value string, key string) (string, error) { + if value == "" { + return "", fmt.Errorf("required argument %q not found", key) + } + return value, nil +} + +func buildPulsarAdminFunctionsInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminFunctionsInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "operation", pulsarAdminFunctionsOperationDesc) + setSchemaDescription(schema, "tenant", pulsarAdminFunctionsTenantDesc) + setSchemaDescription(schema, "namespace", pulsarAdminFunctionsNamespaceDesc) + setSchemaDescription(schema, "name", pulsarAdminFunctionsNameDesc) + setSchemaDescription(schema, "classname", pulsarAdminFunctionsClassNameDesc) + setSchemaDescription(schema, "inputs", pulsarAdminFunctionsInputsDesc) + setSchemaDescription(schema, "output", pulsarAdminFunctionsOutputDesc) + setSchemaDescription(schema, "jar", pulsarAdminFunctionsJarDesc) + setSchemaDescription(schema, "py", pulsarAdminFunctionsPyDesc) + setSchemaDescription(schema, "go", pulsarAdminFunctionsGoDesc) + setSchemaDescription(schema, "parallelism", pulsarAdminFunctionsParallelismDesc) + setSchemaDescription(schema, "userConfig", pulsarAdminFunctionsUserConfigDesc) + setSchemaDescription(schema, "key", pulsarAdminFunctionsKeyDesc) + setSchemaDescription(schema, "value", pulsarAdminFunctionsValueDesc) + setSchemaDescription(schema, "topic", pulsarAdminFunctionsTopicDesc) + setSchemaDescription(schema, "triggerValue", pulsarAdminFunctionsTriggerValueDesc) + + normalizeAdditionalProperties(schema) + return schema, nil } diff --git a/pkg/mcp/builders/pulsar/functions_legacy.go b/pkg/mcp/builders/pulsar/functions_legacy.go new file mode 100644 index 0000000..d4754c3 --- /dev/null +++ b/pkg/mcp/builders/pulsar/functions_legacy.go @@ -0,0 +1,652 @@ 
+// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/pulsarctl/pkg/cmdutils" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarAdminFunctionsLegacyToolBuilder implements the ToolBuilder interface for Pulsar admin functions operations +// It provides functionality to build Pulsar functions management tools +// /nolint:revive +type PulsarAdminFunctionsLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminFunctionsLegacyToolBuilder creates a new Pulsar admin functions tool builder instance +func NewPulsarAdminFunctionsLegacyToolBuilder() *PulsarAdminFunctionsLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_functions", + Version: "1.0.0", + Description: "Pulsar admin functions management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "functions"}, + } + + features := []string{ + "pulsar-admin-functions", + "pulsar-admin", + "all", + "all-pulsar", + } + + return &PulsarAdminFunctionsLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin functions tool list +// This is the core method implementing the ToolBuilder interface +func (b *PulsarAdminFunctionsLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildPulsarAdminFunctionsTool() + handler := b.buildPulsarAdminFunctionsHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildPulsarAdminFunctionsTool builds the Pulsar admin functions MCP tool definition +// Migrated from the original tool definition logic +func (b *PulsarAdminFunctionsLegacyToolBuilder) buildPulsarAdminFunctionsTool() mcp.Tool { + toolDesc := "Manage Apache Pulsar Functions for stream processing. " + + "Pulsar Functions are lightweight compute processes that can consume messages from one or more Pulsar topics, " + + "apply user-defined processing logic, and produce results to another topic. " + + "Functions support Java, Python, and Go runtimes, enabling complex event processing, " + + "data transformations, filtering, and integration with external systems. 
" + + "Functions follow the tenant/namespace/name hierarchy for organization, " + + "can maintain state, and can scale through parallelism configuration. " + + "This tool provides complete lifecycle management including deployment, monitoring, scaling, " + + "state management, and triggering. Functions require proper permissions to access their topics." + + operationDesc := "Operation to perform. Available operations:\n" + + "- list: List all functions under a specific tenant and namespace\n" + + "- get: Get the configuration of a function\n" + + "- status: Get the runtime status of a function (instances, metrics)\n" + + "- stats: Get detailed statistics of a function (throughput, processing latency)\n" + + "- querystate: Query state stored by a stateful function for a specific key\n" + + "- create: Deploy a new function with specified parameters\n" + + "- update: Update the configuration of an existing function\n" + + "- delete: Delete a function\n" + + "- start: Start a stopped function\n" + + "- stop: Stop a running function\n" + + "- restart: Restart a function\n" + + "- putstate: Store state in a function's state store\n" + + "- trigger: Manually trigger a function with a specific value" + + return mcp.NewTool("pulsar_admin_functions", + mcp.WithDescription(toolDesc), + mcp.WithString("operation", mcp.Required(), + mcp.Description(operationDesc)), + mcp.WithString("tenant", mcp.Required(), + mcp.Description("The tenant name. Tenants are the primary organizational unit in Pulsar, "+ + "providing multi-tenancy and resource isolation. Functions deployed within a tenant "+ + "inherit its permissions and resource quotas.")), + mcp.WithString("namespace", mcp.Required(), + mcp.Description("The namespace name. Namespaces are logical groupings of topics and functions "+ + "within a tenant. They encapsulate configuration policies and access control. "+ + "Functions in a namespace typically process topics within the same namespace.")), + mcp.WithString("name", + mcp.Description("The function name. Required for all operations except 'list'. "+ + "Names should be descriptive of the function's purpose and must be unique within a namespace. "+ + "Function names are used in metrics, logs, and when addressing the function via APIs.")), + // Additional parameters for specific operations + mcp.WithString("classname", + mcp.Description("The fully qualified class name implementing the function. Required for 'create' operation, optional for 'update'. "+ + "For Java functions, this should be the class that implements pulsar function interfaces. "+ + "For Python, this MUST be in format of `.` - for example: "+ + "if file is '/path/to/exclamation.py' with class 'ExclamationFunction', classname must be 'exclamation.ExclamationFunction'; "+ + "if file is '/path/to/double_number.py' with class 'DoubleNumber', classname must be 'double_number.DoubleNumber'. "+ + "Common error: using just the class name 'DoubleNumber' (without filename prefix) will cause function creation to fail. "+ + "Go functions should specify the 'main' function of the binary.")), + mcp.WithArray("inputs", + mcp.Description("The input topics for the function (array of strings). Optional for 'create' and 'update' operations. "+ + "Topics must be specified in the format 'persistent://tenant/namespace/topic'. "+ + "Functions can consume from multiple topics, each with potentially different serialization types. 
"+ + "All input topics should exist before the function is created."), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "input topic", + }, + ), + ), + mcp.WithString("output", + mcp.Description("The output topic for the function results. Optional for 'create' and 'update' operations. "+ + "Specified in the format 'persistent://tenant/namespace/topic'. "+ + "If not set, the function will not produce any output to topics. "+ + "The output topic will be automatically created if it doesn't exist.")), + mcp.WithString("jar", + mcp.Description("Path to the JAR file containing the function code. Optional for 'create' and 'update' operations. "+ + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ + "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ + "For Java functions, this should contain all dependencies for the function. "+ + "The jar file must be compatible with the Pulsar Functions API.")), + mcp.WithString("py", + mcp.Description("Path to the Python file containing the function code. Optional for 'create' and 'update' operations. "+ + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ + "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ + "For Python functions, this should be the file path to the Python file, in format of `.py`, `.zip`, or `.whl`. "+ + "The Python file must be compatible with the Pulsar Functions API.")), + mcp.WithString("go", + mcp.Description("Path to the Go file containing the function code. Optional for 'create' and 'update' operations. "+ + "Support `file://`, `http://`, `https://`, `function://`, `source://`, `sink://` protocol. "+ + "Can be a local path or supported URL protocol accessible to the Pulsar broker. "+ + "For Go functions, this should be the file path to the Go file, in format of executable binary. "+ + "The Go file must be compatible with the Pulsar Functions API.")), + mcp.WithNumber("parallelism", + mcp.Description("The parallelism factor of the function. Optional for 'create' and 'update' operations. "+ + "Determines how many instances of the function will run concurrently. "+ + "Higher values improve throughput but require more resources. "+ + "For stateful functions, consider how parallelism affects state consistency. "+ + "Default is 1 (single instance).")), + mcp.WithObject("userConfig", + mcp.Description("User-defined config key/values. Optional for 'create' and 'update' operations. "+ + "Provides configuration parameters accessible to the function at runtime. "+ + "Specify as a JSON object with string, number, or boolean values. "+ + "Common configs include connection parameters, batch sizes, or feature toggles. "+ + "Example: {\"maxBatchSize\": 100, \"connectionString\": \"host:port\", \"debugMode\": true}")), + mcp.WithString("key", + mcp.Description("The state key. Required for 'querystate' and 'putstate' operations. "+ + "Keys are used to identify values in the function's state store. "+ + "They should be reasonable in length and follow a consistent pattern. "+ + "State keys are typically limited to 128 characters.")), + mcp.WithString("value", + mcp.Description("The state value. Required for 'putstate' operation. "+ + "Values are stored in the function's state system. "+ + "For simple values, specify as a string. For complex objects, use JSON-serialized strings. 
"+ + "State values are typically limited to 1MB in size.")), + mcp.WithString("topic", + mcp.Description("The specific topic name that the function should consume from. Optional for 'trigger' operation. "+ + "Specified in the format 'persistent://tenant/namespace/topic'. "+ + "Used when triggering a function that consumes from multiple topics. "+ + "If not provided, the first input topic will be used.")), + mcp.WithString("triggerValue", + mcp.Description("The value with which to trigger the function. Required for 'trigger' operation. "+ + "This value will be passed to the function as if it were a message from the input topic. "+ + "String values are sent as is; for typed values, ensure proper formatting based on function expectations. "+ + "The function processes this value just like a normal message.")), + ) +} + +// buildPulsarAdminFunctionsHandler builds the Pulsar admin functions handler function +// Migrated from the original handler logic +func (b *PulsarAdminFunctionsLegacyToolBuilder) buildPulsarAdminFunctionsHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + client, err := session.GetAdminV3Client() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get Pulsar client: %v", err)), nil + } + + // Extract and validate operation parameter + operation, err := request.RequireString("operation") + if err != nil { + return b.handleError("get operation", err), nil + } + + // Check if the operation is valid + validOperations := map[string]bool{ + "list": true, "get": true, "status": true, "stats": true, "querystate": true, + "create": true, "update": true, "delete": true, "start": true, "stop": true, + "restart": true, "putstate": true, "trigger": true, + } + + if !validOperations[operation] { + return b.handleError("validate operation", fmt.Errorf("invalid operation: '%s'. Supported operations: list, get, status, stats, querystate, create, update, delete, start, stop, restart, putstate, trigger", operation)), nil + } + + // Check write permissions for write operations + writeOperations := map[string]bool{ + "create": true, "update": true, "delete": true, "start": true, + "stop": true, "restart": true, "putstate": true, "trigger": true, + } + + if readOnly && writeOperations[operation] { + return b.handleError("check permissions", fmt.Errorf("operation '%s' not allowed in read-only mode. Read-only mode restricts modifications to Pulsar Functions", operation)), nil + } + + // Extract common parameters + tenant, err := request.RequireString("tenant") + if err != nil { + return b.handleError("get tenant", fmt.Errorf("missing required parameter 'tenant': %v. A tenant is required for all Pulsar Functions operations", err)), nil + } + + namespace, err := request.RequireString("namespace") + if err != nil { + return b.handleError("get namespace", fmt.Errorf("missing required parameter 'namespace': %v. A namespace is required for all Pulsar Functions operations", err)), nil + } + + // For all operations except 'list', name is required + var name string + if operation != "list" { + name, err = request.RequireString("name") + if err != nil { + return b.handleError("get name", fmt.Errorf("missing required parameter 'name' for operation '%s': %v. 
The function name must be specified for this operation", operation, err)), nil + } + } + + // Handle operation using delegated handlers + switch operation { + case "list": + return b.handleFunctionList(ctx, client, tenant, namespace) + case "get": + return b.handleFunctionGet(ctx, client, tenant, namespace, name) + case "status": + return b.handleFunctionStatus(ctx, client, tenant, namespace, name) + case "stats": + return b.handleFunctionStats(ctx, client, tenant, namespace, name) + case "querystate": + key, err := request.RequireString("key") + if err != nil { + return b.handleError("get key", fmt.Errorf("missing required parameter 'key' for operation 'querystate': %v. A key is required to look up state in the function's state store", err)), nil + } + return b.handleFunctionQuerystate(ctx, client, tenant, namespace, name, key) + case "create": + return b.handleFunctionCreate(ctx, client, tenant, namespace, name, request) + case "update": + return b.handleFunctionUpdate(ctx, client, tenant, namespace, name, request) + case "delete": + return b.handleFunctionDelete(ctx, client, tenant, namespace, name) + case "start": + return b.handleFunctionStart(ctx, client, tenant, namespace, name) + case "stop": + return b.handleFunctionStop(ctx, client, tenant, namespace, name) + case "restart": + return b.handleFunctionRestart(ctx, client, tenant, namespace, name) + case "putstate": + key, err := request.RequireString("key") + if err != nil { + return b.handleError("get key", fmt.Errorf("missing required parameter 'key' for operation 'putstate': %v. A key is required to store state in the function's state store", err)), nil + } + value, err := request.RequireString("value") + if err != nil { + return b.handleError("get value", fmt.Errorf("missing required parameter 'value' for operation 'putstate': %v. A value is required to store state in the function's state store", err)), nil + } + return b.handleFunctionPutstate(ctx, client, tenant, namespace, name, key, value) + case "trigger": + triggerValue, err := request.RequireString("triggerValue") + if err != nil { + return b.handleError("get triggerValue", fmt.Errorf("missing required parameter 'triggerValue' for operation 'trigger': %v. 
A trigger value is required to manually trigger the function", err)), nil + } + topic := request.GetString("topic", "") + return b.handleFunctionTrigger(ctx, client, tenant, namespace, name, triggerValue, topic) + default: + return b.handleError("handle operation", fmt.Errorf("unsupported operation: %s", operation)), nil + } + } +} + +// Helper functions - delegated operation handlers + +// handleFunctionList handles the list operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionList(_ context.Context, client cmdutils.Client, tenant, namespace string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + functions, err := admin.GetFunctions(tenant, namespace) + if err != nil { + return b.handleError("list functions", err), nil + } + + return b.marshalResponse(map[string]interface{}{ + "functions": functions, + "tenant": tenant, + "namespace": namespace, + }) +} + +// handleFunctionGet handles the get operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionGet(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + functionConfig, err := admin.GetFunction(tenant, namespace, name) + if err != nil { + return b.handleError("get function config", err), nil + } + + return b.marshalResponse(functionConfig) +} + +// handleFunctionStatus handles the status operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionStatus(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + status, err := admin.GetFunctionStatus(tenant, namespace, name) + if err != nil { + return b.handleError("get function status", err), nil + } + + return b.marshalResponse(status) +} + +// handleFunctionStats handles the stats operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionStats(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + stats, err := admin.GetFunctionStats(tenant, namespace, name) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is running.", + name, tenant, namespace, err)), nil + } + + return b.marshalResponse(stats) +} + +// handleFunctionQuerystate handles the querystate operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionQuerystate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + state, err := admin.GetFunctionState(tenant, namespace, name, key) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to query state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v. 
Verify the function exists and has state enabled.", + key, name, tenant, namespace, err)), nil + } + + return b.marshalResponse(map[string]interface{}{ + "key": key, + "value": state, + "function": map[string]string{ + "tenant": tenant, + "namespace": namespace, + "name": name, + }, + }) +} + +// handleFunctionCreate handles the create operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionCreate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Build function configuration from request parameters to validate + functionConfig, err := b.buildFunctionConfig(tenant, namespace, name, request, false) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v. Please verify all required parameters are provided correctly.", + name, tenant, namespace, err)), nil + } + + admin := client.Functions() + packagePath := "" + //nolint:gocritic + if functionConfig.Jar != nil { + packagePath = *functionConfig.Jar + } else if functionConfig.Py != nil { + packagePath = *functionConfig.Py + } else if functionConfig.Go != nil { + packagePath = *functionConfig.Go + } + + err = admin.CreateFuncWithURL(functionConfig, packagePath) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create function '%s' in tenant '%s' namespace '%s': %v. Verify the function configuration is valid.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Created function '%s' successfully in tenant '%s' namespace '%s'. The function configuration has been created.", + name, tenant, namespace)), nil +} + +// handleFunctionUpdate handles the update operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionUpdate(_ context.Context, client cmdutils.Client, tenant, namespace, name string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + admin := client.Functions() + + // Build function configuration from request parameters + config, err := b.buildFunctionConfig(tenant, namespace, name, request, true) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to build function configuration for '%s' in tenant '%s' namespace '%s': %v. Please verify all parameters are provided correctly.", + name, tenant, namespace, err)), nil + } + + // Update the function + updateOptions := &utils.UpdateOptions{ + UpdateAuthData: true, + } + err = admin.UpdateFunction(config, "", updateOptions) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to update function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and the configuration is valid.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Updated function '%s' successfully in tenant '%s' namespace '%s'. The function configuration has been modified.", + name, tenant, namespace)), nil +} + +// handleFunctionDelete handles the delete operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionDelete(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + err := admin.DeleteFunction(tenant, namespace, name) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to delete function '%s' in tenant '%s' namespace '%s': %v. 
Verify the function exists and you have deletion permissions.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Deleted function '%s' successfully from tenant '%s' namespace '%s'. All running instances have been terminated.", + name, tenant, namespace)), nil +} + +// handleFunctionStart handles the start operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionStart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + err := admin.StartFunction(tenant, namespace, name) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to start function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is not already running.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Started function '%s' successfully in tenant '%s' namespace '%s'. The function instances are now processing messages.", + name, tenant, namespace)), nil +} + +// handleFunctionStop handles the stop operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionStop(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + err := admin.StopFunction(tenant, namespace, name) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to stop function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is currently running.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Stopped function '%s' successfully in tenant '%s' namespace '%s'. The function will no longer process messages until restarted.", + name, tenant, namespace)), nil +} + +// handleFunctionRestart handles the restart operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionRestart(_ context.Context, client cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + err := admin.RestartFunction(tenant, namespace, name) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to restart function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is properly deployed.", + name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Restarted function '%s' successfully in tenant '%s' namespace '%s'. All function instances have been restarted.", + name, tenant, namespace)), nil +} + +// handleFunctionPutstate handles the putstate operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionPutstate(_ context.Context, client cmdutils.Client, tenant, namespace, name, key, value string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + err := admin.PutFunctionState(tenant, namespace, name, utils.FunctionState{ + Key: key, + StringValue: value, + }) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to put state for key '%s' in function '%s' (tenant '%s' namespace '%s'): %v. Verify the function exists and has state enabled.", + key, name, tenant, namespace, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully stored state for key '%s' in function '%s' (tenant '%s' namespace '%s'). 
State value has been updated.", + key, name, tenant, namespace)), nil +} + +// handleFunctionTrigger handles the trigger operation +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleFunctionTrigger(_ context.Context, client cmdutils.Client, tenant, namespace, name, triggerValue, topic string) (*mcp.CallToolResult, error) { + admin := client.Functions() + + var err error + var result string + if topic != "" { + // Trigger with specific topic + result, err = admin.TriggerFunction(tenant, namespace, name, topic, triggerValue, "") + } else { + // Trigger without specific topic (uses first input topic) + result, err = admin.TriggerFunction(tenant, namespace, name, "", triggerValue, "") + } + + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger function '%s' in tenant '%s' namespace '%s': %v. Verify the function exists and is running.", + name, tenant, namespace, err)), nil + } + + var message string + if topic != "" { + message = fmt.Sprintf("Successfully triggered function '%s' in tenant '%s' namespace '%s' with topic '%s'. Result: %s", + name, tenant, namespace, topic, result) + } else { + message = fmt.Sprintf("Successfully triggered function '%s' in tenant '%s' namespace '%s'. Result: %s", + name, tenant, namespace, result) + } + + return mcp.NewToolResultText(message), nil +} + +// Helper functions + +// buildFunctionConfig builds a Pulsar Function configuration from MCP request parameters +func (b *PulsarAdminFunctionsLegacyToolBuilder) buildFunctionConfig(tenant, namespace, name string, request mcp.CallToolRequest, isUpdate bool) (*utils.FunctionConfig, error) { + config := &utils.FunctionConfig{ + Tenant: tenant, + Namespace: namespace, + Name: name, + } + + // Get required classname parameter (for create operations) + if !isUpdate { + classname, err := request.RequireString("classname") + if err != nil { + return nil, fmt.Errorf("missing required parameter 'classname': %v", err) + } + config.ClassName = classname + } else { + // For update, classname is optional + if classname := request.GetString("classname", ""); classname != "" { + config.ClassName = classname + } + } + + // Get inputs parameter (array of strings) + args := request.GetArguments() + if inputsInterface, exists := args["inputs"]; exists && inputsInterface != nil { + if inputsArray, ok := inputsInterface.([]interface{}); ok { + inputSpecs := make(map[string]utils.ConsumerConfig) + for _, input := range inputsArray { + if inputStr, ok := input.(string); ok { + inputSpecs[inputStr] = utils.ConsumerConfig{ + SerdeClassName: "", + SchemaType: "", + } + } + } + if len(inputSpecs) > 0 { + config.InputSpecs = inputSpecs + } + } + } + + // Get optional output parameter + if output := request.GetString("output", ""); output != "" { + config.Output = output + } + + // Get optional parallelism parameter + if parallelismInterface, exists := args["parallelism"]; exists && parallelismInterface != nil { + if parallelismFloat, ok := parallelismInterface.(float64); ok { + config.Parallelism = int(parallelismFloat) + } + } + + // Set default parallelism if not specified + if config.Parallelism <= 0 { + config.Parallelism = 1 + } + + // Get optional jar parameter + if jar := request.GetString("jar", ""); jar != "" { + config.Jar = &jar + } + + // Get optional py parameter + if py := request.GetString("py", ""); py != "" { + config.Py = &py + } + + // Get optional go parameter + if goFile := request.GetString("go", ""); goFile != "" { + config.Go = &goFile + } + + // Get optional userConfig parameter (JSON 
object) + if userConfigInterface, exists := args["userConfig"]; exists && userConfigInterface != nil { + if userConfigMap, ok := userConfigInterface.(map[string]interface{}); ok { + config.UserConfig = userConfigMap + } + } + + return config, nil +} + +// handleError provides unified error handling +func (b *PulsarAdminFunctionsLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses +func (b *PulsarAdminFunctionsLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} diff --git a/pkg/mcp/builders/pulsar/functions_test.go b/pkg/mcp/builders/pulsar/functions_test.go index 51e29ec..35005e0 100644 --- a/pkg/mcp/builders/pulsar/functions_test.go +++ b/pkg/mcp/builders/pulsar/functions_test.go @@ -18,6 +18,7 @@ import ( "context" "testing" + "github.com/google/jsonschema-go/jsonschema" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -45,8 +46,20 @@ func TestPulsarAdminFunctionsToolBuilder(t *testing.T) { tools, err := builder.BuildTools(context.Background(), config) require.NoError(t, err) assert.Len(t, tools, 1) - assert.Equal(t, "pulsar_admin_functions", tools[0].Tool.Name) - assert.NotNil(t, tools[0].Handler) + assert.Equal(t, "pulsar_admin_functions", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-functions"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_functions", tools[0].Definition().Name) }) t.Run("BuildTools_NoFeatures", func(t *testing.T) { @@ -78,3 +91,55 @@ func TestPulsarAdminFunctionsToolBuilder(t *testing.T) { assert.Error(t, err) }) } + +func TestPulsarAdminFunctionsToolSchema(t *testing.T) { + builder := NewPulsarAdminFunctionsToolBuilder() + tool, err := builder.buildPulsarAdminFunctionsTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_functions", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"operation", "tenant", "namespace"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "operation", + "tenant", + "namespace", + "name", + "classname", + "inputs", + "output", + "jar", + "py", + "go", + "parallelism", + "userConfig", + "key", + "value", + "topic", + "triggerValue", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminFunctionsOperationDesc, operationSchema.Description) +} + +func TestPulsarAdminFunctionsToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminFunctionsToolBuilder() + handler := builder.buildPulsarAdminFunctionsHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminFunctionsInput{ + Operation: "create", + Tenant: "tenant", + Namespace: "namespace", + }) + + 
require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/pulsar/functions_worker.go b/pkg/mcp/builders/pulsar/functions_worker.go index d244585..9475b81 100644 --- a/pkg/mcp/builders/pulsar/functions_worker.go +++ b/pkg/mcp/builders/pulsar/functions_worker.go @@ -19,13 +19,34 @@ import ( "encoding/json" "fmt" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminFunctionsWorkerInput struct { + Resource string `json:"resource"` +} + +const ( + pulsarAdminFunctionsWorkerToolDesc = "Unified tool for managing Apache Pulsar Functions Worker resources. " + + "Pulsar Functions is a serverless compute framework that allows you to process messages in a streaming fashion. " + + "The Functions Worker is the runtime environment that executes and manages Pulsar Functions. " + + "This tool provides comprehensive access to functions worker resources including function statistics, " + + "monitoring metrics, cluster information, leader election status, and function assignments across the cluster. " + + "Functions workers can be deployed in multiple modes (standalone, cluster) and this tool helps monitor " + + "and manage the worker cluster state, performance metrics, and function distribution. " + + "Most operations require Pulsar super-user permissions for security reasons." + pulsarAdminFunctionsWorkerResourceDesc = "Type of functions worker resource to access. Available resources:\n" + + "- function_stats: Statistics for all functions running on the functions worker, including processing rates, error counts, and resource usage\n" + + "- monitoring_metrics: Comprehensive metrics for monitoring function workers, including JVM metrics, resource utilization, and performance indicators\n" + + "- cluster: Information about all workers in the functions worker cluster, including their status, capabilities, and workload distribution\n" + + "- cluster_leader: Information about the leader of the functions worker cluster, essential for understanding cluster coordination\n" + + "- function_assignments: Current assignments of functions across the functions worker cluster, showing which functions are running on which workers" +) + // PulsarAdminFunctionsWorkerToolBuilder implements the ToolBuilder interface for Pulsar Admin Functions Worker tools // It provides functionality to build Pulsar functions worker monitoring and management tools // /nolint:revive @@ -57,7 +78,7 @@ func NewPulsarAdminFunctionsWorkerToolBuilder() *PulsarAdminFunctionsWorkerToolB // BuildTools builds the Pulsar Admin Functions Worker tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminFunctionsWorkerToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,11 +90,14 @@ func (b *PulsarAdminFunctionsWorkerToolBuilder) BuildTools(_ context.Context, co } // Build tools - tool := 
b.buildFunctionsWorkerTool() + tool, err := b.buildFunctionsWorkerTool() + if err != nil { + return nil, err + } handler := b.buildFunctionsWorkerHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminFunctionsWorkerInput, any]{ Tool: tool, Handler: handler, }, @@ -82,69 +106,60 @@ func (b *PulsarAdminFunctionsWorkerToolBuilder) BuildTools(_ context.Context, co // buildFunctionsWorkerTool builds the Pulsar Admin Functions Worker MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminFunctionsWorkerToolBuilder) buildFunctionsWorkerTool() mcp.Tool { - toolDesc := "Unified tool for managing Apache Pulsar Functions Worker resources. " + - "Pulsar Functions is a serverless compute framework that allows you to process messages in a streaming fashion. " + - "The Functions Worker is the runtime environment that executes and manages Pulsar Functions. " + - "This tool provides comprehensive access to functions worker resources including function statistics, " + - "monitoring metrics, cluster information, leader election status, and function assignments across the cluster. " + - "Functions workers can be deployed in multiple modes (standalone, cluster) and this tool helps monitor " + - "and manage the worker cluster state, performance metrics, and function distribution. " + - "Most operations require Pulsar super-user permissions for security reasons." - - resourceDesc := "Type of functions worker resource to access. Available resources:\\n" + - "- function_stats: Statistics for all functions running on the functions worker, including processing rates, error counts, and resource usage\\n" + - "- monitoring_metrics: Comprehensive metrics for monitoring function workers, including JVM metrics, resource utilization, and performance indicators\\n" + - "- cluster: Information about all workers in the functions worker cluster, including their status, capabilities, and workload distribution\\n" + - "- cluster_leader: Information about the leader of the functions worker cluster, essential for understanding cluster coordination\\n" + - "- function_assignments: Current assignments of functions across the functions worker cluster, showing which functions are running on which workers" +func (b *PulsarAdminFunctionsWorkerToolBuilder) buildFunctionsWorkerTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminFunctionsWorkerInputSchema() + if err != nil { + return nil, err + } - return mcp.NewTool("pulsar_admin_functions_worker", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_functions_worker", + Description: pulsarAdminFunctionsWorkerToolDesc, + InputSchema: inputSchema, + }, nil } // buildFunctionsWorkerHandler builds the Pulsar Admin Functions Worker handler function // Migrated from the original handler logic -func (b *PulsarAdminFunctionsWorkerToolBuilder) buildFunctionsWorkerHandler(_ bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) buildFunctionsWorkerHandler(_ bool) builders.ToolHandlerFunc[pulsarAdminFunctionsWorkerInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminFunctionsWorkerInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session 
:= mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create the admin client admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, b.handleError("get admin client", err) } // Get required resource parameter - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'resource'. " + - "Please specify one of: function_stats, monitoring_metrics, cluster, cluster_leader, function_assignments"), nil + resource := input.Resource + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'; please specify one of: function_stats, monitoring_metrics, cluster, cluster_leader, function_assignments") } // Process request based on resource type switch resource { case "function_stats": - return b.handleFunctionsWorkerFunctionStats(admin) + result, handlerErr := b.handleFunctionsWorkerFunctionStats(admin) + return result, nil, handlerErr case "monitoring_metrics": - return b.handleFunctionsWorkerMonitoringMetrics(admin) + result, handlerErr := b.handleFunctionsWorkerMonitoringMetrics(admin) + return result, nil, handlerErr case "cluster": - return b.handleFunctionsWorkerGetCluster(admin) + result, handlerErr := b.handleFunctionsWorkerGetCluster(admin) + return result, nil, handlerErr case "cluster_leader": - return b.handleFunctionsWorkerGetClusterLeader(admin) + result, handlerErr := b.handleFunctionsWorkerGetClusterLeader(admin) + return result, nil, handlerErr case "function_assignments": - return b.handleFunctionsWorkerGetFunctionAssignments(admin) + result, handlerErr := b.handleFunctionsWorkerGetFunctionAssignments(admin) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported resource: %s. "+ - "Please use one of: function_stats, monitoring_metrics, cluster, cluster_leader, function_assignments", resource)), nil + return nil, nil, fmt.Errorf("unsupported resource: %s. 
please use one of: function_stats, monitoring_metrics, cluster, cluster_leader, function_assignments", resource) } } } @@ -152,72 +167,92 @@ func (b *PulsarAdminFunctionsWorkerToolBuilder) buildFunctionsWorkerHandler(_ bo // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminFunctionsWorkerToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminFunctionsWorkerToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Operation handler functions - migrated from the original implementation // handleFunctionsWorkerFunctionStats handles retrieving function statistics -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerFunctionStats(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerFunctionStats(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get function stats stats, err := admin.FunctionsWorker().GetFunctionsStats() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get functions stats: %v", err)), nil + return nil, b.handleError("get functions stats", err) } return b.marshalResponse(stats) } // handleFunctionsWorkerMonitoringMetrics handles retrieving monitoring metrics -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerMonitoringMetrics(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerMonitoringMetrics(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get monitoring metrics metrics, err := admin.FunctionsWorker().GetMetrics() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get monitoring metrics: %v", err)), nil + return nil, b.handleError("get monitoring metrics", err) } return b.marshalResponse(metrics) } // handleFunctionsWorkerGetCluster handles retrieving cluster information -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerGetCluster(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerGetCluster(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get cluster info cluster, err := admin.FunctionsWorker().GetCluster() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get worker cluster: %v", err)), nil + return nil, b.handleError("get worker cluster", err) } return b.marshalResponse(cluster) } // handleFunctionsWorkerGetClusterLeader handles retrieving cluster leader information -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerGetClusterLeader(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) 
handleFunctionsWorkerGetClusterLeader(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get cluster leader leader, err := admin.FunctionsWorker().GetClusterLeader() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get worker cluster leader: %v", err)), nil + return nil, b.handleError("get worker cluster leader", err) } return b.marshalResponse(leader) } // handleFunctionsWorkerGetFunctionAssignments handles retrieving function assignments -func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerGetFunctionAssignments(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminFunctionsWorkerToolBuilder) handleFunctionsWorkerGetFunctionAssignments(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get function assignments assignments, err := admin.FunctionsWorker().GetAssignments() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get function assignments: %v", err)), nil + return nil, b.handleError("get function assignments", err) } return b.marshalResponse(assignments) } + +func buildPulsarAdminFunctionsWorkerInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminFunctionsWorkerInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminFunctionsWorkerResourceDesc) + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/functions_worker_legacy.go b/pkg/mcp/builders/pulsar/functions_worker_legacy.go new file mode 100644 index 0000000..fe0a1bc --- /dev/null +++ b/pkg/mcp/builders/pulsar/functions_worker_legacy.go @@ -0,0 +1,113 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminFunctionsWorkerLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar functions worker tools. +// /nolint:revive +type PulsarAdminFunctionsWorkerLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminFunctionsWorkerLegacyToolBuilder creates a new Pulsar admin functions worker legacy tool builder instance. 
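+// The legacy builder reuses buildPulsarAdminFunctionsWorkerInputSchema for its schema and delegates request handling to the SDK-based builder, converting results with legacyToolResultFromSDK.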
+func NewPulsarAdminFunctionsWorkerLegacyToolBuilder() *PulsarAdminFunctionsWorkerLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_functions_worker", + Version: "1.0.0", + Description: "Pulsar Admin functions worker management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "functions", "worker", "admin", "monitoring"}, + } + + features := []string{ + "pulsar-admin-functions-worker", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminFunctionsWorkerLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin functions worker legacy tool list. +func (b *PulsarAdminFunctionsWorkerLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildFunctionsWorkerTool() + if err != nil { + return nil, err + } + handler := b.buildFunctionsWorkerHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminFunctionsWorkerLegacyToolBuilder) buildFunctionsWorkerTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminFunctionsWorkerInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + return mcp.Tool{ + Name: "pulsar_admin_functions_worker", + Description: pulsarAdminFunctionsWorkerToolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminFunctionsWorkerLegacyToolBuilder) buildFunctionsWorkerHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminFunctionsWorkerToolBuilder() + sdkHandler := sdkBuilder.buildFunctionsWorkerHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminFunctionsWorkerInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/functions_worker_test.go b/pkg/mcp/builders/pulsar/functions_worker_test.go new file mode 100644 index 0000000..9232d1f --- /dev/null +++ b/pkg/mcp/builders/pulsar/functions_worker_test.go @@ -0,0 +1,112 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminFunctionsWorkerToolBuilder(t *testing.T) { + builder := NewPulsarAdminFunctionsWorkerToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_functions_worker", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-functions-worker") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-functions-worker"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_functions_worker", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-functions-worker"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-functions-worker"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminFunctionsWorkerToolSchema(t *testing.T) { + builder := NewPulsarAdminFunctionsWorkerToolBuilder() + tool, err := builder.buildFunctionsWorkerTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_functions_worker", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{"resource"} + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminFunctionsWorkerResourceDesc, resourceSchema.Description) +} diff --git a/pkg/mcp/builders/pulsar/legacy_adapter.go b/pkg/mcp/builders/pulsar/legacy_adapter.go new file mode 100644 index 0000000..b47dd3d --- /dev/null +++ b/pkg/mcp/builders/pulsar/legacy_adapter.go @@ -0,0 +1,43 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + legacy "github.com/mark3labs/mcp-go/mcp" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" +) + +func legacyToolResultFromSDK(result *sdk.CallToolResult) *legacy.CallToolResult { + if result == nil { + return legacy.NewToolResultText("") + } + + text := "" + for _, content := range result.Content { + if textContent, ok := content.(*sdk.TextContent); ok { + text = textContent.Text + break + } + } + + if result.IsError { + if text == "" { + text = "tool call failed" + } + return legacy.NewToolResultError(text) + } + + return legacy.NewToolResultText(text) +} diff --git a/pkg/mcp/builders/pulsar/namespace.go b/pkg/mcp/builders/pulsar/namespace.go index ebef3f4..0c9643a 100644 --- a/pkg/mcp/builders/pulsar/namespace.go +++ b/pkg/mcp/builders/pulsar/namespace.go @@ -21,13 +21,45 @@ import ( "strconv" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminNamespaceInput struct { + Operation string `json:"operation"` + Tenant *string `json:"tenant,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Bundles *string `json:"bundles,omitempty"` + Clusters []string `json:"clusters,omitempty"` + Subscription *string `json:"subscription,omitempty"` + Bundle *string `json:"bundle,omitempty"` + Force *string `json:"force,omitempty"` + Unload *string `json:"unload,omitempty"` +} + +const ( + pulsarAdminNamespaceOperationDesc = "Operation to perform on namespaces. Available operations:\n" + + "- list: List all namespaces for a tenant\n" + + "- get_topics: Get all topics within a namespace\n" + + "- create: Create a new namespace\n" + + "- delete: Delete an existing namespace\n" + + "- clear_backlog: Clear backlog for all topics in a namespace\n" + + "- unsubscribe: Unsubscribe from a subscription for all topics in a namespace\n" + + "- unload: Unload a namespace from the current serving broker\n" + + "- split_bundle: Split a namespace bundle" + pulsarAdminNamespaceTenantDesc = "The tenant name. Required for 'list' operation." + pulsarAdminNamespaceNamespaceDesc = "The namespace name in format 'tenant/namespace'. Required for all operations except 'list'." + pulsarAdminNamespaceBundlesDesc = "Number of bundles to activate when creating a namespace (default: 0 for default number of bundles). Used with 'create' operation." + pulsarAdminNamespaceClustersDesc = "List of clusters to assign when creating a namespace. Used with 'create' operation." + pulsarAdminNamespaceSubscriptionDesc = "Subscription name. Required for 'unsubscribe' operation, optional for 'clear_backlog'." + pulsarAdminNamespaceBundleDesc = "Bundle name or range. Required for 'split_bundle' operation, optional for 'clear_backlog', 'unsubscribe', and 'unload'." + pulsarAdminNamespaceForceDesc = "Force clear backlog (true/false). Used with 'clear_backlog' operation." + pulsarAdminNamespaceUnloadDesc = "Unload newly split bundles after splitting (true/false). Used with 'split_bundle' operation." 
+) + // PulsarAdminNamespaceToolBuilder implements the ToolBuilder interface for Pulsar Admin Namespace tools // It provides functionality to build Pulsar namespace management tools // /nolint:revive @@ -59,7 +91,7 @@ func NewPulsarAdminNamespaceToolBuilder() *PulsarAdminNamespaceToolBuilder { // BuildTools builds the Pulsar Admin Namespace tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminNamespaceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminNamespaceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -71,11 +103,14 @@ func (b *PulsarAdminNamespaceToolBuilder) BuildTools(_ context.Context, config b } // Build tools - tool := b.buildNamespaceTool() + tool, err := b.buildNamespaceTool() + if err != nil { + return nil, err + } handler := b.buildNamespaceHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminNamespaceInput, any]{ Tool: tool, Handler: handler, }, @@ -84,186 +119,153 @@ func (b *PulsarAdminNamespaceToolBuilder) BuildTools(_ context.Context, config b // buildNamespaceTool builds the Pulsar Admin Namespace MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminNamespaceToolBuilder) buildNamespaceTool() mcp.Tool { +func (b *PulsarAdminNamespaceToolBuilder) buildNamespaceTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminNamespaceInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Pulsar namespaces with various operations. " + "This tool provides functionality to work with namespaces in Apache Pulsar, " + "including listing, creating, deleting, and performing various operations on namespaces." - operationDesc := "Operation to perform on namespaces. Available operations:\n" + - "- list: List all namespaces for a tenant\n" + - "- get_topics: Get all topics within a namespace\n" + - "- create: Create a new namespace\n" + - "- delete: Delete an existing namespace\n" + - "- clear_backlog: Clear backlog for all topics in a namespace\n" + - "- unsubscribe: Unsubscribe from a subscription for all topics in a namespace\n" + - "- unload: Unload a namespace from the current serving broker\n" + - "- split_bundle: Split a namespace bundle" - - return mcp.NewTool("pulsar_admin_namespace", - mcp.WithDescription(toolDesc), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("tenant", - mcp.Description("The tenant name. Required for 'list' operation."), - ), - mcp.WithString("namespace", - mcp.Description("The namespace name in format 'tenant/namespace'. Required for all operations except 'list'."), - ), - mcp.WithString("bundles", - mcp.Description("Number of bundles to activate when creating a namespace (default: 0 for default number of bundles). Used with 'create' operation."), - ), - mcp.WithArray("clusters", - mcp.Description("List of clusters to assign when creating a namespace. Used with 'create' operation."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "Cluster name", - }, - ), - ), - mcp.WithString("subscription", - mcp.Description("Subscription name. 
Required for 'unsubscribe' operation, optional for 'clear_backlog'."), - ), - mcp.WithString("bundle", - mcp.Description("Bundle name or range. Required for 'split_bundle' operation, optional for 'clear_backlog', 'unsubscribe', and 'unload'."), - ), - mcp.WithString("force", - mcp.Description("Force clear backlog (true/false). Used with 'clear_backlog' operation."), - ), - mcp.WithString("unload", - mcp.Description("Unload newly split bundles after splitting (true/false). Used with 'split_bundle' operation."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_namespace", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildNamespaceHandler builds the Pulsar Admin Namespace handler function // Migrated from the original handler logic -func (b *PulsarAdminNamespaceToolBuilder) buildNamespaceHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get operation parameter - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil +func (b *PulsarAdminNamespaceToolBuilder) buildNamespaceHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminNamespaceInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, any, error) { + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") + } + + // Validate write operations in read-only mode + if readOnly && (operation == "create" || operation == "delete" || operation == "clear_backlog" || + operation == "unsubscribe" || operation == "unload" || operation == "split_bundle") { + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create Pulsar client client, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, b.handleError("get admin client", err) } // Route to appropriate handler based on operation switch operation { case "list": - return b.handleNamespaceList(ctx, client, request) + result, err := b.handleNamespaceList(client, input) + return result, nil, err case "get_topics": - return b.handleNamespaceGetTopics(ctx, client, request) - case "create", "delete", "clear_backlog", "unsubscribe", "unload", "split_bundle": - // Check if write operations are allowed - if readOnly { - return mcp.NewToolResultError(fmt.Sprintf("Operation '%s' not allowed in read-only mode", operation)), nil - } - - // Route to appropriate write operation handler - switch operation { - case "create": - return b.handleNamespaceCreate(ctx, client, request) - case "delete": - return b.handleNamespaceDelete(ctx, client, request) - case "clear_backlog": - return b.handleClearBacklog(ctx, client, request) - case "unsubscribe": - return b.handleUnsubscribe(ctx, client, request) - case "unload": - return b.handleUnload(ctx, client, request) - case "split_bundle": - return b.handleSplitBundle(ctx, client, request) - } + result, err := b.handleNamespaceGetTopics(client, input) + return result, nil, err + case "create": + 
result, err := b.handleNamespaceCreate(client, input) + return result, nil, err + case "delete": + result, err := b.handleNamespaceDelete(client, input) + return result, nil, err + case "clear_backlog": + result, err := b.handleClearBacklog(client, input) + return result, nil, err + case "unsubscribe": + result, err := b.handleUnsubscribe(client, input) + return result, nil, err + case "unload": + result, err := b.handleUnload(client, input) + return result, nil, err + case "split_bundle": + result, err := b.handleSplitBundle(client, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s. Supported operations: list, get_topics, create, delete, clear_backlog, unsubscribe, unload, split_bundle", operation)), nil + return nil, nil, fmt.Errorf("unknown operation: %s. supported operations: list, get_topics, create, delete, clear_backlog, unsubscribe, unload, split_bundle", operation) } - - // Should not reach here - return mcp.NewToolResultError("Unexpected error: operation not handled"), nil } } // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminNamespaceToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminNamespaceToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminNamespaceToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespaceToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Operation handler functions - migrated from the original implementation // handleNamespaceList handles listing namespaces for a tenant -func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceList(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") +func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceList(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'tenant' for namespace.list: %v", err) } // Get namespace list namespaces, err := client.Namespaces().GetNamespaces(tenant) if err != nil { - return b.handleError("list namespaces", err), nil + return nil, b.handleError("list namespaces", err) } return b.marshalResponse(namespaces) } // handleNamespaceGetTopics handles getting topics for a namespace -func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceGetTopics(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceGetTopics(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { 
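+	// requireString replaces the legacy request.RequireString lookups now that arguments arrive as a typed input struct.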
+ namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.get_topics: %v", err) } // Get topics list topics, err := client.Namespaces().GetTopics(namespace) if err != nil { - return b.handleError("get topics", err), nil + return nil, b.handleError("get topics", err) } return b.marshalResponse(topics) } // handleNamespaceCreate handles creating a new namespace -func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceCreate(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceCreate(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.create: %v", err) } // Get optional parameters - bundlesStr := request.GetString("bundles", "") + bundlesStr := stringValue(input.Bundles) bundles := 0 if bundlesStr != "" { bundlesInt, err := strconv.Atoi(bundlesStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid bundles value, must be an integer: %v", err)), nil + return nil, fmt.Errorf("invalid bundles value, must be an integer: %v", err) } bundles = bundlesInt } - clusters := request.GetStringSlice("clusters", []string{}) + clusters := input.Clusters // Prepare policies policies := utils.NewDefaultPolicies() @@ -271,9 +273,7 @@ func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceCreate(_ context.Contex // Set bundles if provided if bundles > 0 { if bundles < 0 || bundles > int(^uint32(0)) { // MaxInt32 - return mcp.NewToolResultError( - fmt.Sprintf("Invalid number of bundles. 
Number of bundles has to be in the range of (0, %d].", int(^uint32(0))), - ), nil + return nil, fmt.Errorf("invalid number of bundles, number of bundles has to be in the range of (0, %d]", int(^uint32(0))) } policies.Bundles = utils.NewBundlesDataWithNumBundles(bundles) } @@ -286,57 +286,54 @@ func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceCreate(_ context.Contex // Create namespace ns, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } err = client.Namespaces().CreateNsWithPolices(ns.String(), *policies) if err != nil { - return b.handleError("create namespace", err), nil + return nil, b.handleError("create namespace", err) } - return mcp.NewToolResultText(fmt.Sprintf("Created %s successfully", namespace)), nil + return textResult(fmt.Sprintf("Created %s successfully", namespace)), nil } // handleNamespaceDelete handles deleting a namespace -func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceDelete(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleNamespaceDelete(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.delete: %v", err) } // Delete namespace err = client.Namespaces().DeleteNamespace(namespace) if err != nil { - return b.handleError("delete namespace", err), nil + return nil, b.handleError("delete namespace", err) } - return mcp.NewToolResultText(fmt.Sprintf("Deleted %s successfully", namespace)), nil + return textResult(fmt.Sprintf("Deleted %s successfully", namespace)), nil } // handleClearBacklog handles clearing the backlog for all topics in a namespace -func (b *PulsarAdminNamespaceToolBuilder) handleClearBacklog(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleClearBacklog(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.clear_backlog: %v", err) } // Get optional parameters - subscription := request.GetString("subscription", "") - bundle := request.GetString("bundle", "") - force := request.GetString("force", "") - forceFlag := force == "true" + subscription := stringValue(input.Subscription) + bundle := stringValue(input.Bundle) + forceFlag := stringValue(input.Force) == "true" // If not forced, return an error requiring explicit force flag if !forceFlag { - return mcp.NewToolResultError( - "Clear backlog operation requires explicit confirmation. 
Please set force=true to proceed.", - ), nil + return nil, fmt.Errorf("clear backlog operation requires explicit confirmation, set force=true to proceed") } // Get namespace name ns, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } // Handle different backlog clearing scenarios @@ -355,33 +352,33 @@ func (b *PulsarAdminNamespaceToolBuilder) handleClearBacklog(_ context.Context, } if clearErr != nil { - return b.handleError("clear backlog", clearErr), nil + return nil, b.handleError("clear backlog", clearErr) } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Successfully cleared backlog for all topics in namespace %s", namespace), ), nil } // handleUnsubscribe handles unsubscribing the specified subscription for all topics of a namespace -func (b *PulsarAdminNamespaceToolBuilder) handleUnsubscribe(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleUnsubscribe(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.unsubscribe: %v", err) } - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get subscription name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for namespace.unsubscribe: %v", err) } // Get optional bundle - bundle := request.GetString("bundle", "") + bundle := stringValue(input.Bundle) // Get namespace name ns, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } // Unsubscribe namespace @@ -393,31 +390,31 @@ func (b *PulsarAdminNamespaceToolBuilder) handleUnsubscribe(_ context.Context, c } if unsubErr != nil { - return b.handleError("unsubscribe", unsubErr), nil + return nil, b.handleError("unsubscribe", unsubErr) } if bundle == "" { - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Successfully unsubscribed the subscription %s for all topics of the namespace %s", subscription, namespace), ), nil } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Successfully unsubscribed the subscription %s for all topics of the namespace %s with bundle range %s", subscription, namespace, bundle), ), nil } // handleUnload handles unloading a namespace from the current serving broker -func (b *PulsarAdminNamespaceToolBuilder) handleUnload(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleUnload(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, 
fmt.Errorf("missing required parameter 'namespace' for namespace.unload: %v", err) } // Get optional bundle - bundle := request.GetString("bundle", "") + bundle := stringValue(input.Bundle) // Unload namespace var unloadErr error @@ -428,42 +425,80 @@ func (b *PulsarAdminNamespaceToolBuilder) handleUnload(_ context.Context, client } if unloadErr != nil { - return b.handleError("unload namespace", unloadErr), nil + return nil, b.handleError("unload namespace", unloadErr) } if bundle == "" { - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Unloaded namespace %s successfully", namespace), ), nil } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Unloaded namespace %s with bundle %s successfully", namespace, bundle), ), nil } // handleSplitBundle handles splitting a namespace bundle -func (b *PulsarAdminNamespaceToolBuilder) handleSplitBundle(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminNamespaceToolBuilder) handleSplitBundle(client cmdutils.Client, input pulsarAdminNamespaceInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for namespace.split_bundle: %v", err) } - bundle, err := request.RequireString("bundle") + bundle, err := requireString(input.Bundle, "bundle") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get bundle: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'bundle' for namespace.split_bundle: %v", err) } // Get optional unload flag - unload := request.GetString("unload", "") == "true" + unload := stringValue(input.Unload) == "true" // Split namespace bundle err = client.Namespaces().SplitNamespaceBundle(namespace, bundle, unload) if err != nil { - return b.handleError("split namespace bundle", err), nil + return nil, b.handleError("split namespace bundle", err) } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("Split namespace bundle %s successfully", bundle), ), nil } + +func buildPulsarAdminNamespaceInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminNamespaceInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "operation", pulsarAdminNamespaceOperationDesc) + setSchemaDescription(schema, "tenant", pulsarAdminNamespaceTenantDesc) + setSchemaDescription(schema, "namespace", pulsarAdminNamespaceNamespaceDesc) + setSchemaDescription(schema, "bundles", pulsarAdminNamespaceBundlesDesc) + setSchemaDescription(schema, "clusters", pulsarAdminNamespaceClustersDesc) + setSchemaDescription(schema, "subscription", pulsarAdminNamespaceSubscriptionDesc) + setSchemaDescription(schema, "bundle", pulsarAdminNamespaceBundleDesc) + setSchemaDescription(schema, "force", pulsarAdminNamespaceForceDesc) + setSchemaDescription(schema, "unload", pulsarAdminNamespaceUnloadDesc) + + if clustersSchema := schema.Properties["clusters"]; clustersSchema != nil && clustersSchema.Items != nil { + clustersSchema.Items.Description = "cluster" + } + + 
normalizeAdditionalProperties(schema) + return schema, nil +} + +func stringValue(value *string) string { + if value == nil { + return "" + } + return *value +} diff --git a/pkg/mcp/builders/pulsar/namespace_legacy.go b/pkg/mcp/builders/pulsar/namespace_legacy.go new file mode 100644 index 0000000..3d214d7 --- /dev/null +++ b/pkg/mcp/builders/pulsar/namespace_legacy.go @@ -0,0 +1,469 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/pulsarctl/pkg/cmdutils" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarAdminNamespaceLegacyToolBuilder implements the ToolBuilder interface for Pulsar Admin Namespace tools +// It provides functionality to build Pulsar namespace management tools for the legacy server +// /nolint:revive +type PulsarAdminNamespaceLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminNamespaceLegacyToolBuilder creates a new Pulsar Admin Namespace legacy tool builder instance +func NewPulsarAdminNamespaceLegacyToolBuilder() *PulsarAdminNamespaceLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_namespace", + Version: "1.0.0", + Description: "Pulsar Admin namespace management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "namespace", "admin"}, + } + + features := []string{ + "pulsar-admin-namespaces", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminNamespaceLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Admin Namespace tool list for the legacy server +// This is the core method implementing the ToolBuilder interface +func (b *PulsarAdminNamespaceLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildNamespaceTool() + handler := b.buildNamespaceHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildNamespaceTool builds the Pulsar Admin Namespace MCP tool definition +// Migrated from the original tool definition logic +func (b *PulsarAdminNamespaceLegacyToolBuilder) buildNamespaceTool() mcp.Tool { + toolDesc := "Manage Pulsar namespaces with various operations. 
" + + "This tool provides functionality to work with namespaces in Apache Pulsar, " + + "including listing, creating, deleting, and performing various operations on namespaces." + + operationDesc := "Operation to perform on namespaces. Available operations:\n" + + "- list: List all namespaces for a tenant\n" + + "- get_topics: Get all topics within a namespace\n" + + "- create: Create a new namespace\n" + + "- delete: Delete an existing namespace\n" + + "- clear_backlog: Clear backlog for all topics in a namespace\n" + + "- unsubscribe: Unsubscribe from a subscription for all topics in a namespace\n" + + "- unload: Unload a namespace from the current serving broker\n" + + "- split_bundle: Split a namespace bundle" + + return mcp.NewTool("pulsar_admin_namespace", + mcp.WithDescription(toolDesc), + mcp.WithString("operation", mcp.Required(), + mcp.Description(operationDesc), + ), + mcp.WithString("tenant", + mcp.Description("The tenant name. Required for 'list' operation."), + ), + mcp.WithString("namespace", + mcp.Description("The namespace name in format 'tenant/namespace'. Required for all operations except 'list'."), + ), + mcp.WithString("bundles", + mcp.Description("Number of bundles to activate when creating a namespace (default: 0 for default number of bundles). Used with 'create' operation."), + ), + mcp.WithArray("clusters", + mcp.Description("List of clusters to assign when creating a namespace. Used with 'create' operation."), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "Cluster name", + }, + ), + ), + mcp.WithString("subscription", + mcp.Description("Subscription name. Required for 'unsubscribe' operation, optional for 'clear_backlog'."), + ), + mcp.WithString("bundle", + mcp.Description("Bundle name or range. Required for 'split_bundle' operation, optional for 'clear_backlog', 'unsubscribe', and 'unload'."), + ), + mcp.WithString("force", + mcp.Description("Force clear backlog (true/false). Used with 'clear_backlog' operation."), + ), + mcp.WithString("unload", + mcp.Description("Unload newly split bundles after splitting (true/false). 
Used with 'split_bundle' operation."), + ), + ) +} + +// buildNamespaceHandler builds the Pulsar Admin Namespace handler function +// Migrated from the original handler logic +func (b *PulsarAdminNamespaceLegacyToolBuilder) buildNamespaceHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get operation parameter + operation, err := request.RequireString("operation") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + } + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + // Create Pulsar client + client, err := session.GetAdminClient() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + } + + // Route to appropriate handler based on operation + switch operation { + case "list": + return b.handleNamespaceList(ctx, client, request) + case "get_topics": + return b.handleNamespaceGetTopics(ctx, client, request) + case "create", "delete", "clear_backlog", "unsubscribe", "unload", "split_bundle": + // Check if write operations are allowed + if readOnly { + return mcp.NewToolResultError(fmt.Sprintf("Operation '%s' not allowed in read-only mode", operation)), nil + } + + // Route to appropriate write operation handler + switch operation { + case "create": + return b.handleNamespaceCreate(ctx, client, request) + case "delete": + return b.handleNamespaceDelete(ctx, client, request) + case "clear_backlog": + return b.handleClearBacklog(ctx, client, request) + case "unsubscribe": + return b.handleUnsubscribe(ctx, client, request) + case "unload": + return b.handleUnload(ctx, client, request) + case "split_bundle": + return b.handleSplitBundle(ctx, client, request) + } + default: + return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s. 
Supported operations: list, get_topics, create, delete, clear_backlog, unsubscribe, unload, split_bundle", operation)), nil + } + + // Should not reach here + return mcp.NewToolResultError("Unexpected error: operation not handled"), nil + } +} + +// Unified error handling and utility functions + +// handleError provides unified error handling +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses +func (b *PulsarAdminNamespaceLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} + +// Operation handler functions - migrated from the original implementation + +// handleNamespaceList handles listing namespaces for a tenant +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleNamespaceList(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + tenant, err := request.RequireString("tenant") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + } + + // Get namespace list + namespaces, err := client.Namespaces().GetNamespaces(tenant) + if err != nil { + return b.handleError("list namespaces", err), nil + } + + return b.marshalResponse(namespaces) +} + +// handleNamespaceGetTopics handles getting topics for a namespace +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleNamespaceGetTopics(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + // Get topics list + topics, err := client.Namespaces().GetTopics(namespace) + if err != nil { + return b.handleError("get topics", err), nil + } + + return b.marshalResponse(topics) +} + +// handleNamespaceCreate handles creating a new namespace +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleNamespaceCreate(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + // Get optional parameters + bundlesStr := request.GetString("bundles", "") + bundles := 0 + if bundlesStr != "" { + bundlesInt, err := strconv.Atoi(bundlesStr) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid bundles value, must be an integer: %v", err)), nil + } + bundles = bundlesInt + } + + clusters := request.GetStringSlice("clusters", []string{}) + + // Prepare policies + policies := utils.NewDefaultPolicies() + + // Set bundles if provided + if bundles > 0 { + if bundles < 0 || bundles > int(^uint32(0)) { // MaxInt32 + return mcp.NewToolResultError( + fmt.Sprintf("Invalid number of bundles. 
Number of bundles has to be in the range of (0, %d].", int(^uint32(0))), + ), nil + } + policies.Bundles = utils.NewBundlesDataWithNumBundles(bundles) + } + + // Set clusters if provided + if len(clusters) > 0 { + policies.ReplicationClusters = clusters + } + + // Create namespace + ns, err := utils.GetNamespaceName(namespace) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + } + + err = client.Namespaces().CreateNsWithPolices(ns.String(), *policies) + if err != nil { + return b.handleError("create namespace", err), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Created %s successfully", namespace)), nil +} + +// handleNamespaceDelete handles deleting a namespace +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleNamespaceDelete(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + // Delete namespace + err = client.Namespaces().DeleteNamespace(namespace) + if err != nil { + return b.handleError("delete namespace", err), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Deleted %s successfully", namespace)), nil +} + +// handleClearBacklog handles clearing the backlog for all topics in a namespace +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleClearBacklog(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + // Get optional parameters + subscription := request.GetString("subscription", "") + bundle := request.GetString("bundle", "") + force := request.GetString("force", "") + forceFlag := force == "true" + + // If not forced, return an error requiring explicit force flag + if !forceFlag { + return mcp.NewToolResultError( + "Clear backlog operation requires explicit confirmation. 
Please set force=true to proceed.", + ), nil + } + + // Get namespace name + ns, err := utils.GetNamespaceName(namespace) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + } + + // Handle different backlog clearing scenarios + var clearErr error + //nolint:gocritic + if subscription != "" { + if bundle != "" { + clearErr = client.Namespaces().ClearNamespaceBundleBacklogForSubscription(*ns, bundle, subscription) + } else { + clearErr = client.Namespaces().ClearNamespaceBacklogForSubscription(*ns, subscription) + } + } else if bundle != "" { + clearErr = client.Namespaces().ClearNamespaceBundleBacklog(*ns, bundle) + } else { + clearErr = client.Namespaces().ClearNamespaceBacklog(*ns) + } + + if clearErr != nil { + return b.handleError("clear backlog", clearErr), nil + } + + return mcp.NewToolResultText( + fmt.Sprintf("Successfully cleared backlog for all topics in namespace %s", namespace), + ), nil +} + +// handleUnsubscribe handles unsubscribing the specified subscription for all topics of a namespace +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleUnsubscribe(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + subscription, err := request.RequireString("subscription") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get subscription name: %v", err)), nil + } + + // Get optional bundle + bundle := request.GetString("bundle", "") + + // Get namespace name + ns, err := utils.GetNamespaceName(namespace) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + } + + // Unsubscribe namespace + var unsubErr error + if bundle == "" { + unsubErr = client.Namespaces().UnsubscribeNamespace(*ns, subscription) + } else { + unsubErr = client.Namespaces().UnsubscribeNamespaceBundle(*ns, bundle, subscription) + } + + if unsubErr != nil { + return b.handleError("unsubscribe", unsubErr), nil + } + + if bundle == "" { + return mcp.NewToolResultText( + fmt.Sprintf("Successfully unsubscribed the subscription %s for all topics of the namespace %s", + subscription, namespace), + ), nil + } + + return mcp.NewToolResultText( + fmt.Sprintf("Successfully unsubscribed the subscription %s for all topics of the namespace %s with bundle range %s", + subscription, namespace, bundle), + ), nil +} + +// handleUnload handles unloading a namespace from the current serving broker +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleUnload(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + // Get optional bundle + bundle := request.GetString("bundle", "") + + // Unload namespace + var unloadErr error + if bundle == "" { + unloadErr = client.Namespaces().Unload(namespace) + } else { + unloadErr = client.Namespaces().UnloadNamespaceBundle(namespace, bundle) + } + + if unloadErr != nil { + return b.handleError("unload namespace", unloadErr), nil + } + + if bundle == "" { + return mcp.NewToolResultText( + fmt.Sprintf("Unloaded namespace %s successfully", namespace), + ), nil + } + + return mcp.NewToolResultText( + fmt.Sprintf("Unloaded namespace %s with 
bundle %s successfully", namespace, bundle), + ), nil +} + +// handleSplitBundle handles splitting a namespace bundle +func (b *PulsarAdminNamespaceLegacyToolBuilder) handleSplitBundle(_ context.Context, client cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + } + + bundle, err := request.RequireString("bundle") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get bundle: %v", err)), nil + } + + // Get optional unload flag + unload := request.GetString("unload", "") == "true" + + // Split namespace bundle + err = client.Namespaces().SplitNamespaceBundle(namespace, bundle, unload) + if err != nil { + return b.handleError("split namespace bundle", err), nil + } + + return mcp.NewToolResultText( + fmt.Sprintf("Split namespace bundle %s successfully", bundle), + ), nil +} diff --git a/pkg/mcp/builders/pulsar/namespace_policy.go b/pkg/mcp/builders/pulsar/namespace_policy.go index 3410dff..dbfaec6 100644 --- a/pkg/mcp/builders/pulsar/namespace_policy.go +++ b/pkg/mcp/builders/pulsar/namespace_policy.go @@ -22,90 +22,43 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" pulsarctlutils "github.com/streamnative/pulsarctl/pkg/ctl/utils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) -// PulsarAdminNamespacePolicyToolBuilder implements the ToolBuilder interface for Pulsar admin namespace policies -// /nolint:revive -type PulsarAdminNamespacePolicyToolBuilder struct { - *builders.BaseToolBuilder +type pulsarAdminNamespacePolicyGetInput struct { + Namespace string `json:"namespace"` } -// NewPulsarAdminNamespacePolicyToolBuilder creates a new Pulsar admin namespace policy tool builder instance -func NewPulsarAdminNamespacePolicyToolBuilder() *PulsarAdminNamespacePolicyToolBuilder { - metadata := builders.ToolMetadata{ - Name: "pulsar_admin_namespace_policy", - Version: "1.0.0", - Description: "Pulsar admin namespace policy management tools", - Category: "pulsar_admin", - Tags: []string{"pulsar", "admin", "namespace_policy"}, - } - - features := []string{ - "pulsar-admin-namespace-policy", - "all", - "all-pulsar", - "pulsar-admin", - } - - return &PulsarAdminNamespacePolicyToolBuilder{ - BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), - } +type pulsarAdminNamespacePolicySetInput struct { + Namespace string `json:"namespace"` + Policy string `json:"policy"` + Role *string `json:"role,omitempty"` + Actions []string `json:"actions,omitempty"` + Clusters []string `json:"clusters,omitempty"` + Roles []string `json:"roles,omitempty"` + TTL *string `json:"ttl,omitempty"` + Time *string `json:"time,omitempty"` + Size *string `json:"size,omitempty"` + LimitSize *string `json:"limit-size,omitempty"` + LimitTime *string `json:"limit-time,omitempty"` + Type *string `json:"type,omitempty"` } -// BuildTools builds the Pulsar admin namespace policy tool list -func (b *PulsarAdminNamespacePolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { - // Check features - return empty list 
if no required features are present - if !b.HasAnyRequiredFeature(config.Features) { - return nil, nil - } - - // Validate configuration (only validate when matching features are present) - if err := b.Validate(config); err != nil { - return nil, err - } - - // Build tools - tools := []server.ServerTool{} - - // Always add get policies tool - getTool := b.buildNamespaceGetPoliciesTool() - getHandler := b.buildNamespaceGetPoliciesHandler() - tools = append(tools, server.ServerTool{ - Tool: getTool, - Handler: getHandler, - }) - - // Add write operations if not in read-only mode - if !config.ReadOnly { - // Add set policy tool - setTool := b.buildNamespaceSetPolicyTool() - setHandler := b.buildNamespaceSetPolicyHandler() - tools = append(tools, server.ServerTool{ - Tool: setTool, - Handler: setHandler, - }) - - // Add remove policy tool - removeTool := b.buildNamespaceRemovePolicyTool() - removeHandler := b.buildNamespaceRemovePolicyHandler() - tools = append(tools, server.ServerTool{ - Tool: removeTool, - Handler: removeHandler, - }) - } - - return tools, nil +type pulsarAdminNamespacePolicyRemoveInput struct { + Namespace string `json:"namespace"` + Policy string `json:"policy"` + Role *string `json:"role,omitempty"` + Subscription *string `json:"subscription,omitempty"` + Type *string `json:"type,omitempty"` } -// buildNamespaceGetPoliciesTool builds the get policies tool -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceGetPoliciesTool() mcp.Tool { - toolDesc := "Get the configuration policies of a namespace. " + +const ( + pulsarAdminNamespacePolicyGetToolDesc = "Get the configuration policies of a namespace. " + "Returns a comprehensive view of all policies applied to the namespace. " + "The response includes the following fields:" + "\n* bundles: Namespace bundle configuration, including boundaries and number of bundles" + @@ -137,18 +90,7 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceGetPoliciesTool() "\n* subscription_auth_mode: Authentication mode for subscriptions" + "\n* is_allow_auto_update_schema: Whether automatic schema updates are allowed" + "\nRequires tenant admin permissions." - - return mcp.NewTool("pulsar_admin_namespace_policy_get", - mcp.WithDescription(toolDesc), - mcp.WithString("namespace", mcp.Required(), - mcp.Description("The namespace name (tenant/namespace) to get policies for"), - ), - ) -} - -// buildNamespaceSetPolicyTool builds the set policy tool -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceSetPolicyTool() mcp.Tool { - toolDesc := "Set a policy for a namespace. " + + pulsarAdminNamespacePolicySetToolDesc = "Set a policy for a namespace. " + "This is a unified tool for setting different types of policies on a namespace. " + "The policy type determines which specific policy will be set, and the required parameters " + "vary based on the policy type. 
" + @@ -172,78 +114,7 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceSetPolicyTool() mc "max-consumers-per-subscription, anti-affinity-group, persistence, deduplication, encryption-required, " + "subscription-auth-mode, subscription-permission, dispatch-rate, replicator-dispatch-rate, subscribe-rate, " + "subscription-dispatch-rate, publish-rate" - - return mcp.NewTool("pulsar_admin_namespace_policy_set", - mcp.WithDescription(toolDesc), - mcp.WithString("namespace", mcp.Required(), - mcp.Description("The namespace name (tenant/namespace) to set the policy for"), - ), - mcp.WithString("policy", mcp.Required(), - mcp.Description("Type of policy to set. Available options: "+ - "message-ttl, retention, permission, replication-clusters, backlog-quota, "+ - "topic-auto-creation, schema-validation, schema-auto-update, auto-update-schema, "+ - "offload-threshold, offload-deletion-lag, compaction-threshold, "+ - "max-producers-per-topic, max-consumers-per-topic, max-consumers-per-subscription, "+ - "anti-affinity-group, persistence, deduplication, encryption-required, "+ - "subscription-auth-mode, subscription-permission, dispatch-rate, "+ - "replicator-dispatch-rate, subscribe-rate, subscription-dispatch-rate, publish-rate"), - ), - // Generic policy parameters - specific ones will be used based on the policy type - mcp.WithString("role", - mcp.Description("Role name for permission policies"), - ), - mcp.WithArray("actions", - mcp.Description("Actions to grant for permission policies (e.g., produce, consume)"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "action", - }, - ), - ), - mcp.WithArray("clusters", - mcp.Description("List of clusters for replication policies"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "cluster", - }, - ), - ), - mcp.WithArray("roles", - mcp.Description("List of roles for subscription permission policies"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "role", - }, - ), - ), - mcp.WithString("ttl", - mcp.Description("Message TTL in seconds (or 0 to disable TTL)"), - ), - mcp.WithString("time", - mcp.Description("Retention time in minutes, or special values: 0 (no retention) or -1 (infinite retention)"), - ), - mcp.WithString("size", - mcp.Description("Retention size limit (e.g., 10M, 16G, 3T), or special values: 0 (no retention) or -1 (infinite size retention)"), - ), - mcp.WithString("limit-size", - mcp.Description("Size limit for backlog quota (e.g., 10M, 16G)"), - ), - mcp.WithString("limit-time", - mcp.Description("Time limit in seconds for backlog quota. Default is -1 (infinite)"), - ), - mcp.WithString("policy", - mcp.Description("Retention policy for backlog quota (valid options: producer_request_hold, producer_exception, consumer_backlog_eviction)"), - ), - // Add more parameters as needed - ) -} - -// buildNamespaceRemovePolicyTool builds the remove policy tool -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceRemovePolicyTool() mcp.Tool { - toolDesc := "Remove a policy from a namespace. " + + pulsarAdminNamespacePolicyRemoveToolDesc = "Remove a policy from a namespace. " + "This is a unified tool for removing different types of policies from a namespace. " + "The policy type determines which specific policy will be removed. " + "Requires appropriate admin permissions based on the policy being modified.\n\n" + @@ -262,180 +133,320 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceRemovePolicyTool() "6. 
subscription-permission: Revokes permission from a role to access a subscription\n" + " - Required: namespace, subscription, role" - return mcp.NewTool("pulsar_admin_namespace_policy_remove", - mcp.WithDescription(toolDesc), - mcp.WithString("namespace", mcp.Required(), - mcp.Description("The namespace name (tenant/namespace) to remove the policy from"), - ), - mcp.WithString("policy", mcp.Required(), - mcp.Description("Type of policy to remove. Available options: "+ - "backlog-quota, topic-auto-creation, offload-deletion-lag, anti-affinity-group, "+ - "permission, subscription-permission"), - ), - mcp.WithString("role", - mcp.Description("Role name for permission policies"), - ), - mcp.WithString("subscription", - mcp.Description("Subscription name for subscription permission policies"), - ), - mcp.WithString("type", - mcp.Description("Type of backlog quota to remove"), - ), - ) + pulsarAdminNamespacePolicyGetNamespaceDesc = "The namespace name (tenant/namespace) to get policies for" + pulsarAdminNamespacePolicySetNamespaceDesc = "The namespace name (tenant/namespace) to set the policy for" + pulsarAdminNamespacePolicySetPolicyDesc = "Type of policy to set. Available options: " + + "message-ttl, retention, permission, replication-clusters, backlog-quota, " + + "topic-auto-creation, schema-validation, schema-auto-update, auto-update-schema, " + + "offload-threshold, offload-deletion-lag, compaction-threshold, " + + "max-producers-per-topic, max-consumers-per-topic, max-consumers-per-subscription, " + + "anti-affinity-group, persistence, deduplication, encryption-required, " + + "subscription-auth-mode, subscription-permission, dispatch-rate, " + + "replicator-dispatch-rate, subscribe-rate, subscription-dispatch-rate, publish-rate" + pulsarAdminNamespacePolicySetRoleDesc = "Role name for permission policies" + pulsarAdminNamespacePolicySetActionsDesc = "Actions to grant for permission policies (e.g., produce, consume)" + pulsarAdminNamespacePolicySetClustersDesc = "List of clusters for replication policies" + pulsarAdminNamespacePolicySetRolesDesc = "List of roles for subscription permission policies" + pulsarAdminNamespacePolicySetTTLDesc = "Message TTL in seconds (or 0 to disable TTL)" + pulsarAdminNamespacePolicySetTimeDesc = "Retention time in minutes, or special values: 0 (no retention) or -1 (infinite retention)" + pulsarAdminNamespacePolicySetSizeDesc = "Retention size limit (e.g., 10M, 16G, 3T), or special values: 0 (no retention) or -1 (infinite size retention)" + pulsarAdminNamespacePolicySetLimitSizeDesc = "Size limit for backlog quota (e.g., 10M, 16G)" + pulsarAdminNamespacePolicySetLimitTimeDesc = "Time limit in seconds for backlog quota. Default is -1 (infinite)" + pulsarAdminNamespacePolicySetTypeDesc = "Type of backlog quota to apply" + pulsarAdminNamespacePolicyRemoveNamespaceDesc = "The namespace name (tenant/namespace) to remove the policy from" + pulsarAdminNamespacePolicyRemovePolicyDesc = "Type of policy to remove. 
Available options: " + + "backlog-quota, topic-auto-creation, offload-deletion-lag, anti-affinity-group, " + + "permission, subscription-permission" + pulsarAdminNamespacePolicyRemoveRoleDesc = "Role name for permission policies" + pulsarAdminNamespacePolicyRemoveSubscriptionDesc = "Subscription name for subscription permission policies" + pulsarAdminNamespacePolicyRemoveTypeDesc = "Type of backlog quota to remove" +) + +// PulsarAdminNamespacePolicyToolBuilder implements the ToolBuilder interface for Pulsar admin namespace policies +// /nolint:revive +type PulsarAdminNamespacePolicyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminNamespacePolicyToolBuilder creates a new Pulsar admin namespace policy tool builder instance +func NewPulsarAdminNamespacePolicyToolBuilder() *PulsarAdminNamespacePolicyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_namespace_policy", + Version: "1.0.0", + Description: "Pulsar admin namespace policy management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "namespace_policy"}, + } + + features := []string{ + "pulsar-admin-namespace-policy", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminNamespacePolicyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin namespace policy tool list +func (b *PulsarAdminNamespacePolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + tools := []builders.ToolDefinition{} + + getTool, err := b.buildNamespaceGetPoliciesTool() + if err != nil { + return nil, err + } + getHandler := b.buildNamespaceGetPoliciesHandler() + tools = append(tools, builders.ServerTool[pulsarAdminNamespacePolicyGetInput, any]{ + Tool: getTool, + Handler: getHandler, + }) + + if !config.ReadOnly { + setTool, err := b.buildNamespaceSetPolicyTool() + if err != nil { + return nil, err + } + setHandler := b.buildNamespaceSetPolicyHandler() + tools = append(tools, builders.ServerTool[pulsarAdminNamespacePolicySetInput, any]{ + Tool: setTool, + Handler: setHandler, + }) + + removeTool, err := b.buildNamespaceRemovePolicyTool() + if err != nil { + return nil, err + } + removeHandler := b.buildNamespaceRemovePolicyHandler() + tools = append(tools, builders.ServerTool[pulsarAdminNamespacePolicyRemoveInput, any]{ + Tool: removeTool, + Handler: removeHandler, + }) + } + + return tools, nil +} + +// buildNamespaceGetPoliciesTool builds the get policies tool +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceGetPoliciesTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicyGetInputSchema() + if err != nil { + return nil, err + } + + return &sdk.Tool{ + Name: "pulsar_admin_namespace_policy_get", + Description: pulsarAdminNamespacePolicyGetToolDesc, + InputSchema: inputSchema, + }, nil +} + +// buildNamespaceSetPolicyTool builds the set policy tool +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceSetPolicyTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicySetInputSchema() + if err != nil { + return nil, err + } + + return &sdk.Tool{ + Name: 
"pulsar_admin_namespace_policy_set", + Description: pulsarAdminNamespacePolicySetToolDesc, + InputSchema: inputSchema, + }, nil +} + +// buildNamespaceRemovePolicyTool builds the remove policy tool +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceRemovePolicyTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicyRemoveInputSchema() + if err != nil { + return nil, err + } + + return &sdk.Tool{ + Name: "pulsar_admin_namespace_policy_remove", + Description: pulsarAdminNamespacePolicyRemoveToolDesc, + InputSchema: inputSchema, + }, nil } // buildNamespaceGetPoliciesHandler builds the get policies handler -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceGetPoliciesHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceGetPoliciesHandler() builders.ToolHandlerFunc[pulsarAdminNamespacePolicyGetInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminNamespacePolicyGetInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + if input.Namespace == "" { + return nil, nil, fmt.Errorf("missing required parameter 'namespace'") } // Get policies - policies, err := client.Namespaces().GetPolicies(namespace) + policies, err := client.Namespaces().GetPolicies(input.Namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get policies: %v", err)), nil + return nil, nil, b.handleError("get policies", err) } - return b.marshalResponse(policies) + result, err := b.marshalResponse(policies) + return result, nil, err } } // buildNamespaceSetPolicyHandler builds the set policy handler -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceSetPolicyHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceSetPolicyHandler() builders.ToolHandlerFunc[pulsarAdminNamespacePolicySetInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + namespace := input.Namespace + if namespace == "" { + return nil, nil, fmt.Errorf("missing 
required parameter 'namespace'") } - policy, err := request.RequireString("policy") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get policy type: %v", err)), nil + policy := input.Policy + if policy == "" { + return nil, nil, fmt.Errorf("missing required parameter 'policy'") } // Handle different policy types switch policy { case "message-ttl": - return b.handleSetMessageTTL(ctx, client, namespace, request) + result, handlerErr := b.handleSetMessageTTL(client, namespace, input) + return result, nil, handlerErr case "retention": - return b.handleSetRetention(ctx, client, namespace, request) + result, handlerErr := b.handleSetRetention(client, namespace, input) + return result, nil, handlerErr case "permission": - return b.handleGrantPermission(ctx, client, namespace, request) + result, handlerErr := b.handleGrantPermission(client, namespace, input) + return result, nil, handlerErr case "replication-clusters": - return b.handleSetReplicationClusters(ctx, client, namespace, request) + result, handlerErr := b.handleSetReplicationClusters(client, namespace, input) + return result, nil, handlerErr case "backlog-quota": - return b.handleSetBacklogQuota(ctx, client, namespace, request) - // Add more policy types as needed + result, handlerErr := b.handleSetBacklogQuota(client, namespace, input) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported policy type: %s", policy)), nil + return nil, nil, fmt.Errorf("unsupported policy type: %s", policy) } } } // buildNamespaceRemovePolicyHandler builds the remove policy handler -func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceRemovePolicyHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespacePolicyToolBuilder) buildNamespaceRemovePolicyHandler() builders.ToolHandlerFunc[pulsarAdminNamespacePolicyRemoveInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminNamespacePolicyRemoveInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", err) } - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace name: %v", err)), nil + namespace := input.Namespace + if namespace == "" { + return nil, nil, fmt.Errorf("missing required parameter 'namespace'") } - policy, err := request.RequireString("policy") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get policy type: %v", err)), nil + policy := input.Policy + if policy == "" { + return nil, nil, fmt.Errorf("missing required parameter 'policy'") } // Handle different policy types switch policy { case "permission": - return b.handleRevokePermission(ctx, client, namespace, request) + result, handlerErr := b.handleRevokePermission(client, namespace, input) + return result, nil, handlerErr case "backlog-quota": - return b.handleRemoveBacklogQuota(ctx, client, namespace, request) - // Add more policy types as needed + result, handlerErr := 
b.handleRemoveBacklogQuota(client, namespace) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported policy type for removal: %s", policy)), nil + return nil, nil, fmt.Errorf("unsupported policy type for removal: %s", policy) } } } // Utility functions -func (b *PulsarAdminNamespacePolicyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminNamespacePolicyToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } -func (b *PulsarAdminNamespacePolicyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespacePolicyToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Policy-specific handler functions // handleSetMessageTTL handles setting message TTL for a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetMessageTTL(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - ttlStr, err := request.RequireString("ttl") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetMessageTTL(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, error) { + ttlStr, err := requireString(input.TTL, "ttl") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get TTL: %v", err)), nil + return nil, fmt.Errorf("failed to get TTL: %v", err) } ttl, err := strconv.ParseInt(ttlStr, 10, 64) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid TTL value, must be an integer: %v", err)), nil + return nil, fmt.Errorf("invalid TTL value, must be an integer: %v", err) } // Set message TTL err = client.Namespaces().SetNamespaceMessageTTL(namespace, int(ttl)) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set message TTL: %v", err)), nil + return nil, fmt.Errorf("failed to set message TTL: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Set message TTL for %s to %d seconds", namespace, ttl)), nil + return textResult(fmt.Sprintf("Set message TTL for %s to %d seconds", namespace, ttl)), nil } // handleSetRetention handles setting retention for a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetRetention(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - timeStr := request.GetString("time", "") - sizeStr := request.GetString("size", "") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetRetention(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, error) { + timeStr := "" + if input.Time != nil { + timeStr = *input.Time + } + + sizeStr := "" + if input.Size != nil { + sizeStr = *input.Size + } if timeStr == "" && sizeStr == "" { - return mcp.NewToolResultError("At least one of 'time' or 'size' must be specified"), nil + return nil, fmt.Errorf("at least one of 'time' or 'size' must be specified") } // Parse retention time @@ -444,7 +455,7 @@ func (b 
*PulsarAdminNamespacePolicyToolBuilder) handleSetRetention(_ context.Con // Parse relative time in seconds from the input string retentionTime, err := pulsarctlutils.ParseRelativeTimeInSeconds(timeStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid retention time format: %v", err)), nil + return nil, fmt.Errorf("invalid retention time format: %v", err) } if retentionTime != -1 { @@ -466,14 +477,14 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetRetention(_ context.Con // Parse size string (e.g., "10M", "16G", "3T") sizeInBytes, err := pulsarctlutils.ValidateSizeString(sizeStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid retention size format: %v", err)), nil + return nil, fmt.Errorf("invalid retention size format: %v", err) } if sizeInBytes != -1 { // Convert bytes to MB retentionSizeInMB = int(sizeInBytes / (1024 * 1024)) if retentionSizeInMB < 1 { - return mcp.NewToolResultError("Retention size must be at least 1MB"), nil + return nil, fmt.Errorf("retention size must be at least 1MB") } } else { retentionSizeInMB = -1 // Infinite size retention @@ -489,122 +500,128 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetRetention(_ context.Con // Set retention err := client.Namespaces().SetRetention(namespace, retention) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set retention: %v", err)), nil + return nil, fmt.Errorf("failed to set retention: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Set retention for %s successfully", namespace)), nil + return textResult(fmt.Sprintf("Set retention for %s successfully", namespace)), nil } // handleGrantPermission handles granting permissions on a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleGrantPermission(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - role, err := request.RequireString("role") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleGrantPermission(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, error) { + role, err := requireString(input.Role, "role") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get role: %v", err)), nil + return nil, fmt.Errorf("failed to get role: %v", err) } - actions, err := request.RequireStringSlice("actions") + actions, err := requireStringSlice(input.Actions, "actions") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get actions: %v", err)), nil + return nil, fmt.Errorf("failed to get actions: %v", err) } ns, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } a, err := b.parseActions(actions) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to parse actions: %v", err)), nil + return nil, fmt.Errorf("failed to parse actions: %v", err) } // Grant permissions err = client.Namespaces().GrantNamespacePermission(*ns, role, a) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to grant permission: %v", err)), nil + return nil, fmt.Errorf("failed to grant permission: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Granted %v permission(s) to role %s on %s", actions, role, namespace)), nil + return textResult(fmt.Sprintf("Granted %v permission(s) to role %s on %s", actions, role, namespace)), nil } // handleRevokePermission 
handles revoking permissions from a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleRevokePermission(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - role, err := request.RequireString("role") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleRevokePermission(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicyRemoveInput) (*sdk.CallToolResult, error) { + role, err := requireString(input.Role, "role") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get role: %v", err)), nil + return nil, fmt.Errorf("failed to get role: %v", err) } ns, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } // Revoke permissions err = client.Namespaces().RevokeNamespacePermission(*ns, role) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to revoke permission: %v", err)), nil + return nil, fmt.Errorf("failed to revoke permission: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Revoked all permissions from role %s on %s", role, namespace)), nil + return textResult(fmt.Sprintf("Revoked all permissions from role %s on %s", role, namespace)), nil } // handleSetReplicationClusters handles setting replication clusters for a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetReplicationClusters(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - clusters, err := request.RequireStringSlice("clusters") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetReplicationClusters(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, error) { + clusters, err := requireStringSlice(input.Clusters, "clusters") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get clusters: %v", err)), nil + return nil, fmt.Errorf("failed to get clusters: %v", err) } if len(clusters) == 0 { - return mcp.NewToolResultError("At least one cluster must be specified"), nil + return nil, fmt.Errorf("at least one cluster must be specified") } // Set replication clusters err = client.Namespaces().SetNamespaceReplicationClusters(namespace, clusters) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set replication clusters: %v", err)), nil + return nil, fmt.Errorf("failed to set replication clusters: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Set replication clusters for %s to %s", namespace, strings.Join(clusters, ", "))), nil + return textResult(fmt.Sprintf("Set replication clusters for %s to %s", namespace, strings.Join(clusters, ", "))), nil } // handleSetBacklogQuota handles setting backlog quota for a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetBacklogQuota(_ context.Context, client cmdutils.Client, namespace string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - limitSizeStr, err := request.RequireString("limit-size") +func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetBacklogQuota(client cmdutils.Client, namespace string, input pulsarAdminNamespacePolicySetInput) (*sdk.CallToolResult, error) { + limitSizeStr, err := requireString(input.LimitSize, "limit-size") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get limit size: %v", err)), nil + return nil, fmt.Errorf("failed 
to get limit size: %v", err) } - policyStr, err := request.RequireString("policy") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get policy: %v", err)), nil + policyStr := input.Policy + if policyStr == "" { + return nil, fmt.Errorf("failed to get policy: required argument \"policy\" not found") } // Parse backlog size limit limitSize, err := pulsarctlutils.ValidateSizeString(limitSizeStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid limit size format: %v", err)), nil + return nil, fmt.Errorf("invalid limit size format: %v", err) } // Parse backlog quota policy using the ParseRetentionPolicy function policy, err := utils.ParseRetentionPolicy(policyStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid backlog quota policy: %s. Valid options: producer_request_hold, producer_exception, consumer_backlog_eviction", policyStr)), nil + return nil, fmt.Errorf("invalid backlog quota policy: %s. Valid options: producer_request_hold, producer_exception, consumer_backlog_eviction", policyStr) } // Get optional time limit - limitTimeStr := request.GetString("limit-time", "-1") + limitTimeStr := "-1" + if input.LimitTime != nil { + limitTimeStr = *input.LimitTime + } limitTime, err := strconv.ParseInt(limitTimeStr, 10, 64) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid limit time: %v", err)), nil + return nil, fmt.Errorf("invalid limit time: %v", err) } // Parse quota type (optional, default to destination_storage) - quotaTypeStr := request.GetString("type", "destination_storage") - quotaType := utils.DestinationStorage // Default + quotaTypeStr := "destination_storage" + if input.Type != nil && *input.Type != "" { + quotaTypeStr = *input.Type + } + quotaType := utils.DestinationStorage if quotaTypeStr != "" { parsedType, err := utils.ParseBacklogQuotaType(quotaTypeStr) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid backlog quota type: %v", err)), nil + return nil, fmt.Errorf("invalid backlog quota type: %v", err) } quotaType = parsedType } @@ -613,21 +630,21 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) handleSetBacklogQuota(_ context. 
backlogQuota := utils.NewBacklogQuota(limitSize, limitTime, policy) err = client.Namespaces().SetBacklogQuota(namespace, backlogQuota, quotaType) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set backlog quota: %v", err)), nil + return nil, fmt.Errorf("failed to set backlog quota: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Set backlog quota for %s successfully", namespace)), nil + return textResult(fmt.Sprintf("Set backlog quota for %s successfully", namespace)), nil } // handleRemoveBacklogQuota handles removing backlog quota for a namespace -func (b *PulsarAdminNamespacePolicyToolBuilder) handleRemoveBacklogQuota(_ context.Context, client cmdutils.Client, namespace string, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNamespacePolicyToolBuilder) handleRemoveBacklogQuota(client cmdutils.Client, namespace string) (*sdk.CallToolResult, error) { // Remove backlog quota (API doesn't require quota type for removal) err := client.Namespaces().RemoveBacklogQuota(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to remove backlog quota: %v", err)), nil + return nil, fmt.Errorf("failed to remove backlog quota: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("Removed backlog quota for %s successfully", namespace)), nil + return textResult(fmt.Sprintf("Removed backlog quota for %s successfully", namespace)), nil } // parseActions parses action strings into AuthAction enums @@ -642,3 +659,81 @@ func (b *PulsarAdminNamespacePolicyToolBuilder) parseActions(actions []string) ( } return r, nil } + +func buildPulsarAdminNamespacePolicyGetInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminNamespacePolicyGetInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "namespace", pulsarAdminNamespacePolicyGetNamespaceDesc) + normalizeAdditionalProperties(schema) + return schema, nil +} + +func buildPulsarAdminNamespacePolicySetInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminNamespacePolicySetInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "namespace", pulsarAdminNamespacePolicySetNamespaceDesc) + setSchemaDescription(schema, "policy", pulsarAdminNamespacePolicySetPolicyDesc) + setSchemaDescription(schema, "role", pulsarAdminNamespacePolicySetRoleDesc) + setSchemaDescription(schema, "actions", pulsarAdminNamespacePolicySetActionsDesc) + setSchemaDescription(schema, "clusters", pulsarAdminNamespacePolicySetClustersDesc) + setSchemaDescription(schema, "roles", pulsarAdminNamespacePolicySetRolesDesc) + setSchemaDescription(schema, "ttl", pulsarAdminNamespacePolicySetTTLDesc) + setSchemaDescription(schema, "time", pulsarAdminNamespacePolicySetTimeDesc) + setSchemaDescription(schema, "size", pulsarAdminNamespacePolicySetSizeDesc) + setSchemaDescription(schema, "limit-size", pulsarAdminNamespacePolicySetLimitSizeDesc) + setSchemaDescription(schema, "limit-time", pulsarAdminNamespacePolicySetLimitTimeDesc) + setSchemaDescription(schema, 
"type", pulsarAdminNamespacePolicySetTypeDesc) + + if actionsSchema := schema.Properties["actions"]; actionsSchema != nil && actionsSchema.Items != nil { + actionsSchema.Items.Description = "action" + } + if clustersSchema := schema.Properties["clusters"]; clustersSchema != nil && clustersSchema.Items != nil { + clustersSchema.Items.Description = "cluster" + } + if rolesSchema := schema.Properties["roles"]; rolesSchema != nil && rolesSchema.Items != nil { + rolesSchema.Items.Description = "role" + } + + normalizeAdditionalProperties(schema) + return schema, nil +} + +func buildPulsarAdminNamespacePolicyRemoveInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminNamespacePolicyRemoveInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "namespace", pulsarAdminNamespacePolicyRemoveNamespaceDesc) + setSchemaDescription(schema, "policy", pulsarAdminNamespacePolicyRemovePolicyDesc) + setSchemaDescription(schema, "role", pulsarAdminNamespacePolicyRemoveRoleDesc) + setSchemaDescription(schema, "subscription", pulsarAdminNamespacePolicyRemoveSubscriptionDesc) + setSchemaDescription(schema, "type", pulsarAdminNamespacePolicyRemoveTypeDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/namespace_policy_legacy.go b/pkg/mcp/builders/pulsar/namespace_policy_legacy.go new file mode 100644 index 0000000..310995a --- /dev/null +++ b/pkg/mcp/builders/pulsar/namespace_policy_legacy.go @@ -0,0 +1,208 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminNamespacePolicyLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin namespace policies. +// /nolint:revive +type PulsarAdminNamespacePolicyLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminNamespacePolicyLegacyToolBuilder creates a new Pulsar admin namespace policy legacy tool builder instance. 
+func NewPulsarAdminNamespacePolicyLegacyToolBuilder() *PulsarAdminNamespacePolicyLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_namespace_policy", + Version: "1.0.0", + Description: "Pulsar admin namespace policy management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "namespace_policy"}, + } + + features := []string{ + "pulsar-admin-namespace-policy", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminNamespacePolicyLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin namespace policy legacy tool list. +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tools := []server.ServerTool{} + + getTool, err := b.buildNamespaceGetPoliciesTool() + if err != nil { + return nil, err + } + getHandler := b.buildNamespaceGetPoliciesHandler() + tools = append(tools, server.ServerTool{ + Tool: getTool, + Handler: getHandler, + }) + + if !config.ReadOnly { + setTool, err := b.buildNamespaceSetPolicyTool() + if err != nil { + return nil, err + } + setHandler := b.buildNamespaceSetPolicyHandler() + tools = append(tools, server.ServerTool{ + Tool: setTool, + Handler: setHandler, + }) + + removeTool, err := b.buildNamespaceRemovePolicyTool() + if err != nil { + return nil, err + } + removeHandler := b.buildNamespaceRemovePolicyHandler() + tools = append(tools, server.ServerTool{ + Tool: removeTool, + Handler: removeHandler, + }) + } + + return tools, nil +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceGetPoliciesTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicyGetInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + return mcp.Tool{ + Name: "pulsar_admin_namespace_policy_get", + Description: pulsarAdminNamespacePolicyGetToolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceSetPolicyTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicySetInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + return mcp.Tool{ + Name: "pulsar_admin_namespace_policy_set", + Description: pulsarAdminNamespacePolicySetToolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceRemovePolicyTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminNamespacePolicyRemoveInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + return mcp.Tool{ + Name: "pulsar_admin_namespace_policy_remove", + Description: pulsarAdminNamespacePolicyRemoveToolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceGetPoliciesHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := 
NewPulsarAdminNamespacePolicyToolBuilder() + sdkHandler := sdkBuilder.buildNamespaceGetPoliciesHandler() + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminNamespacePolicyGetInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceSetPolicyHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminNamespacePolicyToolBuilder() + sdkHandler := sdkBuilder.buildNamespaceSetPolicyHandler() + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminNamespacePolicySetInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} + +func (b *PulsarAdminNamespacePolicyLegacyToolBuilder) buildNamespaceRemovePolicyHandler() func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminNamespacePolicyToolBuilder() + sdkHandler := sdkBuilder.buildNamespaceRemovePolicyHandler() + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminNamespacePolicyRemoveInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/namespace_policy_test.go b/pkg/mcp/builders/pulsar/namespace_policy_test.go new file mode 100644 index 0000000..555c6a2 --- /dev/null +++ b/pkg/mcp/builders/pulsar/namespace_policy_test.go @@ -0,0 +1,166 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
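The three legacy handlers above (in namespace_policy_legacy.go) hand their go-sdk results to `legacyToolResultFromSDK`, a helper that is not shown in this hunk. A minimal sketch of the conversion it is assumed to perform, using only the `CallToolResult`/`TextContent` types both libraries expose; the real helper may cover more content types:

```go
// Sketch only (assumed behaviour of legacyToolResultFromSDK): copy the error
// flag and any text content from the go-sdk result into the legacy mcp-go
// result type.
func legacyToolResultFromSDKSketch(result *sdk.CallToolResult) *mcp.CallToolResult {
	if result == nil {
		return &mcp.CallToolResult{}
	}
	out := &mcp.CallToolResult{IsError: result.IsError}
	for _, c := range result.Content {
		if tc, ok := c.(*sdk.TextContent); ok {
			out.Content = append(out.Content, mcp.TextContent{Type: "text", Text: tc.Text})
		}
	}
	return out
}
```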
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminNamespacePolicyToolBuilder(t *testing.T) { + builder := NewPulsarAdminNamespacePolicyToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_namespace_policy", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-namespace-policy") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-namespace-policy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 3) + + names := make([]string, 0, len(tools)) + for _, tool := range tools { + names = append(names, tool.Definition().Name) + } + + assert.ElementsMatch(t, []string{ + "pulsar_admin_namespace_policy_get", + "pulsar_admin_namespace_policy_set", + "pulsar_admin_namespace_policy_remove", + }, names) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-namespace-policy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_namespace_policy_get", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-namespace-policy"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminNamespacePolicyToolSchema(t *testing.T) { + builder := NewPulsarAdminNamespacePolicyToolBuilder() + + getTool, err := builder.buildNamespaceGetPoliciesTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_namespace_policy_get", getTool.Name) + + getSchema, ok := getTool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, getSchema.Properties) + + assert.ElementsMatch(t, []string{"namespace"}, getSchema.Required) + assert.ElementsMatch(t, []string{"namespace"}, mapStringKeys(getSchema.Properties)) + assert.Equal(t, pulsarAdminNamespacePolicyGetNamespaceDesc, getSchema.Properties["namespace"].Description) + + setTool, err := builder.buildNamespaceSetPolicyTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_namespace_policy_set", setTool.Name) + + setSchema, ok := setTool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, setSchema.Properties) + + assert.ElementsMatch(t, []string{"namespace", "policy"}, setSchema.Required) + assert.ElementsMatch(t, []string{ + "namespace", + "policy", + "role", + "actions", + "clusters", + "roles", + "ttl", + "time", + "size", + "limit-size", + 
"limit-time", + "type", + }, mapStringKeys(setSchema.Properties)) + + assert.Equal(t, pulsarAdminNamespacePolicySetNamespaceDesc, setSchema.Properties["namespace"].Description) + assert.Equal(t, pulsarAdminNamespacePolicySetPolicyDesc, setSchema.Properties["policy"].Description) + assert.Equal(t, pulsarAdminNamespacePolicySetTypeDesc, setSchema.Properties["type"].Description) + + removeTool, err := builder.buildNamespaceRemovePolicyTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_namespace_policy_remove", removeTool.Name) + + removeSchema, ok := removeTool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, removeSchema.Properties) + + assert.ElementsMatch(t, []string{"namespace", "policy"}, removeSchema.Required) + assert.ElementsMatch(t, []string{ + "namespace", + "policy", + "role", + "subscription", + "type", + }, mapStringKeys(removeSchema.Properties)) + + assert.Equal(t, pulsarAdminNamespacePolicyRemoveNamespaceDesc, removeSchema.Properties["namespace"].Description) + assert.Equal(t, pulsarAdminNamespacePolicyRemovePolicyDesc, removeSchema.Properties["policy"].Description) +} diff --git a/pkg/mcp/builders/pulsar/namespace_test.go b/pkg/mcp/builders/pulsar/namespace_test.go new file mode 100644 index 0000000..88c7244 --- /dev/null +++ b/pkg/mcp/builders/pulsar/namespace_test.go @@ -0,0 +1,140 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminNamespaceToolBuilder(t *testing.T) { + builder := NewPulsarAdminNamespaceToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_namespace", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-namespaces") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-namespaces"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_namespace", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-namespaces"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_namespace", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-namespaces"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminNamespaceToolSchema(t *testing.T) { + builder := NewPulsarAdminNamespaceToolBuilder() + tool, err := builder.buildNamespaceTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_namespace", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "operation", + "tenant", + "namespace", + "bundles", + "clusters", + "subscription", + "bundle", + "force", + "unload", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminNamespaceOperationDesc, operationSchema.Description) + + tenantSchema := schema.Properties["tenant"] + require.NotNil(t, tenantSchema) + assert.Equal(t, pulsarAdminNamespaceTenantDesc, tenantSchema.Description) +} + +func TestPulsarAdminNamespaceToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminNamespaceToolBuilder() + handler := builder.buildNamespaceHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminNamespaceInput{ + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git 
a/pkg/mcp/builders/pulsar/nsisolationpolicy.go b/pkg/mcp/builders/pulsar/nsisolationpolicy.go index 74dd9aa..f49f92d 100644 --- a/pkg/mcp/builders/pulsar/nsisolationpolicy.go +++ b/pkg/mcp/builders/pulsar/nsisolationpolicy.go @@ -21,14 +21,45 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" - "github.com/streamnative/streamnative-mcp-server/pkg/common" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminNsIsolationPolicyInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Cluster string `json:"cluster"` + Name *string `json:"name,omitempty"` + Namespaces []string `json:"namespaces,omitempty"` + Primary []string `json:"primary,omitempty"` + Secondary []string `json:"secondary,omitempty"` + AutoFailoverPolicyType *string `json:"autoFailoverPolicyType,omitempty"` + AutoFailoverPolicyParams map[string]any `json:"autoFailoverPolicyParams,omitempty"` +} + +const ( + pulsarAdminNsIsolationPolicyResourceDesc = "Resource to operate on. Available resources:\n" + + "- policy: Namespace isolation policy\n" + + "- broker: Broker with namespace isolation policies\n" + + "- brokers: All brokers with namespace isolation policies" + pulsarAdminNsIsolationPolicyOperationDesc = "Operation to perform. Available operations:\n" + + "- get: Get resource details\n" + + "- list: List all instances of the resource\n" + + "- set: Create or update a resource (requires super-user permissions)\n" + + "- delete: Delete a resource (requires super-user permissions)" + pulsarAdminNsIsolationPolicyClusterDesc = "Cluster name" + pulsarAdminNsIsolationPolicyNameDesc = "Name of the policy or broker to operate on, based on resource type.\n" + + "Required for: policy.get, policy.delete, policy.set, broker.get" + pulsarAdminNsIsolationPolicyNamespacesDesc = "List of namespaces to apply the isolation policy. Required for policy.set" + pulsarAdminNsIsolationPolicyPrimaryDesc = "List of primary brokers for the namespaces. Required for policy.set" + pulsarAdminNsIsolationPolicySecondaryDesc = "List of secondary brokers for the namespaces. Optional for policy.set" + pulsarAdminNsIsolationPolicyTypeDesc = "Auto failover policy type (e.g., min_available). Optional for policy.set" + pulsarAdminNsIsolationPolicyParamsDesc = "Auto failover policy parameters as an object (e.g., {'min_limit': '1', 'usage_threshold': '100'}). 
Optional for policy.set" +) + // PulsarAdminNsIsolationPolicyToolBuilder implements the ToolBuilder interface for Pulsar admin namespace isolation policies // /nolint:revive type PulsarAdminNsIsolationPolicyToolBuilder struct { @@ -58,7 +89,7 @@ func NewPulsarAdminNsIsolationPolicyToolBuilder() *PulsarAdminNsIsolationPolicyT } // BuildTools builds the Pulsar admin namespace isolation policy tool list -func (b *PulsarAdminNsIsolationPolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,12 +100,14 @@ func (b *PulsarAdminNsIsolationPolicyToolBuilder) BuildTools(_ context.Context, return nil, err } - // Build tools - tool := b.buildNsIsolationPolicyTool() + tool, err := b.buildNsIsolationPolicyTool() + if err != nil { + return nil, err + } handler := b.buildNsIsolationPolicyHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminNsIsolationPolicyInput, any]{ Tool: tool, Handler: handler, }, @@ -82,122 +115,70 @@ func (b *PulsarAdminNsIsolationPolicyToolBuilder) BuildTools(_ context.Context, } // buildNsIsolationPolicyTool builds the Pulsar admin namespace isolation policy MCP tool definition -func (b *PulsarAdminNsIsolationPolicyToolBuilder) buildNsIsolationPolicyTool() mcp.Tool { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) buildNsIsolationPolicyTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminNsIsolationPolicyInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage namespace isolation policies in a Pulsar cluster. " + "Allows viewing, creating, updating, and deleting namespace isolation policies. " + "Some operations require super-user permissions." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- policy: Namespace isolation policy\n" + - "- broker: Broker with namespace isolation policies\n" + - "- brokers: All brokers with namespace isolation policies" - - operationDesc := "Operation to perform. Available operations:\n" + - "- get: Get resource details\n" + - "- list: List all instances of the resource\n" + - "- set: Create or update a resource (requires super-user permissions)\n" + - "- delete: Delete a resource (requires super-user permissions)" - - return mcp.NewTool("pulsar_admin_nsisolationpolicy", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("cluster", mcp.Required(), - mcp.Description("Cluster name"), - ), - mcp.WithString("name", - mcp.Description("Name of the policy or broker to operate on, based on resource type.\n"+ - "Required for: policy.get, policy.delete, policy.set, broker.get"), - ), - mcp.WithArray("namespaces", - mcp.Description("List of namespaces to apply the isolation policy. Required for policy.set"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "namespace", - }, - ), - ), - mcp.WithArray("primary", - mcp.Description("List of primary brokers for the namespaces. 
Required for policy.set"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "primary broker", - }, - ), - ), - mcp.WithArray("secondary", - mcp.Description("List of secondary brokers for the namespaces. Optional for policy.set"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "secondary broker", - }, - ), - ), - mcp.WithString("autoFailoverPolicyType", - mcp.Description("Auto failover policy type (e.g., min_available). Optional for policy.set"), - ), - mcp.WithObject("autoFailoverPolicyParams", - mcp.Description("Auto failover policy parameters as an object (e.g., {'min_limit': '1', 'usage_threshold': '100'}). Optional for policy.set"), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_nsisolationpolicy", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildNsIsolationPolicyHandler builds the Pulsar admin namespace isolation policy handler function -func (b *PulsarAdminNsIsolationPolicyToolBuilder) buildNsIsolationPolicyHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) buildNsIsolationPolicyHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminNsIsolationPolicyInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminNsIsolationPolicyInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, b.handleError("get admin client", err) } - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil + resource := strings.ToLower(input.Resource) + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'; please specify one of: policy, broker, brokers") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + operation := strings.ToLower(input.Operation) + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'; please specify one of: get, list, set, delete") } - cluster, err := request.RequireString("cluster") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get cluster name: %v", err)), nil + cluster := input.Cluster + if cluster == "" { + return nil, nil, fmt.Errorf("missing required parameter 'cluster'") } - // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) - // Validate write operations in read-only mode if readOnly && (operation == "set" || operation == "delete") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Dispatch based on resource type switch resource { case "policy": - return b.handlePolicyResource(client, operation, cluster, request) + result, err := b.handlePolicyResource(client, operation, cluster, 
input) + return result, nil, err case "broker": - return b.handleBrokerResource(client, operation, cluster, request) + result, err := b.handleBrokerResource(client, operation, cluster, input) + return result, nil, err case "brokers": - return b.handleNsIsolationBrokersResource(client, operation, cluster, request) + result, err := b.handleNsIsolationBrokersResource(client, operation, cluster) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: policy, broker, brokers", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. available resources: policy, broker, brokers", resource) } } } @@ -205,156 +186,178 @@ func (b *PulsarAdminNsIsolationPolicyToolBuilder) buildNsIsolationPolicyHandler( // Helper functions // handlePolicyResource handles operations on the "policy" resource -func (b *PulsarAdminNsIsolationPolicyToolBuilder) handlePolicyResource(client cmdutils.Client, operation, cluster string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) handlePolicyResource(client cmdutils.Client, operation, cluster string, input pulsarAdminNsIsolationPolicyInput) (*sdk.CallToolResult, error) { switch operation { case "get": - name, err := request.RequireString("name") + name, err := requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for policy.get: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'name' for policy.get: %v", err) } - // Get namespace isolation policy policyInfo, err := client.NsIsolationPolicy().GetNamespaceIsolationPolicy(cluster, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace isolation policy: %v", err)), nil + return nil, b.handleError("get namespace isolation policy", err) } - // Convert result to JSON string - policyInfoJSON, err := json.Marshal(policyInfo) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize namespace isolation policy: %v", err)), nil - } - - return mcp.NewToolResultText(string(policyInfoJSON)), nil - + return b.marshalResponse(policyInfo) case "list": - // Get namespace isolation policies policies, err := client.NsIsolationPolicy().GetNamespaceIsolationPolicies(cluster) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list namespace isolation policies: %v", err)), nil + return nil, b.handleError("list namespace isolation policies", err) } - // Convert result to JSON string - policiesJSON, err := json.Marshal(policies) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize namespace isolation policies: %v", err)), nil - } - - return mcp.NewToolResultText(string(policiesJSON)), nil - + return b.marshalResponse(policies) case "delete": - name, err := request.RequireString("name") + name, err := requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for policy.delete: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'name' for policy.delete: %v", err) } - // Delete namespace isolation policy err = client.NsIsolationPolicy().DeleteNamespaceIsolationPolicy(cluster, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete namespace isolation policy: %v", err)), nil + return nil, b.handleError("delete namespace isolation policy", err) } - return mcp.NewToolResultText(fmt.Sprintf("Delete 
namespace isolation policy %s successfully", name)), nil - + return textResult(fmt.Sprintf("Delete namespace isolation policy %s successfully", name)), nil case "set": - name, err := request.RequireString("name") + name, err := requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for policy.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'name' for policy.set: %v", err) } - namespaces, err := request.RequireStringSlice("namespaces") + namespaces, err := requireStringSlice(input.Namespaces, "namespaces") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespaces' for policy.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespaces' for policy.set: %v", err) } - primary, err := request.RequireStringSlice("primary") + primary, err := requireStringSlice(input.Primary, "primary") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'primary' for policy.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'primary' for policy.set: %v", err) } - secondary := request.GetStringSlice("secondary", []string{}) - autoFailoverPolicyType := request.GetString("autoFailoverPolicyType", "") - - // Parse autoFailoverPolicyParams as a map - autoFailoverPolicyParamsRaw, ok := common.OptionalParamObject(request.GetArguments(), "autoFailoverPolicyParams") - if !ok { - return mcp.NewToolResultError("Failed to extract autoFailoverPolicyParams"), nil + secondary := input.Secondary + autoFailoverPolicyType := "" + if input.AutoFailoverPolicyType != nil { + autoFailoverPolicyType = *input.AutoFailoverPolicyType } - autoFailoverPolicyParams := make(map[string]string) - for k, v := range autoFailoverPolicyParamsRaw { - if strVal, ok := v.(string); ok { - autoFailoverPolicyParams[k] = strVal - } + autoFailoverPolicyParams, err := b.extractAutoFailoverPolicyParams(input.AutoFailoverPolicyParams) + if err != nil { + return nil, err } - // Create namespace isolation policy data nsIsolationData, err := utils.CreateNamespaceIsolationData(namespaces, primary, secondary, autoFailoverPolicyType, autoFailoverPolicyParams) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create namespace isolation data: %v", err)), nil + return nil, b.handleError("create namespace isolation data", err) } - // Create/update namespace isolation policy err = client.NsIsolationPolicy().CreateNamespaceIsolationPolicy(cluster, name, *nsIsolationData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create/update namespace isolation policy: %v", err)), nil + return nil, b.handleError("create/update namespace isolation policy", err) } - return mcp.NewToolResultText(fmt.Sprintf("Create/Update namespace isolation policy %s successfully", name)), nil - + return textResult(fmt.Sprintf("Create/Update namespace isolation policy %s successfully", name)), nil default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'policy': %s. Available operations: get, list, delete, set", operation)), nil + return nil, fmt.Errorf("invalid operation for resource 'policy': %s. 
available operations: get, list, delete, set", operation) } } // handleBrokerResource handles operations on the "broker" resource -func (b *PulsarAdminNsIsolationPolicyToolBuilder) handleBrokerResource(client cmdutils.Client, operation, cluster string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) handleBrokerResource(client cmdutils.Client, operation, cluster string, input pulsarAdminNsIsolationPolicyInput) (*sdk.CallToolResult, error) { switch operation { case "get": - name, err := request.RequireString("name") + name, err := requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for broker.get: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'name' for broker.get: %v", err) } - // Get broker with policies brokerInfo, err := client.NsIsolationPolicy().GetBrokerWithNamespaceIsolationPolicy(cluster, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get broker with namespace isolation policy: %v", err)), nil + return nil, b.handleError("get broker with namespace isolation policy", err) } - // Convert result to JSON string - brokerInfoJSON, err := json.Marshal(brokerInfo) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize broker information: %v", err)), nil - } - - return mcp.NewToolResultText(string(brokerInfoJSON)), nil - + return b.marshalResponse(brokerInfo) default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'broker': %s. Available operations: get", operation)), nil + return nil, fmt.Errorf("invalid operation for resource 'broker': %s. available operations: get", operation) } } // handleNsIsolationBrokersResource handles operations on the "brokers" resource for namespace isolation policies -func (b *PulsarAdminNsIsolationPolicyToolBuilder) handleNsIsolationBrokersResource(client cmdutils.Client, operation, cluster string, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminNsIsolationPolicyToolBuilder) handleNsIsolationBrokersResource(client cmdutils.Client, operation, cluster string) (*sdk.CallToolResult, error) { switch operation { case "list": - // Get all brokers with policies brokersInfo, err := client.NsIsolationPolicy().GetBrokersWithNamespaceIsolationPolicy(cluster) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get brokers with namespace isolation policy: %v", err)), nil + return nil, b.handleError("get brokers with namespace isolation policy", err) } - // Convert result to JSON string - brokersInfoJSON, err := json.Marshal(brokersInfo) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize brokers information: %v", err)), nil + return b.marshalResponse(brokersInfo) + default: + return nil, fmt.Errorf("invalid operation for resource 'brokers': %s. 
available operations: list", operation) + } +} + +func (b *PulsarAdminNsIsolationPolicyToolBuilder) extractAutoFailoverPolicyParams(params map[string]any) (map[string]string, error) { + if params == nil { + return nil, fmt.Errorf("failed to extract autoFailoverPolicyParams") + } + + converted := make(map[string]string) + for key, value := range params { + if strValue, ok := value.(string); ok { + converted[key] = strValue } + } + return converted, nil +} - return mcp.NewToolResultText(string(brokersInfoJSON)), nil +// Utility functions - default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'brokers': %s. Available operations: list", operation)), nil +func (b *PulsarAdminNsIsolationPolicyToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) +} + +func (b *PulsarAdminNsIsolationPolicyToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return nil, b.handleError("marshal response", err) } + return textResult(string(jsonBytes)), nil +} + +func buildPulsarAdminNsIsolationPolicyInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminNsIsolationPolicyInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminNsIsolationPolicyResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminNsIsolationPolicyOperationDesc) + setSchemaDescription(schema, "cluster", pulsarAdminNsIsolationPolicyClusterDesc) + setSchemaDescription(schema, "name", pulsarAdminNsIsolationPolicyNameDesc) + setSchemaDescription(schema, "namespaces", pulsarAdminNsIsolationPolicyNamespacesDesc) + setSchemaDescription(schema, "primary", pulsarAdminNsIsolationPolicyPrimaryDesc) + setSchemaDescription(schema, "secondary", pulsarAdminNsIsolationPolicySecondaryDesc) + setSchemaDescription(schema, "autoFailoverPolicyType", pulsarAdminNsIsolationPolicyTypeDesc) + setSchemaDescription(schema, "autoFailoverPolicyParams", pulsarAdminNsIsolationPolicyParamsDesc) + + if namespacesSchema := schema.Properties["namespaces"]; namespacesSchema != nil && namespacesSchema.Items != nil { + namespacesSchema.Items.Description = "namespace" + } + if primarySchema := schema.Properties["primary"]; primarySchema != nil && primarySchema.Items != nil { + primarySchema.Items.Description = "primary broker" + } + if secondarySchema := schema.Properties["secondary"]; secondarySchema != nil && secondarySchema.Items != nil { + secondarySchema.Items.Description = "secondary broker" + } + + normalizeAdditionalProperties(schema) + return schema, nil } diff --git a/pkg/mcp/builders/pulsar/nsisolationpolicy_legacy.go b/pkg/mcp/builders/pulsar/nsisolationpolicy_legacy.go new file mode 100644 index 0000000..5d6f97c --- /dev/null +++ b/pkg/mcp/builders/pulsar/nsisolationpolicy_legacy.go @@ -0,0 +1,117 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminNsIsolationPolicyLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin namespace isolation policy tools. +// /nolint:revive +type PulsarAdminNsIsolationPolicyLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminNsIsolationPolicyLegacyToolBuilder creates a new Pulsar admin namespace isolation policy legacy tool builder instance. +func NewPulsarAdminNsIsolationPolicyLegacyToolBuilder() *PulsarAdminNsIsolationPolicyLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_nsisolationpolicy", + Version: "1.0.0", + Description: "Pulsar admin namespace isolation policy management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "nsisolationpolicy"}, + } + + features := []string{ + "pulsar-admin-nsisolationpolicy", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminNsIsolationPolicyLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin namespace isolation policy legacy tool list. +func (b *PulsarAdminNsIsolationPolicyLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildNsIsolationPolicyTool() + if err != nil { + return nil, err + } + handler := b.buildNsIsolationPolicyHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminNsIsolationPolicyLegacyToolBuilder) buildNsIsolationPolicyTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminNsIsolationPolicyInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Manage namespace isolation policies in a Pulsar cluster. " + + "Allows viewing, creating, updating, and deleting namespace isolation policies. " + + "Some operations require super-user permissions." 
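+ // Note: this reuses the schema built for the go-sdk tool; it is marshalled
+ // above to raw JSON because mcp-go consumes it through RawInputSchema,
+ // whereas the go-sdk tool embeds the *jsonschema.Schema directly.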
+ + return mcp.Tool{ + Name: "pulsar_admin_nsisolationpolicy", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminNsIsolationPolicyLegacyToolBuilder) buildNsIsolationPolicyHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminNsIsolationPolicyToolBuilder() + sdkHandler := sdkBuilder.buildNsIsolationPolicyHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminNsIsolationPolicyInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/nsisolationpolicy_test.go b/pkg/mcp/builders/pulsar/nsisolationpolicy_test.go new file mode 100644 index 0000000..656b02b --- /dev/null +++ b/pkg/mcp/builders/pulsar/nsisolationpolicy_test.go @@ -0,0 +1,160 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminNsIsolationPolicyToolBuilder(t *testing.T) { + builder := NewPulsarAdminNsIsolationPolicyToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_nsisolationpolicy", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-nsisolationpolicy") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-nsisolationpolicy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_nsisolationpolicy", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-nsisolationpolicy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-nsisolationpolicy"}, + } + + err 
:= builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminNsIsolationPolicyToolSchema(t *testing.T) { + builder := NewPulsarAdminNsIsolationPolicyToolBuilder() + tool, err := builder.buildNsIsolationPolicyTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_nsisolationpolicy", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation", "cluster"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "cluster", + "name", + "namespaces", + "primary", + "secondary", + "autoFailoverPolicyType", + "autoFailoverPolicyParams", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyOperationDesc, operationSchema.Description) + + clusterSchema := schema.Properties["cluster"] + require.NotNil(t, clusterSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyClusterDesc, clusterSchema.Description) + + nameSchema := schema.Properties["name"] + require.NotNil(t, nameSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyNameDesc, nameSchema.Description) + + namespacesSchema := schema.Properties["namespaces"] + require.NotNil(t, namespacesSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyNamespacesDesc, namespacesSchema.Description) + require.NotNil(t, namespacesSchema.Items) + assert.Equal(t, "namespace", namespacesSchema.Items.Description) + + primarySchema := schema.Properties["primary"] + require.NotNil(t, primarySchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyPrimaryDesc, primarySchema.Description) + require.NotNil(t, primarySchema.Items) + assert.Equal(t, "primary broker", primarySchema.Items.Description) + + secondarySchema := schema.Properties["secondary"] + require.NotNil(t, secondarySchema) + assert.Equal(t, pulsarAdminNsIsolationPolicySecondaryDesc, secondarySchema.Description) + require.NotNil(t, secondarySchema.Items) + assert.Equal(t, "secondary broker", secondarySchema.Items.Description) + + autoFailoverTypeSchema := schema.Properties["autoFailoverPolicyType"] + require.NotNil(t, autoFailoverTypeSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyTypeDesc, autoFailoverTypeSchema.Description) + + autoFailoverParamsSchema := schema.Properties["autoFailoverPolicyParams"] + require.NotNil(t, autoFailoverParamsSchema) + assert.Equal(t, pulsarAdminNsIsolationPolicyParamsDesc, autoFailoverParamsSchema.Description) +} diff --git a/pkg/mcp/builders/pulsar/packages.go b/pkg/mcp/builders/pulsar/packages.go index 7d4d0f0..e0f6427 100644 --- a/pkg/mcp/builders/pulsar/packages.go +++ b/pkg/mcp/builders/pulsar/packages.go @@ -20,13 +20,46 @@ import ( "fmt" "strings" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" 
"github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminPackagesInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + PackageName *string `json:"packageName,omitempty"` + Namespace *string `json:"namespace,omitempty"` + PackageType *string `json:"type,omitempty"` + Description *string `json:"description,omitempty"` + Contact *string `json:"contact,omitempty"` + Path *string `json:"path,omitempty"` + Properties map[string]any `json:"properties,omitempty"` +} + +const ( + pulsarAdminPackagesResourceDesc = "Resource to operate on. Available resources:\n" + + "- package: A specific package\n" + + "- packages: All packages of a specific type" + pulsarAdminPackagesOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all packages of a specific type or versions of a package\n" + + "- get: Get metadata of a package\n" + + "- update: Update metadata of a package (requires super-user permissions)\n" + + "- delete: Delete a package (requires super-user permissions)\n" + + "- download: Download a package (requires super-user permissions)\n" + + "- upload: Upload a package (requires super-user permissions)" + pulsarAdminPackagesPackageNameDesc = "Name of the package to operate on. " + + "Required for operations on a specific package: get, update, delete, download, upload" + pulsarAdminPackagesNamespaceDesc = "The namespace name. Required for listing packages of a specific type" + pulsarAdminPackagesTypeDesc = "Package type (function, source, sink). Required for listing packages of a specific type" + pulsarAdminPackagesDescription = "Description of the package. Required for update and upload operations" + pulsarAdminPackagesContactDesc = "Contact information for the package. Optional for update and upload operations" + pulsarAdminPackagesPathDesc = "Path to download a package to or upload a package from. Required for download and upload operations" + pulsarAdminPackagesProperties = "Additional properties for the package as key-value pairs. 
Optional for update and upload operations" +) + // PulsarAdminPackagesToolBuilder implements the ToolBuilder interface for Pulsar admin packages // /nolint:revive type PulsarAdminPackagesToolBuilder struct { @@ -56,7 +89,7 @@ func NewPulsarAdminPackagesToolBuilder() *PulsarAdminPackagesToolBuilder { } // BuildTools builds the Pulsar admin packages tool list -func (b *PulsarAdminPackagesToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminPackagesToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -68,11 +101,14 @@ func (b *PulsarAdminPackagesToolBuilder) BuildTools(_ context.Context, config bu } // Build tools - tool := b.buildPackagesTool() + tool, err := b.buildPackagesTool() + if err != nil { + return nil, err + } handler := b.buildPackagesHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminPackagesInput, any]{ Tool: tool, Handler: handler, }, @@ -80,109 +116,71 @@ func (b *PulsarAdminPackagesToolBuilder) BuildTools(_ context.Context, config bu } // buildPackagesTool builds the Pulsar admin packages MCP tool definition -func (b *PulsarAdminPackagesToolBuilder) buildPackagesTool() mcp.Tool { +func (b *PulsarAdminPackagesToolBuilder) buildPackagesTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminPackagesInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage packages in Apache Pulsar. Support package scheme: `function://`, `source://`, `sink://`" + "Allows listing, viewing, updating, downloading and uploading packages. " + "Some operations require super-user permissions." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- package: A specific package\n" + - "- packages: All packages of a specific type" - - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all packages of a specific type or versions of a package\n" + - "- get: Get metadata of a package\n" + - "- update: Update metadata of a package (requires super-user permissions)\n" + - "- delete: Delete a package (requires super-user permissions)\n" + - "- download: Download a package (requires super-user permissions)\n" + - "- upload: Upload a package (requires super-user permissions)" - - return mcp.NewTool("pulsar_admin_package", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("packageName", - mcp.Description("Name of the package to operate on. "+ - "Required for operations on a specific package: get, update, delete, download, upload"), - ), - mcp.WithString("namespace", - mcp.Description("The namespace name. Required for listing packages of a specific type"), - ), - mcp.WithString("type", - mcp.Description("Package type (function, source, sink). Required for listing packages of a specific type"), - ), - mcp.WithString("description", - mcp.Description("Description of the package. Required for update and upload operations"), - ), - mcp.WithString("contact", - mcp.Description("Contact information for the package. 
Optional for update and upload operations"), - ), - mcp.WithString("path", - mcp.Description("Path to download a package to or upload a package from. Required for download and upload operations"), - ), - mcp.WithObject("properties", - mcp.Description("Additional properties for the package as key-value pairs. Optional for update and upload operations"), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_package", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildPackagesHandler builds the Pulsar admin packages handler function -func (b *PulsarAdminPackagesToolBuilder) buildPackagesHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil +func (b *PulsarAdminPackagesToolBuilder) buildPackagesHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminPackagesInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminPackagesInput) (*sdk.CallToolResult, any, error) { + resource := strings.ToLower(strings.TrimSpace(input.Resource)) + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'; please specify one of: package, packages") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + operation := strings.ToLower(strings.TrimSpace(input.Operation)) + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'; please specify one of: list, get, update, delete, download, upload") } - // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) - // Validate write operations in read-only mode if readOnly && (operation == "update" || operation == "delete" || operation == "download" || operation == "upload") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminV3Client() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get Pulsar client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get Pulsar client: %v", err) } // Dispatch based on resource type switch resource { case "package": - return b.handlePackageResource(client, operation, request) + result, err := b.handlePackageResource(client, operation, input) + return result, nil, err case "packages": - return b.handlePackagesResource(client, operation, request) + result, err := b.handlePackagesResource(client, operation, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Available resources: package, packages", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. 
Available resources: package, packages", resource) } } } -// Helper functions - // handlePackageResource handles operations on a specific package -func (b *PulsarAdminPackagesToolBuilder) handlePackageResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - packageName, err := request.RequireString("packageName") +func (b *PulsarAdminPackagesToolBuilder) handlePackageResource(client cmdutils.Client, operation string, input pulsarAdminPackagesInput) (*sdk.CallToolResult, error) { + packageName, err := requireString(input.PackageName, "packageName") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'packageName' for package operations: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'packageName' for package operations: %v", err) } switch operation { @@ -190,152 +188,183 @@ func (b *PulsarAdminPackagesToolBuilder) handlePackageResource(client cmdutils.C // Get package versions packageVersions, err := client.Packages().ListVersions(packageName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list package versions: %v", err)), nil + return nil, fmt.Errorf("failed to list package versions: %v", err) } // Convert result to JSON string packageVersionsJSON, err := json.Marshal(packageVersions) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize package versions: %v", err)), nil + return nil, fmt.Errorf("failed to serialize package versions: %v", err) } - return mcp.NewToolResultText(string(packageVersionsJSON)), nil + return textResult(string(packageVersionsJSON)), nil case "get": // Get package metadata metadata, err := client.Packages().GetMetadata(packageName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get package metadata: %v", err)), nil + return nil, fmt.Errorf("failed to get package metadata: %v", err) } // Convert result to JSON string metadataJSON, err := json.Marshal(metadata) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize package metadata: %v", err)), nil + return nil, fmt.Errorf("failed to serialize package metadata: %v", err) } - return mcp.NewToolResultText(string(metadataJSON)), nil + return textResult(string(metadataJSON)), nil case "update": - description, err := request.RequireString("description") + description, err := requireString(input.Description, "description") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'description' for package.update: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'description' for package.update: %v", err) } - contact := request.GetString("contact", "") - properties := b.extractProperties(request.GetArguments()) + contact := "" + if input.Contact != nil { + contact = *input.Contact + } + properties := b.extractProperties(input.Properties) // Update package metadata err = client.Packages().UpdateMetadata(packageName, description, contact, properties) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update package metadata: %v", err)), nil + return nil, fmt.Errorf("failed to update package metadata: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("The metadata of the package '%s' updated successfully", packageName)), nil + return textResult(fmt.Sprintf("The metadata of the package '%s' updated successfully", packageName)), nil case "delete": // Delete package err = client.Packages().Delete(packageName) if err != nil { - return 
mcp.NewToolResultError(fmt.Sprintf("Failed to delete package: %v", err)), nil + return nil, fmt.Errorf("failed to delete package: %v", err) } - return mcp.NewToolResultText(fmt.Sprintf("The package '%s' deleted successfully", packageName)), nil + return textResult(fmt.Sprintf("The package '%s' deleted successfully", packageName)), nil case "download": - path, err := request.RequireString("path") + path, err := requireString(input.Path, "path") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'path' for package.download: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'path' for package.download: %v", err) } // Download package err = client.Packages().Download(packageName, path) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to download package: %v", err)), nil + return nil, fmt.Errorf("failed to download package: %v", err) } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("The package '%s' downloaded to path '%s' successfully", packageName, path), ), nil case "upload": - path, err := request.RequireString("path") + path, err := requireString(input.Path, "path") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'path' for package.upload: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'path' for package.upload: %v", err) } - description, err := request.RequireString("description") + description, err := requireString(input.Description, "description") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'description' for package.upload: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'description' for package.upload: %v", err) } - contact := request.GetString("contact", "") - properties := b.extractProperties(request.GetArguments()) + contact := "" + if input.Contact != nil { + contact = *input.Contact + } + properties := b.extractProperties(input.Properties) // Upload package err = client.Packages().Upload(packageName, path, description, contact, properties) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to upload package: %v", err)), nil + return nil, fmt.Errorf("failed to upload package: %v", err) } - return mcp.NewToolResultText( + return textResult( fmt.Sprintf("The package '%s' uploaded from path '%s' successfully", packageName, path), ), nil default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'package': %s. Available operations: list, get, update, delete, download, upload", operation)), nil + return nil, fmt.Errorf("invalid operation for resource 'package': %s. 
Available operations: list, get, update, delete, download, upload", operation) } } // handlePackagesResource handles operations on multiple packages -func (b *PulsarAdminPackagesToolBuilder) handlePackagesResource(client cmdutils.Client, operation string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminPackagesToolBuilder) handlePackagesResource(client cmdutils.Client, operation string, input pulsarAdminPackagesInput) (*sdk.CallToolResult, error) { switch operation { case "list": - packageType, err := request.RequireString("type") + packageType, err := requireString(input.PackageType, "type") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'type' for packages.list: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'type' for packages.list: %v", err) } - namespace, err := request.RequireString("namespace") + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace' for packages.list: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for packages.list: %v", err) } // Get package list packages, err := client.Packages().List(packageType, namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list packages: %v", err)), nil + return nil, fmt.Errorf("failed to list packages: %v", err) } // Convert result to JSON string packagesJSON, err := json.Marshal(packages) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize package list: %v", err)), nil + return nil, fmt.Errorf("failed to serialize package list: %v", err) } - return mcp.NewToolResultText(string(packagesJSON)), nil + return textResult(string(packagesJSON)), nil default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation for resource 'packages': %s. Available operations: list", operation)), nil + return nil, fmt.Errorf("invalid operation for resource 'packages': %s. 
Available operations: list", operation) } } -// extractProperties extracts properties from request arguments -func (b *PulsarAdminPackagesToolBuilder) extractProperties(args map[string]interface{}) map[string]string { - var properties map[string]string - propsObj, ok := args["properties"] - if ok && propsObj != nil { - // Convert to map[string]string - if propsMap, isMap := propsObj.(map[string]interface{}); isMap { - properties = make(map[string]string) - for k, v := range propsMap { - if strVal, ok := v.(string); ok { - properties[k] = strVal - } else { - properties[k] = fmt.Sprintf("%v", v) - } - } +// extractProperties extracts properties from input arguments +func (b *PulsarAdminPackagesToolBuilder) extractProperties(props map[string]any) map[string]string { + if len(props) == 0 { + return nil + } + + properties := make(map[string]string, len(props)) + for key, value := range props { + switch typed := value.(type) { + case string: + properties[key] = typed + default: + properties[key] = fmt.Sprintf("%v", value) } } + return properties } + +func buildPulsarAdminPackagesInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminPackagesInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminPackagesResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminPackagesOperationDesc) + setSchemaDescription(schema, "packageName", pulsarAdminPackagesPackageNameDesc) + setSchemaDescription(schema, "namespace", pulsarAdminPackagesNamespaceDesc) + setSchemaDescription(schema, "type", pulsarAdminPackagesTypeDesc) + setSchemaDescription(schema, "description", pulsarAdminPackagesDescription) + setSchemaDescription(schema, "contact", pulsarAdminPackagesContactDesc) + setSchemaDescription(schema, "path", pulsarAdminPackagesPathDesc) + setSchemaDescription(schema, "properties", pulsarAdminPackagesProperties) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/packages_legacy.go b/pkg/mcp/builders/pulsar/packages_legacy.go new file mode 100644 index 0000000..6b1c246 --- /dev/null +++ b/pkg/mcp/builders/pulsar/packages_legacy.go @@ -0,0 +1,117 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminPackagesLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin packages. 
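+// It keeps the pulsar_admin_package tool available on the legacy mcp-go server by
+// adapting the typed go-sdk handler from packages.go: arguments are bound with
+// request.BindArguments and results are converted with legacyToolResultFromSDK.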
+// /nolint:revive
+type PulsarAdminPackagesLegacyToolBuilder struct {
+	*builders.BaseToolBuilder
+}
+
+// NewPulsarAdminPackagesLegacyToolBuilder creates a new Pulsar admin packages legacy tool builder instance.
+func NewPulsarAdminPackagesLegacyToolBuilder() *PulsarAdminPackagesLegacyToolBuilder {
+	metadata := builders.ToolMetadata{
+		Name:        "pulsar_admin_packages",
+		Version:     "1.0.0",
+		Description: "Pulsar admin packages management tools",
+		Category:    "pulsar_admin",
+		Tags:        []string{"pulsar", "admin", "packages"},
+	}
+
+	features := []string{
+		"pulsar-admin-packages",
+		"all",
+		"all-pulsar",
+		"pulsar-admin",
+	}
+
+	return &PulsarAdminPackagesLegacyToolBuilder{
+		BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features),
+	}
+}
+
+// BuildTools builds the Pulsar admin packages legacy tool list.
+func (b *PulsarAdminPackagesLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) {
+	if !b.HasAnyRequiredFeature(config.Features) {
+		return nil, nil
+	}
+
+	if err := b.Validate(config); err != nil {
+		return nil, err
+	}
+
+	tool, err := b.buildPackagesTool()
+	if err != nil {
+		return nil, err
+	}
+	handler := b.buildPackagesHandler(config.ReadOnly)
+
+	return []server.ServerTool{
+		{
+			Tool:    tool,
+			Handler: handler,
+		},
+	}, nil
+}
+
+func (b *PulsarAdminPackagesLegacyToolBuilder) buildPackagesTool() (mcp.Tool, error) {
+	inputSchema, err := buildPulsarAdminPackagesInputSchema()
+	if err != nil {
+		return mcp.Tool{}, err
+	}
+
+	schemaJSON, err := json.Marshal(inputSchema)
+	if err != nil {
+		return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err)
+	}
+
+	toolDesc := "Manage packages in Apache Pulsar. Support package scheme: `function://`, `source://`, `sink://`. " +
+		"Allows listing, viewing, updating, downloading and uploading packages. " +
+		"Some operations require super-user permissions."
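+	// The JSON schema marshalled above comes from buildPulsarAdminPackagesInputSchema,
+	// the same helper used by the go-sdk builder in packages.go; passing it as
+	// RawInputSchema below keeps both registrations of pulsar_admin_package on the
+	// same input contract.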
+ + return mcp.Tool{ + Name: "pulsar_admin_package", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminPackagesLegacyToolBuilder) buildPackagesHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminPackagesToolBuilder() + sdkHandler := sdkBuilder.buildPackagesHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminPackagesInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/produce.go b/pkg/mcp/builders/pulsar/produce.go index 402b24c..c877191 100644 --- a/pkg/mcp/builders/pulsar/produce.go +++ b/pkg/mcp/builders/pulsar/produce.go @@ -22,12 +22,55 @@ import ( "time" "github.com/apache/pulsar-client-go/pulsar" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarClientProduceInput struct { + Topic string `json:"topic"` + Messages []string `json:"messages,omitempty"` + NumProduce *float64 `json:"num-produce,omitempty"` + Rate *float64 `json:"rate,omitempty"` + DisableBatching bool `json:"disable-batching,omitempty"` + Chunking bool `json:"chunking,omitempty"` + Separator *string `json:"separator,omitempty"` + Properties []string `json:"properties,omitempty"` + Key *string `json:"key,omitempty"` +} + +const ( + pulsarClientProduceTopicDesc = "The fully qualified topic name to produce to (format: [persistent|non-persistent]://tenant/namespace/topic). " + + "For partitioned topics, messages will be distributed across partitions based on the partitioning scheme. " + + "If a message key is provided, messages with the same key will go to the same partition." + pulsarClientProduceMessagesDesc = "List of message content to send. Each array element represents one message payload. " + + "IMPORTANT: Use this parameter to provide message content. Multiple messages can be sent in a single operation. " + + "Each message will be sent according to the specified num-produce parameter." + pulsarClientProduceMessageItemDesc = "Individual message content to be sent to the topic" + pulsarClientProduceNumProduceDesc = "Number of times to send the entire message set. " + + "If you have 3 messages and set num-produce to 2, a total of 6 messages will be sent. (default: 1)" + pulsarClientProduceRateDesc = "Rate limiting in messages per second. Controls the maximum speed of message production. " + + "Set to 0 to produce messages as fast as possible without rate limiting. " + + "Higher rates may be limited by broker capacity and network bandwidth. (default: 0)" + pulsarClientProduceDisableBatchingDesc = "Disable message batching. When false (default), Pulsar batches multiple messages " + + "to improve throughput and reduce network overhead. Set to true to send each message individually. " + + "Disabling batching may reduce throughput but provides lower latency. 
(default: false)" + pulsarClientProduceChunkingDesc = "Enable message chunking for large messages. When true, messages larger than " + + "the maximum allowed size will be automatically split into smaller chunks and reassembled on consumption. " + + "This allows sending messages that exceed broker size limits. (default: false)" + pulsarClientProduceSeparatorDesc = "Character or string to split message content on. When specified, each message " + + "in the messages array will be split by this separator to create additional individual messages. " + + "Useful for sending multiple messages from a single delimited string. (default: none)" + pulsarClientProducePropertiesDesc = "Message properties in key=value format. Properties are metadata key-value pairs " + + "attached to messages for filtering, routing, or application-specific processing. " + + "Example: ['priority=high', 'source=api', 'version=1.0']. Multiple properties can be specified." + pulsarClientProducePropertyItemDesc = "Property in key=value format" + pulsarClientProduceKeyDesc = "Partitioning key for message routing. Messages with the same key will be sent " + + "to the same partition in partitioned topics, ensuring ordering for related messages. " + + "The key is also available to consumers for processing logic. Leave empty for round-robin partitioning." +) + // PulsarClientProduceToolBuilder implements the ToolBuilder interface for Pulsar Client Producer tools // It provides functionality to build Pulsar message production tools // /nolint:revive @@ -58,7 +101,7 @@ func NewPulsarClientProduceToolBuilder() *PulsarClientProduceToolBuilder { // BuildTools builds the Pulsar Client Producer tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarClientProduceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarClientProduceToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -70,11 +113,14 @@ func (b *PulsarClientProduceToolBuilder) BuildTools(_ context.Context, config bu } // Build tools - tool := b.buildProduceTool() + tool, err := b.buildProduceTool() + if err != nil { + return nil, err + } handler := b.buildProduceHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarClientProduceInput, any]{ Tool: tool, Handler: handler, }, @@ -83,7 +129,12 @@ func (b *PulsarClientProduceToolBuilder) BuildTools(_ context.Context, config bu // buildProduceTool builds the Pulsar Client Producer MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarClientProduceToolBuilder) buildProduceTool() mcp.Tool { +func (b *PulsarClientProduceToolBuilder) buildProduceTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarClientProduceInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Produce messages to a Pulsar topic. " + "This tool allows you to send messages to a specified Pulsar topic with various options " + "to control message format, batching, rate limiting, and properties. " + @@ -93,103 +144,61 @@ func (b *PulsarClientProduceToolBuilder) buildProduceTool() mcp.Tool { "The tool supports message partitioning through keys and provides detailed feedback on sent messages. " + "Do not use this tool for Kafka protocol operations. 
Use 'kafka_client_produce' instead." - return mcp.NewTool("pulsar_client_produce", - mcp.WithDescription(toolDesc), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The fully qualified topic name to produce to (format: [persistent|non-persistent]://tenant/namespace/topic). "+ - "For partitioned topics, messages will be distributed across partitions based on the partitioning scheme. "+ - "If a message key is provided, messages with the same key will go to the same partition."), - ), - mcp.WithArray("messages", - mcp.Description("List of message content to send. Each array element represents one message payload. "+ - "IMPORTANT: Use this parameter to provide message content. Multiple messages can be sent in a single operation. "+ - "Each message will be sent according to the specified num-produce parameter."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "Individual message content to be sent to the topic", - }, - ), - ), - mcp.WithNumber("num-produce", - mcp.Description("Number of times to send the entire message set. "+ - "If you have 3 messages and set num-produce to 2, a total of 6 messages will be sent. (default: 1)"), - ), - mcp.WithNumber("rate", - mcp.Description("Rate limiting in messages per second. Controls the maximum speed of message production. "+ - "Set to 0 to produce messages as fast as possible without rate limiting. "+ - "Higher rates may be limited by broker capacity and network bandwidth. (default: 0)"), - ), - mcp.WithBoolean("disable-batching", - mcp.Description("Disable message batching. When false (default), Pulsar batches multiple messages "+ - "to improve throughput and reduce network overhead. Set to true to send each message individually. "+ - "Disabling batching may reduce throughput but provides lower latency. (default: false)"), - ), - mcp.WithBoolean("chunking", - mcp.Description("Enable message chunking for large messages. When true, messages larger than "+ - "the maximum allowed size will be automatically split into smaller chunks and reassembled on consumption. "+ - "This allows sending messages that exceed broker size limits. (default: false)"), - ), - mcp.WithString("separator", - mcp.Description("Character or string to split message content on. When specified, each message "+ - "in the messages array will be split by this separator to create additional individual messages. "+ - "Useful for sending multiple messages from a single delimited string. (default: none)"), - ), - mcp.WithArray("properties", - mcp.Description("Message properties in key=value format. Properties are metadata key-value pairs "+ - "attached to messages for filtering, routing, or application-specific processing. "+ - "Example: ['priority=high', 'source=api', 'version=1.0']. Multiple properties can be specified."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "Property in key=value format", - }, - ), - ), - mcp.WithString("key", - mcp.Description("Partitioning key for message routing. Messages with the same key will be sent "+ - "to the same partition in partitioned topics, ensuring ordering for related messages. "+ - "The key is also available to consumers for processing logic. 
Leave empty for round-robin partitioning."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_client_produce", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildProduceHandler builds the Pulsar Client Producer handler function // Migrated from the original handler logic -func (b *PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) builders.ToolHandlerFunc[pulsarClientProduceInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarClientProduceInput) (*sdk.CallToolResult, any, error) { // Check read-only mode - producing is a write operation if readOnly { - return mcp.NewToolResultError("Message production is not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("message production is not allowed in read-only mode") } // Extract required parameters with validation - topic, err := request.RequireString("topic") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get topic: %v", err)), nil + topic := strings.TrimSpace(input.Topic) + if topic == "" { + return nil, nil, fmt.Errorf("failed to get topic: topic is required") } // Set default values and extract optional parameters - messages := request.GetStringSlice("messages", []string{}) + messages := input.Messages if len(messages) == 0 { - return mcp.NewToolResultError("Please supply message content with 'messages' parameter"), nil + return nil, nil, fmt.Errorf("please supply message content with 'messages' parameter") } - numProduce := int(request.GetFloat("num-produce", 1)) + numProduce := 1 + if input.NumProduce != nil { + numProduce = int(*input.NumProduce) + } if numProduce < 1 { - return mcp.NewToolResultError("num-produce must be at least 1"), nil + return nil, nil, fmt.Errorf("num-produce must be at least 1") } - rate := request.GetFloat("rate", 0) + rate := 0.0 + if input.Rate != nil { + rate = *input.Rate + } if rate < 0 { - return mcp.NewToolResultError("rate must be non-negative"), nil + return nil, nil, fmt.Errorf("rate must be non-negative") } - disableBatching := request.GetBool("disable-batching", false) - chunkingAllowed := request.GetBool("chunking", false) - separator := request.GetString("separator", "") - properties := request.GetStringSlice("properties", []string{}) - key := request.GetString("key", "") + disableBatching := input.DisableBatching + chunkingAllowed := input.Chunking + separator := "" + if input.Separator != nil { + separator = *input.Separator + } + properties := input.Properties + key := "" + if input.Key != nil { + key = *input.Key + } // Split messages by separator if needed if separator != "" && len(messages) > 0 { @@ -208,13 +217,13 @@ func (b *PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) func // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Setup client client, err := session.GetPulsarClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create Pulsar client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to create Pulsar client: %v", err) } defer client.Close() @@ -233,20 +242,20 @@ func (b 
*PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) func producer, err := client.CreateProducer(producerOpts) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create producer: %v", err)), nil + return nil, nil, fmt.Errorf("failed to create producer: %v", err) } defer producer.Close() // Generate message bodies from messages messagePayloads, err := b.generateMessagePayloads(messages) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to generate message payloads: %v", err)), nil + return nil, nil, fmt.Errorf("failed to generate message payloads: %v", err) } // Parse properties propMap, err := b.parseProperties(properties) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to parse properties: %v", err)), nil + return nil, nil, fmt.Errorf("failed to parse properties: %v", err) } // Setup rate limiter @@ -286,7 +295,7 @@ func (b *PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) func // Send the message msgID, err := producer.Send(ctx, msg) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to send message: %v", err)), nil + return nil, nil, fmt.Errorf("failed to send message: %v", err) } lastMessageID = msgID @@ -302,24 +311,63 @@ func (b *PulsarClientProduceToolBuilder) buildProduceHandler(readOnly bool) func "success": true, } - return b.marshalResponse(response) + result, err := b.marshalResponse(response) + return result, nil, err } } +func buildPulsarClientProduceInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarClientProduceInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "topic", pulsarClientProduceTopicDesc) + setSchemaDescription(schema, "messages", pulsarClientProduceMessagesDesc) + setSchemaDescription(schema, "num-produce", pulsarClientProduceNumProduceDesc) + setSchemaDescription(schema, "rate", pulsarClientProduceRateDesc) + setSchemaDescription(schema, "disable-batching", pulsarClientProduceDisableBatchingDesc) + setSchemaDescription(schema, "chunking", pulsarClientProduceChunkingDesc) + setSchemaDescription(schema, "separator", pulsarClientProduceSeparatorDesc) + setSchemaDescription(schema, "properties", pulsarClientProducePropertiesDesc) + setSchemaDescription(schema, "key", pulsarClientProduceKeyDesc) + + messagesSchema := schema.Properties["messages"] + if messagesSchema != nil && messagesSchema.Items != nil { + messagesSchema.Items.Description = pulsarClientProduceMessageItemDesc + } + + propertiesSchema := schema.Properties["properties"] + if propertiesSchema != nil && propertiesSchema.Items != nil { + propertiesSchema.Items.Description = pulsarClientProducePropertyItemDesc + } + + normalizeAdditionalProperties(schema) + return schema, nil +} + // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarClientProduceToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarClientProduceToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarClientProduceToolBuilder) 
marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarClientProduceToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // generateMessagePayloads generates message payloads from message strings diff --git a/pkg/mcp/builders/pulsar/produce_legacy.go b/pkg/mcp/builders/pulsar/produce_legacy.go new file mode 100644 index 0000000..8cfdd81 --- /dev/null +++ b/pkg/mcp/builders/pulsar/produce_legacy.go @@ -0,0 +1,344 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/apache/pulsar-client-go/pulsar" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarClientProduceLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar Client Producer tools. +// /nolint:revive +type PulsarClientProduceLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarClientProduceLegacyToolBuilder creates a new legacy Pulsar Client Producer tool builder instance. +func NewPulsarClientProduceLegacyToolBuilder() *PulsarClientProduceLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_client_produce", + Version: "1.0.0", + Description: "Pulsar Client message production tools", + Category: "pulsar_client", + Tags: []string{"pulsar", "produce", "client", "messaging"}, + } + + features := []string{ + "pulsar-client", + "all", + "all-pulsar", + } + + return &PulsarClientProduceLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Client Producer tool list for the legacy server. +func (b *PulsarClientProduceLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildProduceTool() + handler := b.buildProduceHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildProduceTool builds the Pulsar Client Producer MCP tool definition. +func (b *PulsarClientProduceLegacyToolBuilder) buildProduceTool() mcp.Tool { + toolDesc := "Produce messages to a Pulsar topic. 
" + + "This tool allows you to send messages to a specified Pulsar topic with various options " + + "to control message format, batching, rate limiting, and properties. " + + "Pulsar supports message batching for improved throughput, chunking for large messages, " + + "and message properties for metadata attachment. " + + "You can send single or multiple messages, control the production rate, and add custom properties. " + + "The tool supports message partitioning through keys and provides detailed feedback on sent messages. " + + "Do not use this tool for Kafka protocol operations. Use 'kafka_client_produce' instead." + + return mcp.NewTool("pulsar_client_produce", + mcp.WithDescription(toolDesc), + mcp.WithString("topic", mcp.Required(), + mcp.Description("The fully qualified topic name to produce to (format: [persistent|non-persistent]://tenant/namespace/topic). "+ + "For partitioned topics, messages will be distributed across partitions based on the partitioning scheme. "+ + "If a message key is provided, messages with the same key will go to the same partition."), + ), + mcp.WithArray("messages", + mcp.Description("List of message content to send. Each array element represents one message payload. "+ + "IMPORTANT: Use this parameter to provide message content. Multiple messages can be sent in a single operation. "+ + "Each message will be sent according to the specified num-produce parameter."), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "Individual message content to be sent to the topic", + }, + ), + ), + mcp.WithNumber("num-produce", + mcp.Description("Number of times to send the entire message set. "+ + "If you have 3 messages and set num-produce to 2, a total of 6 messages will be sent. (default: 1)"), + ), + mcp.WithNumber("rate", + mcp.Description("Rate limiting in messages per second. Controls the maximum speed of message production. "+ + "Set to 0 to produce messages as fast as possible without rate limiting. "+ + "Higher rates may be limited by broker capacity and network bandwidth. (default: 0)"), + ), + mcp.WithBoolean("disable-batching", + mcp.Description("Disable message batching. When false (default), Pulsar batches multiple messages "+ + "to improve throughput and reduce network overhead. Set to true to send each message individually. "+ + "Disabling batching may reduce throughput but provides lower latency. (default: false)"), + ), + mcp.WithBoolean("chunking", + mcp.Description("Enable message chunking for large messages. When true, messages larger than "+ + "the maximum allowed size will be automatically split into smaller chunks and reassembled on consumption. "+ + "This allows sending messages that exceed broker size limits. (default: false)"), + ), + mcp.WithString("separator", + mcp.Description("Character or string to split message content on. When specified, each message "+ + "in the messages array will be split by this separator to create additional individual messages. "+ + "Useful for sending multiple messages from a single delimited string. (default: none)"), + ), + mcp.WithArray("properties", + mcp.Description("Message properties in key=value format. Properties are metadata key-value pairs "+ + "attached to messages for filtering, routing, or application-specific processing. "+ + "Example: ['priority=high', 'source=api', 'version=1.0']. 
Multiple properties can be specified."), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "Property in key=value format", + }, + ), + ), + mcp.WithString("key", + mcp.Description("Partitioning key for message routing. Messages with the same key will be sent "+ + "to the same partition in partitioned topics, ensuring ordering for related messages. "+ + "The key is also available to consumers for processing logic. Leave empty for round-robin partitioning."), + ), + ) +} + +// buildProduceHandler builds the Pulsar Client Producer handler function. +func (b *PulsarClientProduceLegacyToolBuilder) buildProduceHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Check read-only mode - producing is a write operation + if readOnly { + return mcp.NewToolResultError("Message production is not allowed in read-only mode"), nil + } + + // Extract required parameters with validation + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get topic: %v", err)), nil + } + + // Set default values and extract optional parameters + messages := request.GetStringSlice("messages", []string{}) + if len(messages) == 0 { + return mcp.NewToolResultError("Please supply message content with 'messages' parameter"), nil + } + + numProduce := int(request.GetFloat("num-produce", 1)) + if numProduce < 1 { + return mcp.NewToolResultError("num-produce must be at least 1"), nil + } + + rate := request.GetFloat("rate", 0) + if rate < 0 { + return mcp.NewToolResultError("rate must be non-negative"), nil + } + + disableBatching := request.GetBool("disable-batching", false) + chunkingAllowed := request.GetBool("chunking", false) + separator := request.GetString("separator", "") + properties := request.GetStringSlice("properties", []string{}) + key := request.GetString("key", "") + + // Split messages by separator if needed + if separator != "" && len(messages) > 0 { + var splitMessages []string + for _, msg := range messages { + parts := strings.Split(msg, separator) + for _, part := range parts { + if part != "" { + splitMessages = append(splitMessages, part) + } + } + } + messages = splitMessages + } + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + // Setup client + client, err := session.GetPulsarClient() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create Pulsar client: %v", err)), nil + } + defer client.Close() + + // Prepare producer options + producerOpts := pulsar.ProducerOptions{ + Topic: topic, + } + + // Set batching and chunking options + if chunkingAllowed { + producerOpts.EnableChunking = true + producerOpts.BatchingMaxPublishDelay = 0 * time.Millisecond + } else if disableBatching { + producerOpts.BatchingMaxPublishDelay = 0 * time.Millisecond + } + + producer, err := client.CreateProducer(producerOpts) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create producer: %v", err)), nil + } + defer producer.Close() + + // Generate message bodies from messages + messagePayloads, err := b.generateMessagePayloads(messages) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to generate message payloads: %v", err)), nil + } + + // Parse properties + propMap, err := 
b.parseProperties(properties) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to parse properties: %v", err)), nil + } + + // Setup rate limiter + var limiter *time.Ticker + if rate > 0 { + interval := time.Duration(1000/rate) * time.Millisecond + limiter = time.NewTicker(interval) + defer limiter.Stop() + } + + // Send messages + numMessagesSent := 0 + var lastMessageID pulsar.MessageID + + for range numProduce { + for _, payload := range messagePayloads { + // Apply rate limiting if enabled + if limiter != nil { + <-limiter.C + } + + // Create message to send + msg := &pulsar.ProducerMessage{ + Payload: payload, + } + + // Add properties if specified + if len(propMap) > 0 { + msg.Properties = propMap + } + + // Add key if specified + if key != "" { + msg.Key = key + } + + // Send the message + msgID, err := producer.Send(ctx, msg) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to send message: %v", err)), nil + } + + lastMessageID = msgID + numMessagesSent++ + } + } + + // Prepare response + response := map[string]interface{}{ + "topic": topic, + "messages_sent": numMessagesSent, + "last_message_id": fmt.Sprintf("%v", lastMessageID), + "success": true, + } + + return b.marshalResponse(response) + } +} + +// Unified error handling and utility functions + +// handleError provides unified error handling +func (b *PulsarClientProduceLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses +func (b *PulsarClientProduceLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} + +// generateMessagePayloads generates message payloads from message strings +func (b *PulsarClientProduceLegacyToolBuilder) generateMessagePayloads(messages []string) ([][]byte, error) { + var payloads [][]byte + + // Add message strings + for _, msg := range messages { + payloads = append(payloads, []byte(msg)) + } + + return payloads, nil +} + +// parseProperties parses property strings in key=value format +func (b *PulsarClientProduceLegacyToolBuilder) parseProperties(properties []string) (map[string]string, error) { + propMap := make(map[string]string) + for _, prop := range properties { + parts := strings.SplitN(prop, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid property format '%s', expected key=value", prop) + } + propMap[parts[0]] = parts[1] + } + return propMap, nil +} diff --git a/pkg/mcp/builders/pulsar/produce_test.go b/pkg/mcp/builders/pulsar/produce_test.go new file mode 100644 index 0000000..8de0ac0 --- /dev/null +++ b/pkg/mcp/builders/pulsar/produce_test.go @@ -0,0 +1,154 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
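+// The tests below exercise the go-sdk produce builder directly: feature gating in
+// BuildTools, config validation, the generated input schema, and the handler's
+// early parameter checks (read-only mode, missing messages), none of which need a
+// live Pulsar session.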
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarClientProduceToolBuilder(t *testing.T) { + builder := NewPulsarClientProduceToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_client_produce", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-client") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-client"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_client_produce", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnlyMode", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-client"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-client"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) + + t.Run("Handler_ReadOnlyRejects", func(t *testing.T) { + handler := builder.buildProduceHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarClientProduceInput{ + Topic: "persistent://tenant/ns/topic", + Messages: []string{"message"}, + }) + require.Error(t, err) + assert.Equal(t, "message production is not allowed in read-only mode", err.Error()) + }) + + t.Run("Handler_MissingMessages", func(t *testing.T) { + handler := builder.buildProduceHandler(false) + + _, _, err := handler(context.Background(), nil, pulsarClientProduceInput{ + Topic: "persistent://tenant/ns/topic", + }) + require.Error(t, err) + assert.Equal(t, "please supply message content with 'messages' parameter", err.Error()) + }) +} + +func TestPulsarClientProduceToolSchema(t *testing.T) { + builder := NewPulsarClientProduceToolBuilder() + tool, err := builder.buildProduceTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_client_produce", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"topic"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "topic", + "messages", + "num-produce", + "rate", + "disable-batching", + "chunking", + "separator", + "properties", + "key", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + topicSchema := schema.Properties["topic"] + require.NotNil(t, topicSchema) + assert.Equal(t, pulsarClientProduceTopicDesc, 
topicSchema.Description) + + messagesSchema := schema.Properties["messages"] + require.NotNil(t, messagesSchema) + require.NotNil(t, messagesSchema.Items) + assert.Equal(t, pulsarClientProduceMessageItemDesc, messagesSchema.Items.Description) + + propertiesSchema := schema.Properties["properties"] + require.NotNil(t, propertiesSchema) + require.NotNil(t, propertiesSchema.Items) + assert.Equal(t, pulsarClientProducePropertyItemDesc, propertiesSchema.Items.Description) +} diff --git a/pkg/mcp/builders/pulsar/resourcequotas.go b/pkg/mcp/builders/pulsar/resourcequotas.go index 7b53dec..9129778 100644 --- a/pkg/mcp/builders/pulsar/resourcequotas.go +++ b/pkg/mcp/builders/pulsar/resourcequotas.go @@ -21,13 +21,52 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminResourceQuotasInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Namespace *string `json:"namespace,omitempty"` + Bundle *string `json:"bundle,omitempty"` + MsgRateIn *float64 `json:"msgRateIn,omitempty"` + MsgRateOut *float64 `json:"msgRateOut,omitempty"` + BandwidthIn *float64 `json:"bandwidthIn,omitempty"` + BandwidthOut *float64 `json:"bandwidthOut,omitempty"` + Memory *float64 `json:"memory,omitempty"` + Dynamic *bool `json:"dynamic,omitempty"` +} + +const ( + pulsarAdminResourceQuotasResourceDesc = "Resource to operate on. Available resources:\n" + + "- quota: The resource quota configuration for a specific namespace bundle or the default quota" + pulsarAdminResourceQuotasOperationDesc = "Operation to perform. Available operations:\n" + + "- get: Get the resource quota for a specified namespace bundle or default quota\n" + + "- set: Set the resource quota for a specified namespace bundle or default quota (requires super-user permissions)\n" + + "- reset: Reset a namespace bundle's resource quota to default value (requires super-user permissions)" + pulsarAdminResourceQuotasNamespaceDesc = "The namespace name in the format 'tenant/namespace'. " + + "Optional for 'get' and 'set' operations (to get/set default quota if omitted). " + + "Required for 'reset' operation." + pulsarAdminResourceQuotasBundleDesc = "The bundle range in the format '{start-boundary}_{end-boundary}'. " + + "Must be specified together with namespace. Bundle is a hash range of the topic names belonging to a namespace." + pulsarAdminResourceQuotasMsgRateInDesc = "Expected incoming messages per second. Required for 'set' operation. " + + "This defines the maximum rate of incoming messages allowed for the namespace or bundle." + pulsarAdminResourceQuotasMsgRateOutDesc = "Expected outgoing messages per second. Required for 'set' operation. " + + "This defines the maximum rate of outgoing messages allowed for the namespace or bundle." + pulsarAdminResourceQuotasBandwidthInDesc = "Expected inbound bandwidth in bytes per second. Required for 'set' operation. " + + "This defines the maximum rate of incoming bytes allowed for the namespace or bundle." + pulsarAdminResourceQuotasBandwidthOutDesc = "Expected outbound bandwidth in bytes per second. Required for 'set' operation. 
" + + "This defines the maximum rate of outgoing bytes allowed for the namespace or bundle." + pulsarAdminResourceQuotasMemoryDesc = "Expected memory usage in Mbytes. Required for 'set' operation. " + + "This defines the maximum memory allowed for storing messages for the namespace or bundle." + pulsarAdminResourceQuotasDynamicDesc = "Whether to allow quota to be dynamically re-calculated. Optional for 'set' operation. " + + "If true, the broker can dynamically adjust the quota based on the current usage patterns." +) + // PulsarAdminResourceQuotasToolBuilder implements the ToolBuilder interface for Pulsar admin resource quotas // /nolint:revive type PulsarAdminResourceQuotasToolBuilder struct { @@ -57,7 +96,7 @@ func NewPulsarAdminResourceQuotasToolBuilder() *PulsarAdminResourceQuotasToolBui } // BuildTools builds the Pulsar admin resource quotas tool list -func (b *PulsarAdminResourceQuotasToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminResourceQuotasToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,11 +108,14 @@ func (b *PulsarAdminResourceQuotasToolBuilder) BuildTools(_ context.Context, con } // Build tools - tool := b.buildResourceQuotasTool() + tool, err := b.buildResourceQuotasTool() + if err != nil { + return nil, err + } handler := b.buildResourceQuotasHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminResourceQuotasInput, any]{ Tool: tool, Handler: handler, }, @@ -81,129 +123,90 @@ func (b *PulsarAdminResourceQuotasToolBuilder) BuildTools(_ context.Context, con } // buildResourceQuotasTool builds the Pulsar admin resource quotas MCP tool definition -func (b *PulsarAdminResourceQuotasToolBuilder) buildResourceQuotasTool() mcp.Tool { +func (b *PulsarAdminResourceQuotasToolBuilder) buildResourceQuotasTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminResourceQuotasInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar resource quotas for brokers, namespaces and bundles. " + "Resource quotas define limits for resource usage such as message rates, bandwidth, and memory. " + "These quotas help prevent resource abuse and ensure fair resource allocation across the Pulsar cluster. " + "Operations include getting, setting, and resetting quotas. " + "Requires super-user permissions for all operations." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- quota: The resource quota configuration for a specific namespace bundle or the default quota" - - operationDesc := "Operation to perform. 
Available operations:\n" + - "- get: Get the resource quota for a specified namespace bundle or default quota\n" + - "- set: Set the resource quota for a specified namespace bundle or default quota (requires super-user permissions)\n" + - "- reset: Reset a namespace bundle's resource quota to default value (requires super-user permissions)" - - return mcp.NewTool("pulsar_admin_resourcequota", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("namespace", - mcp.Description("The namespace name in the format 'tenant/namespace'. "+ - "Optional for 'get' and 'set' operations (to get/set default quota if omitted). "+ - "Required for 'reset' operation."), - ), - mcp.WithString("bundle", - mcp.Description("The bundle range in the format '{start-boundary}_{end-boundary}'. "+ - "Must be specified together with namespace. Bundle is a hash range of the topic names belonging to a namespace."), - ), - mcp.WithNumber("msgRateIn", - mcp.Description("Expected incoming messages per second. Required for 'set' operation. "+ - "This defines the maximum rate of incoming messages allowed for the namespace or bundle."), - ), - mcp.WithNumber("msgRateOut", - mcp.Description("Expected outgoing messages per second. Required for 'set' operation. "+ - "This defines the maximum rate of outgoing messages allowed for the namespace or bundle."), - ), - mcp.WithNumber("bandwidthIn", - mcp.Description("Expected inbound bandwidth in bytes per second. Required for 'set' operation. "+ - "This defines the maximum rate of incoming bytes allowed for the namespace or bundle."), - ), - mcp.WithNumber("bandwidthOut", - mcp.Description("Expected outbound bandwidth in bytes per second. Required for 'set' operation. "+ - "This defines the maximum rate of outgoing bytes allowed for the namespace or bundle."), - ), - mcp.WithNumber("memory", - mcp.Description("Expected memory usage in Mbytes. Required for 'set' operation. "+ - "This defines the maximum memory allowed for storing messages for the namespace or bundle."), - ), - mcp.WithBoolean("dynamic", - mcp.Description("Whether to allow quota to be dynamically re-calculated. Optional for 'set' operation. 
"+ - "If true, the broker can dynamically adjust the quota based on the current usage patterns."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_resourcequota", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildResourceQuotasHandler builds the Pulsar admin resource quotas handler function -func (b *PulsarAdminResourceQuotasToolBuilder) buildResourceQuotasHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil +func (b *PulsarAdminResourceQuotasToolBuilder) buildResourceQuotasHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminResourceQuotasInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminResourceQuotasInput) (*sdk.CallToolResult, any, error) { + resource := strings.ToLower(input.Resource) + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'; please specify: quota") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + operation := strings.ToLower(input.Operation) + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'; please specify: get, set, reset") } - // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) - // Validate write operations in read-only mode if readOnly && (operation == "set" || operation == "reset") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Verify resource type if resource != "quota" { - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Only 'quota' is supported", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. available resources: quota", resource) } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get admin client: %v", err) } // Dispatch based on operation switch operation { case "get": - return b.handleQuotaGet(admin, request) + result, handlerErr := b.handleQuotaGet(admin, input) + return result, nil, handlerErr case "set": - return b.handleQuotaSet(admin, request) + result, handlerErr := b.handleQuotaSet(admin, input) + return result, nil, handlerErr case "reset": - return b.handleQuotaReset(admin, request) + result, handlerErr := b.handleQuotaReset(admin, input) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation: %s. Available operations: get, set, reset", operation)), nil + return nil, nil, fmt.Errorf("invalid operation: %s. 
available operations: get, set, reset", operation) } } } -// Helper functions - // handleQuotaGet handles getting a resource quota -func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaGet(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get optional parameters - namespace := request.GetString("namespace", "") - bundle := request.GetString("bundle", "") +func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaGet(admin cmdutils.Client, input pulsarAdminResourceQuotasInput) (*sdk.CallToolResult, error) { + namespace := "" + if input.Namespace != nil { + namespace = *input.Namespace + } + bundle := "" + if input.Bundle != nil { + bundle = *input.Bundle + } // Check if both namespace and bundle are provided or neither is provided if (namespace != "" && bundle == "") || (namespace == "" && bundle != "") { - return mcp.NewToolResultError("When specifying a namespace, you must also specify a bundle and vice versa."), nil + return nil, fmt.Errorf("when specifying a namespace, you must also specify a bundle and vice versa") } var ( @@ -215,66 +218,74 @@ func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaGet(admin cmdutils.Cli // Get default resource quota resourceQuotaData, getErr = admin.ResourceQuotas().GetDefaultResourceQuota() if getErr != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get default resource quota: %v", getErr)), nil + return nil, fmt.Errorf("failed to get default resource quota: %v", getErr) } } else { // Get namespace bundle resource quota nsName, getErr := utils.GetNamespaceName(namespace) if getErr != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name '%s': %v", namespace, getErr)), nil + return nil, fmt.Errorf("invalid namespace name '%s': %v", namespace, getErr) } resourceQuotaData, getErr = admin.ResourceQuotas().GetNamespaceBundleResourceQuota(nsName.String(), bundle) if getErr != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource quota for namespace '%s' bundle '%s': %v", - namespace, bundle, getErr)), nil + return nil, fmt.Errorf("failed to get resource quota for namespace '%s' bundle '%s': %v", + namespace, bundle, getErr) } } // Format the output jsonBytes, err := json.Marshal(resourceQuotaData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to marshal resource quota data: %v", err)), nil + return nil, fmt.Errorf("failed to marshal resource quota data: %v", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return textResult(string(jsonBytes)), nil } // handleQuotaSet handles setting a resource quota -func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaSet(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters for set operation - msgRateIn, err := request.RequireFloat("msgRateIn") +func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaSet(admin cmdutils.Client, input pulsarAdminResourceQuotasInput) (*sdk.CallToolResult, error) { + msgRateIn, err := requireFloat(input.MsgRateIn, "msgRateIn") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'msgRateIn' for quota.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'msgRateIn' for quota.set: %v", err) } - msgRateOut, err := request.RequireFloat("msgRateOut") + msgRateOut, err := requireFloat(input.MsgRateOut, "msgRateOut") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'msgRateOut' for quota.set: %v", err)), 
nil + return nil, fmt.Errorf("missing required parameter 'msgRateOut' for quota.set: %v", err) } - bandwidthIn, err := request.RequireFloat("bandwidthIn") + bandwidthIn, err := requireFloat(input.BandwidthIn, "bandwidthIn") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'bandwidthIn' for quota.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'bandwidthIn' for quota.set: %v", err) } - bandwidthOut, err := request.RequireFloat("bandwidthOut") + bandwidthOut, err := requireFloat(input.BandwidthOut, "bandwidthOut") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'bandwidthOut' for quota.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'bandwidthOut' for quota.set: %v", err) } - memory, err := request.RequireFloat("memory") + memory, err := requireFloat(input.Memory, "memory") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'memory' for quota.set: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'memory' for quota.set: %v", err) } // Get optional parameters - namespace := request.GetString("namespace", "") - bundle := request.GetString("bundle", "") - dynamic := request.GetBool("dynamic", false) + namespace := "" + if input.Namespace != nil { + namespace = *input.Namespace + } + bundle := "" + if input.Bundle != nil { + bundle = *input.Bundle + } + dynamic := false + if input.Dynamic != nil { + dynamic = *input.Dynamic + } // Check if both namespace and bundle are provided or neither is provided if (namespace != "" && bundle == "") || (namespace == "" && bundle != "") { - return mcp.NewToolResultError("When specifying a namespace, you must also specify a bundle and vice versa."), nil + return nil, fmt.Errorf("when specifying a namespace, you must also specify a bundle and vice versa") } // Create resource quota object @@ -291,48 +302,81 @@ func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaSet(admin cmdutils.Cli // Set default resource quota err = admin.ResourceQuotas().SetDefaultResourceQuota(*quota) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set default resource quota: %v", err)), nil + return nil, fmt.Errorf("failed to set default resource quota: %v", err) } resultMsg = "Default resource quota set successfully" } else { // Set namespace bundle resource quota err = admin.ResourceQuotas().SetNamespaceBundleResourceQuota(namespace, bundle, *quota) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set resource quota for namespace '%s' bundle '%s': %v", - namespace, bundle, err)), nil + return nil, fmt.Errorf("failed to set resource quota for namespace '%s' bundle '%s': %v", + namespace, bundle, err) } resultMsg = fmt.Sprintf("Resource quota for namespace '%s' bundle '%s' set successfully", namespace, bundle) } - return mcp.NewToolResultText(resultMsg), nil + return textResult(resultMsg), nil } // handleQuotaReset handles resetting a resource quota -func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaReset(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters for reset operation - namespace, err := request.RequireString("namespace") +func (b *PulsarAdminResourceQuotasToolBuilder) handleQuotaReset(admin cmdutils.Client, input pulsarAdminResourceQuotasInput) (*sdk.CallToolResult, error) { + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return 
mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace' for quota.reset: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for quota.reset: %v", err) } - bundle, err := request.RequireString("bundle") + bundle, err := requireString(input.Bundle, "bundle") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'bundle' for quota.reset: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'bundle' for quota.reset: %v", err) } // Parse namespace name nsName, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name '%s': %v", namespace, err)), nil + return nil, fmt.Errorf("invalid namespace name '%s': %v", namespace, err) } // Reset namespace bundle resource quota err = admin.ResourceQuotas().ResetNamespaceBundleResourceQuota(nsName.String(), bundle) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to reset resource quota for namespace '%s' bundle '%s': %v", - namespace, bundle, err)), nil + return nil, fmt.Errorf("failed to reset resource quota for namespace '%s' bundle '%s': %v", + namespace, bundle, err) } - return mcp.NewToolResultText(fmt.Sprintf("Resource quota for namespace '%s' bundle '%s' reset to default successfully", + return textResult(fmt.Sprintf("Resource quota for namespace '%s' bundle '%s' reset to default successfully", namespace, bundle)), nil } + +func buildPulsarAdminResourceQuotasInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminResourceQuotasInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminResourceQuotasResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminResourceQuotasOperationDesc) + setSchemaDescription(schema, "namespace", pulsarAdminResourceQuotasNamespaceDesc) + setSchemaDescription(schema, "bundle", pulsarAdminResourceQuotasBundleDesc) + setSchemaDescription(schema, "msgRateIn", pulsarAdminResourceQuotasMsgRateInDesc) + setSchemaDescription(schema, "msgRateOut", pulsarAdminResourceQuotasMsgRateOutDesc) + setSchemaDescription(schema, "bandwidthIn", pulsarAdminResourceQuotasBandwidthInDesc) + setSchemaDescription(schema, "bandwidthOut", pulsarAdminResourceQuotasBandwidthOutDesc) + setSchemaDescription(schema, "memory", pulsarAdminResourceQuotasMemoryDesc) + setSchemaDescription(schema, "dynamic", pulsarAdminResourceQuotasDynamicDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + +func requireFloat(value *float64, key string) (float64, error) { + if value == nil { + return 0, fmt.Errorf("required argument %q not found", key) + } + return *value, nil +} diff --git a/pkg/mcp/builders/pulsar/resourcequotas_legacy.go b/pkg/mcp/builders/pulsar/resourcequotas_legacy.go new file mode 100644 index 0000000..2f9f65a --- /dev/null +++ b/pkg/mcp/builders/pulsar/resourcequotas_legacy.go @@ -0,0 +1,119 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminResourceQuotasLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin resource quotas. +// /nolint:revive +type PulsarAdminResourceQuotasLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminResourceQuotasLegacyToolBuilder creates a new Pulsar admin resource quotas legacy tool builder instance. +func NewPulsarAdminResourceQuotasLegacyToolBuilder() *PulsarAdminResourceQuotasLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_resourcequotas", + Version: "1.0.0", + Description: "Pulsar admin resource quotas management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "resourcequotas"}, + } + + features := []string{ + "pulsar-admin-resourcequotas", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminResourceQuotasLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin resource quotas legacy tool list. +func (b *PulsarAdminResourceQuotasLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildResourceQuotasTool() + if err != nil { + return nil, err + } + handler := b.buildResourceQuotasHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminResourceQuotasLegacyToolBuilder) buildResourceQuotasTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminResourceQuotasInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Manage Apache Pulsar resource quotas for brokers, namespaces and bundles. " + + "Resource quotas define limits for resource usage such as message rates, bandwidth, and memory. " + + "These quotas help prevent resource abuse and ensure fair resource allocation across the Pulsar cluster. " + + "Operations include getting, setting, and resetting quotas. " + + "Requires super-user permissions for all operations." 
+ + return mcp.Tool{ + Name: "pulsar_admin_resourcequota", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminResourceQuotasLegacyToolBuilder) buildResourceQuotasHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminResourceQuotasToolBuilder() + sdkHandler := sdkBuilder.buildResourceQuotasHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminResourceQuotasInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/resourcequotas_test.go b/pkg/mcp/builders/pulsar/resourcequotas_test.go new file mode 100644 index 0000000..8d5e906 --- /dev/null +++ b/pkg/mcp/builders/pulsar/resourcequotas_test.go @@ -0,0 +1,172 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminResourceQuotasToolBuilder(t *testing.T) { + builder := NewPulsarAdminResourceQuotasToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_resourcequotas", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-resourcequotas") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-resourcequotas"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_resourcequota", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-resourcequotas"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-resourcequotas"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + 
}) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminResourceQuotasToolSchema(t *testing.T) { + builder := NewPulsarAdminResourceQuotasToolBuilder() + tool, err := builder.buildResourceQuotasTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_resourcequota", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "namespace", + "bundle", + "msgRateIn", + "msgRateOut", + "bandwidthIn", + "bandwidthOut", + "memory", + "dynamic", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminResourceQuotasResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminResourceQuotasOperationDesc, operationSchema.Description) + + namespaceSchema := schema.Properties["namespace"] + require.NotNil(t, namespaceSchema) + assert.Equal(t, pulsarAdminResourceQuotasNamespaceDesc, namespaceSchema.Description) + + bundleSchema := schema.Properties["bundle"] + require.NotNil(t, bundleSchema) + assert.Equal(t, pulsarAdminResourceQuotasBundleDesc, bundleSchema.Description) + + msgRateInSchema := schema.Properties["msgRateIn"] + require.NotNil(t, msgRateInSchema) + assert.Equal(t, pulsarAdminResourceQuotasMsgRateInDesc, msgRateInSchema.Description) + + msgRateOutSchema := schema.Properties["msgRateOut"] + require.NotNil(t, msgRateOutSchema) + assert.Equal(t, pulsarAdminResourceQuotasMsgRateOutDesc, msgRateOutSchema.Description) + + bandwidthInSchema := schema.Properties["bandwidthIn"] + require.NotNil(t, bandwidthInSchema) + assert.Equal(t, pulsarAdminResourceQuotasBandwidthInDesc, bandwidthInSchema.Description) + + bandwidthOutSchema := schema.Properties["bandwidthOut"] + require.NotNil(t, bandwidthOutSchema) + assert.Equal(t, pulsarAdminResourceQuotasBandwidthOutDesc, bandwidthOutSchema.Description) + + memorySchema := schema.Properties["memory"] + require.NotNil(t, memorySchema) + assert.Equal(t, pulsarAdminResourceQuotasMemoryDesc, memorySchema.Description) + + dynamicSchema := schema.Properties["dynamic"] + require.NotNil(t, dynamicSchema) + assert.Equal(t, pulsarAdminResourceQuotasDynamicDesc, dynamicSchema.Description) +} + +func TestPulsarAdminResourceQuotasToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminResourceQuotasToolBuilder() + handler := builder.buildResourceQuotasHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminResourceQuotasInput{ + Resource: "quota", + Operation: "set", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/pulsar/schema.go b/pkg/mcp/builders/pulsar/schema.go index 13fc41a..d93a9a9 100644 --- a/pkg/mcp/builders/pulsar/schema.go +++ b/pkg/mcp/builders/pulsar/schema.go @@ -24,13 +24,40 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk 
"github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminSchemaInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Topic string `json:"topic"` + Version *float64 `json:"version,omitempty"` + Filename *string `json:"filename,omitempty"` +} + +const ( + pulsarAdminSchemaResourceDesc = "Resource to operate on. Available resources:\n" + + "- schema: The schema configuration for a specific topic" + pulsarAdminSchemaOperationDesc = "Operation to perform. Available operations:\n" + + "- get: Get the schema for a topic (optionally by version)\n" + + "- upload: Upload a new schema for a topic (requires namespace admin permissions)\n" + + "- delete: Delete the schema for a topic (requires namespace admin permissions)" + pulsarAdminSchemaTopicDesc = "The fully qualified topic name in the format 'persistent://tenant/namespace/topic'. " + + "A schema is always associated with a specific topic. The schema will be enforced for all producers " + + "and consumers of this topic." + pulsarAdminSchemaVersionDesc = "The schema version (optional for 'get' operation). " + + "Pulsar maintains a versioned history of schemas. If not specified, the latest schema version will be returned. " + + "Use this parameter to retrieve a specific historical version of the schema." + pulsarAdminSchemaFilenameDesc = "The file path of the schema definition (required for 'upload' operation). " + + "The file should contain a JSON object with 'type', 'schema', and optionally 'properties' fields. " + + "Supported schema types include: AVRO, JSON, PROTOBUF, PROTOBUF_NATIVE, KEY_VALUE, BYTES, STRING, " + + "INT8, INT16, INT32, INT64, FLOAT, DOUBLE, BOOLEAN, NONE." 
+) + // PulsarAdminSchemaToolBuilder implements the ToolBuilder interface for Pulsar Admin Schema tools // It provides functionality to build Pulsar schema management tools // /nolint:revive @@ -62,7 +89,7 @@ func NewPulsarAdminSchemaToolBuilder() *PulsarAdminSchemaToolBuilder { // BuildTools builds the Pulsar Admin Schema tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminSchemaToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminSchemaToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -74,11 +101,14 @@ func (b *PulsarAdminSchemaToolBuilder) BuildTools(_ context.Context, config buil } // Build tools - tool := b.buildSchemaTool() + tool, err := b.buildSchemaTool() + if err != nil { + return nil, err + } handler := b.buildSchemaHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminSchemaInput, any]{ Tool: tool, Handler: handler, }, @@ -87,7 +117,12 @@ func (b *PulsarAdminSchemaToolBuilder) BuildTools(_ context.Context, config buil // buildSchemaTool builds the Pulsar Admin Schema MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminSchemaToolBuilder) buildSchemaTool() mcp.Tool { +func (b *PulsarAdminSchemaToolBuilder) buildSchemaTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminSchemaInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar schemas for topics. " + "Schemas in Pulsar define the structure of message data, enabling data validation, evolution, and interoperability. " + "Pulsar supports multiple schema types including AVRO, JSON, PROTOBUF, etc., allowing strong typing of message content. " + @@ -95,96 +130,67 @@ func (b *PulsarAdminSchemaToolBuilder) buildSchemaTool() mcp.Tool { "Operations include getting, uploading, and deleting schemas. " + "Requires namespace admin permissions for all operations." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- schema: The schema configuration for a specific topic" - - operationDesc := "Operation to perform. Available operations:\n" + - "- get: Get the schema for a topic (optionally by version)\n" + - "- upload: Upload a new schema for a topic (requires namespace admin permissions)\n" + - "- delete: Delete the schema for a topic (requires namespace admin permissions)" - - return mcp.NewTool("pulsar_admin_schema", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The fully qualified topic name in the format 'persistent://tenant/namespace/topic'. "+ - "A schema is always associated with a specific topic. The schema will be enforced for all producers "+ - "and consumers of this topic."), - ), - mcp.WithNumber("version", - mcp.Description("The schema version (optional for 'get' operation). "+ - "Pulsar maintains a versioned history of schemas. If not specified, the latest schema version will be returned. 
"+ - "Use this parameter to retrieve a specific historical version of the schema."), - ), - mcp.WithString("filename", - mcp.Description("The file path of the schema definition (required for 'upload' operation). "+ - "The file should contain a JSON object with 'type', 'schema', and optionally 'properties' fields. "+ - "Supported schema types include: AVRO, JSON, PROTOBUF, PROTOBUF_NATIVE, KEY_VALUE, BYTES, STRING, INT8, INT16, INT32, INT64, FLOAT, DOUBLE, BOOLEAN, NONE."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_schema", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildSchemaHandler builds the Pulsar Admin Schema handler function // Migrated from the original handler logic -func (b *PulsarAdminSchemaToolBuilder) buildSchemaHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil +func (b *PulsarAdminSchemaToolBuilder) buildSchemaHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminSchemaInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminSchemaInput) (*sdk.CallToolResult, any, error) { + resource := strings.ToLower(input.Resource) + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + operation := strings.ToLower(input.Operation) + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") } - topic, err := request.RequireString("topic") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic'. Please provide the fully qualified topic name: %v", err)), nil + topic := input.Topic + if topic == "" { + return nil, nil, fmt.Errorf("missing required parameter 'topic'. please provide the fully qualified topic name") } - // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) - // Validate write operations in read-only mode if readOnly && (operation == "upload" || operation == "delete") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Verify resource type if resource != "schema" { - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Only 'schema' is supported", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. 
only 'schema' is supported", resource) } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create the admin client admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get admin client: %v", err) } // Dispatch based on operation switch operation { case "get": - return b.handleSchemaGet(admin, topic, request) + result, err := b.handleSchemaGet(admin, topic, input) + return result, nil, err case "upload": - return b.handleSchemaUpload(admin, topic, request) + result, err := b.handleSchemaUpload(admin, topic, input) + return result, nil, err case "delete": - return b.handleSchemaDelete(admin, topic) + result, err := b.handleSchemaDelete(admin, topic) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unknown operation: %s", operation) } } } @@ -201,97 +207,121 @@ func (b *PulsarAdminSchemaToolBuilder) prettyPrint(data []byte) ([]byte, error) // Operation handler functions - migrated from the original implementation // handleSchemaGet handles getting a schema -func (b *PulsarAdminSchemaToolBuilder) handleSchemaGet(admin cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSchemaToolBuilder) handleSchemaGet(admin cmdutils.Client, topic string, input pulsarAdminSchemaInput) (*sdk.CallToolResult, error) { // Get optional version parameter - version := request.GetFloat("version", 0) + var version float64 + if input.Version != nil { + version = *input.Version + } // Get schema info if version != 0 { // Get schema by version info, err := admin.Schemas().GetSchemaInfoByVersion(topic, int64(version)) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get schema version %v for topic '%s': %v", - version, topic, err)), nil + return nil, fmt.Errorf("failed to get schema version %v for topic '%s': %v", version, topic, err) } jsonBytes, err := json.Marshal(info) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema information: %v", err)), nil + return nil, fmt.Errorf("failed to process schema information: %v", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return textResult(string(jsonBytes)), nil } // Get latest schema schemaInfoWithVersion, err := admin.Schemas().GetSchemaInfoWithVersion(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get latest schema for topic '%s': %v", - topic, err)), nil + return nil, fmt.Errorf("failed to get latest schema for topic '%s': %v", topic, err) } // Format the output var output bytes.Buffer name, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema name: %v", err)), nil + return nil, fmt.Errorf("failed to process schema name: %v", err) } schemaType, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Type) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema type: %v", err)), nil + return nil, fmt.Errorf("failed to process schema type: %v", err) } properties, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Properties) if err != nil { - return 
mcp.NewToolResultError(fmt.Sprintf("Failed to process schema properties: %v", err)), nil + return nil, fmt.Errorf("failed to process schema properties: %v", err) } schema, err := b.prettyPrint(schemaInfoWithVersion.SchemaInfo.Schema) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to format schema definition: %v", err)), nil + return nil, fmt.Errorf("failed to format schema definition: %v", err) } fmt.Fprintf(&output, "{\n name: %s \n schema: %s\n type: %s \n properties: %s\n version: %d\n}", string(name), string(schema), string(schemaType), string(properties), schemaInfoWithVersion.Version) - return mcp.NewToolResultText(output.String()), nil + return textResult(output.String()), nil } // handleSchemaUpload handles uploading a schema -func (b *PulsarAdminSchemaToolBuilder) handleSchemaUpload(admin cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - filename, err := request.RequireString("filename") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'filename' for schema.upload. Please provide the path to the schema definition file: %v", err)), nil +func (b *PulsarAdminSchemaToolBuilder) handleSchemaUpload(admin cmdutils.Client, topic string, input pulsarAdminSchemaInput) (*sdk.CallToolResult, error) { + if input.Filename == nil || *input.Filename == "" { + return nil, fmt.Errorf("missing required parameter 'filename' for schema.upload. please provide the path to the schema definition file") } + filename := *input.Filename // Read and parse the schema file var payload utils.PostSchemaPayload file, err := os.ReadFile(filepath.Clean(filename)) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to read schema file '%s': %v", filename, err)), nil + return nil, fmt.Errorf("failed to read schema file '%s': %v", filename, err) } err = json.Unmarshal(file, &payload) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to parse schema file '%s'. The file must contain valid JSON with 'type', 'schema', and optionally 'properties' fields: %v", - filename, err)), nil + return nil, fmt.Errorf("failed to parse schema file '%s'. 
the file must contain valid JSON with 'type', 'schema', and optionally 'properties' fields: %v", + filename, err) } // Upload the schema err = admin.Schemas().CreateSchemaByPayload(topic, payload) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to upload schema for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to upload schema for topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Schema uploaded successfully for topic '%s'", topic)), nil + return textResult(fmt.Sprintf("Schema uploaded successfully for topic '%s'", topic)), nil } // handleSchemaDelete handles deleting a schema -func (b *PulsarAdminSchemaToolBuilder) handleSchemaDelete(admin cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSchemaToolBuilder) handleSchemaDelete(admin cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Delete the schema err := admin.Schemas().DeleteSchema(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete schema for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to delete schema for topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Schema deleted successfully for topic '%s'", topic)), nil + return textResult(fmt.Sprintf("Schema deleted successfully for topic '%s'", topic)), nil +} + +func buildPulsarAdminSchemaInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminSchemaInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminSchemaResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminSchemaOperationDesc) + setSchemaDescription(schema, "topic", pulsarAdminSchemaTopicDesc) + setSchemaDescription(schema, "version", pulsarAdminSchemaVersionDesc) + setSchemaDescription(schema, "filename", pulsarAdminSchemaFilenameDesc) + + normalizeAdditionalProperties(schema) + return schema, nil } diff --git a/pkg/mcp/builders/pulsar/schema_legacy.go b/pkg/mcp/builders/pulsar/schema_legacy.go new file mode 100644 index 0000000..4a111d2 --- /dev/null +++ b/pkg/mcp/builders/pulsar/schema_legacy.go @@ -0,0 +1,296 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/pulsarctl/pkg/cmdutils" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarAdminSchemaLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar Admin Schema tools. +// It provides functionality to build Pulsar schema management tools for the legacy server. +// /nolint:revive +type PulsarAdminSchemaLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminSchemaLegacyToolBuilder creates a new Pulsar Admin Schema legacy tool builder instance. +func NewPulsarAdminSchemaLegacyToolBuilder() *PulsarAdminSchemaLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_schema", + Version: "1.0.0", + Description: "Pulsar Admin schema management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "schema", "admin"}, + } + + features := []string{ + "pulsar-admin-schemas", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminSchemaLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Admin Schema tool list for the legacy server. +func (b *PulsarAdminSchemaLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildSchemaTool() + handler := b.buildSchemaHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildSchemaTool builds the Pulsar Admin Schema MCP tool definition +// Migrated from the original tool definition logic +func (b *PulsarAdminSchemaLegacyToolBuilder) buildSchemaTool() mcp.Tool { + toolDesc := "Manage Apache Pulsar schemas for topics. " + + "Schemas in Pulsar define the structure of message data, enabling data validation, evolution, and interoperability. " + + "Pulsar supports multiple schema types including AVRO, JSON, PROTOBUF, etc., allowing strong typing of message content. " + + "Schema versioning ensures backward/forward compatibility as data structures evolve over time. " + + "Operations include getting, uploading, and deleting schemas. " + + "Requires namespace admin permissions for all operations." + + resourceDesc := "Resource to operate on. Available resources:\n" + + "- schema: The schema configuration for a specific topic" + + operationDesc := "Operation to perform. 
Available operations:\n" + + "- get: Get the schema for a topic (optionally by version)\n" + + "- upload: Upload a new schema for a topic (requires namespace admin permissions)\n" + + "- delete: Delete the schema for a topic (requires namespace admin permissions)" + + return mcp.NewTool("pulsar_admin_schema", + mcp.WithDescription(toolDesc), + mcp.WithString("resource", mcp.Required(), + mcp.Description(resourceDesc), + ), + mcp.WithString("operation", mcp.Required(), + mcp.Description(operationDesc), + ), + mcp.WithString("topic", mcp.Required(), + mcp.Description("The fully qualified topic name in the format 'persistent://tenant/namespace/topic'. "+ + "A schema is always associated with a specific topic. The schema will be enforced for all producers "+ + "and consumers of this topic."), + ), + mcp.WithNumber("version", + mcp.Description("The schema version (optional for 'get' operation). "+ + "Pulsar maintains a versioned history of schemas. If not specified, the latest schema version will be returned. "+ + "Use this parameter to retrieve a specific historical version of the schema."), + ), + mcp.WithString("filename", + mcp.Description("The file path of the schema definition (required for 'upload' operation). "+ + "The file should contain a JSON object with 'type', 'schema', and optionally 'properties' fields. "+ + "Supported schema types include: AVRO, JSON, PROTOBUF, PROTOBUF_NATIVE, KEY_VALUE, BYTES, STRING, INT8, INT16, INT32, INT64, FLOAT, DOUBLE, BOOLEAN, NONE."), + ), + ) +} + +// buildSchemaHandler builds the Pulsar Admin Schema handler function +// Migrated from the original handler logic +func (b *PulsarAdminSchemaLegacyToolBuilder) buildSchemaHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + resource, err := request.RequireString("resource") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil + } + + operation, err := request.RequireString("operation") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + } + + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic'. Please provide the fully qualified topic name: %v", err)), nil + } + + // Normalize parameters + resource = strings.ToLower(resource) + operation = strings.ToLower(operation) + + // Validate write operations in read-only mode + if readOnly && (operation == "upload" || operation == "delete") { + return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + } + + // Verify resource type + if resource != "schema" { + return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. 
Only 'schema' is supported", resource)), nil
+		}
+
+		// Get Pulsar session from context
+		session := mcpCtx.GetPulsarSession(ctx)
+		if session == nil {
+			return mcp.NewToolResultError("Pulsar session not found in context"), nil
+		}
+
+		// Create the admin client
+		admin, err := session.GetAdminClient()
+		if err != nil {
+			return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil
+		}
+
+		// Dispatch based on operation
+		switch operation {
+		case "get":
+			return b.handleSchemaGet(admin, topic, request)
+		case "upload":
+			return b.handleSchemaUpload(admin, topic, request)
+		case "delete":
+			return b.handleSchemaDelete(admin, topic)
+		default:
+			return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s", operation)), nil
+		}
+	}
+}
+
+// Unified error handling and utility functions
+
+// prettyPrint formats JSON bytes with indentation
+func (b *PulsarAdminSchemaLegacyToolBuilder) prettyPrint(data []byte) ([]byte, error) {
+	var out bytes.Buffer
+	err := json.Indent(&out, data, "", " ")
+	return out.Bytes(), err
+}
+
+// Operation handler functions - migrated from the original implementation
+
+// handleSchemaGet handles getting a schema
+func (b *PulsarAdminSchemaLegacyToolBuilder) handleSchemaGet(admin cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+	// Get optional version parameter
+	version := request.GetFloat("version", 0)
+
+	// Get schema info
+	if version != 0 {
+		// Get schema by version
+		info, err := admin.Schemas().GetSchemaInfoByVersion(topic, int64(version))
+		if err != nil {
+			return mcp.NewToolResultError(fmt.Sprintf("Failed to get schema version %v for topic '%s': %v",
+				version, topic, err)), nil
+		}
+
+		jsonBytes, err := json.Marshal(info)
+		if err != nil {
+			return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema information: %v", err)), nil
+		}
+
+		return mcp.NewToolResultText(string(jsonBytes)), nil
+	}
+	// Get latest schema
+	schemaInfoWithVersion, err := admin.Schemas().GetSchemaInfoWithVersion(topic)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to get latest schema for topic '%s': %v",
+			topic, err)), nil
+	}
+
+	// Format the output
+	var output bytes.Buffer
+	name, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Name)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema name: %v", err)), nil
+	}
+
+	schemaType, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Type)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema type: %v", err)), nil
+	}
+
+	properties, err := json.Marshal(schemaInfoWithVersion.SchemaInfo.Properties)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to process schema properties: %v", err)), nil
+	}
+
+	schema, err := b.prettyPrint(schemaInfoWithVersion.SchemaInfo.Schema)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to format schema definition: %v", err)), nil
+	}
+
+	fmt.Fprintf(&output, "{\n name: %s \n schema: %s\n type: %s \n properties: %s\n version: %d\n}",
+		string(name), string(schema), string(schemaType), string(properties), schemaInfoWithVersion.Version)
+
+	return mcp.NewToolResultText(output.String()), nil
+}
+
+// handleSchemaUpload handles uploading a schema
+func (b *PulsarAdminSchemaLegacyToolBuilder) handleSchemaUpload(admin cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
+	filename, err := request.RequireString("filename")
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'filename' for schema.upload. Please provide the path to the schema definition file: %v", err)), nil
+	}
+
+	// Read and parse the schema file
+	var payload utils.PostSchemaPayload
+	file, err := os.ReadFile(filepath.Clean(filename))
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to read schema file '%s': %v", filename, err)), nil
+	}
+
+	err = json.Unmarshal(file, &payload)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to parse schema file '%s'. The file must contain valid JSON with 'type', 'schema', and optionally 'properties' fields: %v",
+			filename, err)), nil
+	}
+
+	// Upload the schema
+	err = admin.Schemas().CreateSchemaByPayload(topic, payload)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to upload schema for topic '%s': %v", topic, err)), nil
+	}
+
+	return mcp.NewToolResultText(fmt.Sprintf("Schema uploaded successfully for topic '%s'", topic)), nil
+}
+
+// handleSchemaDelete handles deleting a schema
+func (b *PulsarAdminSchemaLegacyToolBuilder) handleSchemaDelete(admin cmdutils.Client, topic string) (*mcp.CallToolResult, error) {
+	// Delete the schema
+	err := admin.Schemas().DeleteSchema(topic)
+	if err != nil {
+		return mcp.NewToolResultError(fmt.Sprintf("Failed to delete schema for topic '%s': %v", topic, err)), nil
+	}
+
+	return mcp.NewToolResultText(fmt.Sprintf("Schema deleted successfully for topic '%s'", topic)), nil
+}
diff --git a/pkg/mcp/builders/pulsar/schema_test.go b/pkg/mcp/builders/pulsar/schema_test.go
new file mode 100644
index 0000000..c7f49dd
--- /dev/null
+++ b/pkg/mcp/builders/pulsar/schema_test.go
@@ -0,0 +1,143 @@
+// Copyright 2025 StreamNative
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminSchemaToolBuilder(t *testing.T) { + builder := NewPulsarAdminSchemaToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_schema", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-schemas") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-schemas"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + require.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_schema", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-schemas"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + require.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_schema", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-schemas"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminSchemaToolSchema(t *testing.T) { + builder := NewPulsarAdminSchemaToolBuilder() + tool, err := builder.buildSchemaTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_schema", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation", "topic"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{"resource", "operation", "topic", "version", "filename"} + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminSchemaResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminSchemaOperationDesc, operationSchema.Description) + + topicSchema := schema.Properties["topic"] + require.NotNil(t, topicSchema) + assert.Equal(t, pulsarAdminSchemaTopicDesc, topicSchema.Description) + + versionSchema := schema.Properties["version"] + require.NotNil(t, versionSchema) + assert.Equal(t, pulsarAdminSchemaVersionDesc, versionSchema.Description) + + filenameSchema := schema.Properties["filename"] + require.NotNil(t, filenameSchema) + assert.Equal(t, pulsarAdminSchemaFilenameDesc, filenameSchema.Description) +} + +func 
TestPulsarAdminSchemaToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminSchemaToolBuilder() + handler := builder.buildSchemaHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminSchemaInput{ + Resource: "schema", + Operation: "upload", + Topic: "persistent://tenant/ns/topic", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/pulsar/sinks.go b/pkg/mcp/builders/pulsar/sinks.go index a120d11..25a7dbe 100644 --- a/pkg/mcp/builders/pulsar/sinks.go +++ b/pkg/mcp/builders/pulsar/sinks.go @@ -21,13 +21,84 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminSinksInput struct { + Operation string `json:"operation"` + Tenant *string `json:"tenant,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Archive *string `json:"archive,omitempty"` + SinkType *string `json:"sink-type,omitempty"` + Inputs []string `json:"inputs,omitempty"` + TopicsPattern *string `json:"topics-pattern,omitempty"` + SubsName *string `json:"subs-name,omitempty"` + Parallelism *float64 `json:"parallelism,omitempty"` + SinkConfig map[string]any `json:"sink-config,omitempty"` +} + +const ( + pulsarAdminSinksOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all sinks under a specific tenant and namespace\n" + + "- get: Get the configuration of a sink\n" + + "- status: Get the runtime status of a sink (instances, metrics)\n" + + "- create: Deploy a new sink with specified parameters\n" + + "- update: Update the configuration of an existing sink\n" + + "- delete: Delete a sink\n" + + "- start: Start a stopped sink\n" + + "- stop: Stop a running sink\n" + + "- restart: Restart a sink\n" + + "- list-built-in: List all built-in sink connectors available in the system" + pulsarAdminSinksTenantDesc = "The tenant name. Tenants are the primary organizational unit in Pulsar, " + + "providing multi-tenancy and resource isolation. Sinks deployed within a tenant " + + "inherit its permissions and resource quotas. " + + "Required for all operations except 'list-built-in'." + pulsarAdminSinksNamespaceDesc = "The namespace name. Namespaces are logical groupings of topics and sinks " + + "within a tenant. They encapsulate configuration policies and access control. " + + "Sinks in a namespace typically process topics within the same namespace. " + + "Required for all operations except 'list-built-in'." + pulsarAdminSinksNameDesc = "The sink name. Required for all operations except 'list' and 'list-built-in'. " + + "Names should be descriptive of the sink's purpose and must be unique within a namespace. " + + "Sink names are used in metrics, logs, and when addressing the sink via APIs." + pulsarAdminSinksArchiveDesc = "Path to the archive file containing the sink code. Optional for 'create' and 'update' operations. " + + "Can be a local path, NAR file, or a URL accessible to the Pulsar broker. " + + "The archive should contain all dependencies for the sink connector. " + + "Either archive or sink-type must be specified, but not both." 
+ pulsarAdminSinksSinkTypeDesc = "The built-in sink connector type to use. Optional for 'create' and 'update' operations. " + + "Specifies which built-in connector to use, such as 'jdbc', 'elastic-search', 'kafka', etc. " + + "Use 'list-built-in' operation to see available sink types. " + + "Either sink-type or archive must be specified, but not both." + pulsarAdminSinksInputsDesc = "The sink's input topics (array of strings). Optional for 'create' and 'update' operations. " + + "Topics must be specified in the format 'persistent://tenant/namespace/topic'. " + + "Sinks can consume from multiple topics, but they should have compatible schemas. " + + "All input topics should exist before the sink is created. " + + "Either inputs or topics-pattern must be specified." + pulsarAdminSinksTopicsPatternDesc = "TopicsPattern to consume from list of topics that match the pattern. Optional for 'create' and 'update' operations. " + + "Specified as a regular expression, e.g., 'persistent://tenant/namespace/prefix.*'. " + + "This allows the sink to automatically consume from topics that match the pattern, " + + "including topics created after the sink is deployed. " + + "Either topics-pattern or inputs must be specified." + pulsarAdminSinksSubsNameDesc = "Pulsar subscription name for input topic consumer. Optional for 'create' and 'update' operations. " + + "Defines the subscription name used by the sink to consume from input topics. " + + "If not specified, a default subscription name will be generated. " + + "The subscription type used is Shared by default." + pulsarAdminSinksParallelismDesc = "The parallelism factor of the sink. Optional for 'create' and 'update' operations. " + + "Determines how many instances of the sink will run concurrently. " + + "Higher values improve throughput but require more resources. " + + "Default is 1 (single instance). Recommended to align with topic partition count " + + "when consuming from partitioned topics." + pulsarAdminSinksConfigDesc = "User-defined sink config key/values. Optional for 'create' and 'update' operations. " + + "Provides configuration parameters specific to the sink connector being used. " + + "For example, JDBC connection strings, Elasticsearch indices, S3 bucket details, etc. " + + "Specify as a JSON object with configuration properties required by the specific sink type. 
" + + "Example: {\"jdbcUrl\": \"jdbc:postgresql://localhost:5432/database\", \"tableName\": \"events\"}" +) + // PulsarAdminSinksToolBuilder implements the ToolBuilder interface for Pulsar admin sinks // /nolint:revive type PulsarAdminSinksToolBuilder struct { @@ -57,7 +128,7 @@ func NewPulsarAdminSinksToolBuilder() *PulsarAdminSinksToolBuilder { } // BuildTools builds the Pulsar admin sinks tool list -func (b *PulsarAdminSinksToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminSinksToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,11 +140,14 @@ func (b *PulsarAdminSinksToolBuilder) BuildTools(_ context.Context, config build } // Build tools - tool := b.buildSinksTool() + tool, err := b.buildSinksTool() + if err != nil { + return nil, err + } handler := b.buildSinksHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminSinksInput, any]{ Tool: tool, Handler: handler, }, @@ -81,7 +155,12 @@ func (b *PulsarAdminSinksToolBuilder) BuildTools(_ context.Context, config build } // buildSinksTool builds the Pulsar admin sinks MCP tool definition -func (b *PulsarAdminSinksToolBuilder) buildSinksTool() mcp.Tool { +func (b *PulsarAdminSinksToolBuilder) buildSinksTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminSinksInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar Sinks for data movement and integration. " + "Pulsar Sinks are connectors that export data from Pulsar topics to external systems such as databases, " + "storage services, messaging systems, and third-party applications. " + @@ -93,92 +172,20 @@ func (b *PulsarAdminSinksToolBuilder) buildSinksTool() mcp.Tool { "This tool provides complete lifecycle management including deployment, configuration, " + "monitoring, and runtime control. Sinks require proper permissions to access their input topics." - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all sinks under a specific tenant and namespace\n" + - "- get: Get the configuration of a sink\n" + - "- status: Get the runtime status of a sink (instances, metrics)\n" + - "- create: Deploy a new sink with specified parameters\n" + - "- update: Update the configuration of an existing sink\n" + - "- delete: Delete a sink\n" + - "- start: Start a stopped sink\n" + - "- stop: Stop a running sink\n" + - "- restart: Restart a sink\n" + - "- list-built-in: List all built-in sink connectors available in the system" - - return mcp.NewTool("pulsar_admin_sinks", - mcp.WithDescription(toolDesc), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc)), - mcp.WithString("tenant", - mcp.Description("The tenant name. Tenants are the primary organizational unit in Pulsar, "+ - "providing multi-tenancy and resource isolation. Sinks deployed within a tenant "+ - "inherit its permissions and resource quotas. "+ - "Required for all operations except 'list-built-in'.")), - mcp.WithString("namespace", - mcp.Description("The namespace name. Namespaces are logical groupings of topics and sinks "+ - "within a tenant. They encapsulate configuration policies and access control. "+ - "Sinks in a namespace typically process topics within the same namespace. 
"+ - "Required for all operations except 'list-built-in'.")), - mcp.WithString("name", - mcp.Description("The sink name. Required for all operations except 'list' and 'list-built-in'. "+ - "Names should be descriptive of the sink's purpose and must be unique within a namespace. "+ - "Sink names are used in metrics, logs, and when addressing the sink via APIs.")), - mcp.WithString("archive", - mcp.Description("Path to the archive file containing the sink code. Optional for 'create' and 'update' operations. "+ - "Can be a local path, NAR file, or a URL accessible to the Pulsar broker. "+ - "The archive should contain all dependencies for the sink connector. "+ - "Either archive or sink-type must be specified, but not both.")), - mcp.WithString("sink-type", - mcp.Description("The built-in sink connector type to use. Optional for 'create' and 'update' operations. "+ - "Specifies which built-in connector to use, such as 'jdbc', 'elastic-search', 'kafka', etc. "+ - "Use 'list-built-in' operation to see available sink types. "+ - "Either sink-type or archive must be specified, but not both.")), - mcp.WithArray("inputs", - mcp.Description("The sink's input topics (array of strings). Optional for 'create' and 'update' operations. "+ - "Topics must be specified in the format 'persistent://tenant/namespace/topic'. "+ - "Sinks can consume from multiple topics, but they should have compatible schemas. "+ - "All input topics should exist before the sink is created. "+ - "Either inputs or topics-pattern must be specified."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "input topic", - }, - ), - ), - mcp.WithString("topics-pattern", - mcp.Description("TopicsPattern to consume from list of topics that match the pattern. Optional for 'create' and 'update' operations. "+ - "Specified as a regular expression, e.g., 'persistent://tenant/namespace/prefix.*'. "+ - "This allows the sink to automatically consume from topics that match the pattern, "+ - "including topics created after the sink is deployed. "+ - "Either topics-pattern or inputs must be specified.")), - mcp.WithString("subs-name", - mcp.Description("Pulsar subscription name for input topic consumer. Optional for 'create' and 'update' operations. "+ - "Defines the subscription name used by the sink to consume from input topics. "+ - "If not specified, a default subscription name will be generated. "+ - "The subscription type used is Shared by default.")), - mcp.WithNumber("parallelism", - mcp.Description("The parallelism factor of the sink. Optional for 'create' and 'update' operations. "+ - "Determines how many instances of the sink will run concurrently. "+ - "Higher values improve throughput but require more resources. "+ - "Default is 1 (single instance). Recommended to align with topic partition count "+ - "when consuming from partitioned topics.")), - mcp.WithObject("sink-config", - mcp.Description("User-defined sink config key/values. Optional for 'create' and 'update' operations. "+ - "Provides configuration parameters specific to the sink connector being used. "+ - "For example, JDBC connection strings, Elasticsearch indices, S3 bucket details, etc. "+ - "Specify as a JSON object with configuration properties required by the specific sink type. 
"+ - "Example: {\"jdbcUrl\": \"jdbc:postgresql://localhost:5432/database\", \"tableName\": \"events\"}")), - ) + return &sdk.Tool{ + Name: "pulsar_admin_sinks", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildSinksHandler builds the Pulsar admin sinks handler function -func (b *PulsarAdminSinksToolBuilder) buildSinksHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) buildSinksHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminSinksInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminSinksInput) (*sdk.CallToolResult, any, error) { // Extract and validate operation parameter - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'operation': %v", err)), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") } // Check if the operation is valid @@ -188,7 +195,7 @@ func (b *PulsarAdminSinksToolBuilder) buildSinksHandler(readOnly bool) func(cont } if !validOperations[operation] { - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation: '%s'. Supported operations: list, get, status, create, update, delete, start, stop, restart, list-built-in", operation)), nil + return nil, nil, fmt.Errorf("invalid operation: '%s'. supported operations: list, get, status, create, update, delete, start, stop, restart, list-built-in", operation) } // Check write permissions for write operations @@ -198,142 +205,131 @@ func (b *PulsarAdminSinksToolBuilder) buildSinksHandler(readOnly bool) func(cont } if readOnly && writeOperations[operation] { - return mcp.NewToolResultError(fmt.Sprintf("Operation '%s' not allowed in read-only mode. Read-only mode restricts modifications to Pulsar Sinks.", operation)), nil + return nil, nil, fmt.Errorf("operation '%s' not allowed in read-only mode. read-only mode restricts modifications to Pulsar Sinks", operation) } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } admin, err := session.GetAdminV3Client() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get Pulsar client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get Pulsar client: %v", err) } // List built-in sinks doesn't require tenant, namespace or name if operation == "list-built-in" { - return b.handleListBuiltInSinks(ctx, admin) + result, err := b.handleListBuiltInSinks(ctx, admin) + return result, nil, err } // Extract common parameters (all operations except list-built-in require tenant and namespace) - tenant, err := request.RequireString("tenant") + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'tenant': %v. A tenant is required for operation '%s'.", err, operation)), nil + return nil, nil, fmt.Errorf("missing required parameter 'tenant': %v", err) } - namespace, err := request.RequireString("namespace") + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace': %v. 
A namespace is required for operation '%s'.", err, operation)), nil + return nil, nil, fmt.Errorf("missing required parameter 'namespace': %v", err) } - // For all operations except 'list', name is required - var name string + // name is required for all operations except list and list-built-in + name := "" if operation != "list" { - name, err = request.RequireString("name") + name, err = requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for operation '%s': %v. The sink name must be specified for this operation.", operation, err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'name': %v", err) } } - // Handle operations + // Dispatch based on operation switch operation { case "list": - return b.handleSinkList(ctx, admin, tenant, namespace) + result, err := b.handleSinkList(ctx, admin, tenant, namespace) + return result, nil, err case "get": - return b.handleSinkGet(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkGet(ctx, admin, tenant, namespace, name) + return result, nil, err case "status": - return b.handleSinkStatus(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkStatus(ctx, admin, tenant, namespace, name) + return result, nil, err case "create": - return b.handleSinkCreate(ctx, admin, request) + result, err := b.handleSinkCreate(ctx, admin, input, tenant, namespace, name) + return result, nil, err case "update": - return b.handleSinkUpdate(ctx, admin, request) + result, err := b.handleSinkUpdate(ctx, admin, input, tenant, namespace, name) + return result, nil, err case "delete": - return b.handleSinkDelete(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkDelete(ctx, admin, tenant, namespace, name) + return result, nil, err case "start": - return b.handleSinkStart(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkStart(ctx, admin, tenant, namespace, name) + return result, nil, err case "stop": - return b.handleSinkStop(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkStop(ctx, admin, tenant, namespace, name) + return result, nil, err case "restart": - return b.handleSinkRestart(ctx, admin, tenant, namespace, name) + result, err := b.handleSinkRestart(ctx, admin, tenant, namespace, name) + return result, nil, err default: - // This should never happen due to the valid operations check above - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unsupported operation: %s", operation) } } } -// Helper functions - -// handleSinkList handles listing all sinks under a namespace -func (b *PulsarAdminSinksToolBuilder) handleSinkList(_ context.Context, admin cmdutils.Client, tenant, namespace string) (*mcp.CallToolResult, error) { +// handleSinkList handles listing sinks +func (b *PulsarAdminSinksToolBuilder) handleSinkList(_ context.Context, admin cmdutils.Client, tenant, namespace string) (*sdk.CallToolResult, error) { sinks, err := admin.Sinks().ListSinks(tenant, namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list sinks in tenant '%s' namespace '%s': %v. 
Check that the tenant and namespace exist and you have proper permissions.", - tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to list sinks in tenant '%s' namespace '%s': %v", tenant, namespace, err) } // Convert result to JSON string sinksJSON, err := json.Marshal(sinks) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize sink list: %v", err)), nil + return nil, fmt.Errorf("failed to serialize sinks list: %v", err) } - return mcp.NewToolResultText(string(sinksJSON)), nil + return textResult(string(sinksJSON)), nil } -// handleSinkGet handles getting information about a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkGet(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +// handleSinkGet handles getting a sink's details +func (b *PulsarAdminSinksToolBuilder) handleSinkGet(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { sink, err := admin.Sinks().GetSink(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and you have proper permissions.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to get sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and you have the correct permissions", name, tenant, namespace, err) } // Convert result to JSON string sinkJSON, err := json.Marshal(sink) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize sink info: %v", err)), nil + return nil, fmt.Errorf("failed to serialize sink details: %v", err) } - return mcp.NewToolResultText(string(sinkJSON)), nil + return textResult(string(sinkJSON)), nil } // handleSinkStatus handles getting the status of a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkStatus(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleSinkStatus(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { status, err := admin.Sinks().GetSinkStatus(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get status for sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and is properly deployed.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to get status for sink '%s' in tenant '%s' namespace '%s': %v. 
verify the sink exists and is properly deployed", name, tenant, namespace, err) } // Convert result to JSON string statusJSON, err := json.Marshal(status) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize sink status: %v", err)), nil + return nil, fmt.Errorf("failed to serialize sink status: %v", err) } - return mcp.NewToolResultText(string(statusJSON)), nil + return textResult(string(statusJSON)), nil } // handleSinkCreate handles creating a new sink -func (b *PulsarAdminSinksToolBuilder) handleSinkCreate(_ context.Context, admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant: %v", err)), nil - } - - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace: %v", err)), nil - } - - name, err := request.RequireString("name") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get name: %v", err)), nil - } - +func (b *PulsarAdminSinksToolBuilder) handleSinkCreate(_ context.Context, admin cmdutils.Client, input pulsarAdminSinksInput, tenant, namespace, name string) (*sdk.CallToolResult, error) { // Create a new SinkData object sinkData := &utils.SinkData{ Tenant: tenant, @@ -343,68 +339,56 @@ func (b *PulsarAdminSinksToolBuilder) handleSinkCreate(_ context.Context, admin } // Get optional parameters - archive := request.GetString("archive", "") - if archive != "" { + if archive := stringValue(input.Archive); archive != "" { sinkData.Archive = archive } - sinkType := request.GetString("sink-type", "") - if sinkType != "" { + if sinkType := stringValue(input.SinkType); sinkType != "" { sinkData.SinkType = sinkType } - inputsArray := request.GetStringSlice("inputs", []string{}) - if len(inputsArray) > 0 { - sinkData.Inputs = strings.Join(inputsArray, ",") + if len(input.Inputs) > 0 { + sinkData.Inputs = strings.Join(input.Inputs, ",") } - topicsPattern := request.GetString("topics-pattern", "") - if topicsPattern != "" { + if topicsPattern := stringValue(input.TopicsPattern); topicsPattern != "" { sinkData.TopicsPattern = topicsPattern } - subsName := request.GetString("subs-name", "") - if subsName != "" { + if subsName := stringValue(input.SubsName); subsName != "" { sinkData.SubsName = subsName } - parallelismFloat := request.GetFloat("parallelism", 1) - if parallelismFloat >= 0 { - sinkData.Parallelism = int(parallelismFloat) + if input.Parallelism != nil && *input.Parallelism >= 0 { + sinkData.Parallelism = int(*input.Parallelism) } // Get sink config if available - var sinkConfigMap map[string]interface{} - sinkConfigObj, ok := request.GetArguments()["sink-config"] - if ok && sinkConfigObj != nil { - if configMap, isMap := sinkConfigObj.(map[string]interface{}); isMap { - sinkConfigMap = configMap - // Convert to JSON string - sinkConfigJSON, err := json.Marshal(sinkConfigMap) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to marshal sink-config: %v. Ensure the sink configuration is a valid JSON object.", err)), nil - } - sinkData.SinkConfigString = string(sinkConfigJSON) + if input.SinkConfig != nil { + sinkConfigJSON, err := json.Marshal(input.SinkConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal sink-config: %v. 
ensure the sink configuration is a valid JSON object", err) } + sinkData.SinkConfigString = string(sinkConfigJSON) } // Validate inputs if sinkData.Archive == "" && sinkData.SinkType == "" { - return mcp.NewToolResultError("Missing required parameter: Either 'archive' or 'sink-type' must be specified for sink creation. Use 'archive' for custom connectors or 'sink-type' for built-in connectors."), nil + return nil, fmt.Errorf("missing required parameter: either 'archive' or 'sink-type' must be specified for sink creation. use 'archive' for custom connectors or 'sink-type' for built-in connectors") } if sinkData.Archive != "" && sinkData.SinkType != "" { - return mcp.NewToolResultError("Invalid parameters: Cannot specify both 'archive' and 'sink-type'. Use only one of these parameters based on your connector type."), nil + return nil, fmt.Errorf("invalid parameters: cannot specify both 'archive' and 'sink-type'. use only one of these parameters based on your connector type") } if sinkData.Inputs == "" && sinkData.TopicsPattern == "" { - return mcp.NewToolResultError("Missing required parameter: Either 'inputs' or 'topics-pattern' must be specified. The sink needs a source of data to consume from Pulsar."), nil + return nil, fmt.Errorf("missing required parameter: either 'inputs' or 'topics-pattern' must be specified. the sink needs a source of data to consume from Pulsar") } // Process the arguments - err = b.processArguments(sinkData) + err := b.processArguments(sinkData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process arguments: %v", err)), nil + return nil, fmt.Errorf("failed to process arguments: %v", err) } // Create the sink @@ -415,31 +399,14 @@ func (b *PulsarAdminSinksToolBuilder) handleSinkCreate(_ context.Context, admin } if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create sink '%s' in tenant '%s' namespace '%s': %v. Verify all parameters are correct and required resources exist.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to create sink '%s' in tenant '%s' namespace '%s': %v. verify all parameters are correct and required resources exist", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Created sink '%s' successfully in tenant '%s' namespace '%s'. The sink will start consuming from its input topics and writing to the configured destination.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Created sink '%s' successfully in tenant '%s' namespace '%s'. 
The sink will start consuming from its input topics and writing to the configured destination.", name, tenant, namespace)), nil } // handleSinkUpdate handles updating an existing sink -func (b *PulsarAdminSinksToolBuilder) handleSinkUpdate(_ context.Context, admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant: %v", err)), nil - } - - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace: %v", err)), nil - } - - name, err := request.RequireString("name") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get name: %v", err)), nil - } - +func (b *PulsarAdminSinksToolBuilder) handleSinkUpdate(_ context.Context, admin cmdutils.Client, input pulsarAdminSinksInput, tenant, namespace, name string) (*sdk.CallToolResult, error) { // Create a new SinkData object sinkData := &utils.SinkData{ Tenant: tenant, @@ -449,60 +416,48 @@ func (b *PulsarAdminSinksToolBuilder) handleSinkUpdate(_ context.Context, admin } // Get optional parameters - archive := request.GetString("archive", "") - if archive != "" { + if archive := stringValue(input.Archive); archive != "" { sinkData.Archive = archive } - sinkType := request.GetString("sink-type", "") - if sinkType != "" { + if sinkType := stringValue(input.SinkType); sinkType != "" { sinkData.SinkType = sinkType } - inputsArray := request.GetStringSlice("inputs", []string{}) - if len(inputsArray) > 0 { - sinkData.Inputs = strings.Join(inputsArray, ",") + if len(input.Inputs) > 0 { + sinkData.Inputs = strings.Join(input.Inputs, ",") } - topicsPattern := request.GetString("topics-pattern", "") - if topicsPattern != "" { + if topicsPattern := stringValue(input.TopicsPattern); topicsPattern != "" { sinkData.TopicsPattern = topicsPattern } - subsName := request.GetString("subs-name", "") - if subsName != "" { + if subsName := stringValue(input.SubsName); subsName != "" { sinkData.SubsName = subsName } - parallelismFloat := request.GetFloat("parallelism", 1) - if parallelismFloat >= 0 { - sinkData.Parallelism = int(parallelismFloat) + if input.Parallelism != nil && *input.Parallelism >= 0 { + sinkData.Parallelism = int(*input.Parallelism) } // Get sink config if available - var sinkConfigMap map[string]interface{} - sinkConfigObj, ok := request.GetArguments()["sink-config"] - if ok && sinkConfigObj != nil { - if configMap, isMap := sinkConfigObj.(map[string]interface{}); isMap { - sinkConfigMap = configMap - // Convert to JSON string - sinkConfigJSON, err := json.Marshal(sinkConfigMap) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to marshal sink-config: %v. Ensure the sink configuration is a valid JSON object.", err)), nil - } - sinkData.SinkConfigString = string(sinkConfigJSON) + if input.SinkConfig != nil { + sinkConfigJSON, err := json.Marshal(input.SinkConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal sink-config: %v. ensure the sink configuration is a valid JSON object", err) } + sinkData.SinkConfigString = string(sinkConfigJSON) } // Validate inputs if both are specified if sinkData.Archive != "" && sinkData.SinkType != "" { - return mcp.NewToolResultError("Invalid parameters: Cannot specify both 'archive' and 'sink-type'. 
Use only one of these parameters based on your connector type."), nil + return nil, fmt.Errorf("invalid parameters: cannot specify both 'archive' and 'sink-type'. use only one of these parameters based on your connector type") } // Process the arguments - err = b.processArguments(sinkData) + err := b.processArguments(sinkData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process arguments: %v", err)), nil + return nil, fmt.Errorf("failed to process arguments: %v", err) } // Create update options @@ -518,76 +473,66 @@ func (b *PulsarAdminSinksToolBuilder) handleSinkUpdate(_ context.Context, admin } if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and all parameters are valid.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to update sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and all parameters are valid", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Updated sink '%s' successfully in tenant '%s' namespace '%s'. The sink may need to be restarted to apply all changes.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Updated sink '%s' successfully in tenant '%s' namespace '%s'. The sink may need to be restarted to apply all changes.", name, tenant, namespace)), nil } // handleSinkDelete handles deleting a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkDelete(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleSinkDelete(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sinks().DeleteSink(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and you have deletion permissions.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to delete sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and you have deletion permissions", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Deleted sink '%s' successfully from tenant '%s' namespace '%s'. All running instances have been terminated.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Deleted sink '%s' successfully from tenant '%s' namespace '%s'. All running instances have been terminated.", name, tenant, namespace)), nil } // handleSinkStart handles starting a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkStart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleSinkStart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sinks().StartSink(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to start sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and is not already running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to start sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and is not already running", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Started sink '%s' successfully in tenant '%s' namespace '%s'. 
The sink will begin consuming from its input topics.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Started sink '%s' successfully in tenant '%s' namespace '%s'. The sink will begin consuming from its input topics.", name, tenant, namespace)), nil } // handleSinkStop handles stopping a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkStop(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleSinkStop(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sinks().StopSink(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to stop sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and is currently running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to stop sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and is currently running", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Stopped sink '%s' successfully in tenant '%s' namespace '%s'. The sink will no longer consume messages until restarted.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Stopped sink '%s' successfully in tenant '%s' namespace '%s'. The sink will no longer consume data until restarted.", name, tenant, namespace)), nil } // handleSinkRestart handles restarting a sink -func (b *PulsarAdminSinksToolBuilder) handleSinkRestart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleSinkRestart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sinks().RestartSink(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to restart sink '%s' in tenant '%s' namespace '%s': %v. Verify the sink exists and is properly deployed.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to restart sink '%s' in tenant '%s' namespace '%s': %v. verify the sink exists and is properly deployed", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Restarted sink '%s' successfully in tenant '%s' namespace '%s'. All sink instances have been restarted.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Restarted sink '%s' successfully in tenant '%s' namespace '%s'. All sink instances have been restarted.", name, tenant, namespace)), nil } // handleListBuiltInSinks handles listing all built-in sink connectors -func (b *PulsarAdminSinksToolBuilder) handleListBuiltInSinks(_ context.Context, admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSinksToolBuilder) handleListBuiltInSinks(_ context.Context, admin cmdutils.Client) (*sdk.CallToolResult, error) { sinks, err := admin.Sinks().GetBuiltInSinks() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list built-in sinks: %v. There might be an issue connecting to the Pulsar cluster.", err)), nil + return nil, fmt.Errorf("failed to list built-in sinks: %v. 
there might be an issue connecting to the Pulsar cluster", err) } // Convert result to JSON string sinksJSON, err := json.Marshal(sinks) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize built-in sinks: %v", err)), nil + return nil, fmt.Errorf("failed to serialize built-in sinks: %v", err) } - return mcp.NewToolResultText(string(sinksJSON)), nil + return textResult(string(sinksJSON)), nil } // processArguments is a simplified version of the pulsarctl function to process sink arguments @@ -601,33 +546,20 @@ func (b *PulsarAdminSinksToolBuilder) processArguments(sinkData *utils.SinkData) sinkData.SinkConf.Tenant = sinkData.Tenant sinkData.SinkConf.Namespace = sinkData.Namespace sinkData.SinkConf.Name = sinkData.Name - - // Set inputs if provided if sinkData.Inputs != "" { - inputTopics := strings.Split(sinkData.Inputs, ",") - sinkData.SinkConf.Inputs = inputTopics + sinkData.SinkConf.Inputs = strings.Split(sinkData.Inputs, ",") } - // Set topics pattern if provided - if sinkData.TopicsPattern != "" { - sinkData.SinkConf.TopicsPattern = &sinkData.TopicsPattern - } - - // Set subscription name if provided if sinkData.SubsName != "" { sinkData.SinkConf.SourceSubscriptionName = sinkData.SubsName } - // Set parallelism if provided - if sinkData.Parallelism != 0 { - sinkData.SinkConf.Parallelism = sinkData.Parallelism - } else if sinkData.SinkConf.Parallelism <= 0 { - sinkData.SinkConf.Parallelism = 1 + if sinkData.TopicsPattern != "" { + sinkData.SinkConf.TopicsPattern = &sinkData.TopicsPattern } - // Handle archive and sink-type - if sinkData.Archive != "" && sinkData.SinkType != "" { - return fmt.Errorf("cannot specify both archive and sink-type") + if sinkData.Parallelism != 0 { + sinkData.SinkConf.Parallelism = sinkData.Parallelism } if sinkData.Archive != "" { @@ -635,11 +567,9 @@ func (b *PulsarAdminSinksToolBuilder) processArguments(sinkData *utils.SinkData) } if sinkData.SinkType != "" { - // In a real implementation, we would validate the sink type here sinkData.SinkConf.Archive = sinkData.SinkType } - // Parse sink config if provided if sinkData.SinkConfigString != "" { var configs map[string]interface{} if err := json.Unmarshal([]byte(sinkData.SinkConfigString), &configs); err != nil { @@ -663,8 +593,8 @@ func (b *PulsarAdminSinksToolBuilder) isPackageURLSupported(archive string) bool "http://", "https://", "file://", - "function://", // Pulsar function package URL - "sink://", // Pulsar sink package URL + "sink://", // Pulsar sink package URL + "function://", } for _, scheme := range supportedSchemes { @@ -676,3 +606,36 @@ func (b *PulsarAdminSinksToolBuilder) isPackageURLSupported(archive string) bool // Also check if it's a local file path (not a URL) return !strings.Contains(archive, "://") } + +func buildPulsarAdminSinksInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminSinksInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "operation", pulsarAdminSinksOperationDesc) + setSchemaDescription(schema, "tenant", pulsarAdminSinksTenantDesc) + setSchemaDescription(schema, "namespace", pulsarAdminSinksNamespaceDesc) + setSchemaDescription(schema, "name", pulsarAdminSinksNameDesc) + setSchemaDescription(schema, "archive", pulsarAdminSinksArchiveDesc) + 
setSchemaDescription(schema, "sink-type", pulsarAdminSinksSinkTypeDesc) + setSchemaDescription(schema, "inputs", pulsarAdminSinksInputsDesc) + setSchemaDescription(schema, "topics-pattern", pulsarAdminSinksTopicsPatternDesc) + setSchemaDescription(schema, "subs-name", pulsarAdminSinksSubsNameDesc) + setSchemaDescription(schema, "parallelism", pulsarAdminSinksParallelismDesc) + setSchemaDescription(schema, "sink-config", pulsarAdminSinksConfigDesc) + + if inputsSchema := schema.Properties["inputs"]; inputsSchema != nil && inputsSchema.Items != nil { + inputsSchema.Items.Description = "input topic" + } + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/sinks_legacy.go b/pkg/mcp/builders/pulsar/sinks_legacy.go new file mode 100644 index 0000000..c7eeb54 --- /dev/null +++ b/pkg/mcp/builders/pulsar/sinks_legacy.go @@ -0,0 +1,124 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminSinksLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin sinks. +// /nolint:revive +type PulsarAdminSinksLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminSinksLegacyToolBuilder creates a new Pulsar admin sinks legacy tool builder instance. +func NewPulsarAdminSinksLegacyToolBuilder() *PulsarAdminSinksLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_sinks", + Version: "1.0.0", + Description: "Pulsar admin sinks management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "sinks"}, + } + + features := []string{ + "pulsar-admin-sinks", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminSinksLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin sinks legacy tool list. +func (b *PulsarAdminSinksLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildSinksTool() + if err != nil { + return nil, err + } + handler := b.buildSinksHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminSinksLegacyToolBuilder) buildSinksTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminSinksInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Manage Apache Pulsar Sinks for data movement and integration. 
" + + "Pulsar Sinks are connectors that export data from Pulsar topics to external systems such as databases, " + + "storage services, messaging systems, and third-party applications. " + + "Sinks consume messages from one or more Pulsar topics, transform the data if needed, " + + "and write it to external systems in a format compatible with the target destination. " + + "Built-in sink connectors are available for common systems like Kafka, JDBC, Elasticsearch, and cloud storage. " + + "Sinks follow the tenant/namespace/name hierarchy for organization and access control, " + + "can scale through parallelism configuration, and support configurable subscription types. " + + "This tool provides complete lifecycle management including deployment, configuration, " + + "monitoring, and runtime control. Sinks require proper permissions to access their input topics." + + return mcp.Tool{ + Name: "pulsar_admin_sinks", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminSinksLegacyToolBuilder) buildSinksHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminSinksToolBuilder() + sdkHandler := sdkBuilder.buildSinksHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminSinksInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/sources.go b/pkg/mcp/builders/pulsar/sources.go index ca9d148..d82ccb2 100644 --- a/pkg/mcp/builders/pulsar/sources.go +++ b/pkg/mcp/builders/pulsar/sources.go @@ -21,13 +21,93 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminSourcesInput struct { + Operation string `json:"operation"` + Tenant *string `json:"tenant,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` + Archive *string `json:"archive,omitempty"` + SourceType *string `json:"source-type,omitempty"` + DestinationTopicName *string `json:"destination-topic-name,omitempty"` + DeserializationClass *string `json:"deserialization-classname,omitempty"` + SchemaType *string `json:"schema-type,omitempty"` + ClassName *string `json:"classname,omitempty"` + ProcessingGuarantees *string `json:"processing-guarantees,omitempty"` + Parallelism *float64 `json:"parallelism,omitempty"` + SourceConfig map[string]any `json:"source-config,omitempty"` +} + +const ( + pulsarAdminSourcesOperationDesc = "Operation to perform. 
Available operations:\n" + + "- list: List all sources under a specific tenant and namespace\n" + + "- get: Get the configuration of a source\n" + + "- status: Get the runtime status of a source (instances, metrics)\n" + + "- create: Deploy a new source with specified parameters\n" + + "- update: Update the configuration of an existing source\n" + + "- delete: Delete a source\n" + + "- start: Start a stopped source\n" + + "- stop: Stop a running source\n" + + "- restart: Restart a source\n" + + "- list-built-in: List all built-in source connectors available in the system" + pulsarAdminSourcesTenantDesc = "The tenant name. Tenants are the primary organizational unit in Pulsar, " + + "providing multi-tenancy and resource isolation. Sources deployed within a tenant " + + "inherit its permissions and resource quotas. " + + "Required for all operations except 'list-built-in'." + pulsarAdminSourcesNamespaceDesc = "The namespace name. Namespaces are logical groupings of topics and sources " + + "within a tenant. They encapsulate configuration policies and access control. " + + "Sources in a namespace typically publish to topics within the same namespace. " + + "Required for all operations except 'list-built-in'." + pulsarAdminSourcesNameDesc = "The source name. Required for all operations except 'list' and 'list-built-in'. " + + "Names should be descriptive of the source's purpose and must be unique within a namespace. " + + "Source names are used in metrics, logs, and when addressing the source via APIs." + pulsarAdminSourcesArchiveDesc = "Path to the archive file containing the source code. Optional for 'create' and 'update' operations. " + + "Can be a local path, NAR file, or a URL accessible to the Pulsar broker. " + + "The archive should contain all dependencies for the source connector. " + + "Either archive or source-type must be specified, but not both." + pulsarAdminSourcesSourceTypeDesc = "The built-in source connector type to use. Optional for 'create' and 'update' operations. " + + "Specifies which built-in connector to use, such as 'kafka', 'jdbc', 'file', etc. " + + "Use 'list-built-in' operation to see available source types. " + + "Either source-type or archive must be specified, but not both." + pulsarAdminSourcesDestinationTopicDesc = "The Pulsar topic to which data is published. Required for 'create' operation, optional for 'update'. " + + "Specified in the format 'persistent://tenant/namespace/topic'. " + + "This is the topic where the source will send the data it extracts from the external system. " + + "The topic will be automatically created if it doesn't exist." + pulsarAdminSourcesDeserializationClassDesc = "The SerDe (Serialization/Deserialization) classname for the source. Optional for 'create' and 'update'. " + + "Specifies how to convert data from the external system into Pulsar messages. " + + "Common SerDe classes include AvroSchema, JsonSchema, StringSchema, etc. " + + "If not specified, the source will use the default SerDe for the connector type." + pulsarAdminSourcesSchemaTypeDesc = "The schema type to be used to encode messages emitted from the source. Optional for 'create' and 'update'. " + + "Available schema types include: 'avro', 'json', 'protobuf', 'string', etc. " + + "Schema types ensure data compatibility and enable schema evolution. " + + "The schema type should match the format of data being ingested." + pulsarAdminSourcesClassNameDesc = "The source's class name if archive is a file-url-path (file://...). Optional for 'create' and 'update'. 
" + + "This specifies the fully qualified class name that implements the source connector. " + + "Only needed when using a custom source implementation in a JAR file. " + + "Built-in connectors don't require this parameter." + pulsarAdminSourcesProcessingGuaranteesDesc = "The processing guarantees (delivery semantics) applied to the source. Optional for 'create' and 'update'. " + + "Available options: 'atleast_once', 'atmost_once', 'effectively_once'. " + + "Controls how data is delivered in failure scenarios. " + + "'atleast_once' is the most common and ensures no data loss but may have duplicates. " + + "Default is 'atleast_once'." + pulsarAdminSourcesParallelismDesc = "The parallelism factor of the source. Optional for 'create' and 'update' operations. " + + "Determines how many instances of the source will run concurrently. " + + "Higher values improve throughput but require more resources. " + + "Default is 1 (single instance). Recommended to align with both source capacity " + + "and destination topic partition count." + pulsarAdminSourcesConfigDesc = "User-defined source config key/values. Optional for 'create' and 'update' operations. " + + "Provides configuration parameters specific to the source connector being used. " + + "For example, database connection details, Kafka bootstrap servers, credentials, etc. " + + "Specify as a JSON object with configuration properties required by the specific source type. " + + "Example: {\"topic\": \"external-kafka-topic\", \"bootstrapServers\": \"kafka:9092\"}" +) + // PulsarAdminSourcesToolBuilder implements the ToolBuilder interface for Pulsar admin sources // /nolint:revive type PulsarAdminSourcesToolBuilder struct { @@ -57,7 +137,7 @@ func NewPulsarAdminSourcesToolBuilder() *PulsarAdminSourcesToolBuilder { } // BuildTools builds the Pulsar admin sources tool list -func (b *PulsarAdminSourcesToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminSourcesToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -69,11 +149,14 @@ func (b *PulsarAdminSourcesToolBuilder) BuildTools(_ context.Context, config bui } // Build tools - tool := b.buildSourcesTool() + tool, err := b.buildSourcesTool() + if err != nil { + return nil, err + } handler := b.buildSourcesHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminSourcesInput, any]{ Tool: tool, Handler: handler, }, @@ -81,7 +164,12 @@ func (b *PulsarAdminSourcesToolBuilder) BuildTools(_ context.Context, config bui } // buildSourcesTool builds the Pulsar admin sources MCP tool definition -func (b *PulsarAdminSourcesToolBuilder) buildSourcesTool() mcp.Tool { +func (b *PulsarAdminSourcesToolBuilder) buildSourcesTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminSourcesInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar Sources for data ingestion and integration. " + "Pulsar Sources are connectors that import data from external systems into Pulsar topics. 
" + "Sources connect to external systems such as databases, messaging platforms, storage services, " + @@ -92,94 +180,20 @@ func (b *PulsarAdminSourcesToolBuilder) buildSourcesTool() mcp.Tool { "This tool provides complete lifecycle management including deployment, configuration, " + "monitoring, and runtime control. Sources use schema types to ensure data compatibility." - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all sources under a specific tenant and namespace\n" + - "- get: Get the configuration of a source\n" + - "- status: Get the runtime status of a source (instances, metrics)\n" + - "- create: Deploy a new source with specified parameters\n" + - "- update: Update the configuration of an existing source\n" + - "- delete: Delete a source\n" + - "- start: Start a stopped source\n" + - "- stop: Stop a running source\n" + - "- restart: Restart a source\n" + - "- list-built-in: List all built-in source connectors available in the system" - - return mcp.NewTool("pulsar_admin_sources", - mcp.WithDescription(toolDesc), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc)), - mcp.WithString("tenant", - mcp.Description("The tenant name. Tenants are the primary organizational unit in Pulsar, "+ - "providing multi-tenancy and resource isolation. Sources deployed within a tenant "+ - "inherit its permissions and resource quotas. "+ - "Required for all operations except 'list-built-in'.")), - mcp.WithString("namespace", - mcp.Description("The namespace name. Namespaces are logical groupings of topics and sources "+ - "within a tenant. They encapsulate configuration policies and access control. "+ - "Sources in a namespace typically publish to topics within the same namespace. "+ - "Required for all operations except 'list-built-in'.")), - mcp.WithString("name", - mcp.Description("The source name. Required for all operations except 'list' and 'list-built-in'. "+ - "Names should be descriptive of the source's purpose and must be unique within a namespace. "+ - "Source names are used in metrics, logs, and when addressing the source via APIs.")), - mcp.WithString("archive", - mcp.Description("Path to the archive file containing the source code. Optional for 'create' and 'update' operations. "+ - "Can be a local path, NAR file, or a URL accessible to the Pulsar broker. "+ - "The archive should contain all dependencies for the source connector. "+ - "Either archive or source-type must be specified, but not both.")), - mcp.WithString("source-type", - mcp.Description("The built-in source connector type to use. Optional for 'create' and 'update' operations. "+ - "Specifies which built-in connector to use, such as 'kafka', 'jdbc', 'file', etc. "+ - "Use 'list-built-in' operation to see available source types. "+ - "Either source-type or archive must be specified, but not both.")), - mcp.WithString("destination-topic-name", - mcp.Description("The Pulsar topic to which data is published. Required for 'create' operation, optional for 'update'. "+ - "Specified in the format 'persistent://tenant/namespace/topic'. "+ - "This is the topic where the source will send the data it extracts from the external system. "+ - "The topic will be automatically created if it doesn't exist.")), - mcp.WithString("deserialization-classname", - mcp.Description("The SerDe (Serialization/Deserialization) classname for the source. Optional for 'create' and 'update'. "+ - "Specifies how to convert data from the external system into Pulsar messages. 
"+ - "Common SerDe classes include AvroSchema, JsonSchema, StringSchema, etc. "+ - "If not specified, the source will use the default SerDe for the connector type.")), - mcp.WithString("schema-type", - mcp.Description("The schema type to be used to encode messages emitted from the source. Optional for 'create' and 'update'. "+ - "Available schema types include: 'avro', 'json', 'protobuf', 'string', etc. "+ - "Schema types ensure data compatibility and enable schema evolution. "+ - "The schema type should match the format of data being ingested.")), - mcp.WithString("classname", - mcp.Description("The source's class name if archive is a file-url-path (file://...). Optional for 'create' and 'update'. "+ - "This specifies the fully qualified class name that implements the source connector. "+ - "Only needed when using a custom source implementation in a JAR file. "+ - "Built-in connectors don't require this parameter.")), - mcp.WithString("processing-guarantees", - mcp.Description("The processing guarantees (delivery semantics) applied to the source. Optional for 'create' and 'update'. "+ - "Available options: 'atleast_once', 'atmost_once', 'effectively_once'. "+ - "Controls how data is delivered in failure scenarios. "+ - "'atleast_once' is the most common and ensures no data loss but may have duplicates. "+ - "Default is 'atleast_once'.")), - mcp.WithNumber("parallelism", - mcp.Description("The parallelism factor of the source. Optional for 'create' and 'update' operations. "+ - "Determines how many instances of the source will run concurrently. "+ - "Higher values improve throughput but require more resources. "+ - "Default is 1 (single instance). Recommended to align with both source capacity "+ - "and destination topic partition count.")), - mcp.WithObject("source-config", - mcp.Description("User-defined source config key/values. Optional for 'create' and 'update' operations. "+ - "Provides configuration parameters specific to the source connector being used. "+ - "For example, database connection details, Kafka bootstrap servers, credentials, etc. "+ - "Specify as a JSON object with configuration properties required by the specific source type. 
"+ - "Example: {\"topic\": \"external-kafka-topic\", \"bootstrapServers\": \"kafka:9092\"}")), - ) + return &sdk.Tool{ + Name: "pulsar_admin_sources", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildSourcesHandler builds the Pulsar admin sources handler function -func (b *PulsarAdminSourcesToolBuilder) buildSourcesHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) buildSourcesHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminSourcesInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminSourcesInput) (*sdk.CallToolResult, any, error) { // Extract and validate operation parameter - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'operation': %v", err)), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") } // Check if the operation is valid @@ -189,7 +203,7 @@ func (b *PulsarAdminSourcesToolBuilder) buildSourcesHandler(readOnly bool) func( } if !validOperations[operation] { - return mcp.NewToolResultError(fmt.Sprintf("Invalid operation: '%s'. Supported operations: list, get, status, create, update, delete, start, stop, restart, list-built-in", operation)), nil + return nil, nil, fmt.Errorf("invalid operation: '%s'. supported operations: list, get, status, create, update, delete, start, stop, restart, list-built-in", operation) } // Check write permissions for write operations @@ -199,142 +213,131 @@ func (b *PulsarAdminSourcesToolBuilder) buildSourcesHandler(readOnly bool) func( } if readOnly && writeOperations[operation] { - return mcp.NewToolResultError(fmt.Sprintf("Operation '%s' not allowed in read-only mode. Read-only mode restricts modifications to Pulsar Sources.", operation)), nil + return nil, nil, fmt.Errorf("operation '%s' not allowed in read-only mode. read-only mode restricts modifications to Pulsar Sources", operation) } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } admin, err := session.GetAdminV3Client() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get Pulsar client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get Pulsar client: %v", err) } // List built-in sources doesn't require tenant, namespace or name if operation == "list-built-in" { - return b.handleListBuiltInSources(ctx, admin) + result, err := b.handleListBuiltInSources(ctx, admin) + return result, nil, err } // Extract common parameters (all operations except list-built-in require tenant and namespace) - tenant, err := request.RequireString("tenant") + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'tenant': %v. A tenant is required for operation '%s'.", err, operation)), nil + return nil, nil, fmt.Errorf("missing required parameter 'tenant': %v", err) } - namespace, err := request.RequireString("namespace") + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace': %v. 
A namespace is required for operation '%s'.", err, operation)), nil + return nil, nil, fmt.Errorf("missing required parameter 'namespace': %v", err) } - // For all operations except 'list', name is required - var name string + // name is required for all operations except list and list-built-in + name := "" if operation != "list" { - name, err = request.RequireString("name") + name, err = requireString(input.Name, "name") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'name' for operation '%s': %v. The source name must be specified for this operation.", operation, err)), nil + return nil, nil, fmt.Errorf("missing required parameter 'name': %v", err) } } - // Handle operations + // Dispatch based on operation switch operation { case "list": - return b.handleSourceList(ctx, admin, tenant, namespace) + result, err := b.handleSourceList(ctx, admin, tenant, namespace) + return result, nil, err case "get": - return b.handleSourceGet(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceGet(ctx, admin, tenant, namespace, name) + return result, nil, err case "status": - return b.handleSourceStatus(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceStatus(ctx, admin, tenant, namespace, name) + return result, nil, err case "create": - return b.handleSourceCreate(ctx, admin, request) + result, err := b.handleSourceCreate(ctx, admin, input, tenant, namespace, name) + return result, nil, err case "update": - return b.handleSourceUpdate(ctx, admin, request) + result, err := b.handleSourceUpdate(ctx, admin, input, tenant, namespace, name) + return result, nil, err case "delete": - return b.handleSourceDelete(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceDelete(ctx, admin, tenant, namespace, name) + return result, nil, err case "start": - return b.handleSourceStart(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceStart(ctx, admin, tenant, namespace, name) + return result, nil, err case "stop": - return b.handleSourceStop(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceStop(ctx, admin, tenant, namespace, name) + return result, nil, err case "restart": - return b.handleSourceRestart(ctx, admin, tenant, namespace, name) + result, err := b.handleSourceRestart(ctx, admin, tenant, namespace, name) + return result, nil, err default: - // This should never happen due to the valid operations check above - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unsupported operation: %s", operation) } } } -// Helper functions - -// handleSourceList handles listing all sources under a namespace -func (b *PulsarAdminSourcesToolBuilder) handleSourceList(_ context.Context, admin cmdutils.Client, tenant, namespace string) (*mcp.CallToolResult, error) { +// handleSourceList handles listing sources +func (b *PulsarAdminSourcesToolBuilder) handleSourceList(_ context.Context, admin cmdutils.Client, tenant, namespace string) (*sdk.CallToolResult, error) { sources, err := admin.Sources().ListSources(tenant, namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list sources in tenant '%s' namespace '%s': %v. 
Check that the tenant and namespace exist and you have proper permissions.", - tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to list sources in tenant '%s' namespace '%s': %v", tenant, namespace, err) } // Convert result to JSON string sourcesJSON, err := json.Marshal(sources) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize source list: %v", err)), nil + return nil, fmt.Errorf("failed to serialize sources list: %v", err) } - return mcp.NewToolResultText(string(sourcesJSON)), nil + return textResult(string(sourcesJSON)), nil } -// handleSourceGet handles getting information about a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceGet(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +// handleSourceGet handles getting a source's details +func (b *PulsarAdminSourcesToolBuilder) handleSourceGet(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { source, err := admin.Sources().GetSource(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and you have proper permissions.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to get source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and you have the correct permissions", name, tenant, namespace, err) } // Convert result to JSON string sourceJSON, err := json.Marshal(source) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize source info: %v", err)), nil + return nil, fmt.Errorf("failed to serialize source details: %v", err) } - return mcp.NewToolResultText(string(sourceJSON)), nil + return textResult(string(sourceJSON)), nil } // handleSourceStatus handles getting the status of a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceStatus(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleSourceStatus(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { status, err := admin.Sources().GetSourceStatus(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get status for source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and is properly deployed.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to get status for source '%s' in tenant '%s' namespace '%s': %v. 
verify the source exists and is properly deployed", name, tenant, namespace, err) } // Convert result to JSON string statusJSON, err := json.Marshal(status) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize source status: %v", err)), nil + return nil, fmt.Errorf("failed to serialize source status: %v", err) } - return mcp.NewToolResultText(string(statusJSON)), nil + return textResult(string(statusJSON)), nil } // handleSourceCreate handles creating a new source -func (b *PulsarAdminSourcesToolBuilder) handleSourceCreate(_ context.Context, admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant: %v", err)), nil - } - - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace: %v", err)), nil - } - - name, err := request.RequireString("name") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get name: %v", err)), nil - } - +func (b *PulsarAdminSourcesToolBuilder) handleSourceCreate(_ context.Context, admin cmdutils.Client, input pulsarAdminSourcesInput, tenant, namespace, name string) (*sdk.CallToolResult, error) { // Create a new SourceData object sourceData := &utils.SourceData{ Tenant: tenant, @@ -344,78 +347,64 @@ func (b *PulsarAdminSourcesToolBuilder) handleSourceCreate(_ context.Context, ad } // Get optional parameters - archive := request.GetString("archive", "") - if archive != "" { + if archive := stringValue(input.Archive); archive != "" { sourceData.Archive = archive } - sourceType := request.GetString("source-type", "") - if sourceType != "" { + if sourceType := stringValue(input.SourceType); sourceType != "" { sourceData.SourceType = sourceType } - destTopic := request.GetString("destination-topic-name", "") - if destTopic != "" { + if destTopic := stringValue(input.DestinationTopicName); destTopic != "" { sourceData.DestinationTopicName = destTopic } - deserializationClassName := request.GetString("deserialization-classname", "") - if deserializationClassName != "" { + if deserializationClassName := stringValue(input.DeserializationClass); deserializationClassName != "" { sourceData.DeserializationClassName = deserializationClassName } - schemaType := request.GetString("schema-type", "") - if schemaType != "" { + if schemaType := stringValue(input.SchemaType); schemaType != "" { sourceData.SchemaType = schemaType } - className := request.GetString("classname", "") - if className != "" { + if className := stringValue(input.ClassName); className != "" { sourceData.ClassName = className } - processingGuarantees := request.GetString("processing-guarantees", "") - if processingGuarantees != "" { + if processingGuarantees := stringValue(input.ProcessingGuarantees); processingGuarantees != "" { sourceData.ProcessingGuarantees = processingGuarantees } - parallelismFloat := request.GetFloat("parallelism", 1) - if parallelismFloat >= 0 { - sourceData.Parallelism = int(parallelismFloat) + if input.Parallelism != nil && *input.Parallelism >= 0 { + sourceData.Parallelism = int(*input.Parallelism) } // Get source config if available - var sourceConfigMap map[string]interface{} - sourceConfigObj, ok := request.GetArguments()["source-config"] - if ok && sourceConfigObj != nil { - if configMap, isMap := sourceConfigObj.(map[string]interface{}); isMap { - sourceConfigMap = configMap - // Convert to JSON 
string - sourceConfigJSON, err := json.Marshal(sourceConfigMap) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to marshal source-config: %v. Ensure the source configuration is a valid JSON object.", err)), nil - } - sourceData.SourceConfigString = string(sourceConfigJSON) + if input.SourceConfig != nil { + sourceConfigJSON, err := json.Marshal(input.SourceConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal source-config: %v. ensure the source configuration is a valid JSON object", err) } + sourceData.SourceConfigString = string(sourceConfigJSON) } // Validate inputs if sourceData.Archive == "" && sourceData.SourceType == "" { - return mcp.NewToolResultError("Missing required parameter: Either 'archive' or 'source-type' must be specified for source creation. Use 'archive' for custom connectors or 'source-type' for built-in connectors."), nil + return nil, fmt.Errorf("missing required parameter: either 'archive' or 'source-type' must be specified for source creation. use 'archive' for custom connectors or 'source-type' for built-in connectors") } if sourceData.Archive != "" && sourceData.SourceType != "" { - return mcp.NewToolResultError("Invalid parameters: Cannot specify both 'archive' and 'source-type'. Use only one of these parameters based on your connector type."), nil + return nil, fmt.Errorf("invalid parameters: cannot specify both 'archive' and 'source-type'. use only one of these parameters based on your connector type") } if sourceData.DestinationTopicName == "" { - return mcp.NewToolResultError("Missing required parameter: 'destination-topic-name' must be specified. This is the Pulsar topic where the source will publish data."), nil + return nil, fmt.Errorf("missing required parameter: 'destination-topic-name' must be specified. this is the Pulsar topic where the source will publish data") } // Process the arguments - err = b.processSourceArguments(sourceData) + err := b.processSourceArguments(sourceData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process arguments: %v", err)), nil + return nil, fmt.Errorf("failed to process arguments: %v", err) } // Create the source @@ -426,31 +415,14 @@ func (b *PulsarAdminSourcesToolBuilder) handleSourceCreate(_ context.Context, ad } if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create source '%s' in tenant '%s' namespace '%s': %v. Verify all parameters are correct and required resources exist.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to create source '%s' in tenant '%s' namespace '%s': %v. verify all parameters are correct and required resources exist", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Created source '%s' successfully in tenant '%s' namespace '%s'. The source will start pulling data from the external system and publishing to the destination topic.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Created source '%s' successfully in tenant '%s' namespace '%s'. 
The source will start pulling data from the external system and publishing to the destination topic.", name, tenant, namespace)), nil } // handleSourceUpdate handles updating an existing source -func (b *PulsarAdminSourcesToolBuilder) handleSourceUpdate(_ context.Context, admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant: %v", err)), nil - } - - namespace, err := request.RequireString("namespace") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get namespace: %v", err)), nil - } - - name, err := request.RequireString("name") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get name: %v", err)), nil - } - +func (b *PulsarAdminSourcesToolBuilder) handleSourceUpdate(_ context.Context, admin cmdutils.Client, input pulsarAdminSourcesInput, tenant, namespace, name string) (*sdk.CallToolResult, error) { // Create a new SourceData object sourceData := &utils.SourceData{ Tenant: tenant, @@ -460,70 +432,56 @@ func (b *PulsarAdminSourcesToolBuilder) handleSourceUpdate(_ context.Context, ad } // Get optional parameters - archive := request.GetString("archive", "") - if archive != "" { + if archive := stringValue(input.Archive); archive != "" { sourceData.Archive = archive } - sourceType := request.GetString("source-type", "") - if sourceType != "" { + if sourceType := stringValue(input.SourceType); sourceType != "" { sourceData.SourceType = sourceType } - destTopic := request.GetString("destination-topic-name", "") - if destTopic != "" { + if destTopic := stringValue(input.DestinationTopicName); destTopic != "" { sourceData.DestinationTopicName = destTopic } - deserializationClassName := request.GetString("deserialization-classname", "") - if deserializationClassName != "" { + if deserializationClassName := stringValue(input.DeserializationClass); deserializationClassName != "" { sourceData.DeserializationClassName = deserializationClassName } - schemaType := request.GetString("schema-type", "") - if schemaType != "" { + if schemaType := stringValue(input.SchemaType); schemaType != "" { sourceData.SchemaType = schemaType } - className := request.GetString("classname", "") - if className != "" { + if className := stringValue(input.ClassName); className != "" { sourceData.ClassName = className } - processingGuarantees := request.GetString("processing-guarantees", "") - if processingGuarantees != "" { + if processingGuarantees := stringValue(input.ProcessingGuarantees); processingGuarantees != "" { sourceData.ProcessingGuarantees = processingGuarantees } - parallelismFloat := request.GetFloat("parallelism", 1) - if parallelismFloat >= 0 { - sourceData.Parallelism = int(parallelismFloat) + if input.Parallelism != nil && *input.Parallelism >= 0 { + sourceData.Parallelism = int(*input.Parallelism) } // Get source config if available - var sourceConfigMap map[string]interface{} - sourceConfigObj, ok := request.GetArguments()["source-config"] - if ok && sourceConfigObj != nil { - if configMap, isMap := sourceConfigObj.(map[string]interface{}); isMap { - sourceConfigMap = configMap - // Convert to JSON string - sourceConfigJSON, err := json.Marshal(sourceConfigMap) - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to marshal source-config: %v. 
Ensure the source configuration is a valid JSON object.", err)), nil - } - sourceData.SourceConfigString = string(sourceConfigJSON) + if input.SourceConfig != nil { + sourceConfigJSON, err := json.Marshal(input.SourceConfig) + if err != nil { + return nil, fmt.Errorf("failed to marshal source-config: %v. ensure the source configuration is a valid JSON object", err) } + sourceData.SourceConfigString = string(sourceConfigJSON) } // Validate inputs if both are specified if sourceData.Archive != "" && sourceData.SourceType != "" { - return mcp.NewToolResultError("Invalid parameters: Cannot specify both 'archive' and 'source-type'. Use only one of these parameters based on your connector type."), nil + return nil, fmt.Errorf("invalid parameters: cannot specify both 'archive' and 'source-type'. use only one of these parameters based on your connector type") } // Process the arguments - err = b.processSourceArguments(sourceData) + err := b.processSourceArguments(sourceData) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to process arguments: %v", err)), nil + return nil, fmt.Errorf("failed to process arguments: %v", err) } // Create update options @@ -539,76 +497,66 @@ func (b *PulsarAdminSourcesToolBuilder) handleSourceUpdate(_ context.Context, ad } if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and all parameters are valid.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to update source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and all parameters are valid", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Updated source '%s' successfully in tenant '%s' namespace '%s'. The source may need to be restarted to apply all changes.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Updated source '%s' successfully in tenant '%s' namespace '%s'. The source may need to be restarted to apply all changes.", name, tenant, namespace)), nil } // handleSourceDelete handles deleting a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceDelete(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleSourceDelete(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sources().DeleteSource(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and you have deletion permissions.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to delete source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and you have deletion permissions", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Deleted source '%s' successfully from tenant '%s' namespace '%s'. All running instances have been terminated.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Deleted source '%s' successfully from tenant '%s' namespace '%s'. 
All running instances have been terminated.", name, tenant, namespace)), nil } // handleSourceStart handles starting a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceStart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleSourceStart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sources().StartSource(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to start source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and is not already running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to start source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and is not already running", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Started source '%s' successfully in tenant '%s' namespace '%s'. The source will begin pulling data from the external system.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Started source '%s' successfully in tenant '%s' namespace '%s'. The source will begin pulling data from the external system.", name, tenant, namespace)), nil } // handleSourceStop handles stopping a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceStop(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleSourceStop(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sources().StopSource(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to stop source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and is currently running.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to stop source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and is currently running", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Stopped source '%s' successfully in tenant '%s' namespace '%s'. The source will no longer pull data until restarted.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Stopped source '%s' successfully in tenant '%s' namespace '%s'. The source will no longer pull data until restarted.", name, tenant, namespace)), nil } // handleSourceRestart handles restarting a source -func (b *PulsarAdminSourcesToolBuilder) handleSourceRestart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleSourceRestart(_ context.Context, admin cmdutils.Client, tenant, namespace, name string) (*sdk.CallToolResult, error) { err := admin.Sources().RestartSource(tenant, namespace, name) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to restart source '%s' in tenant '%s' namespace '%s': %v. Verify the source exists and is properly deployed.", - name, tenant, namespace, err)), nil + return nil, fmt.Errorf("failed to restart source '%s' in tenant '%s' namespace '%s': %v. verify the source exists and is properly deployed", name, tenant, namespace, err) } - return mcp.NewToolResultText(fmt.Sprintf("Restarted source '%s' successfully in tenant '%s' namespace '%s'. 
All source instances have been restarted.", - name, tenant, namespace)), nil + return textResult(fmt.Sprintf("Restarted source '%s' successfully in tenant '%s' namespace '%s'. All source instances have been restarted.", name, tenant, namespace)), nil } // handleListBuiltInSources handles listing all built-in source connectors -func (b *PulsarAdminSourcesToolBuilder) handleListBuiltInSources(_ context.Context, admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSourcesToolBuilder) handleListBuiltInSources(_ context.Context, admin cmdutils.Client) (*sdk.CallToolResult, error) { sources, err := admin.Sources().GetBuiltInSources() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list built-in sources: %v. There might be an issue connecting to the Pulsar cluster.", err)), nil + return nil, fmt.Errorf("failed to list built-in sources: %v. there might be an issue connecting to the Pulsar cluster", err) } // Convert result to JSON string sourcesJSON, err := json.Marshal(sources) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize built-in sources: %v", err)), nil + return nil, fmt.Errorf("failed to serialize built-in sources: %v", err) } - return mcp.NewToolResultText(string(sourcesJSON)), nil + return textResult(string(sourcesJSON)), nil } // processSourceArguments is a simplified version of the pulsarctl function to process source arguments @@ -706,3 +654,34 @@ func (b *PulsarAdminSourcesToolBuilder) isPackageURLSupported(archive string) bo // Also check if it's a local file path (not a URL) return !strings.Contains(archive, "://") } + +func buildPulsarAdminSourcesInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminSourcesInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "operation", pulsarAdminSourcesOperationDesc) + setSchemaDescription(schema, "tenant", pulsarAdminSourcesTenantDesc) + setSchemaDescription(schema, "namespace", pulsarAdminSourcesNamespaceDesc) + setSchemaDescription(schema, "name", pulsarAdminSourcesNameDesc) + setSchemaDescription(schema, "archive", pulsarAdminSourcesArchiveDesc) + setSchemaDescription(schema, "source-type", pulsarAdminSourcesSourceTypeDesc) + setSchemaDescription(schema, "destination-topic-name", pulsarAdminSourcesDestinationTopicDesc) + setSchemaDescription(schema, "deserialization-classname", pulsarAdminSourcesDeserializationClassDesc) + setSchemaDescription(schema, "schema-type", pulsarAdminSourcesSchemaTypeDesc) + setSchemaDescription(schema, "classname", pulsarAdminSourcesClassNameDesc) + setSchemaDescription(schema, "processing-guarantees", pulsarAdminSourcesProcessingGuaranteesDesc) + setSchemaDescription(schema, "parallelism", pulsarAdminSourcesParallelismDesc) + setSchemaDescription(schema, "source-config", pulsarAdminSourcesConfigDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} diff --git a/pkg/mcp/builders/pulsar/sources_legacy.go b/pkg/mcp/builders/pulsar/sources_legacy.go new file mode 100644 index 0000000..68bdc53 --- /dev/null +++ b/pkg/mcp/builders/pulsar/sources_legacy.go @@ -0,0 +1,123 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminSourcesLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin sources. +// /nolint:revive +type PulsarAdminSourcesLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminSourcesLegacyToolBuilder creates a new Pulsar admin sources legacy tool builder instance. +func NewPulsarAdminSourcesLegacyToolBuilder() *PulsarAdminSourcesLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_sources", + Version: "1.0.0", + Description: "Pulsar admin sources management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "sources"}, + } + + features := []string{ + "pulsar-admin-sources", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminSourcesLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin sources legacy tool list. +func (b *PulsarAdminSourcesLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildSourcesTool() + if err != nil { + return nil, err + } + handler := b.buildSourcesHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminSourcesLegacyToolBuilder) buildSourcesTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminSourcesInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Manage Apache Pulsar Sources for data ingestion and integration. " + + "Pulsar Sources are connectors that import data from external systems into Pulsar topics. " + + "Sources connect to external systems such as databases, messaging platforms, storage services, " + + "and real-time data streams to pull data and publish it to Pulsar topics. " + + "Built-in source connectors are available for common systems like Kafka, JDBC, AWS services, and more. " + + "Sources follow the tenant/namespace/name hierarchy for organization and access control, " + + "can scale through parallelism configuration, and support various processing guarantees. " + + "This tool provides complete lifecycle management including deployment, configuration, " + + "monitoring, and runtime control. Sources use schema types to ensure data compatibility." 
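+	// Note: the JSON schema used here comes from the same
+	// buildPulsarAdminSourcesInputSchema helper as the go-sdk builder, so the
+	// legacy mcp-go tool and the typed SDK tool advertise identical parameter
+	// definitions; the legacy tool simply carries it as RawInputSchema.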
+ + return mcp.Tool{ + Name: "pulsar_admin_sources", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminSourcesLegacyToolBuilder) buildSourcesHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminSourcesToolBuilder() + sdkHandler := sdkBuilder.buildSourcesHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminSourcesInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/subscription.go b/pkg/mcp/builders/pulsar/subscription.go index f1a1882..af86375 100644 --- a/pkg/mcp/builders/pulsar/subscription.go +++ b/pkg/mcp/builders/pulsar/subscription.go @@ -21,13 +21,54 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminSubscriptionInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Topic string `json:"topic"` + Subscription *string `json:"subscription,omitempty"` + MessageID *string `json:"messageId,omitempty"` + Count *float64 `json:"count,omitempty"` + ExpireTimeInSeconds *float64 `json:"expireTimeInSeconds,omitempty"` + Force *bool `json:"force,omitempty"` +} + +const ( + pulsarAdminSubscriptionResourceDesc = "Resource to operate on. Available resources:\n" + + "- subscription: A subscription on a topic representing a consumer group" + pulsarAdminSubscriptionOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all subscriptions for a topic\n" + + "- create: Create a new subscription on a topic\n" + + "- delete: Delete a subscription from a topic\n" + + "- skip: Skip a specified number of messages for a subscription\n" + + "- expire: Expire messages older than specified time for a subscription\n" + + "- reset-cursor: Reset the cursor position for a subscription to a specific message ID" + pulsarAdminSubscriptionTopicDesc = "The fully qualified topic name in the format 'persistent://tenant/namespace/topic'. " + + "For partitioned topics, you can either specify the base topic name (to apply the operation across all partitions) " + + "or a specific partition in the format 'topicName-partition-N'." + pulsarAdminSubscriptionNameDesc = "The subscription name. Required for all operations except 'list'. " + + "A subscription name is a logical identifier for a durable position in a topic. " + + "Multiple consumers can attach to the same subscription to implement different messaging patterns." + pulsarAdminSubscriptionMessageIDDesc = "Message ID for positioning the subscription cursor. Used in 'create' and 'reset-cursor' operations. 
" + + "Values can be:\n" + + "- 'latest': Position at the latest (most recent) message\n" + + "- 'earliest': Position at the earliest (oldest available) message\n" + + "- specific position in 'ledgerId:entryId' format for precise positioning" + pulsarAdminSubscriptionCountDesc = "The number of messages to skip (required for 'skip' operation). " + + "This moves the subscription cursor forward by the specified number of messages without processing them." + pulsarAdminSubscriptionExpireDesc = "Expire messages older than the specified seconds (required for 'expire' operation). " + + "This moves the subscription cursor to skip all messages published before the specified time." + pulsarAdminSubscriptionForceDesc = "Force deletion of subscription (optional for 'delete' operation). " + + "When true, all consumers will be forcefully disconnected and the subscription will be deleted. " + + "Use with caution as it can interrupt active message processing." +) + // PulsarAdminSubscriptionToolBuilder implements the ToolBuilder interface for Pulsar Admin Subscription tools // It provides functionality to build Pulsar subscription management tools // /nolint:revive @@ -59,7 +100,7 @@ func NewPulsarAdminSubscriptionToolBuilder() *PulsarAdminSubscriptionToolBuilder // BuildTools builds the Pulsar Admin Subscription tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminSubscriptionToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminSubscriptionToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -71,11 +112,14 @@ func (b *PulsarAdminSubscriptionToolBuilder) BuildTools(_ context.Context, confi } // Build tools - tool := b.buildSubscriptionTool() + tool, err := b.buildSubscriptionTool() + if err != nil { + return nil, err + } handler := b.buildSubscriptionHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminSubscriptionInput, any]{ Tool: tool, Handler: handler, }, @@ -84,7 +128,12 @@ func (b *PulsarAdminSubscriptionToolBuilder) BuildTools(_ context.Context, confi // buildSubscriptionTool builds the Pulsar Admin Subscription MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionTool() mcp.Tool { +func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminSubscriptionInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar subscriptions on topics. " + "Subscriptions are named entities representing consumer groups that maintain their position in a topic. " + "Pulsar supports multiple subscription modes (Exclusive, Shared, Failover, Key_Shared) to accommodate different messaging patterns. " + @@ -93,76 +142,30 @@ func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionTool() mcp.Tool { "Operations include listing, creating, deleting, and manipulating message cursors within subscriptions. " + "Most operations require namespace admin permissions plus produce/consume permissions on the topic." - resourceDesc := "Resource to operate on. 
Available resources:\n" + - "- subscription: A subscription on a topic representing a consumer group" - - operationDesc := "Operation to perform. Available operations:\n" + - "- list: List all subscriptions for a topic\n" + - "- create: Create a new subscription on a topic\n" + - "- delete: Delete a subscription from a topic\n" + - "- skip: Skip a specified number of messages for a subscription\n" + - "- expire: Expire messages older than specified time for a subscription\n" + - "- reset-cursor: Reset the cursor position for a subscription to a specific message ID" - - return mcp.NewTool("pulsar_admin_subscription", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("topic", mcp.Required(), - mcp.Description("The fully qualified topic name in the format 'persistent://tenant/namespace/topic'. "+ - "For partitioned topics, you can either specify the base topic name (to apply the operation across all partitions) "+ - "or a specific partition in the format 'topicName-partition-N'."), - ), - mcp.WithString("subscription", - mcp.Description("The subscription name. Required for all operations except 'list'. "+ - "A subscription name is a logical identifier for a durable position in a topic. "+ - "Multiple consumers can attach to the same subscription to implement different messaging patterns."), - ), - mcp.WithString("messageId", - mcp.Description("Message ID for positioning the subscription cursor. Used in 'create' and 'reset-cursor' operations. "+ - "Values can be:\n"+ - "- 'latest': Position at the latest (most recent) message\n"+ - "- 'earliest': Position at the earliest (oldest available) message\n"+ - "- specific position in 'ledgerId:entryId' format for precise positioning"), - ), - mcp.WithNumber("count", - mcp.Description("The number of messages to skip (required for 'skip' operation). "+ - "This moves the subscription cursor forward by the specified number of messages without processing them."), - ), - mcp.WithNumber("expireTimeInSeconds", - mcp.Description("Expire messages older than the specified seconds (required for 'expire' operation). "+ - "This moves the subscription cursor to skip all messages published before the specified time."), - ), - mcp.WithBoolean("force", - mcp.Description("Force deletion of subscription (optional for 'delete' operation). "+ - "When true, all consumers will be forcefully disconnected and the subscription will be deleted. 
"+ - "Use with caution as it can interrupt active message processing."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_subscription", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildSubscriptionHandler builds the Pulsar Admin Subscription handler function // Migrated from the original handler logic -func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil +func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminSubscriptionInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, any, error) { + resource := input.Resource + if resource == "" { + return nil, nil, fmt.Errorf("missing required parameter 'resource'") } - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + operation := input.Operation + if operation == "" { + return nil, nil, fmt.Errorf("missing required parameter 'operation'") } - topic, err := request.RequireString("topic") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic'. Please provide the fully qualified topic name: %v", err)), nil + topic := input.Topic + if topic == "" { + return nil, nil, fmt.Errorf("missing required parameter 'topic'. please provide the fully qualified topic name") } // Normalize parameters @@ -171,48 +174,54 @@ func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionHandler(readOnly b // Validate write operations in read-only mode if readOnly && (operation != "list") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Verify resource type if resource != "subscription" { - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Only 'subscription' is supported", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. 
only 'subscription' is supported", resource) } // Parse topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create the admin client admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, fmt.Errorf("failed to get admin client: %v", err) } // Dispatch based on operation switch operation { case "list": - return b.handleSubsList(admin, topicName) + result, err := b.handleSubsList(admin, topicName) + return result, nil, err case "create": - return b.handleSubsCreate(admin, topicName, request) + result, err := b.handleSubsCreate(admin, topicName, input) + return result, nil, err case "delete": - return b.handleSubsDelete(admin, topicName, request) + result, err := b.handleSubsDelete(admin, topicName, input) + return result, nil, err case "skip": - return b.handleSubsSkip(admin, topicName, request) + result, err := b.handleSubsSkip(admin, topicName, input) + return result, nil, err case "expire": - return b.handleSubsExpire(admin, topicName, request) + result, err := b.handleSubsExpire(admin, topicName, input) + return result, nil, err case "reset-cursor": - return b.handleSubsResetCursor(admin, topicName, request) + result, err := b.handleSubsResetCursor(admin, topicName, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unknown operation: %s", operation) } } } @@ -220,43 +229,47 @@ func (b *PulsarAdminSubscriptionToolBuilder) buildSubscriptionHandler(readOnly b // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminSubscriptionToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminSubscriptionToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminSubscriptionToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Operation handler functions - migrated from the original implementation // handleSubsList handles listing all subscriptions for a topic -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsList(admin cmdutils.Client, topicName *utils.TopicName) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsList(admin cmdutils.Client, topicName *utils.TopicName) (*sdk.CallToolResult, error) { // List subscriptions subscriptions, err := 
admin.Subscriptions().List(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list subscriptions for topic '%s': %v", - topicName.String(), err)), nil + return nil, fmt.Errorf("failed to list subscriptions for topic '%s': %v", topicName.String(), err) } return b.marshalResponse(subscriptions) } // handleSubsCreate handles creating a new subscription -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsCreate(admin cmdutils.Client, topicName *utils.TopicName, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsCreate(admin cmdutils.Client, topicName *utils.TopicName, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, error) { // Get required parameter - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'subscription' for subscription.create: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for subscription.create: %v", err) } // Get optional messageID parameter (default is "latest") - messageID := request.GetString("messageId", "latest") + messageID := stringValue(input.MessageID) + if messageID == "" { + messageID = "latest" + } // Parse messageId var messageIDObj utils.MessageID @@ -268,12 +281,11 @@ func (b *PulsarAdminSubscriptionToolBuilder) handleSubsCreate(admin cmdutils.Cli default: s := strings.Split(messageID, ":") if len(s) != 2 { - return mcp.NewToolResultError(fmt.Sprintf( - "Invalid messageId format: %s. Use 'latest', 'earliest', or 'ledgerId:entryId' format", messageID)), nil + return nil, fmt.Errorf("invalid messageId format: %s. 
use 'latest', 'earliest', or 'ledgerId:entryId' format", messageID) } msgID, err := utils.ParseMessageID(messageID) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to parse messageId '%s': %v", messageID, err)), nil + return nil, fmt.Errorf("failed to parse messageId '%s': %v", messageID, err) } messageIDObj = *msgID } @@ -281,37 +293,33 @@ func (b *PulsarAdminSubscriptionToolBuilder) handleSubsCreate(admin cmdutils.Cli // Create subscription err = admin.Subscriptions().Create(*topicName, subscription, messageIDObj) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create subscription '%s' on topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to create subscription '%s' on topic '%s': %v", subscription, topicName.String(), err) } - return mcp.NewToolResultText(fmt.Sprintf("Created subscription '%s' on topic '%s' from position '%s' successfully", - subscription, topicName.String(), messageID)), nil + return textResult(fmt.Sprintf("Created subscription '%s' on topic '%s' from position '%s' successfully", subscription, topicName.String(), messageID)), nil } // handleSubsDelete handles deleting a subscription -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsDelete(admin cmdutils.Client, topicName *utils.TopicName, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsDelete(admin cmdutils.Client, topicName *utils.TopicName, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, error) { // Get required parameter - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'subscription' for subscription.delete: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for subscription.delete: %v", err) } // Get optional force parameter (default is false) - force := request.GetBool("force", false) + force := input.Force != nil && *input.Force // Delete subscription if force { err = admin.Subscriptions().ForceDelete(*topicName, subscription) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to forcefully delete subscription '%s' from topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to forcefully delete subscription '%s' from topic '%s': %v", subscription, topicName.String(), err) } } else { err = admin.Subscriptions().Delete(*topicName, subscription) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete subscription '%s' from topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to delete subscription '%s' from topic '%s': %v", subscription, topicName.String(), err) } } @@ -319,71 +327,68 @@ func (b *PulsarAdminSubscriptionToolBuilder) handleSubsDelete(admin cmdutils.Cli if force { forceStr = " forcefully" } - return mcp.NewToolResultText(fmt.Sprintf("Deleted subscription '%s' from topic '%s'%s successfully", - subscription, topicName.String(), forceStr)), nil + return textResult(fmt.Sprintf("Deleted subscription '%s' from topic '%s'%s successfully", subscription, topicName.String(), forceStr)), nil } // handleSubsSkip handles skipping messages for a subscription -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsSkip(admin cmdutils.Client, topicName *utils.TopicName, request mcp.CallToolRequest) (*mcp.CallToolResult, 
error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsSkip(admin cmdutils.Client, topicName *utils.TopicName, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, error) { // Get required parameters - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'subscription' for subscription.skip: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for subscription.skip: %v", err) } - count, err := request.RequireFloat("count") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'count' for subscription.skip: %v", err)), nil + if input.Count == nil { + return nil, fmt.Errorf("missing required parameter 'count' for subscription.skip") } + count := *input.Count + // Skip messages err = admin.Subscriptions().SkipMessages(*topicName, subscription, int64(count)) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to skip messages for subscription '%s' on topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to skip messages for subscription '%s' on topic '%s': %v", subscription, topicName.String(), err) } - return mcp.NewToolResultText(fmt.Sprintf("Skipped %d messages for subscription '%s' on topic '%s' successfully", - int(count), subscription, topicName.String())), nil + return textResult(fmt.Sprintf("Skipped %d messages for subscription '%s' on topic '%s' successfully", int(count), subscription, topicName.String())), nil } // handleSubsExpire handles expiring messages for a subscription -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsExpire(admin cmdutils.Client, topicName *utils.TopicName, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsExpire(admin cmdutils.Client, topicName *utils.TopicName, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, error) { // Get required parameters - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'subscription' for subscription.expire: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for subscription.expire: %v", err) } - expireTime, err := request.RequireFloat("expireTimeInSeconds") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'expireTimeInSeconds' for subscription.expire: %v", err)), nil + if input.ExpireTimeInSeconds == nil { + return nil, fmt.Errorf("missing required parameter 'expireTimeInSeconds' for subscription.expire") } + expireTime := *input.ExpireTimeInSeconds + // Expire messages err = admin.Subscriptions().ExpireMessages(*topicName, subscription, int64(expireTime)) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to expire messages for subscription '%s' on topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to expire messages for subscription '%s' on topic '%s': %v", subscription, topicName.String(), err) } - return mcp.NewToolResultText( - fmt.Sprintf("Expired messages older than %d seconds for subscription '%s' on topic '%s' successfully", - int(expireTime), subscription, topicName.String()), + return textResult( + fmt.Sprintf("Expired messages older 
than %d seconds for subscription '%s' on topic '%s' successfully", int(expireTime), subscription, topicName.String()), ), nil } // handleSubsResetCursor handles resetting a subscription cursor -func (b *PulsarAdminSubscriptionToolBuilder) handleSubsResetCursor(admin cmdutils.Client, topicName *utils.TopicName, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminSubscriptionToolBuilder) handleSubsResetCursor(admin cmdutils.Client, topicName *utils.TopicName, input pulsarAdminSubscriptionInput) (*sdk.CallToolResult, error) { // Get required parameters - subscription, err := request.RequireString("subscription") + subscription, err := requireString(input.Subscription, "subscription") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'subscription' for subscription.reset-cursor: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'subscription' for subscription.reset-cursor: %v", err) } - messageID, err := request.RequireString("messageId") + messageID, err := requireString(input.MessageID, "messageId") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'messageId' for subscription.reset-cursor: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'messageId' for subscription.reset-cursor: %v", err) } // Parse messageId @@ -396,12 +401,11 @@ func (b *PulsarAdminSubscriptionToolBuilder) handleSubsResetCursor(admin cmdutil default: s := strings.Split(messageID, ":") if len(s) != 2 { - return mcp.NewToolResultError(fmt.Sprintf( - "Invalid messageId format: %s. Use 'latest', 'earliest', or 'ledgerId:entryId' format", messageID)), nil + return nil, fmt.Errorf("invalid messageId format: %s. use 'latest', 'earliest', or 'ledgerId:entryId' format", messageID) } msgID, err := utils.ParseMessageID(messageID) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to parse messageId '%s': %v", messageID, err)), nil + return nil, fmt.Errorf("failed to parse messageId '%s': %v", messageID, err) } messageIDObj = *msgID } @@ -409,12 +413,34 @@ func (b *PulsarAdminSubscriptionToolBuilder) handleSubsResetCursor(admin cmdutil // Reset cursor err = admin.Subscriptions().ResetCursorToMessageID(*topicName, subscription, messageIDObj) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to reset cursor for subscription '%s' on topic '%s': %v", - subscription, topicName.String(), err)), nil + return nil, fmt.Errorf("failed to reset cursor for subscription '%s' on topic '%s': %v", subscription, topicName.String(), err) } - return mcp.NewToolResultText( - fmt.Sprintf("Reset cursor for subscription '%s' on topic '%s' to position '%s' successfully", - subscription, topicName.String(), messageID), - ), nil + return textResult(fmt.Sprintf("Reset cursor for subscription '%s' on topic '%s' to position '%s' successfully", subscription, topicName.String(), messageID)), nil +} + +func buildPulsarAdminSubscriptionInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminSubscriptionInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "resource", pulsarAdminSubscriptionResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminSubscriptionOperationDesc) + 
setSchemaDescription(schema, "topic", pulsarAdminSubscriptionTopicDesc) + setSchemaDescription(schema, "subscription", pulsarAdminSubscriptionNameDesc) + setSchemaDescription(schema, "messageId", pulsarAdminSubscriptionMessageIDDesc) + setSchemaDescription(schema, "count", pulsarAdminSubscriptionCountDesc) + setSchemaDescription(schema, "expireTimeInSeconds", pulsarAdminSubscriptionExpireDesc) + setSchemaDescription(schema, "force", pulsarAdminSubscriptionForceDesc) + + normalizeAdditionalProperties(schema) + return schema, nil } diff --git a/pkg/mcp/builders/pulsar/subscription_legacy.go b/pkg/mcp/builders/pulsar/subscription_legacy.go new file mode 100644 index 0000000..4b4e185 --- /dev/null +++ b/pkg/mcp/builders/pulsar/subscription_legacy.go @@ -0,0 +1,121 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminSubscriptionLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin subscriptions. +// /nolint:revive +type PulsarAdminSubscriptionLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminSubscriptionLegacyToolBuilder creates a new Pulsar admin subscription legacy tool builder instance. +func NewPulsarAdminSubscriptionLegacyToolBuilder() *PulsarAdminSubscriptionLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_subscription", + Version: "1.0.0", + Description: "Pulsar Admin subscription management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "subscription", "admin"}, + } + + features := []string{ + "pulsar-admin-subscriptions", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminSubscriptionLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin subscription legacy tool list. +func (b *PulsarAdminSubscriptionLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildSubscriptionTool() + if err != nil { + return nil, err + } + handler := b.buildSubscriptionHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminSubscriptionLegacyToolBuilder) buildSubscriptionTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminSubscriptionInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + toolDesc := "Manage Apache Pulsar subscriptions on topics. 
" + + "Subscriptions are named entities representing consumer groups that maintain their position in a topic. " + + "Pulsar supports multiple subscription modes (Exclusive, Shared, Failover, Key_Shared) to accommodate different messaging patterns. " + + "Each subscription tracks message acknowledgments independently, allowing multiple consumers to process messages at their own pace. " + + "Subscriptions persist even when all consumers disconnect, maintaining state and preventing message loss. " + + "Operations include listing, creating, deleting, and manipulating message cursors within subscriptions. " + + "Most operations require namespace admin permissions plus produce/consume permissions on the topic." + + return mcp.Tool{ + Name: "pulsar_admin_subscription", + Description: toolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminSubscriptionLegacyToolBuilder) buildSubscriptionHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminSubscriptionToolBuilder() + sdkHandler := sdkBuilder.buildSubscriptionHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminSubscriptionInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/tenant.go b/pkg/mcp/builders/pulsar/tenant.go index 8a0d457..a46f719 100644 --- a/pkg/mcp/builders/pulsar/tenant.go +++ b/pkg/mcp/builders/pulsar/tenant.go @@ -21,13 +21,45 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminTenantInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Tenant *string `json:"tenant,omitempty"` + AdminRoles []string `json:"adminRoles,omitempty"` + AllowedClusters []string `json:"allowedClusters,omitempty"` +} + +const ( + pulsarAdminTenantResourceDesc = "Resource to operate on. Available resources:\n" + + "- tenant: A tenant in the Pulsar instance" + pulsarAdminTenantOperationDesc = "Operation to perform. Available operations:\n" + + "- list: List all tenants in the Pulsar instance\n" + + "- get: Get configuration details for a specific tenant\n" + + "- create: Create a new tenant with specified configuration\n" + + "- update: Update configuration for an existing tenant\n" + + "- delete: Delete an existing tenant (must not have any active namespaces)" + pulsarAdminTenantNameDesc = "The tenant name to operate on. Required for all operations except 'list'. " + + "Tenant names are unique identifiers and form the root of the topic naming hierarchy. " + + "A valid tenant name must be comprised of alphanumeric characters and/or the following special characters: " + + "'-', '_', '.', ':'. Ensure the tenant name follows your organization's naming conventions." 
+ pulsarAdminTenantAdminRolesDesc = "List of auth principals (users or roles) allowed to administrate the tenant. " + + "Required for 'create' and 'update' operations. These roles can create, update, or delete any " + + "namespaces within the tenant, and can manage topic configurations. " + + "Format: array of role strings, e.g., ['admin1', 'orgAdmin']. " + + "Use empty array [] to remove all admin roles." + pulsarAdminTenantAllowedClustersDesc = "List of clusters that this tenant can access. Required for 'create' and 'update' operations. " + + "Restricts the tenant to only use specified clusters, enabling geographic or infrastructure isolation. " + + "Format: array of cluster names, e.g., ['us-west', 'us-east']. " + + "An empty list means no clusters are accessible to this tenant." +) + // PulsarAdminTenantToolBuilder implements the ToolBuilder interface for Pulsar Admin Tenant tools // It provides functionality to build Pulsar tenant management tools // /nolint:revive @@ -59,7 +91,7 @@ func NewPulsarAdminTenantToolBuilder() *PulsarAdminTenantToolBuilder { // BuildTools builds the Pulsar Admin Tenant tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminTenantToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminTenantToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -71,11 +103,14 @@ func (b *PulsarAdminTenantToolBuilder) BuildTools(_ context.Context, config buil } // Build tools - tool := b.buildTenantTool() + tool, err := b.buildTenantTool() + if err != nil { + return nil, err + } handler := b.buildTenantHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminTenantInput, any]{ Tool: tool, Handler: handler, }, @@ -84,7 +119,12 @@ func (b *PulsarAdminTenantToolBuilder) BuildTools(_ context.Context, config buil // buildTenantTool builds the Pulsar Admin Tenant MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminTenantToolBuilder) buildTenantTool() mcp.Tool { +func (b *PulsarAdminTenantToolBuilder) buildTenantTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminTenantInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar tenants. " + "Tenants are the highest level administrative unit in Pulsar's multi-tenancy hierarchy. " + "Each tenant can contain multiple namespaces, allowing for logical isolation of applications. " + @@ -94,113 +134,62 @@ func (b *PulsarAdminTenantToolBuilder) buildTenantTool() mcp.Tool { "appropriate access controls, and effective resource sharing. " + "All tenant operations require super-user permissions." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- tenant: A tenant in the Pulsar instance" - - operationDesc := "Operation to perform. 
Available operations:\n" + - "- list: List all tenants in the Pulsar instance\n" + - "- get: Get configuration details for a specific tenant\n" + - "- create: Create a new tenant with specified configuration\n" + - "- update: Update configuration for an existing tenant\n" + - "- delete: Delete an existing tenant (must not have any active namespaces)" - - return mcp.NewTool("pulsar_admin_tenant", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("tenant", - mcp.Description("The tenant name to operate on. Required for all operations except 'list'. "+ - "Tenant names are unique identifiers and form the root of the topic naming hierarchy. "+ - "A valid tenant name must be comprised of alphanumeric characters and/or the following special characters: "+ - "'-', '_', '.', ':'. Ensure the tenant name follows your organization's naming conventions."), - ), - mcp.WithArray("adminRoles", - mcp.Description("List of auth principals (users or roles) allowed to administrate the tenant. "+ - "Required for 'create' and 'update' operations. These roles can create, update, or delete any "+ - "namespaces within the tenant, and can manage topic configurations. "+ - "Format: array of role strings, e.g., ['admin1', 'orgAdmin']. "+ - "Use empty array [] to remove all admin roles."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "role", - }, - ), - ), - mcp.WithArray("allowedClusters", - mcp.Description("List of clusters that this tenant can access. Required for 'create' and 'update' operations. "+ - "Restricts the tenant to only use specified clusters, enabling geographic or infrastructure isolation. "+ - "Format: array of cluster names, e.g., ['us-west', 'us-east']. "+ - "An empty list means no clusters are accessible to this tenant."), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "cluster", - }, - ), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_tenant", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildTenantHandler builds the Pulsar Admin Tenant handler function // Migrated from the original handler logic -func (b *PulsarAdminTenantToolBuilder) buildTenantHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil - } - - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil - } - +func (b *PulsarAdminTenantToolBuilder) buildTenantHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminTenantInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminTenantInput) (*sdk.CallToolResult, any, error) { // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) + resource := strings.ToLower(input.Resource) + operation := strings.ToLower(input.Operation) // Validate resource if resource != "tenant" { - return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Only 'tenant' is supported.", resource)), nil + return nil, nil, fmt.Errorf("invalid resource: %s. 
only 'tenant' is supported", resource) } // Validate write operations in read-only mode if readOnly && (operation == "create" || operation == "update" || operation == "delete") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create the admin client admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, b.handleError("get admin client", err) } // Dispatch based on operation switch operation { case "list": - return b.handleTenantsList(admin) + result, err := b.handleTenantsList(admin) + return result, nil, err case "get": - return b.handleTenantGet(admin, request) + result, err := b.handleTenantGet(admin, input) + return result, nil, err case "create": - return b.handleTenantCreate(admin, request) + result, err := b.handleTenantCreate(admin, input) + return result, nil, err case "update": - return b.handleTenantUpdate(admin, request) + result, err := b.handleTenantUpdate(admin, input) + return result, nil, err case "delete": - return b.handleTenantDelete(admin, request) + result, err := b.handleTenantDelete(admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unknown operation: %s", operation) } } } @@ -208,63 +197,65 @@ func (b *PulsarAdminTenantToolBuilder) buildTenantHandler(readOnly bool) func(co // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminTenantToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminTenantToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminTenantToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTenantToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil } // Operation handler functions - migrated from the original implementation // handleTenantsList handles listing all tenants -func (b *PulsarAdminTenantToolBuilder) handleTenantsList(admin cmdutils.Client) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTenantToolBuilder) handleTenantsList(admin cmdutils.Client) (*sdk.CallToolResult, error) { // Get tenants list tenants, err := admin.Tenants().List() if err != nil { - return b.handleError("list tenants", err), nil + return nil, b.handleError("list tenants", err) } return b.marshalResponse(tenants) } // handleTenantGet handles getting tenant configuration -func (b *PulsarAdminTenantToolBuilder) handleTenantGet(admin cmdutils.Client, request 
mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") +func (b *PulsarAdminTenantToolBuilder) handleTenantGet(admin cmdutils.Client, input pulsarAdminTenantInput) (*sdk.CallToolResult, error) { + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'tenant' for tenant.get: %v", err) } // Get tenant info tenantInfo, err := admin.Tenants().Get(tenant) if err != nil { - return b.handleError("get tenant", err), nil + return nil, b.handleError("get tenant", err) } return b.marshalResponse(tenantInfo) } // handleTenantCreate handles creating a new tenant -func (b *PulsarAdminTenantToolBuilder) handleTenantCreate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") +func (b *PulsarAdminTenantToolBuilder) handleTenantCreate(admin cmdutils.Client, input pulsarAdminTenantInput) (*sdk.CallToolResult, error) { + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'tenant' for tenant.create: %v", err) } - adminRoles, err := request.RequireStringSlice("adminRoles") + adminRoles, err := requireStringSlice(input.AdminRoles, "adminRoles") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin roles: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'adminRoles' for tenant.create: %v", err) } - allowedClusters, err := request.RequireStringSlice("allowedClusters") + allowedClusters, err := requireStringSlice(input.AllowedClusters, "allowedClusters") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get allowed clusters: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'allowedClusters' for tenant.create: %v", err) } // Create tenant data @@ -277,27 +268,27 @@ func (b *PulsarAdminTenantToolBuilder) handleTenantCreate(admin cmdutils.Client, // Create tenant err = admin.Tenants().Create(tenantData) if err != nil { - return b.handleError("create tenant", err), nil + return nil, b.handleError("create tenant", err) } - return mcp.NewToolResultText(fmt.Sprintf("Tenant %s created successfully", tenant)), nil + return textResult(fmt.Sprintf("Tenant %s created successfully", tenant)), nil } // handleTenantUpdate handles updating tenant configuration -func (b *PulsarAdminTenantToolBuilder) handleTenantUpdate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") +func (b *PulsarAdminTenantToolBuilder) handleTenantUpdate(admin cmdutils.Client, input pulsarAdminTenantInput) (*sdk.CallToolResult, error) { + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'tenant' for tenant.update: %v", err) } - adminRoles, err := request.RequireStringSlice("adminRoles") + adminRoles, err := requireStringSlice(input.AdminRoles, "adminRoles") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin roles: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'adminRoles' for tenant.update: %v", err) } - allowedClusters, err := 
request.RequireStringSlice("allowedClusters") + allowedClusters, err := requireStringSlice(input.AllowedClusters, "allowedClusters") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get allowed clusters: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'allowedClusters' for tenant.update: %v", err) } // Create tenant data @@ -310,24 +301,60 @@ func (b *PulsarAdminTenantToolBuilder) handleTenantUpdate(admin cmdutils.Client, // Update tenant err = admin.Tenants().Update(tenantData) if err != nil { - return b.handleError("update tenant", err), nil + return nil, b.handleError("update tenant", err) } - return mcp.NewToolResultText(fmt.Sprintf("Tenant %s updated successfully", tenant)), nil + return textResult(fmt.Sprintf("Tenant %s updated successfully", tenant)), nil } // handleTenantDelete handles deleting a tenant -func (b *PulsarAdminTenantToolBuilder) handleTenantDelete(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - tenant, err := request.RequireString("tenant") +func (b *PulsarAdminTenantToolBuilder) handleTenantDelete(admin cmdutils.Client, input pulsarAdminTenantInput) (*sdk.CallToolResult, error) { + tenant, err := requireString(input.Tenant, "tenant") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'tenant' for tenant.delete: %v", err) } // Delete tenant err = admin.Tenants().Delete(tenant) if err != nil { - return b.handleError("delete tenant", err), nil + return nil, b.handleError("delete tenant", err) } - return mcp.NewToolResultText(fmt.Sprintf("Tenant %s deleted successfully", tenant)), nil + return textResult(fmt.Sprintf("Tenant %s deleted successfully", tenant)), nil +} + +func buildPulsarAdminTenantInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminTenantInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "resource", pulsarAdminTenantResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminTenantOperationDesc) + setSchemaDescription(schema, "tenant", pulsarAdminTenantNameDesc) + setSchemaDescription(schema, "adminRoles", pulsarAdminTenantAdminRolesDesc) + setSchemaDescription(schema, "allowedClusters", pulsarAdminTenantAllowedClustersDesc) + + if adminRolesSchema := schema.Properties["adminRoles"]; adminRolesSchema != nil && adminRolesSchema.Items != nil { + adminRolesSchema.Items.Description = "role" + } + if allowedClustersSchema := schema.Properties["allowedClusters"]; allowedClustersSchema != nil && allowedClustersSchema.Items != nil { + allowedClustersSchema.Items.Description = "cluster" + } + + normalizeAdditionalProperties(schema) + return schema, nil +} + +func requireStringSlice(values []string, key string) ([]string, error) { + if values == nil { + return nil, fmt.Errorf("required argument %q not found", key) + } + return values, nil } diff --git a/pkg/mcp/builders/pulsar/tenant_legacy.go b/pkg/mcp/builders/pulsar/tenant_legacy.go new file mode 100644 index 0000000..15381d8 --- /dev/null +++ b/pkg/mcp/builders/pulsar/tenant_legacy.go @@ -0,0 +1,305 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/pulsarctl/pkg/cmdutils" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarAdminTenantLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar Admin Tenant tools. +// /nolint:revive +type PulsarAdminTenantLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminTenantLegacyToolBuilder creates a new Pulsar Admin Tenant legacy tool builder instance. +func NewPulsarAdminTenantLegacyToolBuilder() *PulsarAdminTenantLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_tenant", + Version: "1.0.0", + Description: "Pulsar Admin tenant management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "tenant", "admin"}, + } + + features := []string{ + "pulsar-admin-tenants", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminTenantLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Admin Tenant tool list. +func (b *PulsarAdminTenantLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildTenantTool() + handler := b.buildTenantHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildTenantTool builds the Pulsar Admin Tenant MCP tool definition. +func (b *PulsarAdminTenantLegacyToolBuilder) buildTenantTool() mcp.Tool { + toolDesc := "Manage Apache Pulsar tenants. " + + "Tenants are the highest level administrative unit in Pulsar's multi-tenancy hierarchy. " + + "Each tenant can contain multiple namespaces, allowing for logical isolation of applications. " + + "Tenant configuration controls admin access and cluster availability across organizations. " + + "Tenants provide isolation boundaries for topics, security policies, and resource quotas. " + + "Proper tenant management is essential for multi-tenant Pulsar deployments to ensure data isolation, " + + "appropriate access controls, and effective resource sharing. " + + "All tenant operations require super-user permissions." 
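The migrated (non-legacy) handlers in this diff lean on three small helpers — `requireString`, `stringValue`, and `textResult` — that are defined outside this hunk. Assuming they follow the same convention as `requireStringSlice` in tenant.go, they would look roughly like this sketch:

```go
package pulsar

import (
	"fmt"

	sdk "github.com/modelcontextprotocol/go-sdk/mcp"
)

// requireString is an assumed helper: it dereferences an optional string
// argument or reports the key as missing, mirroring requireStringSlice.
func requireString(value *string, key string) (string, error) {
	if value == nil {
		return "", fmt.Errorf("required argument %q not found", key)
	}
	return *value, nil
}

// stringValue is an assumed helper that dereferences an optional string
// argument, defaulting to the empty string.
func stringValue(value *string) string {
	if value == nil {
		return ""
	}
	return *value
}

// textResult is an assumed helper that wraps plain text in a go-sdk
// CallToolResult, matching how marshalResponse builds its results.
func textResult(text string) *sdk.CallToolResult {
	return &sdk.CallToolResult{
		Content: []sdk.Content{&sdk.TextContent{Text: text}},
	}
}
```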
+ + return mcp.NewTool("pulsar_admin_tenant", + mcp.WithDescription(toolDesc), + mcp.WithString("resource", mcp.Required(), + mcp.Description(pulsarAdminTenantResourceDesc), + ), + mcp.WithString("operation", mcp.Required(), + mcp.Description(pulsarAdminTenantOperationDesc), + ), + mcp.WithString("tenant", + mcp.Description(pulsarAdminTenantNameDesc), + ), + mcp.WithArray("adminRoles", + mcp.Description(pulsarAdminTenantAdminRolesDesc), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "role", + }, + ), + ), + mcp.WithArray("allowedClusters", + mcp.Description(pulsarAdminTenantAllowedClustersDesc), + mcp.Items( + map[string]interface{}{ + "type": "string", + "description": "cluster", + }, + ), + ), + ) +} + +// buildTenantHandler builds the Pulsar Admin Tenant handler function. +func (b *PulsarAdminTenantLegacyToolBuilder) buildTenantHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + resource, err := request.RequireString("resource") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil + } + + operation, err := request.RequireString("operation") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + } + + // Normalize parameters + resource = strings.ToLower(resource) + operation = strings.ToLower(operation) + + // Validate resource + if resource != "tenant" { + return mcp.NewToolResultError(fmt.Sprintf("Invalid resource: %s. Only 'tenant' is supported.", resource)), nil + } + + // Validate write operations in read-only mode + if readOnly && (operation == "create" || operation == "update" || operation == "delete") { + return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + } + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + // Create the admin client + admin, err := session.GetAdminClient() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + } + + // Dispatch based on operation + switch operation { + case "list": + return b.handleTenantsList(admin) + case "get": + return b.handleTenantGet(admin, request) + case "create": + return b.handleTenantCreate(admin, request) + case "update": + return b.handleTenantUpdate(admin, request) + case "delete": + return b.handleTenantDelete(admin, request) + default: + return mcp.NewToolResultError(fmt.Sprintf("Unknown operation: %s", operation)), nil + } + } +} + +// handleError provides unified error handling. +func (b *PulsarAdminTenantLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses. +func (b *PulsarAdminTenantLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} + +// handleTenantsList handles listing all tenants. 
+func (b *PulsarAdminTenantLegacyToolBuilder) handleTenantsList(admin cmdutils.Client) (*mcp.CallToolResult, error) { + // Get tenants list + tenants, err := admin.Tenants().List() + if err != nil { + return b.handleError("list tenants", err), nil + } + + return b.marshalResponse(tenants) +} + +// handleTenantGet handles getting tenant configuration. +func (b *PulsarAdminTenantLegacyToolBuilder) handleTenantGet(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + tenant, err := request.RequireString("tenant") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + } + + // Get tenant info + tenantInfo, err := admin.Tenants().Get(tenant) + if err != nil { + return b.handleError("get tenant", err), nil + } + + return b.marshalResponse(tenantInfo) +} + +// handleTenantCreate handles creating a new tenant. +func (b *PulsarAdminTenantLegacyToolBuilder) handleTenantCreate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + tenant, err := request.RequireString("tenant") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + } + + adminRoles, err := request.RequireStringSlice("adminRoles") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin roles: %v", err)), nil + } + + allowedClusters, err := request.RequireStringSlice("allowedClusters") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get allowed clusters: %v", err)), nil + } + + // Create tenant data + tenantData := utils.TenantData{ + Name: tenant, + AdminRoles: adminRoles, + AllowedClusters: allowedClusters, + } + + // Create tenant + err = admin.Tenants().Create(tenantData) + if err != nil { + return b.handleError("create tenant", err), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Tenant %s created successfully", tenant)), nil +} + +// handleTenantUpdate handles updating tenant configuration. +func (b *PulsarAdminTenantLegacyToolBuilder) handleTenantUpdate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + tenant, err := request.RequireString("tenant") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + } + + adminRoles, err := request.RequireStringSlice("adminRoles") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin roles: %v", err)), nil + } + + allowedClusters, err := request.RequireStringSlice("allowedClusters") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get allowed clusters: %v", err)), nil + } + + // Create tenant data + tenantData := utils.TenantData{ + Name: tenant, + AdminRoles: adminRoles, + AllowedClusters: allowedClusters, + } + + // Update tenant + err = admin.Tenants().Update(tenantData) + if err != nil { + return b.handleError("update tenant", err), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Tenant %s updated successfully", tenant)), nil +} + +// handleTenantDelete handles deleting a tenant. 
+func (b *PulsarAdminTenantLegacyToolBuilder) handleTenantDelete(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + tenant, err := request.RequireString("tenant") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get tenant name: %v", err)), nil + } + + // Delete tenant + err = admin.Tenants().Delete(tenant) + if err != nil { + return b.handleError("delete tenant", err), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Tenant %s deleted successfully", tenant)), nil +} diff --git a/pkg/mcp/builders/pulsar/tenant_test.go b/pkg/mcp/builders/pulsar/tenant_test.go new file mode 100644 index 0000000..bc1040c --- /dev/null +++ b/pkg/mcp/builders/pulsar/tenant_test.go @@ -0,0 +1,137 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminTenantToolBuilder(t *testing.T) { + builder := NewPulsarAdminTenantToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_tenant", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-tenants") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-tenants"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_tenant", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-tenants"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_tenant", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-tenants"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminTenantToolSchema(t *testing.T) { + builder := NewPulsarAdminTenantToolBuilder() + tool, err := 
builder.buildTenantTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_tenant", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "tenant", + "adminRoles", + "allowedClusters", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminTenantResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminTenantOperationDesc, operationSchema.Description) +} + +func TestPulsarAdminTenantToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminTenantToolBuilder() + handler := builder.buildTenantHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminTenantInput{ + Resource: "tenant", + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} diff --git a/pkg/mcp/builders/pulsar/topic.go b/pkg/mcp/builders/pulsar/topic.go index 90430e1..cb36fb3 100644 --- a/pkg/mcp/builders/pulsar/topic.go +++ b/pkg/mcp/builders/pulsar/topic.go @@ -18,17 +18,84 @@ import ( "context" "encoding/json" "fmt" + "reflect" "slices" "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminTopicInput struct { + Resource string `json:"resource"` + Operation string `json:"operation"` + Topic *string `json:"topic,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Partitions *int `json:"partitions,omitempty"` + Force bool `json:"force,omitempty"` + NonPartitioned bool `json:"non-partitioned,omitempty"` + Partitioned bool `json:"partitioned,omitempty"` + PerPartition bool `json:"per-partition,omitempty"` + Config *string `json:"config,omitempty"` + MessageID *string `json:"messageId,omitempty"` +} + +const ( + pulsarAdminTopicResourceDesc = "Resource to operate on. Available resources:\n" + + "- topic: A Pulsar topic\n" + + "- topics: Multiple topics within a namespace" + pulsarAdminTopicOperationDesc = "Operation to perform. 
Available operations:\n" + + "- list: List all topics in a namespace\n" + + "- get: Get metadata for a topic\n" + + "- create: Create a new topic with optional partitions\n" + + "- delete: Delete a topic\n" + + "- stats: Get stats for a topic\n" + + "- lookup: Look up the broker serving a topic\n" + + "- internal-stats: Get internal stats for a topic\n" + + "- internal-info: Get internal info for a topic\n" + + "- bundle-range: Get the bundle range of a topic\n" + + "- last-message-id: Get the last message ID of a topic\n" + + "- status: Get the status of a topic\n" + + "- unload: Unload a topic\n" + + "- terminate: Terminate a topic\n" + + "- compact: Trigger compaction on a topic\n" + + "- update: Update a topic partitions\n" + + "- offload: Offload data from a topic to long-term storage\n" + + "- offload-status: Check the status of data offloading for a topic" + pulsarAdminTopicTopicDesc = "The fully qualified topic name (format: [persistent|non-persistent]://tenant/namespace/topic). " + + "Required for all operations except 'list'. " + + "For partitioned topics, reference the base topic name without the partition suffix. " + + "To operate on a specific partition, append -partition-N to the topic name." + pulsarAdminTopicNamespaceDesc = "The namespace name in the format 'tenant/namespace'. " + + "Required for the 'list' operation. " + + "A namespace is a logical grouping of topics within a tenant." + pulsarAdminTopicPartitionsDesc = "The number of partitions for the topic. Required for 'create' and 'update' operations. " + + "Set to 0 for a non-partitioned topic. " + + "Partitioned topics provide higher throughput by dividing message traffic across multiple brokers. " + + "Each partition is an independent unit with its own retention and cursor positions." + pulsarAdminTopicForceDesc = "Force operation even if it disrupts producers or consumers. Optional for 'delete' operation. " + + "When true, all producers and consumers will be forcefully disconnected. " + + "Use with caution as it can interrupt active message processing." + pulsarAdminTopicNonPartitionedDesc = "Operate on a non-partitioned topic. Optional for 'delete' operation. " + + "When true and operating on a partitioned topic name, only deletes the non-partitioned topic " + + "with the same name, if it exists." + pulsarAdminTopicPartitionedDesc = "Get stats for a partitioned topic. Optional for 'stats' operation. " + + "It has to be true if the topic is partitioned. Leave it empty or false for non-partitioned topic." + pulsarAdminTopicPerPartitionDesc = "Include per-partition stats. Optional for 'stats' operation. " + + "When true, returns statistics for each partition separately. " + + "Requires 'partitioned' parameter to be true." + pulsarAdminTopicConfigDesc = "JSON configuration for the topic. Required for 'update' operation. " + + "Set various policies like retention, compaction, deduplication, etc. " + + "Use a JSON object format, e.g., '{\"deduplicationEnabled\": true, \"replication_clusters\": [\"us-west\", \"us-east\"]}'" + pulsarAdminTopicMessageIDDesc = "Message ID for operations that require a position. Required for 'offload' operation. " + + "Format is 'ledgerId:entryId' representing a position in the topic's message log. " + + "For offload operations, specifies the message up to which data should be moved to long-term storage." 
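Each `buildPulsarAdmin*InputSchema` function in this diff decorates the schema generated by `jsonschema.For` with `setSchemaDescription` and `normalizeAdditionalProperties`; neither helper is shown in this hunk. The description setter is straightforward to infer from its call sites, so here is a hedged sketch of it (the normalization helper is deliberately left out, since its behavior cannot be read off this diff):

```go
package pulsar

import "github.com/google/jsonschema-go/jsonschema"

// setSchemaDescription is a hypothetical sketch inferred from its call sites:
// it attaches a human-readable description to a named property of an object
// schema and silently ignores properties that are not present.
func setSchemaDescription(schema *jsonschema.Schema, property, description string) {
	if schema == nil || schema.Properties == nil {
		return
	}
	if prop, ok := schema.Properties[property]; ok && prop != nil {
		prop.Description = description
	}
}
```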
+) + // PulsarAdminTopicToolBuilder implements the ToolBuilder interface for Pulsar Admin Topic tools // It provides functionality to build Pulsar topic management tools // /nolint:revive @@ -60,7 +127,7 @@ func NewPulsarAdminTopicToolBuilder() *PulsarAdminTopicToolBuilder { // BuildTools builds the Pulsar Admin Topic tool list // This is the core method implementing the ToolBuilder interface -func (b *PulsarAdminTopicToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminTopicToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -72,11 +139,14 @@ func (b *PulsarAdminTopicToolBuilder) BuildTools(_ context.Context, config build } // Build tools - tool := b.buildTopicTool() + tool, err := b.buildTopicTool() + if err != nil { + return nil, err + } handler := b.buildTopicHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminTopicInput, any]{ Tool: tool, Handler: handler, }, @@ -85,7 +155,12 @@ func (b *PulsarAdminTopicToolBuilder) BuildTools(_ context.Context, config build // buildTopicTool builds the Pulsar Admin Topic MCP tool definition // Migrated from the original tool definition logic -func (b *PulsarAdminTopicToolBuilder) buildTopicTool() mcp.Tool { +func (b *PulsarAdminTopicToolBuilder) buildTopicTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminTopicInputSchema() + if err != nil { + return nil, err + } + toolDesc := "Manage Apache Pulsar topics. " + "Topics are the core messaging entities in Pulsar that store and transmit messages. " + "Pulsar supports two types of topics: persistent (durable storage with guaranteed delivery) " + @@ -98,121 +173,37 @@ func (b *PulsarAdminTopicToolBuilder) buildTopicTool() mcp.Tool { "Do not use this tool for Kafka protocol operations. Use 'kafka_admin_topics' instead." + "Most operations require namespace admin permissions." - resourceDesc := "Resource to operate on. Available resources:\n" + - "- topic: A Pulsar topic\n" + - "- topics: Multiple topics within a namespace" - - operationDesc := "Operation to perform. 
Available operations:\n" + - "- list: List all topics in a namespace\n" + - "- get: Get metadata for a topic\n" + - "- create: Create a new topic with optional partitions\n" + - "- delete: Delete a topic\n" + - "- stats: Get stats for a topic\n" + - "- lookup: Look up the broker serving a topic\n" + - "- internal-stats: Get internal stats for a topic\n" + - "- internal-info: Get internal info for a topic\n" + - "- bundle-range: Get the bundle range of a topic\n" + - "- last-message-id: Get the last message ID of a topic\n" + - "- status: Get the status of a topic\n" + - "- unload: Unload a topic\n" + - "- terminate: Terminate a topic\n" + - "- compact: Trigger compaction on a topic\n" + - "- update: Update a topic partitions\n" + - "- offload: Offload data from a topic to long-term storage\n" + - "- offload-status: Check the status of data offloading for a topic" - - return mcp.NewTool("pulsar_admin_topic", - mcp.WithDescription(toolDesc), - mcp.WithString("resource", mcp.Required(), - mcp.Description(resourceDesc), - ), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("topic", - mcp.Description("The fully qualified topic name (format: [persistent|non-persistent]://tenant/namespace/topic). "+ - "Required for all operations except 'list'. "+ - "For partitioned topics, reference the base topic name without the partition suffix. "+ - "To operate on a specific partition, append -partition-N to the topic name."), - ), - mcp.WithString("namespace", - mcp.Description("The namespace name in the format 'tenant/namespace'. "+ - "Required for the 'list' operation. "+ - "A namespace is a logical grouping of topics within a tenant."), - ), - mcp.WithNumber("partitions", - mcp.Description("The number of partitions for the topic. Required for 'create' and 'update' operations. "+ - "Set to 0 for a non-partitioned topic. "+ - "Partitioned topics provide higher throughput by dividing message traffic across multiple brokers. "+ - "Each partition is an independent unit with its own retention and cursor positions."), - ), - mcp.WithBoolean("force", - mcp.Description("Force operation even if it disrupts producers or consumers. Optional for 'delete' operation. "+ - "When true, all producers and consumers will be forcefully disconnected. "+ - "Use with caution as it can interrupt active message processing."), - ), - mcp.WithBoolean("non-partitioned", - mcp.Description("Operate on a non-partitioned topic. Optional for 'delete' operation. "+ - "When true and operating on a partitioned topic name, only deletes the non-partitioned topic "+ - "with the same name, if it exists."), - ), - mcp.WithBoolean("partitioned", - mcp.Description("Get stats for a partitioned topic. Optional for 'stats' operation. "+ - "It has to be true if the topic is partitioned. Leave it empty or false for non-partitioned topic."), - ), - mcp.WithBoolean("per-partition", - mcp.Description("Include per-partition stats. Optional for 'stats' operation. "+ - "When true, returns statistics for each partition separately. "+ - "Requires 'partitioned' parameter to be true."), - ), - mcp.WithString("config", - mcp.Description("JSON configuration for the topic. Required for 'update' operation. "+ - "Set various policies like retention, compaction, deduplication, etc. "+ - "Use a JSON object format, e.g., '{\"deduplicationEnabled\": true, \"replication_clusters\": [\"us-west\", \"us-east\"]}'"), - ), - mcp.WithString("messageId", - mcp.Description("Message ID for operations that require a position. 
Required for 'offload' operation. "+ - "Format is 'ledgerId:entryId' representing a position in the topic's message log. "+ - "For offload operations, specifies the message up to which data should be moved to long-term storage."), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_topic", + Description: toolDesc, + InputSchema: inputSchema, + }, nil } // buildTopicHandler builds the Pulsar Admin Topic handler function // Migrated from the original handler logic -func (b *PulsarAdminTopicToolBuilder) buildTopicHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get required parameters - resource, err := request.RequireString("resource") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil - } - - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil - } - +func (b *PulsarAdminTopicToolBuilder) buildTopicHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminTopicInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminTopicInput) (*sdk.CallToolResult, any, error) { // Normalize parameters - resource = strings.ToLower(resource) - operation = strings.ToLower(operation) + resource := strings.ToLower(input.Resource) + operation := strings.ToLower(input.Operation) // Validate write operations in read-only mode if readOnly && (operation == "create" || operation == "delete" || operation == "unload" || operation == "terminate" || operation == "compact" || operation == "update" || operation == "offload") { - return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations are not allowed in read-only mode") } // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } // Create the admin client admin, err := session.GetAdminClient() if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + return nil, nil, b.handleError("get admin client", err) } // Dispatch based on resource and operation @@ -220,88 +211,155 @@ func (b *PulsarAdminTopicToolBuilder) buildTopicHandler(readOnly bool) func(cont case "topic": switch operation { case "get": - return b.handleTopicGet(admin, request) + result, err := b.handleTopicGet(admin, input) + return result, nil, err case "create": - return b.handleTopicCreate(admin, request) + result, err := b.handleTopicCreate(admin, input) + return result, nil, err case "delete": - return b.handleTopicDelete(admin, request) + result, err := b.handleTopicDelete(admin, input) + return result, nil, err case "stats": - return b.handleTopicStats(admin, request) + result, err := b.handleTopicStats(admin, input) + return result, nil, err case "lookup": - return b.handleTopicLookup(admin, request) + result, err := b.handleTopicLookup(admin, input) + return result, nil, err case "internal-stats": - return b.handleTopicInternalStats(admin, request) + result, err := b.handleTopicInternalStats(admin, input) + return result, nil, err case "internal-info": - return b.handleTopicInternalInfo(admin, request) + result, err := b.handleTopicInternalInfo(admin, input) + 
return result, nil, err case "bundle-range": - return b.handleTopicBundleRange(admin, request) + result, err := b.handleTopicBundleRange(admin, input) + return result, nil, err case "last-message-id": - return b.handleTopicLastMessageID(admin, request) + result, err := b.handleTopicLastMessageID(admin, input) + return result, nil, err case "status": - return b.handleTopicStatus(admin, request) + result, err := b.handleTopicStatus(admin, input) + return result, nil, err case "unload": - return b.handleTopicUnload(admin, request) + result, err := b.handleTopicUnload(admin, input) + return result, nil, err case "terminate": - return b.handleTopicTerminate(admin, request) + result, err := b.handleTopicTerminate(admin, input) + return result, nil, err case "compact": - return b.handleTopicCompact(admin, request) + result, err := b.handleTopicCompact(admin, input) + return result, nil, err case "update": - return b.handleTopicUpdate(admin, request) + result, err := b.handleTopicUpdate(admin, input) + return result, nil, err case "offload": - return b.handleTopicOffload(admin, request) + result, err := b.handleTopicOffload(admin, input) + return result, nil, err case "offload-status": - return b.handleTopicOffloadStatus(admin, request) + result, err := b.handleTopicOffloadStatus(admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown topic operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unknown topic operation: %s", operation) } case "topics": switch operation { case "list": - return b.handleTopicsList(admin, request) + result, err := b.handleTopicsList(admin, input) + return result, nil, err default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown topics operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unknown topics operation: %s", operation) } default: - return mcp.NewToolResultError(fmt.Sprintf("Unknown resource: %s", resource)), nil + return nil, nil, fmt.Errorf("unknown resource: %s", resource) } } } +func buildPulsarAdminTopicInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminTopicInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != "object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + setSchemaDescription(schema, "resource", pulsarAdminTopicResourceDesc) + setSchemaDescription(schema, "operation", pulsarAdminTopicOperationDesc) + setSchemaDescription(schema, "topic", pulsarAdminTopicTopicDesc) + setSchemaDescription(schema, "namespace", pulsarAdminTopicNamespaceDesc) + setSchemaDescription(schema, "partitions", pulsarAdminTopicPartitionsDesc) + setSchemaDescription(schema, "force", pulsarAdminTopicForceDesc) + setSchemaDescription(schema, "non-partitioned", pulsarAdminTopicNonPartitionedDesc) + setSchemaDescription(schema, "partitioned", pulsarAdminTopicPartitionedDesc) + setSchemaDescription(schema, "per-partition", pulsarAdminTopicPerPartitionDesc) + setSchemaDescription(schema, "config", pulsarAdminTopicConfigDesc) + setSchemaDescription(schema, "messageId", pulsarAdminTopicMessageIDDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + // Unified error handling and utility functions // handleError provides unified error handling -func (b *PulsarAdminTopicToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed 
to %s: %v", operation, err)) +func (b *PulsarAdminTopicToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } // marshalResponse provides unified JSON serialization for responses -func (b *PulsarAdminTopicToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) + } + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: string(jsonBytes)}}, + }, nil +} + +func textResult(message string) *sdk.CallToolResult { + return &sdk.CallToolResult{ + Content: []sdk.Content{&sdk.TextContent{Text: message}}, + } +} + +func requireString(value *string, key string) (string, error) { + if value == nil { + return "", fmt.Errorf("required argument %q not found", key) + } + return *value, nil +} + +func requireInt(value *int, key string) (int, error) { + if value == nil { + return 0, fmt.Errorf("required argument %q not found", key) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return *value, nil } // handleTopicsList lists all existing topics under the specified namespace -func (b *PulsarAdminTopicToolBuilder) handleTopicsList(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicsList(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - namespace, err := request.RequireString("namespace") + namespace, err := requireString(input.Namespace, "namespace") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace' for topics.list: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'namespace' for topics.list: %v", err) } // Get namespace name namespaceName, err := utils.GetNamespaceName(namespace) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name '%s': %v", namespace, err)), nil + return nil, fmt.Errorf("invalid namespace name '%s': %v", namespace, err) } // List topics partitionedTopics, nonPartitionedTopics, err := admin.Topics().List(*namespaceName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list topics in namespace '%s': %v", - namespace, err)), nil + return nil, fmt.Errorf("failed to list topics in namespace '%s': %v", + namespace, err) } // Format the output @@ -317,57 +375,57 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicsList(admin cmdutils.Client, re } // handleTopicGet gets the metadata of an existing topic -func (b *PulsarAdminTopicToolBuilder) handleTopicGet(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicGet(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.get: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.get: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", 
topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get topic metadata metadata, err := admin.Topics().GetMetadata(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get metadata for topic '%s': %v", - topic, err)), nil + return nil, fmt.Errorf("failed to get metadata for topic '%s': %v", + topic, err) } return b.marshalResponse(metadata) } // handleTopicStats gets the stats for an existing topic -func (b *PulsarAdminTopicToolBuilder) handleTopicStats(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicStats(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.stats: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.stats: %v", err) } // Get optional parameters - partitioned := request.GetBool("partitioned", false) - perPartition := request.GetBool("per-partition", false) + partitioned := input.Partitioned + perPartition := input.PerPartition // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } namespaceName, err := utils.GetNamespaceName(topicName.GetTenant() + "/" + topicName.GetNamespace()) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + return nil, fmt.Errorf("invalid namespace name: %v", err) } // List topics to determine if this topic is partitioned partitionedTopics, nonPartitionedTopics, err := admin.Topics().List(*namespaceName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to list topics in namespace '%s': %v", - namespaceName, err)), nil + return nil, fmt.Errorf("failed to list topics in namespace '%s': %v", + namespaceName, err) } if slices.Contains(partitionedTopics, topicName.String()) { @@ -382,16 +440,16 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicStats(admin cmdutils.Client, re // Get partitioned topic stats stats, err := admin.Topics().GetPartitionedStats(*topicName, perPartition) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for partitioned topic '%s': %v", - topic, err)), nil + return nil, fmt.Errorf("failed to get stats for partitioned topic '%s': %v", + topic, err) } data = stats } else { // Get topic stats stats, err := admin.Topics().GetStats(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for topic '%s': %v", - topic, err)), nil + return nil, fmt.Errorf("failed to get stats for topic '%s': %v", + topic, err) } data = stats } @@ -400,90 +458,90 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicStats(admin cmdutils.Client, re } // handleTopicLookup looks up the owner broker of a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicLookup(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicLookup(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, 
"topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.lookup: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.lookup: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Lookup topic lookup, err := admin.Topics().Lookup(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to lookup topic '%s': %v", - topic, err)), nil + return nil, fmt.Errorf("failed to lookup topic '%s': %v", + topic, err) } return b.marshalResponse(lookup) } // handleTopicCreate creates a topic with the specified number of partitions -func (b *PulsarAdminTopicToolBuilder) handleTopicCreate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicCreate(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.create: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.create: %v", err) } - partitions, err := request.RequireFloat("partitions") + partitions, err := requireInt(input.Partitions, "partitions") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'partitions' for topic.create: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'partitions' for topic.create: %v", err) } // Validate partitions if partitions < 0 { - return mcp.NewToolResultError("Invalid partitions number: must be non-negative. 
Use 0 for a non-partitioned topic."), nil + return nil, fmt.Errorf("invalid partitions number: must be non-negative; use 0 for a non-partitioned topic") } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Create topic - err = admin.Topics().Create(*topicName, int(partitions)) + err = admin.Topics().Create(*topicName, partitions) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to create topic '%s' with %d partitions: %v", - topic, int(partitions), err)), nil + return nil, fmt.Errorf("failed to create topic '%s' with %d partitions: %v", + topic, partitions, err) } - if int(partitions) == 0 { - return mcp.NewToolResultText(fmt.Sprintf("Successfully created non-partitioned topic '%s'", + if partitions == 0 { + return textResult(fmt.Sprintf("Successfully created non-partitioned topic '%s'", topicName.String())), nil } - return mcp.NewToolResultText(fmt.Sprintf("Successfully created topic '%s' with %d partitions", - topicName.String(), int(partitions))), nil + return textResult(fmt.Sprintf("Successfully created topic '%s' with %d partitions", + topicName.String(), partitions)), nil } // handleTopicDelete deletes a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicDelete(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicDelete(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.delete: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.delete: %v", err) } // Get optional parameters - force := request.GetBool("force", false) - nonPartitioned := request.GetBool("non-partitioned", false) + force := input.Force + nonPartitioned := input.NonPartitioned // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Delete topic err = admin.Topics().Delete(*topicName, force, nonPartitioned) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to delete topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to delete topic '%s': %v", topic, err) } forceStr := "" @@ -496,195 +554,195 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicDelete(admin cmdutils.Client, r nonPartitionedStr = " (non-partitioned)" } - return mcp.NewToolResultText(fmt.Sprintf("Successfully deleted topic '%s'%s%s", + return textResult(fmt.Sprintf("Successfully deleted topic '%s'%s%s", topicName.String(), forceStr, nonPartitionedStr)), nil } // handleTopicUnload unloads a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicUnload(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicUnload(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return 
mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.unload: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.unload: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Unload topic err = admin.Topics().Unload(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to unload topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to unload topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Successfully unloaded topic '%s'", topicName.String())), nil + return textResult(fmt.Sprintf("Successfully unloaded topic '%s'", topicName.String())), nil } // handleTopicTerminate terminates a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicTerminate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicTerminate(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.terminate: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.terminate: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Terminate topic messageID, err := admin.Topics().Terminate(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to terminate topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to terminate topic '%s': %v", topic, err) } // Convert message ID to string msgIDStr := fmt.Sprintf("%d:%d", messageID.LedgerID, messageID.EntryID) - return mcp.NewToolResultText(fmt.Sprintf("Successfully terminated topic '%s' at message %s. "+ + return textResult(fmt.Sprintf("Successfully terminated topic '%s' at message %s. 
"+ "No more messages can be published to this topic.", topicName.String(), msgIDStr)), nil } // handleTopicCompact triggers compaction on a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicCompact(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicCompact(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.compact: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.compact: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Compact topic err = admin.Topics().Compact(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger compaction for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to trigger compaction for topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Successfully triggered compaction for topic '%s'. "+ + return textResult(fmt.Sprintf("Successfully triggered compaction for topic '%s'. "+ "Run 'topic.status' to check compaction status.", topicName.String())), nil } // handleTopicInternalStats gets the internal stats for a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicInternalStats(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicInternalStats(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.internal-stats: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.internal-stats: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get internal stats stats, err := admin.Topics().GetInternalStats(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get internal stats for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get internal stats for topic '%s': %v", topic, err) } return b.marshalResponse(stats) } // handleTopicInternalInfo gets the internal info for a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicInternalInfo(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicInternalInfo(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.internal-info: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.internal-info: 
%v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get internal info info, err := admin.Topics().GetInternalInfo(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get internal info for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get internal info for topic '%s': %v", topic, err) } return b.marshalResponse(info) } // handleTopicBundleRange gets the bundle range of a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicBundleRange(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicBundleRange(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.bundle-range: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.bundle-range: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get bundle range bundle, err := admin.Topics().GetBundleRange(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get bundle range for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get bundle range for topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Bundle range for topic '%s': %s", topicName.String(), bundle)), nil + return textResult(fmt.Sprintf("Bundle range for topic '%s': %s", topicName.String(), bundle)), nil } // handleTopicLastMessageID gets the last message ID of a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicLastMessageID(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicLastMessageID(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.last-message-id: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.last-message-id: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get last message ID messageID, err := admin.Topics().GetLastMessageID(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get last message ID for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get last message ID for topic '%s': %v", topic, err) } return b.marshalResponse(messageID) } // handleTopicStatus gets the status of a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicStatus(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b 
*PulsarAdminTopicToolBuilder) handleTopicStatus(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.status: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.status: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get topic metadata for status check metadata, err := admin.Topics().GetMetadata(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get status for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get status for topic '%s': %v", topic, err) } // Create status object with available information @@ -693,64 +751,64 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicStatus(admin cmdutils.Client, r Active bool `json:"active"` }{ Metadata: metadata, - Active: true, // If metadata retrieval succeeded, topic is active + Active: true, } return b.marshalResponse(status) } // handleTopicUpdate updates a topic configuration -func (b *PulsarAdminTopicToolBuilder) handleTopicUpdate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicUpdate(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.update: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.update: %v", err) } - partitions, err := request.RequireFloat("partitions") + partitions, err := requireInt(input.Partitions, "partitions") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'partitions' for topic.update: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'partitions' for topic.update: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } - err = admin.Topics().Update(*topicName, int(partitions)) + err = admin.Topics().Update(*topicName, partitions) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to update topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to update topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Successfully updated topic '%s' partitions to %d", - topicName.String(), int(partitions))), nil + return textResult(fmt.Sprintf("Successfully updated topic '%s' partitions to %d", + topicName.String(), partitions)), nil } // handleTopicOffload offloads data from a topic to long-term storage -func (b *PulsarAdminTopicToolBuilder) handleTopicOffload(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicOffload(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required 
parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.offload: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.offload: %v", err) } - messageIDStr, err := request.RequireString("messageId") + messageIDStr, err := requireString(input.MessageID, "messageId") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'messageId' for topic.offload: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'messageId' for topic.offload: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Parse message ID from format "ledgerId:entryId" var ledgerID, entryID int64 if _, err := fmt.Sscanf(messageIDStr, "%d:%d", &ledgerID, &entryID); err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid message ID format (expected 'ledgerId:entryId'): %v. "+ - "Valid examples: '123:456'", err)), nil + return nil, fmt.Errorf("invalid message ID format (expected 'ledgerId:entryId'): %v. "+ + "Valid examples: '123:456'", err) } // Create MessageID object @@ -762,33 +820,133 @@ func (b *PulsarAdminTopicToolBuilder) handleTopicOffload(admin cmdutils.Client, // Offload topic err = admin.Topics().Offload(*topicName, messageID) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger offload for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to trigger offload for topic '%s': %v", topic, err) } - return mcp.NewToolResultText(fmt.Sprintf("Successfully triggered offload for topic '%s' up to message %s. "+ + return textResult(fmt.Sprintf("Successfully triggered offload for topic '%s' up to message %s. 
"+ "Use 'topic.offload-status' to check the offload progress.", topicName.String(), messageIDStr)), nil } // handleTopicOffloadStatus checks the status of data offloading for a topic -func (b *PulsarAdminTopicToolBuilder) handleTopicOffloadStatus(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicToolBuilder) handleTopicOffloadStatus(admin cmdutils.Client, input pulsarAdminTopicInput) (*sdk.CallToolResult, error) { // Get required parameters - topic, err := request.RequireString("topic") + topic, err := requireString(input.Topic, "topic") if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.offload-status: %v", err)), nil + return nil, fmt.Errorf("missing required parameter 'topic' for topic.offload-status: %v", err) } // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + return nil, fmt.Errorf("invalid topic name '%s': %v", topic, err) } // Get offload status status, err := admin.Topics().OffloadStatus(*topicName) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get offload status for topic '%s': %v", topic, err)), nil + return nil, fmt.Errorf("failed to get offload status for topic '%s': %v", topic, err) } return b.marshalResponse(status) } + +func setSchemaDescription(schema *jsonschema.Schema, name, desc string) { + if schema == nil { + return + } + prop, ok := schema.Properties[name] + if !ok || prop == nil { + return + } + prop.Description = desc +} + +func normalizeAdditionalProperties(schema *jsonschema.Schema) { + visited := map[*jsonschema.Schema]bool{} + var walk func(*jsonschema.Schema) + walk = func(s *jsonschema.Schema) { + if s == nil || visited[s] { + return + } + visited[s] = true + + if s.Type == "object" && s.Properties != nil && isFalseSchema(s.AdditionalProperties) { + s.AdditionalProperties = nil + } + + for _, prop := range s.Properties { + walk(prop) + } + for _, prop := range s.PatternProperties { + walk(prop) + } + for _, def := range s.Defs { + walk(def) + } + for _, def := range s.Definitions { + walk(def) + } + if s.AdditionalProperties != nil && !isFalseSchema(s.AdditionalProperties) { + walk(s.AdditionalProperties) + } + if s.Items != nil { + walk(s.Items) + } + for _, item := range s.PrefixItems { + walk(item) + } + if s.AdditionalItems != nil { + walk(s.AdditionalItems) + } + if s.UnevaluatedItems != nil { + walk(s.UnevaluatedItems) + } + if s.UnevaluatedProperties != nil { + walk(s.UnevaluatedProperties) + } + if s.PropertyNames != nil { + walk(s.PropertyNames) + } + if s.Contains != nil { + walk(s.Contains) + } + for _, subschema := range s.AllOf { + walk(subschema) + } + for _, subschema := range s.AnyOf { + walk(subschema) + } + for _, subschema := range s.OneOf { + walk(subschema) + } + if s.Not != nil { + walk(s.Not) + } + if s.If != nil { + walk(s.If) + } + if s.Then != nil { + walk(s.Then) + } + if s.Else != nil { + walk(s.Else) + } + for _, subschema := range s.DependentSchemas { + walk(subschema) + } + } + walk(schema) +} + +func isFalseSchema(schema *jsonschema.Schema) bool { + if schema == nil || schema.Not == nil { + return false + } + if !reflect.ValueOf(*schema.Not).IsZero() { + return false + } + clone := *schema + clone.Not = nil + return reflect.ValueOf(clone).IsZero() +} diff --git a/pkg/mcp/builders/pulsar/topic_legacy.go b/pkg/mcp/builders/pulsar/topic_legacy.go new file mode 100644 index 
0000000..4e8433f --- /dev/null +++ b/pkg/mcp/builders/pulsar/topic_legacy.go @@ -0,0 +1,793 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + "slices" + "strings" + + "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/pulsarctl/pkg/cmdutils" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" +) + +// PulsarAdminTopicLegacyToolBuilder implements the ToolBuilder interface for Pulsar Admin Topic tools. +// It provides functionality to build Pulsar topic management tools for the legacy server. +// /nolint:revive +type PulsarAdminTopicLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminTopicLegacyToolBuilder creates a new Pulsar Admin Topic legacy tool builder instance. +func NewPulsarAdminTopicLegacyToolBuilder() *PulsarAdminTopicLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_topic", + Version: "1.0.0", + Description: "Pulsar Admin topic management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "topic", "admin"}, + } + + features := []string{ + "pulsar-admin-topics", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminTopicLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar Admin Topic tool list for the legacy server. +func (b *PulsarAdminTopicLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + // Check features - return empty list if no required features are present + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + // Validate configuration (only validate when matching features are present) + if err := b.Validate(config); err != nil { + return nil, err + } + + // Build tools + tool := b.buildTopicTool() + handler := b.buildTopicHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +// buildTopicTool builds the Pulsar Admin Topic MCP tool definition +// Migrated from the original tool definition logic +func (b *PulsarAdminTopicLegacyToolBuilder) buildTopicTool() mcp.Tool { + toolDesc := "Manage Apache Pulsar topics. " + + "Topics are the core messaging entities in Pulsar that store and transmit messages. " + + "Pulsar supports two types of topics: persistent (durable storage with guaranteed delivery) " + + "and non-persistent (in-memory with at-most-once delivery). " + + "Topics can be partitioned for parallel processing and higher throughput, where each partition " + + "functions as an independent topic with its own message log. " + + "Topics follow a hierarchical naming structure: persistent://tenant/namespace/topic. 
" + + "This tool supports various operations on topics including creation, deletion, lookup, compaction, " + + "offloading, and retrieving statistics. " + + "Do not use this tool for Kafka protocol operations. Use 'kafka_admin_topics' instead." + + "Most operations require namespace admin permissions." + + resourceDesc := "Resource to operate on. Available resources:\n" + + "- topic: A Pulsar topic\n" + + "- topics: Multiple topics within a namespace" + + operationDesc := "Operation to perform. Available operations:\n" + + "- list: List all topics in a namespace\n" + + "- get: Get metadata for a topic\n" + + "- create: Create a new topic with optional partitions\n" + + "- delete: Delete a topic\n" + + "- stats: Get stats for a topic\n" + + "- lookup: Look up the broker serving a topic\n" + + "- internal-stats: Get internal stats for a topic\n" + + "- internal-info: Get internal info for a topic\n" + + "- bundle-range: Get the bundle range of a topic\n" + + "- last-message-id: Get the last message ID of a topic\n" + + "- status: Get the status of a topic\n" + + "- unload: Unload a topic\n" + + "- terminate: Terminate a topic\n" + + "- compact: Trigger compaction on a topic\n" + + "- update: Update a topic partitions\n" + + "- offload: Offload data from a topic to long-term storage\n" + + "- offload-status: Check the status of data offloading for a topic" + + return mcp.NewTool("pulsar_admin_topic", + mcp.WithDescription(toolDesc), + mcp.WithString("resource", mcp.Required(), + mcp.Description(resourceDesc), + ), + mcp.WithString("operation", mcp.Required(), + mcp.Description(operationDesc), + ), + mcp.WithString("topic", + mcp.Description("The fully qualified topic name (format: [persistent|non-persistent]://tenant/namespace/topic). "+ + "Required for all operations except 'list'. "+ + "For partitioned topics, reference the base topic name without the partition suffix. "+ + "To operate on a specific partition, append -partition-N to the topic name."), + ), + mcp.WithString("namespace", + mcp.Description("The namespace name in the format 'tenant/namespace'. "+ + "Required for the 'list' operation. "+ + "A namespace is a logical grouping of topics within a tenant."), + ), + mcp.WithNumber("partitions", + mcp.Description("The number of partitions for the topic. Required for 'create' and 'update' operations. "+ + "Set to 0 for a non-partitioned topic. "+ + "Partitioned topics provide higher throughput by dividing message traffic across multiple brokers. "+ + "Each partition is an independent unit with its own retention and cursor positions."), + ), + mcp.WithBoolean("force", + mcp.Description("Force operation even if it disrupts producers or consumers. Optional for 'delete' operation. "+ + "When true, all producers and consumers will be forcefully disconnected. "+ + "Use with caution as it can interrupt active message processing."), + ), + mcp.WithBoolean("non-partitioned", + mcp.Description("Operate on a non-partitioned topic. Optional for 'delete' operation. "+ + "When true and operating on a partitioned topic name, only deletes the non-partitioned topic "+ + "with the same name, if it exists."), + ), + mcp.WithBoolean("partitioned", + mcp.Description("Get stats for a partitioned topic. Optional for 'stats' operation. "+ + "It has to be true if the topic is partitioned. Leave it empty or false for non-partitioned topic."), + ), + mcp.WithBoolean("per-partition", + mcp.Description("Include per-partition stats. Optional for 'stats' operation. 
"+ + "When true, returns statistics for each partition separately. "+ + "Requires 'partitioned' parameter to be true."), + ), + mcp.WithString("config", + mcp.Description("JSON configuration for the topic. Required for 'update' operation. "+ + "Set various policies like retention, compaction, deduplication, etc. "+ + "Use a JSON object format, e.g., '{\"deduplicationEnabled\": true, \"replication_clusters\": [\"us-west\", \"us-east\"]}'"), + ), + mcp.WithString("messageId", + mcp.Description("Message ID for operations that require a position. Required for 'offload' operation. "+ + "Format is 'ledgerId:entryId' representing a position in the topic's message log. "+ + "For offload operations, specifies the message up to which data should be moved to long-term storage."), + ), + ) +} + +// buildTopicHandler builds the Pulsar Admin Topic handler function +// Migrated from the original handler logic +func (b *PulsarAdminTopicLegacyToolBuilder) buildTopicHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + resource, err := request.RequireString("resource") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get resource: %v", err)), nil + } + + operation, err := request.RequireString("operation") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get operation: %v", err)), nil + } + + // Normalize parameters + resource = strings.ToLower(resource) + operation = strings.ToLower(operation) + + // Validate write operations in read-only mode + if readOnly && (operation == "create" || operation == "delete" || operation == "unload" || + operation == "terminate" || operation == "compact" || operation == "update" || operation == "offload") { + return mcp.NewToolResultError("Write operations are not allowed in read-only mode"), nil + } + + // Get Pulsar session from context + session := mcpCtx.GetPulsarSession(ctx) + if session == nil { + return mcp.NewToolResultError("Pulsar session not found in context"), nil + } + + // Create the admin client + admin, err := session.GetAdminClient() + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get admin client: %v", err)), nil + } + + // Dispatch based on resource and operation + switch resource { + case "topic": + switch operation { + case "get": + return b.handleTopicGet(admin, request) + case "create": + return b.handleTopicCreate(admin, request) + case "delete": + return b.handleTopicDelete(admin, request) + case "stats": + return b.handleTopicStats(admin, request) + case "lookup": + return b.handleTopicLookup(admin, request) + case "internal-stats": + return b.handleTopicInternalStats(admin, request) + case "internal-info": + return b.handleTopicInternalInfo(admin, request) + case "bundle-range": + return b.handleTopicBundleRange(admin, request) + case "last-message-id": + return b.handleTopicLastMessageID(admin, request) + case "status": + return b.handleTopicStatus(admin, request) + case "unload": + return b.handleTopicUnload(admin, request) + case "terminate": + return b.handleTopicTerminate(admin, request) + case "compact": + return b.handleTopicCompact(admin, request) + case "update": + return b.handleTopicUpdate(admin, request) + case "offload": + return b.handleTopicOffload(admin, request) + case "offload-status": + return b.handleTopicOffloadStatus(admin, request) + default: + return 
mcp.NewToolResultError(fmt.Sprintf("Unknown topic operation: %s", operation)), nil + } + case "topics": + switch operation { + case "list": + return b.handleTopicsList(admin, request) + default: + return mcp.NewToolResultError(fmt.Sprintf("Unknown topics operation: %s", operation)), nil + } + default: + return mcp.NewToolResultError(fmt.Sprintf("Unknown resource: %s", resource)), nil + } + } +} + +// Unified error handling and utility functions + +// handleError provides unified error handling +func (b *PulsarAdminTopicLegacyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { + return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +} + +// marshalResponse provides unified JSON serialization for responses +func (b *PulsarAdminTopicLegacyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { + jsonBytes, err := json.Marshal(data) + if err != nil { + return b.handleError("marshal response", err), nil + } + return mcp.NewToolResultText(string(jsonBytes)), nil +} + +// handleTopicsList lists all existing topics under the specified namespace +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicsList(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + namespace, err := request.RequireString("namespace") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'namespace' for topics.list: %v", err)), nil + } + + // Get namespace name + namespaceName, err := utils.GetNamespaceName(namespace) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name '%s': %v", namespace, err)), nil + } + + // List topics + partitionedTopics, nonPartitionedTopics, err := admin.Topics().List(*namespaceName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to list topics in namespace '%s': %v", + namespace, err)), nil + } + + // Format the output + result := struct { + PartitionedTopics []string `json:"partitionedTopics"` + NonPartitionedTopics []string `json:"nonPartitionedTopics"` + }{ + PartitionedTopics: partitionedTopics, + NonPartitionedTopics: nonPartitionedTopics, + } + + return b.marshalResponse(result) +} + +// handleTopicGet gets the metadata of an existing topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicGet(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.get: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get topic metadata + metadata, err := admin.Topics().GetMetadata(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get metadata for topic '%s': %v", + topic, err)), nil + } + + return b.marshalResponse(metadata) +} + +// handleTopicStats gets the stats for an existing topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicStats(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.stats: %v", err)), nil + } + + // Get optional parameters + partitioned 
:= request.GetBool("partitioned", false) + perPartition := request.GetBool("per-partition", false) + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + namespaceName, err := utils.GetNamespaceName(topicName.GetTenant() + "/" + topicName.GetNamespace()) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid namespace name: %v", err)), nil + } + + // List topics to determine if this topic is partitioned + partitionedTopics, nonPartitionedTopics, err := admin.Topics().List(*namespaceName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to list topics in namespace '%s': %v", + namespaceName, err)), nil + } + + if slices.Contains(partitionedTopics, topicName.String()) { + partitioned = true + } + if slices.Contains(nonPartitionedTopics, topicName.String()) { + partitioned = false + } + + var data interface{} + if partitioned { + // Get partitioned topic stats + stats, err := admin.Topics().GetPartitionedStats(*topicName, perPartition) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for partitioned topic '%s': %v", + topic, err)), nil + } + data = stats + } else { + // Get topic stats + stats, err := admin.Topics().GetStats(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get stats for topic '%s': %v", + topic, err)), nil + } + data = stats + } + + return b.marshalResponse(data) +} + +// handleTopicLookup looks up the owner broker of a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicLookup(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.lookup: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Lookup topic + lookup, err := admin.Topics().Lookup(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to lookup topic '%s': %v", + topic, err)), nil + } + + return b.marshalResponse(lookup) +} + +// handleTopicCreate creates a topic with the specified number of partitions +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicCreate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.create: %v", err)), nil + } + + partitions, err := request.RequireFloat("partitions") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'partitions' for topic.create: %v", err)), nil + } + + // Validate partitions + if partitions < 0 { + return mcp.NewToolResultError("Invalid partitions number: must be non-negative. 
Use 0 for a non-partitioned topic."), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Create topic + err = admin.Topics().Create(*topicName, int(partitions)) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to create topic '%s' with %d partitions: %v", + topic, int(partitions), err)), nil + } + + if int(partitions) == 0 { + return mcp.NewToolResultText(fmt.Sprintf("Successfully created non-partitioned topic '%s'", + topicName.String())), nil + } + return mcp.NewToolResultText(fmt.Sprintf("Successfully created topic '%s' with %d partitions", + topicName.String(), int(partitions))), nil +} + +// handleTopicDelete deletes a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicDelete(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.delete: %v", err)), nil + } + + // Get optional parameters + force := request.GetBool("force", false) + nonPartitioned := request.GetBool("non-partitioned", false) + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Delete topic + err = admin.Topics().Delete(*topicName, force, nonPartitioned) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to delete topic '%s': %v", topic, err)), nil + } + + forceStr := "" + if force { + forceStr = " forcefully" + } + + nonPartitionedStr := "" + if nonPartitioned { + nonPartitionedStr = " (non-partitioned)" + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully deleted topic '%s'%s%s", + topicName.String(), forceStr, nonPartitionedStr)), nil +} + +// handleTopicUnload unloads a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicUnload(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.unload: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Unload topic + err = admin.Topics().Unload(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to unload topic '%s': %v", topic, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully unloaded topic '%s'", topicName.String())), nil +} + +// handleTopicTerminate terminates a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicTerminate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.terminate: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Terminate topic + messageID, err := admin.Topics().Terminate(*topicName) + if err != nil { 
+ return mcp.NewToolResultError(fmt.Sprintf("Failed to terminate topic '%s': %v", topic, err)), nil + } + + // Convert message ID to string + msgIDStr := fmt.Sprintf("%d:%d", messageID.LedgerID, messageID.EntryID) + + return mcp.NewToolResultText(fmt.Sprintf("Successfully terminated topic '%s' at message %s. "+ + "No more messages can be published to this topic.", + topicName.String(), msgIDStr)), nil +} + +// handleTopicCompact triggers compaction on a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicCompact(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.compact: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Compact topic + err = admin.Topics().Compact(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger compaction for topic '%s': %v", topic, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully triggered compaction for topic '%s'. "+ + "Run 'topic.status' to check compaction status.", topicName.String())), nil +} + +// handleTopicInternalStats gets the internal stats for a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicInternalStats(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.internal-stats: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get internal stats + stats, err := admin.Topics().GetInternalStats(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get internal stats for topic '%s': %v", topic, err)), nil + } + + return b.marshalResponse(stats) +} + +// handleTopicInternalInfo gets the internal info for a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicInternalInfo(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.internal-info: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get internal info + info, err := admin.Topics().GetInternalInfo(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get internal info for topic '%s': %v", topic, err)), nil + } + + return b.marshalResponse(info) +} + +// handleTopicBundleRange gets the bundle range of a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicBundleRange(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.bundle-range: 
%v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get bundle range + bundle, err := admin.Topics().GetBundleRange(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get bundle range for topic '%s': %v", topic, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Bundle range for topic '%s': %s", topicName.String(), bundle)), nil +} + +// handleTopicLastMessageID gets the last message ID of a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicLastMessageID(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.last-message-id: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get last message ID + messageID, err := admin.Topics().GetLastMessageID(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get last message ID for topic '%s': %v", topic, err)), nil + } + + return b.marshalResponse(messageID) +} + +// handleTopicStatus gets the status of a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicStatus(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.status: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get topic metadata for status check + metadata, err := admin.Topics().GetMetadata(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get status for topic '%s': %v", topic, err)), nil + } + + // Create status object with available information + status := struct { + Metadata interface{} `json:"metadata"` + Active bool `json:"active"` + }{ + Metadata: metadata, + Active: true, + } + + return b.marshalResponse(status) +} + +// handleTopicUpdate updates a topic configuration +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicUpdate(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.update: %v", err)), nil + } + + partitions, err := request.RequireFloat("partitions") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'partitions' for topic.update: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + err = admin.Topics().Update(*topicName, int(partitions)) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to update topic '%s': %v", topic, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully updated topic '%s' partitions to %d", 
+ topicName.String(), int(partitions))), nil +} + +// handleTopicOffload offloads data from a topic to long-term storage +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicOffload(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.offload: %v", err)), nil + } + + messageIDStr, err := request.RequireString("messageId") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'messageId' for topic.offload: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Parse message ID from format "ledgerId:entryId" + var ledgerID, entryID int64 + if _, err := fmt.Sscanf(messageIDStr, "%d:%d", &ledgerID, &entryID); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid message ID format (expected 'ledgerId:entryId'): %v. "+ + "Valid examples: '123:456'", err)), nil + } + + // Create MessageID object + messageID := utils.MessageID{ + LedgerID: ledgerID, + EntryID: entryID, + } + + // Offload topic + err = admin.Topics().Offload(*topicName, messageID) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to trigger offload for topic '%s': %v", topic, err)), nil + } + + return mcp.NewToolResultText(fmt.Sprintf("Successfully triggered offload for topic '%s' up to message %s. "+ + "Use 'topic.offload-status' to check the offload progress.", + topicName.String(), messageIDStr)), nil +} + +// handleTopicOffloadStatus checks the status of data offloading for a topic +func (b *PulsarAdminTopicLegacyToolBuilder) handleTopicOffloadStatus(admin cmdutils.Client, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + // Get required parameters + topic, err := request.RequireString("topic") + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Missing required parameter 'topic' for topic.offload-status: %v", err)), nil + } + + // Get topic name + topicName, err := utils.GetTopicName(topic) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Invalid topic name '%s': %v", topic, err)), nil + } + + // Get offload status + status, err := admin.Topics().OffloadStatus(*topicName) + if err != nil { + return mcp.NewToolResultError(fmt.Sprintf("Failed to get offload status for topic '%s': %v", topic, err)), nil + } + + return b.marshalResponse(status) +} diff --git a/pkg/mcp/builders/pulsar/topic_policy.go b/pkg/mcp/builders/pulsar/topic_policy.go index 065caa6..e889b02 100644 --- a/pkg/mcp/builders/pulsar/topic_policy.go +++ b/pkg/mcp/builders/pulsar/topic_policy.go @@ -22,13 +22,48 @@ import ( "strings" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/google/jsonschema-go/jsonschema" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" mcpCtx "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" ) +type pulsarAdminTopicPolicyInput struct { + Operation string `json:"operation"` + Topic string `json:"topic"` + RetentionSize *string `json:"retention_size,omitempty"` + RetentionTime *string `json:"retention_time,omitempty"` + TTLSeconds 
*float64 `json:"ttl_seconds,omitempty"` + CompactionThreshold *float64 `json:"compaction_threshold,omitempty"` + SubscriptionTypes []string `json:"subscription_types,omitempty"` +} + +const ( + pulsarAdminTopicPolicyToolDesc = "Manage Pulsar topic policies including retention, TTL, compaction, and subscription policies. " + + "This tool provides functionality to get, set, and remove various topic-level policies in Apache Pulsar." + + pulsarAdminTopicPolicyOperationDesc = "Operation to perform on topic policies. Available operations:\n" + + "- get_retention: Get message retention policy for a topic\n" + + "- set_retention: Set message retention policy for a topic\n" + + "- remove_retention: Remove message retention policy for a topic\n" + + "- get_ttl: Get message TTL policy for a topic\n" + + "- set_ttl: Set message TTL policy for a topic\n" + + "- remove_ttl: Remove message TTL policy for a topic\n" + + "- get_compaction: Get compaction policy for a topic\n" + + "- set_compaction: Set compaction policy for a topic\n" + + "- remove_compaction: Remove compaction policy for a topic\n" + + "- get_subscription_types: Get allowed subscription types for a topic\n" + + "- set_subscription_types: Set allowed subscription types for a topic\n" + + "- remove_subscription_types: Remove subscription types restriction for a topic" + pulsarAdminTopicPolicyTopicDesc = "Topic name in format 'persistent://tenant/namespace/topic' or 'tenant/namespace/topic'" + pulsarAdminTopicPolicyRetentionSizeDesc = "Retention size policy (e.g., '100MB', '1GB') - used with retention operations" + pulsarAdminTopicPolicyRetentionTimeDesc = "Retention time policy (e.g., '1d', '24h', '1440m') - used with retention operations" + pulsarAdminTopicPolicyTTLSecondsDesc = "TTL in seconds - used with TTL operations" + pulsarAdminTopicPolicyCompactionThresholdDesc = "Compaction threshold in bytes - used with compaction operations" + pulsarAdminTopicPolicySubscriptionTypesDesc = "List of allowed subscription types - used with subscription type operations" +) + // PulsarAdminTopicPolicyToolBuilder implements the ToolBuilder interface for Pulsar admin topic policies // /nolint:revive type PulsarAdminTopicPolicyToolBuilder struct { @@ -58,7 +93,7 @@ func NewPulsarAdminTopicPolicyToolBuilder() *PulsarAdminTopicPolicyToolBuilder { } // BuildTools builds the Pulsar admin topic policy tool list -func (b *PulsarAdminTopicPolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]builders.ToolDefinition, error) { // Check features - return empty list if no required features are present if !b.HasAnyRequiredFeature(config.Features) { return nil, nil @@ -70,11 +105,14 @@ func (b *PulsarAdminTopicPolicyToolBuilder) BuildTools(_ context.Context, config } // Build tools - tool := b.buildTopicPolicyTool() + tool, err := b.buildTopicPolicyTool() + if err != nil { + return nil, err + } handler := b.buildTopicPolicyHandler(config.ReadOnly) - return []server.ServerTool{ - { + return []builders.ToolDefinition{ + builders.ServerTool[pulsarAdminTopicPolicyInput, any]{ Tool: tool, Handler: handler, }, @@ -82,79 +120,42 @@ func (b *PulsarAdminTopicPolicyToolBuilder) BuildTools(_ context.Context, config } // buildTopicPolicyTool builds the Pulsar Admin Topic Policy MCP tool definition -func (b *PulsarAdminTopicPolicyToolBuilder) buildTopicPolicyTool() mcp.Tool { - toolDesc := "Manage Pulsar topic 
policies including retention, TTL, compaction, and subscription policies. " + - "This tool provides functionality to get, set, and remove various topic-level policies in Apache Pulsar." - - operationDesc := "Operation to perform on topic policies. Available operations:\n" + - "- get_retention: Get message retention policy for a topic\n" + - "- set_retention: Set message retention policy for a topic\n" + - "- remove_retention: Remove message retention policy for a topic\n" + - "- get_ttl: Get message TTL policy for a topic\n" + - "- set_ttl: Set message TTL policy for a topic\n" + - "- remove_ttl: Remove message TTL policy for a topic\n" + - "- get_compaction: Get compaction policy for a topic\n" + - "- set_compaction: Set compaction policy for a topic\n" + - "- remove_compaction: Remove compaction policy for a topic\n" + - "- get_subscription_types: Get allowed subscription types for a topic\n" + - "- set_subscription_types: Set allowed subscription types for a topic\n" + - "- remove_subscription_types: Remove subscription types restriction for a topic" +func (b *PulsarAdminTopicPolicyToolBuilder) buildTopicPolicyTool() (*sdk.Tool, error) { + inputSchema, err := buildPulsarAdminTopicPolicyInputSchema() + if err != nil { + return nil, err + } - return mcp.NewTool("pulsar_admin_topic_policy", - mcp.WithDescription(toolDesc), - mcp.WithString("operation", mcp.Required(), - mcp.Description(operationDesc), - ), - mcp.WithString("topic", mcp.Required(), - mcp.Description("Topic name in format 'persistent://tenant/namespace/topic' or 'tenant/namespace/topic'"), - ), - mcp.WithString("retention_size", - mcp.Description("Retention size policy (e.g., '100MB', '1GB') - used with retention operations"), - ), - mcp.WithString("retention_time", - mcp.Description("Retention time policy (e.g., '1d', '24h', '1440m') - used with retention operations"), - ), - mcp.WithNumber("ttl_seconds", - mcp.Description("TTL in seconds - used with TTL operations"), - ), - mcp.WithNumber("compaction_threshold", - mcp.Description("Compaction threshold in bytes - used with compaction operations"), - ), - mcp.WithArray("subscription_types", - mcp.Description("List of allowed subscription types - used with subscription type operations"), - mcp.Items( - map[string]interface{}{ - "type": "string", - "description": "subscription type: Exclusive, Shared, Failover, Key_Shared", - }, - ), - ), - ) + return &sdk.Tool{ + Name: "pulsar_admin_topic_policy", + Description: pulsarAdminTopicPolicyToolDesc, + InputSchema: inputSchema, + }, nil } // buildTopicPolicyHandler builds the Pulsar Admin Topic Policy handler function -func (b *PulsarAdminTopicPolicyToolBuilder) buildTopicPolicyHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) buildTopicPolicyHandler(readOnly bool) builders.ToolHandlerFunc[pulsarAdminTopicPolicyInput, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input pulsarAdminTopicPolicyInput) (*sdk.CallToolResult, any, error) { // Get Pulsar session from context session := mcpCtx.GetPulsarSession(ctx) if session == nil { - return mcp.NewToolResultError("Pulsar session not found in context"), nil + return nil, nil, fmt.Errorf("pulsar session not found in context") } client, err := session.GetAdminClient() if err != nil { - return b.handleError("get admin client", err), nil + return nil, nil, b.handleError("get admin client", 
err) } // Get required parameters - operation, err := request.RequireString("operation") - if err != nil { - return mcp.NewToolResultError("Missing required operation parameter"), nil + operation := strings.TrimSpace(input.Operation) + if operation == "" { + return nil, nil, fmt.Errorf("missing required operation parameter") } - topic, err := request.RequireString("topic") - if err != nil { - return mcp.NewToolResultError("Missing required topic parameter"), nil + topic := strings.TrimSpace(input.Topic) + if topic == "" { + return nil, nil, fmt.Errorf("missing required topic parameter") } // Check write operation permissions @@ -169,71 +170,107 @@ func (b *PulsarAdminTopicPolicyToolBuilder) buildTopicPolicyHandler(readOnly boo } if isWriteOp && readOnly { - return mcp.NewToolResultError("Write operations not allowed in read-only mode"), nil + return nil, nil, fmt.Errorf("write operations not allowed in read-only mode") } // Handle operations switch operation { case "get_retention": - return b.handleGetTopicRetention(client, topic) + result, handlerErr := b.handleGetTopicRetention(client, topic) + return result, nil, handlerErr case "set_retention": - return b.handleSetTopicRetention(client, topic, request) + result, handlerErr := b.handleSetTopicRetention(client, topic, input) + return result, nil, handlerErr case "remove_retention": - return b.handleRemoveTopicRetention(client, topic) + result, handlerErr := b.handleRemoveTopicRetention(client, topic) + return result, nil, handlerErr case "get_ttl": - return b.handleGetTopicTTL(client, topic) + result, handlerErr := b.handleGetTopicTTL(client, topic) + return result, nil, handlerErr case "set_ttl": - return b.handleSetTopicTTL(client, topic, request) + result, handlerErr := b.handleSetTopicTTL(client, topic, input) + return result, nil, handlerErr case "remove_ttl": - return b.handleRemoveTopicTTL(client, topic) + result, handlerErr := b.handleRemoveTopicTTL(client, topic) + return result, nil, handlerErr case "get_compaction": - return b.handleGetTopicCompaction(client, topic) + result, handlerErr := b.handleGetTopicCompaction(client, topic) + return result, nil, handlerErr case "set_compaction": - return b.handleSetTopicCompaction(client, topic, request) + result, handlerErr := b.handleSetTopicCompaction(client, topic, input) + return result, nil, handlerErr case "remove_compaction": - return b.handleRemoveTopicCompaction(client, topic) + result, handlerErr := b.handleRemoveTopicCompaction(client, topic) + return result, nil, handlerErr case "get_subscription_types": - return b.handleGetTopicSubscriptionTypes(client, topic) + result, handlerErr := b.handleGetTopicSubscriptionTypes(client, topic) + return result, nil, handlerErr case "set_subscription_types": - return b.handleSetTopicSubscriptionTypes(client, topic, request) + result, handlerErr := b.handleSetTopicSubscriptionTypes(client, topic, input) + return result, nil, handlerErr case "remove_subscription_types": - return b.handleRemoveTopicSubscriptionTypes(client, topic) + result, handlerErr := b.handleRemoveTopicSubscriptionTypes(client, topic) + return result, nil, handlerErr default: - return mcp.NewToolResultError(fmt.Sprintf("Unsupported operation: %s", operation)), nil + return nil, nil, fmt.Errorf("unsupported operation: %s", operation) } } } +func buildPulsarAdminTopicPolicyInputSchema() (*jsonschema.Schema, error) { + schema, err := jsonschema.For[pulsarAdminTopicPolicyInput](nil) + if err != nil { + return nil, fmt.Errorf("input schema: %w", err) + } + if schema.Type != 
"object" { + return nil, fmt.Errorf("input schema must have type \"object\"") + } + if schema.Properties == nil { + schema.Properties = map[string]*jsonschema.Schema{} + } + + setSchemaDescription(schema, "operation", pulsarAdminTopicPolicyOperationDesc) + setSchemaDescription(schema, "topic", pulsarAdminTopicPolicyTopicDesc) + setSchemaDescription(schema, "retention_size", pulsarAdminTopicPolicyRetentionSizeDesc) + setSchemaDescription(schema, "retention_time", pulsarAdminTopicPolicyRetentionTimeDesc) + setSchemaDescription(schema, "ttl_seconds", pulsarAdminTopicPolicyTTLSecondsDesc) + setSchemaDescription(schema, "compaction_threshold", pulsarAdminTopicPolicyCompactionThresholdDesc) + setSchemaDescription(schema, "subscription_types", pulsarAdminTopicPolicySubscriptionTypesDesc) + + normalizeAdditionalProperties(schema) + return schema, nil +} + // Utility functions -func (b *PulsarAdminTopicPolicyToolBuilder) handleError(operation string, err error) *mcp.CallToolResult { - return mcp.NewToolResultError(fmt.Sprintf("Failed to %s: %v", operation, err)) +func (b *PulsarAdminTopicPolicyToolBuilder) handleError(operation string, err error) error { + return fmt.Errorf("failed to %s: %v", operation, err) } -func (b *PulsarAdminTopicPolicyToolBuilder) marshalResponse(data interface{}) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) marshalResponse(data interface{}) (*sdk.CallToolResult, error) { jsonBytes, err := json.Marshal(data) if err != nil { - return b.handleError("marshal response", err), nil + return nil, b.handleError("marshal response", err) } - return mcp.NewToolResultText(string(jsonBytes)), nil + return textResult(string(jsonBytes)), nil } // Topic policy operation handlers -func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicRetention(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicRetention(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get retention policy retention, err := client.Topics().GetRetention(*topicName, false) if err != nil { - return b.handleError("get topic retention policy", err), nil + return nil, b.handleError("get topic retention policy", err) } // If no retention policy is defined if retention == nil { - return mcp.NewToolResultText(fmt.Sprintf("No retention policy found for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("No retention policy found for topic %s", topicName.String())), nil } // Format the output @@ -251,35 +288,39 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicRetention(client cmdut retentionSize = fmt.Sprintf("%d MB", retention.RetentionSizeInMB) } - return mcp.NewToolResultText(fmt.Sprintf("Retention policy for topic %s: %s and %s", + return textResult(fmt.Sprintf("Retention policy for topic %s: %s and %s", topicName.String(), retentionTime, retentionSize)), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicRetention(client cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicRetention(client cmdutils.Client, topic string, input pulsarAdminTopicPolicyInput) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return 
b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Parse retention parameters - var retentionTimeInMinutes int64 = -1 - var retentionSizeInMB int64 = -1 - - // /nolint:revive - if retentionTime := request.GetString("retention_time", ""); retentionTime != "" { - if parsed, err := b.parseRetentionTime(retentionTime); err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid retention time format: %v", err)), nil - } else { + retentionTimeInMinutes := int64(-1) + retentionSizeInMB := int64(-1) + + if input.RetentionTime != nil { + retentionTime := strings.TrimSpace(*input.RetentionTime) + if retentionTime != "" { + parsed, err := b.parseRetentionTime(retentionTime) + if err != nil { + return nil, fmt.Errorf("invalid retention time format: %v", err) + } retentionTimeInMinutes = parsed } } - // /nolint:revive - if retentionSize := request.GetString("retention_size", ""); retentionSize != "" { - if parsed, err := b.parseRetentionSize(retentionSize); err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Invalid retention size format: %v", err)), nil - } else { + if input.RetentionSize != nil { + retentionSize := strings.TrimSpace(*input.RetentionSize) + if retentionSize != "" { + parsed, err := b.parseRetentionSize(retentionSize) + if err != nil { + return nil, fmt.Errorf("invalid retention size format: %v", err) + } retentionSizeInMB = parsed } } @@ -293,132 +334,132 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicRetention(client cmdut // Set retention policy err = client.Topics().SetRetention(*topicName, retentionPolicy) if err != nil { - return b.handleError("set topic retention policy", err), nil + return nil, b.handleError("set topic retention policy", err) } - return mcp.NewToolResultText(fmt.Sprintf("Retention policy set for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Retention policy set for topic %s", topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicRetention(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicRetention(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Remove retention policy err = client.Topics().RemoveRetention(*topicName) if err != nil { - return b.handleError("remove topic retention policy", err), nil + return nil, b.handleError("remove topic retention policy", err) } - return mcp.NewToolResultText(fmt.Sprintf("Retention policy removed for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Retention policy removed for topic %s", topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicTTL(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicTTL(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get message TTL ttl, err := client.Topics().GetMessageTTL(*topicName) if err != nil { - return b.handleError("get topic message TTL", err), nil + return nil, b.handleError("get topic message TTL", err) } // Check if TTL is set if 
ttl == 0 { - return mcp.NewToolResultText(fmt.Sprintf("Message TTL is not configured for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Message TTL is not configured for topic %s", topicName.String())), nil } - return mcp.NewToolResultText(fmt.Sprintf("Message TTL for topic %s is %d seconds", topicName.String(), ttl)), nil + return textResult(fmt.Sprintf("Message TTL for topic %s is %d seconds", topicName.String(), ttl)), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicTTL(client cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicTTL(client cmdutils.Client, topic string, input pulsarAdminTopicPolicyInput) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get TTL seconds parameter - ttlSeconds, err := request.RequireFloat("ttl_seconds") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'ttl_seconds'"), nil + if input.TTLSeconds == nil { + return nil, fmt.Errorf("missing required parameter 'ttl_seconds'") } + ttlSeconds := *input.TTLSeconds if ttlSeconds < 0 { - return mcp.NewToolResultError("TTL seconds must be non-negative"), nil + return nil, fmt.Errorf("TTL seconds must be non-negative") } // Set message TTL err = client.Topics().SetMessageTTL(*topicName, int(ttlSeconds)) if err != nil { - return b.handleError("set topic message TTL", err), nil + return nil, b.handleError("set topic message TTL", err) } if ttlSeconds == 0 { - return mcp.NewToolResultText(fmt.Sprintf("Message TTL disabled for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Message TTL disabled for topic %s", topicName.String())), nil } - return mcp.NewToolResultText(fmt.Sprintf("Message TTL set to %d seconds for topic %s", int(ttlSeconds), topicName.String())), nil + return textResult(fmt.Sprintf("Message TTL set to %d seconds for topic %s", int(ttlSeconds), topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicTTL(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicTTL(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Remove message TTL err = client.Topics().RemoveMessageTTL(*topicName) if err != nil { - return b.handleError("remove topic message TTL", err), nil + return nil, b.handleError("remove topic message TTL", err) } - return mcp.NewToolResultText(fmt.Sprintf("Message TTL removed for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Message TTL removed for topic %s", topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicCompaction(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicCompaction(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get compaction threshold threshold, err := 
client.Topics().GetCompactionThreshold(*topicName, false) if err != nil { - return b.handleError("get topic compaction threshold", err), nil + return nil, b.handleError("get topic compaction threshold", err) } // Format the result if threshold == 0 { - return mcp.NewToolResultText(fmt.Sprintf("Automatic compaction is disabled for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Automatic compaction is disabled for topic %s", topicName.String())), nil } - return mcp.NewToolResultText(fmt.Sprintf("The compaction threshold of the topic %s is %d byte(s)", + return textResult(fmt.Sprintf("The compaction threshold of the topic %s is %d byte(s)", topicName.String(), threshold)), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicCompaction(client cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicCompaction(client cmdutils.Client, topic string, input pulsarAdminTopicPolicyInput) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get compaction threshold parameter - thresholdNum, err := request.RequireFloat("compaction_threshold") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'compaction_threshold'"), nil + if input.CompactionThreshold == nil { + return nil, fmt.Errorf("missing required parameter 'compaction_threshold'") } + thresholdNum := *input.CompactionThreshold if thresholdNum < 0 { - return mcp.NewToolResultError("Compaction threshold must be non-negative"), nil + return nil, fmt.Errorf("compaction threshold must be non-negative") } threshold := int64(thresholdNum) @@ -426,37 +467,37 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicCompaction(client cmdu // Set compaction threshold err = client.Topics().SetCompactionThreshold(*topicName, threshold) if err != nil { - return b.handleError("set topic compaction threshold", err), nil + return nil, b.handleError("set topic compaction threshold", err) } if threshold == 0 { - return mcp.NewToolResultText(fmt.Sprintf("Automatic compaction disabled for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Automatic compaction disabled for topic %s", topicName.String())), nil } - return mcp.NewToolResultText(fmt.Sprintf("Compaction threshold set to %d bytes for topic %s", threshold, topicName.String())), nil + return textResult(fmt.Sprintf("Compaction threshold set to %d bytes for topic %s", threshold, topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicCompaction(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicCompaction(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Remove compaction threshold err = client.Topics().RemoveCompactionThreshold(*topicName) if err != nil { - return b.handleError("remove topic compaction threshold", err), nil + return nil, b.handleError("remove topic compaction threshold", err) } - return mcp.NewToolResultText(fmt.Sprintf("Compaction threshold removed for topic %s", topicName.String())), nil + return textResult(fmt.Sprintf("Compaction 
threshold removed for topic %s", topicName.String())), nil } -func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicSubscriptionTypes(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicSubscriptionTypes(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Check if the API supports subscription types management @@ -471,11 +512,11 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicSubscriptionTypes(clie if getter, ok := topicsClient.(SubscriptionTypesGetter); ok { subscriptionTypes, err := getter.GetSubscriptionTypesEnabled(*topicName) if err != nil { - return b.handleError("get topic subscription types", err), nil + return nil, b.handleError("get topic subscription types", err) } if len(subscriptionTypes) == 0 { - return mcp.NewToolResultText(fmt.Sprintf("No subscription type restrictions configured for topic %s (all types allowed)", topicName.String())), nil + return textResult(fmt.Sprintf("No subscription type restrictions configured for topic %s (all types allowed)", topicName.String())), nil } return b.marshalResponse(map[string]interface{}{ @@ -485,24 +526,25 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleGetTopicSubscriptionTypes(clie } // Fallback: API not available in current version - return mcp.NewToolResultError("Subscription types policy management is not available in the current pulsarctl API version. " + - "This feature may require a newer version of Pulsar or pulsarctl."), nil + return nil, fmt.Errorf("subscription types policy management is not available in the current pulsarctl API version; " + + "this feature may require a newer version of Pulsar or pulsarctl") } -func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicSubscriptionTypes(client cmdutils.Client, topic string, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicSubscriptionTypes(client cmdutils.Client, topic string, input pulsarAdminTopicPolicyInput) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Get subscription types parameter - subscriptionTypes, err := request.RequireStringSlice("subscription_types") - if err != nil { - return mcp.NewToolResultError("Missing required parameter 'subscription_types'. " + - "Please provide an array of subscription types: Exclusive, Shared, Failover, Key_Shared"), nil + if input.SubscriptionTypes == nil { + return nil, fmt.Errorf("missing required parameter 'subscription_types'; " + + "please provide an array of subscription types: Exclusive, Shared, Failover, Key_Shared") } + subscriptionTypes := input.SubscriptionTypes + // Validate subscription types validTypes := map[string]bool{ "Exclusive": true, @@ -514,13 +556,13 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicSubscriptionTypes(clie var validatedTypes []string for _, subType := range subscriptionTypes { if !validTypes[subType] { - return mcp.NewToolResultError(fmt.Sprintf("Invalid subscription type: %s. Valid types are: Exclusive, Shared, Failover, Key_Shared", subType)), nil + return nil, fmt.Errorf("invalid subscription type: %s. 
valid types are: Exclusive, Shared, Failover, Key_Shared", subType) } validatedTypes = append(validatedTypes, subType) } if len(validatedTypes) == 0 { - return mcp.NewToolResultError("At least one valid subscription type must be specified"), nil + return nil, fmt.Errorf("at least one valid subscription type must be specified") } // Check if the API supports subscription types management @@ -534,23 +576,23 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleSetTopicSubscriptionTypes(clie if setter, ok := topicsClient.(SubscriptionTypesSetter); ok { err := setter.SetSubscriptionTypesEnabled(*topicName, validatedTypes) if err != nil { - return b.handleError("set topic subscription types", err), nil + return nil, b.handleError("set topic subscription types", err) } - return mcp.NewToolResultText(fmt.Sprintf("Subscription types set for topic %s: %s", + return textResult(fmt.Sprintf("Subscription types set for topic %s: %s", topicName.String(), strings.Join(validatedTypes, ", "))), nil } // Fallback: API not available in current version - return mcp.NewToolResultError("Subscription types policy management is not available in the current pulsarctl API version. " + - "This feature may require a newer version of Pulsar or pulsarctl."), nil + return nil, fmt.Errorf("subscription types policy management is not available in the current pulsarctl API version; " + + "this feature may require a newer version of Pulsar or pulsarctl") } -func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicSubscriptionTypes(client cmdutils.Client, topic string) (*mcp.CallToolResult, error) { +func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicSubscriptionTypes(client cmdutils.Client, topic string) (*sdk.CallToolResult, error) { // Get topic name topicName, err := utils.GetTopicName(topic) if err != nil { - return b.handleError("parse topic name", err), nil + return nil, b.handleError("parse topic name", err) } // Check if the API supports subscription types management @@ -564,15 +606,15 @@ func (b *PulsarAdminTopicPolicyToolBuilder) handleRemoveTopicSubscriptionTypes(c if remover, ok := topicsClient.(SubscriptionTypesRemover); ok { err := remover.RemoveSubscriptionTypesEnabled(*topicName) if err != nil { - return b.handleError("remove topic subscription types policy", err), nil + return nil, b.handleError("remove topic subscription types policy", err) } - return mcp.NewToolResultText(fmt.Sprintf("Subscription types policy removed for topic %s (all types now allowed)", topicName.String())), nil + return textResult(fmt.Sprintf("Subscription types policy removed for topic %s (all types now allowed)", topicName.String())), nil } // Fallback: API not available in current version - return mcp.NewToolResultError("Subscription types policy management is not available in the current pulsarctl API version. 
" + - "This feature may require a newer version of Pulsar or pulsarctl."), nil + return nil, fmt.Errorf("subscription types policy management is not available in the current pulsarctl API version; " + + "this feature may require a newer version of Pulsar or pulsarctl") } // Utility functions for parsing retention parameters diff --git a/pkg/mcp/builders/pulsar/topic_policy_legacy.go b/pkg/mcp/builders/pulsar/topic_policy_legacy.go new file mode 100644 index 0000000..824526c --- /dev/null +++ b/pkg/mcp/builders/pulsar/topic_policy_legacy.go @@ -0,0 +1,113 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/mark3labs/mcp-go/mcp" + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" +) + +// PulsarAdminTopicPolicyLegacyToolBuilder implements the legacy ToolBuilder interface for Pulsar admin topic policies. +// /nolint:revive +type PulsarAdminTopicPolicyLegacyToolBuilder struct { + *builders.BaseToolBuilder +} + +// NewPulsarAdminTopicPolicyLegacyToolBuilder creates a new Pulsar admin topic policy legacy tool builder instance. +func NewPulsarAdminTopicPolicyLegacyToolBuilder() *PulsarAdminTopicPolicyLegacyToolBuilder { + metadata := builders.ToolMetadata{ + Name: "pulsar_admin_topic_policy", + Version: "1.0.0", + Description: "Pulsar admin topic policy management tools", + Category: "pulsar_admin", + Tags: []string{"pulsar", "admin", "topic_policy"}, + } + + features := []string{ + "pulsar-admin-topic-policy", + "all", + "all-pulsar", + "pulsar-admin", + } + + return &PulsarAdminTopicPolicyLegacyToolBuilder{ + BaseToolBuilder: builders.NewBaseToolBuilder(metadata, features), + } +} + +// BuildTools builds the Pulsar admin topic policy legacy tool list. 
+func (b *PulsarAdminTopicPolicyLegacyToolBuilder) BuildTools(_ context.Context, config builders.ToolBuildConfig) ([]server.ServerTool, error) { + if !b.HasAnyRequiredFeature(config.Features) { + return nil, nil + } + + if err := b.Validate(config); err != nil { + return nil, err + } + + tool, err := b.buildTopicPolicyTool() + if err != nil { + return nil, err + } + handler := b.buildTopicPolicyHandler(config.ReadOnly) + + return []server.ServerTool{ + { + Tool: tool, + Handler: handler, + }, + }, nil +} + +func (b *PulsarAdminTopicPolicyLegacyToolBuilder) buildTopicPolicyTool() (mcp.Tool, error) { + inputSchema, err := buildPulsarAdminTopicPolicyInputSchema() + if err != nil { + return mcp.Tool{}, err + } + + schemaJSON, err := json.Marshal(inputSchema) + if err != nil { + return mcp.Tool{}, fmt.Errorf("marshal input schema: %w", err) + } + + return mcp.Tool{ + Name: "pulsar_admin_topic_policy", + Description: pulsarAdminTopicPolicyToolDesc, + RawInputSchema: schemaJSON, + }, nil +} + +func (b *PulsarAdminTopicPolicyLegacyToolBuilder) buildTopicPolicyHandler(readOnly bool) func(context.Context, mcp.CallToolRequest) (*mcp.CallToolResult, error) { + sdkBuilder := NewPulsarAdminTopicPolicyToolBuilder() + sdkHandler := sdkBuilder.buildTopicPolicyHandler(readOnly) + + return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { + var input pulsarAdminTopicPolicyInput + if err := request.BindArguments(&input); err != nil { + return mcp.NewToolResultError(fmt.Sprintf("failed to parse arguments: %v", err)), nil + } + + result, _, err := sdkHandler(ctx, nil, input) + if err != nil { + return mcp.NewToolResultError(err.Error()), nil + } + return legacyToolResultFromSDK(result), nil + } +} diff --git a/pkg/mcp/builders/pulsar/topic_policy_test.go b/pkg/mcp/builders/pulsar/topic_policy_test.go new file mode 100644 index 0000000..de185b9 --- /dev/null +++ b/pkg/mcp/builders/pulsar/topic_policy_test.go @@ -0,0 +1,120 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminTopicPolicyToolBuilder(t *testing.T) { + builder := NewPulsarAdminTopicPolicyToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_topic_policy", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-topic-policy") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-topic-policy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_topic_policy", tools[0].Definition().Name) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-topic-policy"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_topic_policy", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-topic-policy"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminTopicPolicyToolSchema(t *testing.T) { + builder := NewPulsarAdminTopicPolicyToolBuilder() + + tool, err := builder.buildTopicPolicyTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_topic_policy", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + assert.ElementsMatch(t, []string{"operation", "topic"}, schema.Required) + assert.ElementsMatch(t, []string{ + "operation", + "topic", + "retention_size", + "retention_time", + "ttl_seconds", + "compaction_threshold", + "subscription_types", + }, mapStringKeys(schema.Properties)) + + assert.Equal(t, pulsarAdminTopicPolicyOperationDesc, schema.Properties["operation"].Description) + assert.Equal(t, pulsarAdminTopicPolicyTopicDesc, schema.Properties["topic"].Description) + assert.Equal(t, pulsarAdminTopicPolicyRetentionTimeDesc, schema.Properties["retention_time"].Description) + assert.Equal(t, pulsarAdminTopicPolicyRetentionSizeDesc, schema.Properties["retention_size"].Description) +} diff --git a/pkg/mcp/builders/pulsar/topic_test.go b/pkg/mcp/builders/pulsar/topic_test.go new file mode 100644 index 0000000..837e430 --- /dev/null +++ b/pkg/mcp/builders/pulsar/topic_test.go @@ -0,0 +1,151 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pulsar + +import ( + "context" + "testing" + + "github.com/google/jsonschema-go/jsonschema" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPulsarAdminTopicToolBuilder(t *testing.T) { + builder := NewPulsarAdminTopicToolBuilder() + + t.Run("GetName", func(t *testing.T) { + assert.Equal(t, "pulsar_admin_topic", builder.GetName()) + }) + + t.Run("GetRequiredFeatures", func(t *testing.T) { + features := builder.GetRequiredFeatures() + assert.NotEmpty(t, features) + assert.Contains(t, features, "pulsar-admin-topics") + }) + + t.Run("BuildTools_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"pulsar-admin-topics"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_topic", tools[0].Definition().Name) + assert.NotNil(t, tools[0]) + }) + + t.Run("BuildTools_ReadOnly", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: true, + Features: []string{"pulsar-admin-topics"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 1) + assert.Equal(t, "pulsar_admin_topic", tools[0].Definition().Name) + }) + + t.Run("BuildTools_NoFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + ReadOnly: false, + Features: []string{"unrelated-feature"}, + } + + tools, err := builder.BuildTools(context.Background(), config) + require.NoError(t, err) + assert.Len(t, tools, 0) + }) + + t.Run("Validate_Success", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"pulsar-admin-topics"}, + } + + err := builder.Validate(config) + assert.NoError(t, err) + }) + + t.Run("Validate_MissingFeatures", func(t *testing.T) { + config := builders.ToolBuildConfig{ + Features: []string{"unrelated-feature"}, + } + + err := builder.Validate(config) + assert.Error(t, err) + }) +} + +func TestPulsarAdminTopicToolSchema(t *testing.T) { + builder := NewPulsarAdminTopicToolBuilder() + tool, err := builder.buildTopicTool() + require.NoError(t, err) + assert.Equal(t, "pulsar_admin_topic", tool.Name) + + schema, ok := tool.InputSchema.(*jsonschema.Schema) + require.True(t, ok) + require.NotNil(t, schema.Properties) + + expectedRequired := []string{"resource", "operation"} + assert.ElementsMatch(t, expectedRequired, schema.Required) + + expectedProps := []string{ + "resource", + "operation", + "topic", + "namespace", + "partitions", + "force", + "non-partitioned", + "partitioned", + "per-partition", + "config", + "messageId", + } + assert.ElementsMatch(t, expectedProps, mapStringKeys(schema.Properties)) + + resourceSchema := schema.Properties["resource"] + require.NotNil(t, resourceSchema) + assert.Equal(t, pulsarAdminTopicResourceDesc, resourceSchema.Description) + + operationSchema := schema.Properties["operation"] + require.NotNil(t, operationSchema) + assert.Equal(t, pulsarAdminTopicOperationDesc, 
operationSchema.Description) +} + +func TestPulsarAdminTopicToolBuilder_ReadOnlyRejectsWrite(t *testing.T) { + builder := NewPulsarAdminTopicToolBuilder() + handler := builder.buildTopicHandler(true) + + _, _, err := handler(context.Background(), nil, pulsarAdminTopicInput{ + Resource: "topic", + Operation: "create", + }) + + require.Error(t, err) + assert.Contains(t, err.Error(), "read-only") +} + +func mapStringKeys[V any](m map[string]V) []string { + keys := make([]string, 0, len(m)) + for key := range m { + keys = append(keys, key) + } + return keys +} diff --git a/pkg/mcp/builders/registry.go b/pkg/mcp/builders/registry.go index 03c3726..6d0dcf0 100644 --- a/pkg/mcp/builders/registry.go +++ b/pkg/mcp/builders/registry.go @@ -19,8 +19,6 @@ import ( "fmt" "sort" "sync" - - "github.com/mark3labs/mcp-go/server" ) // ToolRegistry manages the registration and building of all tool builders @@ -115,11 +113,11 @@ func (r *ToolRegistry) ListMetadata() map[string]ToolMetadata { // BuildAll builds tools for all specified configurations // Returns all successfully built tools and any errors encountered -func (r *ToolRegistry) BuildAll(configs map[string]ToolBuildConfig) ([]server.ServerTool, error) { +func (r *ToolRegistry) BuildAll(configs map[string]ToolBuildConfig) ([]ToolDefinition, error) { r.mu.RLock() defer r.mu.RUnlock() - var allTools []server.ServerTool + var allTools []ToolDefinition var errors []error for name, config := range configs { @@ -149,7 +147,7 @@ func (r *ToolRegistry) BuildAll(configs map[string]ToolBuildConfig) ([]server.Se } // BuildSingle builds tools for a single tool builder -func (r *ToolRegistry) BuildSingle(name string, config ToolBuildConfig) ([]server.ServerTool, error) { +func (r *ToolRegistry) BuildSingle(name string, config ToolBuildConfig) ([]ToolDefinition, error) { r.mu.RLock() builder, exists := r.builders[name] r.mu.RUnlock() @@ -167,7 +165,7 @@ func (r *ToolRegistry) BuildSingle(name string, config ToolBuildConfig) ([]serve // BuildAllWithFeatures builds all relevant tools based on the feature list // Automatically creates configuration for each builder -func (r *ToolRegistry) BuildAllWithFeatures(readOnly bool, features []string) ([]server.ServerTool, error) { +func (r *ToolRegistry) BuildAllWithFeatures(readOnly bool, features []string) ([]ToolDefinition, error) { r.mu.RLock() builders := make(map[string]ToolBuilder, len(r.builders)) for name, builder := range r.builders { @@ -175,7 +173,7 @@ func (r *ToolRegistry) BuildAllWithFeatures(readOnly bool, features []string) ([ } r.mu.RUnlock() - var allTools []server.ServerTool + var allTools []ToolDefinition var errors []error for name, builder := range builders { diff --git a/pkg/mcp/builders/registry_test.go b/pkg/mcp/builders/registry_test.go index 766c266..14728b6 100644 --- a/pkg/mcp/builders/registry_test.go +++ b/pkg/mcp/builders/registry_test.go @@ -19,8 +19,7 @@ import ( "fmt" "testing" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,7 +28,7 @@ import ( type MockToolBuilder struct { name string features []string - tools []server.ServerTool + tools []ToolDefinition err error metadata ToolMetadata } @@ -44,13 +43,18 @@ func NewMockToolBuilder(name string, features []string) *MockToolBuilder { Description: fmt.Sprintf("Mock tool builder for %s", name), Category: "test", }, - tools: []server.ServerTool{ - { - Tool: mcp.NewTool(name, - 
mcp.WithDescription(fmt.Sprintf("Mock tool %s", name)), - ), - Handler: func(_ context.Context, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return mcp.NewToolResultText(fmt.Sprintf("Mock response from %s", name)), nil + tools: []ToolDefinition{ + ServerTool[map[string]any, any]{ + Tool: &mcp.Tool{ + Name: name, + Description: fmt.Sprintf("Mock tool %s", name), + }, + Handler: func(_ context.Context, _ *mcp.CallToolRequest, _ map[string]any) (*mcp.CallToolResult, any, error) { + return &mcp.CallToolResult{ + Content: []mcp.Content{ + &mcp.TextContent{Text: fmt.Sprintf("Mock response from %s", name)}, + }, + }, nil, nil }, }, }, @@ -65,7 +69,7 @@ func (m *MockToolBuilder) GetRequiredFeatures() []string { return m.features } -func (m *MockToolBuilder) BuildTools(_ context.Context, _ ToolBuildConfig) ([]server.ServerTool, error) { +func (m *MockToolBuilder) BuildTools(_ context.Context, _ ToolBuildConfig) ([]ToolDefinition, error) { if m.err != nil { return nil, m.err } @@ -224,7 +228,7 @@ func TestToolRegistry(t *testing.T) { tools, err := registry.BuildSingle("single_tool", config) require.NoError(t, err) assert.Len(t, tools, 1) - assert.Equal(t, "single_tool", tools[0].Tool.Name) + assert.Equal(t, "single_tool", tools[0].Definition().Name) }) t.Run("BuildSingle_NotFound", func(t *testing.T) { diff --git a/pkg/mcp/ctx.go b/pkg/mcp/ctx.go index 434e2ce..29887b6 100644 --- a/pkg/mcp/ctx.go +++ b/pkg/mcp/ctx.go @@ -17,6 +17,7 @@ package mcp import ( "context" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/config" "github.com/streamnative/streamnative-mcp-server/pkg/kafka" internalContext "github.com/streamnative/streamnative-mcp-server/pkg/mcp/internal/context" @@ -53,6 +54,31 @@ func WithKafkaSession(ctx context.Context, session *kafka.Session) context.Conte return internalContext.WithKafkaSession(ctx, session) } +// WithMCPRequest sets the MCP request in the context +func WithMCPRequest(ctx context.Context, request sdk.Request) context.Context { + return internalContext.WithMCPRequest(ctx, request) +} + +// GetMCPRequest gets the MCP request from the context +func GetMCPRequest(ctx context.Context) sdk.Request { + return internalContext.GetMCPRequest(ctx) +} + +// GetMCPSession gets the MCP session from the context +func GetMCPSession(ctx context.Context) sdk.Session { + return internalContext.GetMCPSession(ctx) +} + +// GetMCPSessionID gets the MCP session ID from the context +func GetMCPSessionID(ctx context.Context) string { + return internalContext.GetMCPSessionID(ctx) +} + +// GetMCPRequestExtra gets the MCP request extra from the context +func GetMCPRequestExtra(ctx context.Context) *sdk.RequestExtra { + return internalContext.GetMCPRequestExtra(ctx) +} + // GetSNCloudOrganization gets the SNCloud organization from the context func GetSNCloudOrganization(ctx context.Context) string { return internalContext.GetSNCloudOrganization(ctx) diff --git a/pkg/mcp/ctx_test.go b/pkg/mcp/ctx_test.go new file mode 100644 index 0000000..911985d --- /dev/null +++ b/pkg/mcp/ctx_test.go @@ -0,0 +1,59 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + "testing" + + sdk "github.com/modelcontextprotocol/go-sdk/mcp" +) + +func TestMCPRequestContextHelpers(t *testing.T) { + base := context.Background() + if GetMCPRequest(base) != nil { + t.Fatal("expected nil request") + } + if GetMCPRequestExtra(base) != nil { + t.Fatal("expected nil request extra") + } + if GetMCPSession(base) != nil { + t.Fatal("expected nil session") + } + if GetMCPSessionID(base) != "" { + t.Fatal("expected empty session ID") + } + + extra := &sdk.RequestExtra{} + reqStub := &sdk.ServerRequest[*sdk.CallToolParamsRaw]{Extra: extra} + ctx := WithMCPRequest(base, reqStub) + + req := GetMCPRequest(ctx) + if req == nil { + t.Fatal("expected request") + } + if got := GetMCPRequestExtra(ctx); got != extra { + t.Fatal("expected request extra to match") + } + if GetMCPSession(ctx) != nil { + t.Fatal("expected nil session") + } + if GetMCPSessionID(ctx) != "" { + t.Fatal("expected empty session ID") + } + if stub, ok := req.(*sdk.ServerRequest[*sdk.CallToolParamsRaw]); !ok || stub.Extra != extra { + t.Fatal("expected stored request") + } +} diff --git a/pkg/mcp/internal/context/ctx.go b/pkg/mcp/internal/context/ctx.go index 3008d4f..44242f0 100644 --- a/pkg/mcp/internal/context/ctx.go +++ b/pkg/mcp/internal/context/ctx.go @@ -17,7 +17,9 @@ package context //nolint:revive import ( "context" + "reflect" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/config" "github.com/streamnative/streamnative-mcp-server/pkg/kafka" "github.com/streamnative/streamnative-mcp-server/pkg/pulsar" @@ -33,6 +35,7 @@ const ( SNCloudSessionContextKey contextKey = "sncloud_session" PulsarSessionContextKey contextKey = "pulsar_session" KafkaSessionContextKey contextKey = "kafka_session" + MCPRequestContextKey contextKey = "mcp_request" ) // WithSNCloudOrganization sets the SNCloud organization in the context @@ -65,6 +68,56 @@ func WithKafkaSession(ctx context.Context, session *kafka.Session) context.Conte return context.WithValue(ctx, KafkaSessionContextKey, session) } +// WithMCPRequest sets the MCP request in the context. +func WithMCPRequest(ctx context.Context, request sdk.Request) context.Context { + return context.WithValue(ctx, MCPRequestContextKey, request) +} + +// GetMCPRequest gets the MCP request from the context. +func GetMCPRequest(ctx context.Context) sdk.Request { + if val := ctx.Value(MCPRequestContextKey); val != nil { + if request, ok := val.(sdk.Request); ok { + return request + } + } + return nil +} + +// GetMCPSession gets the MCP session from the context. +func GetMCPSession(ctx context.Context) sdk.Session { + request := GetMCPRequest(ctx) + if request == nil { + return nil + } + session := request.GetSession() + if session == nil { + return nil + } + value := reflect.ValueOf(session) + if value.Kind() == reflect.Ptr && value.IsNil() { + return nil + } + return session +} + +// GetMCPSessionID gets the MCP session ID from the context. 
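+// It returns an empty string when no MCP session is attached to the context.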
+func GetMCPSessionID(ctx context.Context) string { + session := GetMCPSession(ctx) + if session == nil { + return "" + } + return session.ID() +} + +// GetMCPRequestExtra gets the MCP request extra from the context. +func GetMCPRequestExtra(ctx context.Context) *sdk.RequestExtra { + request := GetMCPRequest(ctx) + if request == nil { + return nil + } + return request.GetExtra() +} + // GetSNCloudOrganization gets the SNCloud organization from the context func GetSNCloudOrganization(ctx context.Context) string { if val := ctx.Value(SNCloudOrganizationContextKey); val != nil { diff --git a/pkg/mcp/kafka_admin_groups_tools.go b/pkg/mcp/kafka_admin_groups_tools.go index 143fd9c..0e2e758 100644 --- a/pkg/mcp/kafka_admin_groups_tools.go +++ b/pkg/mcp/kafka_admin_groups_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" ) // KafkaAdminAddGroupsTools registers Kafka admin group tools. -func KafkaAdminAddGroupsTools(s *server.MCPServer, readOnly bool, features []string) { +func KafkaAdminAddGroupsTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := kafkabuilders.NewKafkaGroupsToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func KafkaAdminAddGroupsTools(s *server.MCPServer, readOnly bool, features []str // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/kafka_admin_groups_tools_legacy.go b/pkg/mcp/kafka_admin_groups_tools_legacy.go new file mode 100644 index 0000000..7a50083 --- /dev/null +++ b/pkg/mcp/kafka_admin_groups_tools_legacy.go @@ -0,0 +1,44 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" +) + +// KafkaAdminAddGroupsToolsLegacy registers Kafka admin group tools on legacy servers. 
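+// When the builder reports an error, the function returns without registering any tools.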
+func KafkaAdminAddGroupsToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + // Use the new builder pattern + builder := kafkabuilders.NewKafkaGroupsLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + // Log error but don't fail - this maintains backward compatibility + return + } + + // Add all built tools to the server + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/kafka_admin_sr_tools.go b/pkg/mcp/kafka_admin_sr_tools.go index c6e01d5..3b6eb2b 100644 --- a/pkg/mcp/kafka_admin_sr_tools.go +++ b/pkg/mcp/kafka_admin_sr_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" ) // KafkaAdminAddSchemaRegistryTools registers Kafka Schema Registry tools. -func KafkaAdminAddSchemaRegistryTools(s *server.MCPServer, readOnly bool, features []string) { +func KafkaAdminAddSchemaRegistryTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := kafkabuilders.NewKafkaSchemaRegistryToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func KafkaAdminAddSchemaRegistryTools(s *server.MCPServer, readOnly bool, featur // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/kafka_client_consume_tools.go b/pkg/mcp/kafka_client_consume_tools.go index d94def2..2fa6d3c 100644 --- a/pkg/mcp/kafka_client_consume_tools.go +++ b/pkg/mcp/kafka_client_consume_tools.go @@ -17,22 +17,18 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" - "github.com/sirupsen/logrus" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" ) // KafkaClientAddConsumeTools adds Kafka client consume tools to the MCP server -func KafkaClientAddConsumeTools(s *server.MCPServer, _ bool, logrusLogger *logrus.Logger, features []string) { +func KafkaClientAddConsumeTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := kafkabuilders.NewKafkaConsumeToolBuilder() config := builders.ToolBuildConfig{ - ReadOnly: false, + ReadOnly: readOnly, Features: features, - Options: map[string]interface{}{ - "logger": logrusLogger, - }, } tools, err := builder.BuildTools(context.Background(), config) @@ -43,6 +39,6 @@ func KafkaClientAddConsumeTools(s *server.MCPServer, _ bool, logrusLogger *logru // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/kafka_client_consume_tools_legacy.go b/pkg/mcp/kafka_client_consume_tools_legacy.go new file mode 100644 index 0000000..85cf368 --- /dev/null +++ b/pkg/mcp/kafka_client_consume_tools_legacy.go @@ -0,0 +1,45 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/sirupsen/logrus" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" +) + +// KafkaClientAddConsumeToolsLegacy adds Kafka client consume tools to the legacy MCP server. +func KafkaClientAddConsumeToolsLegacy(s *server.MCPServer, readOnly bool, logrusLogger *logrus.Logger, features []string) { + builder := kafkabuilders.NewKafkaConsumeLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + Options: map[string]interface{}{ + "logger": logrusLogger, + }, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/kafka_client_produce_tools.go b/pkg/mcp/kafka_client_produce_tools.go index e75da6a..0fc0916 100644 --- a/pkg/mcp/kafka_client_produce_tools.go +++ b/pkg/mcp/kafka_client_produce_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" ) // KafkaClientAddProduceTools adds Kafka client produce tools to the MCP server -func KafkaClientAddProduceTools(s *server.MCPServer, readOnly bool, features []string) { +func KafkaClientAddProduceTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := kafkabuilders.NewKafkaProduceToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func KafkaClientAddProduceTools(s *server.MCPServer, readOnly bool, features []s // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/kafka_client_produce_tools_legacy.go b/pkg/mcp/kafka_client_produce_tools_legacy.go new file mode 100644 index 0000000..513f948 --- /dev/null +++ b/pkg/mcp/kafka_client_produce_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + kafkabuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/kafka" +) + +// KafkaClientAddProduceToolsLegacy adds Kafka client produce tools to the legacy MCP server. +func KafkaClientAddProduceToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := kafkabuilders.NewKafkaProduceLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/legacy_stdio_transport.go b/pkg/mcp/legacy_stdio_transport.go new file mode 100644 index 0000000..22686e3 --- /dev/null +++ b/pkg/mcp/legacy_stdio_transport.go @@ -0,0 +1,117 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "bytes" + "context" + "io" + stdlog "log" + "sync" + + "github.com/mark3labs/mcp-go/server" + "github.com/modelcontextprotocol/go-sdk/jsonrpc" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" +) + +// Run starts the legacy mark3labs MCP server using a go-sdk transport. +func (s *LegacyServer) Run(ctx context.Context, transport sdk.Transport, errLogger *stdlog.Logger) error { + conn, err := transport.Connect(ctx) + if err != nil { + return err + } + defer func() { + _ = conn.Close() + }() + + stdioServer := server.NewStdioServer(s.MCPServer) + if errLogger != nil { + stdioServer.SetErrorLogger(errLogger) + } + + reader := &jsonrpcReader{ctx: ctx, conn: conn} + writer := &jsonrpcWriter{ctx: ctx, conn: conn} + return stdioServer.Listen(ctx, reader, writer) +} + +type jsonrpcReader struct { + ctx context.Context + conn sdk.Connection + buf []byte +} + +func (r *jsonrpcReader) Read(p []byte) (int, error) { + for len(r.buf) == 0 { + if r.ctx.Err() != nil { + return 0, io.EOF + } + + msg, err := r.conn.Read(r.ctx) + if err != nil { + if r.ctx.Err() != nil { + return 0, io.EOF + } + return 0, err + } + + data, err := jsonrpc.EncodeMessage(msg) + if err != nil { + return 0, err + } + r.buf = make([]byte, len(data)+1) + copy(r.buf, data) + r.buf[len(data)] = '\n' + } + + n := copy(p, r.buf) + r.buf = r.buf[n:] + return n, nil +} + +type jsonrpcWriter struct { + ctx context.Context + conn sdk.Connection + mu sync.Mutex + buf []byte +} + +func (w *jsonrpcWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.buf = append(w.buf, p...) 
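+	// Flush each complete newline-delimited JSON-RPC message from the buffer to the go-sdk connection.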
+ for { + index := bytes.IndexByte(w.buf, '\n') + if index < 0 { + break + } + + line := bytes.TrimSpace(w.buf[:index]) + w.buf = w.buf[index+1:] + if len(line) == 0 { + continue + } + + msg, err := jsonrpc.DecodeMessage(line) + if err != nil { + return 0, err + } + if err := w.conn.Write(w.ctx, msg); err != nil { + return 0, err + } + } + + return len(p), nil +} diff --git a/pkg/mcp/pftools/invocation.go b/pkg/mcp/pftools/invocation.go index 24c3c37..a667220 100644 --- a/pkg/mcp/pftools/invocation.go +++ b/pkg/mcp/pftools/invocation.go @@ -25,7 +25,7 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/schema" ) @@ -53,16 +53,33 @@ func NewFunctionInvoker(manager *PulsarFunctionManager) *FunctionInvoker { } } +func newToolResultText(text string) *sdk.CallToolResult { + return &sdk.CallToolResult{ + Content: []sdk.Content{ + &sdk.TextContent{Text: text}, + }, + } +} + +func newToolResultError(text string) *sdk.CallToolResult { + return &sdk.CallToolResult{ + Content: []sdk.Content{ + &sdk.TextContent{Text: text}, + }, + IsError: true, + } +} + // InvokeFunctionAndWait sends a message to the function and waits for the result -func (fi *FunctionInvoker) InvokeFunctionAndWait(ctx context.Context, fnTool *FunctionTool, params map[string]interface{}) (*mcp.CallToolResult, error) { +func (fi *FunctionInvoker) InvokeFunctionAndWait(ctx context.Context, fnTool *FunctionTool, params map[string]interface{}) (*sdk.CallToolResult, error) { schemaConverter, err := schema.ConverterFactory(fnTool.OutputSchema.Type) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to get schema converter: %v", err)), nil + return newToolResultError(fmt.Sprintf("Failed to get schema converter: %v", err)), nil } payload, err := schemaConverter.SerializeMCPRequestToPulsarPayload(params, fnTool.OutputSchema.PulsarSchemaInfo) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to serialize payload: %v", err)), nil + return newToolResultError(fmt.Sprintf("Failed to serialize payload: %v", err)), nil } // Create a result channel for this request @@ -71,7 +88,7 @@ func (fi *FunctionInvoker) InvokeFunctionAndWait(ctx context.Context, fnTool *Fu // Send message to input topic msgID, err := fi.sendMessage(ctx, fnTool.InputTopic, payload) if err != nil || msgID == "" { - return mcp.NewToolResultError(fmt.Sprintf("Failed to send message: %v", err)), nil + return newToolResultError(fmt.Sprintf("Failed to send message: %v", err)), nil } fi.registerResultChannel(msgID, resultChan) @@ -80,19 +97,19 @@ func (fi *FunctionInvoker) InvokeFunctionAndWait(ctx context.Context, fnTool *Fu // Set up consumer for output topic err = fi.setupConsumer(ctx, fnTool.InputTopic, fnTool.OutputTopic, msgID, fnTool.OutputSchema) if err != nil { - return mcp.NewToolResultError(fmt.Sprintf("Failed to set up consumer: %v", err)), nil + return newToolResultError(fmt.Sprintf("Failed to set up consumer: %v", err)), nil } // Wait for result or timeout select { case result := <-resultChan: if result.Error != nil { - return mcp.NewToolResultError(fmt.Sprintf("Function execution failed: %v", result.Error)), nil + return newToolResultError(fmt.Sprintf("Function execution failed: %v", result.Error)), nil } - return mcp.NewToolResultText(result.Data), nil + return newToolResultText(result.Data), nil case <-ctx.Done(): - return 
mcp.NewToolResultError(fmt.Sprintf("Function invocation timed out after %v", ctx.Value("timeout"))), nil + return newToolResultError(fmt.Sprintf("Function invocation timed out after %v", ctx.Value("timeout"))), nil } } diff --git a/pkg/mcp/pftools/manager.go b/pkg/mcp/pftools/manager.go index a509c75..f99e5c8 100644 --- a/pkg/mcp/pftools/manager.go +++ b/pkg/mcp/pftools/manager.go @@ -27,11 +27,11 @@ import ( "github.com/apache/pulsar-client-go/pulsaradmin/pkg/rest" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" "github.com/google/go-cmp/cmp" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + legacy "github.com/mark3labs/mcp-go/mcp" + legacyserver "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/kafka" "github.com/streamnative/streamnative-mcp-server/pkg/pulsar" - "github.com/streamnative/streamnative-mcp-server/pkg/schema" ) const ( @@ -54,7 +54,8 @@ var DefaultStringSchemaInfo = &SchemaInfo{ // Server is imported directly to avoid circular dependency type Server struct { - MCPServer *server.MCPServer + MCPServer *sdk.Server + LegacyServer *legacyserver.MCPServer KafkaSession *kafka.Session PulsarSession *pulsar.Session Logger interface{} @@ -100,6 +101,7 @@ func NewPulsarFunctionManager(snServer *Server, readOnly bool, options *ManagerO stopCh: make(chan struct{}), callInProgressMap: make(map[string]context.CancelFunc), mcpServer: snServer.MCPServer, + legacyServer: snServer.LegacyServer, readOnly: readOnly, defaultTimeout: options.DefaultTimeout, circuitBreakers: make(map[string]*CircuitBreaker), @@ -158,7 +160,7 @@ func (m *PulsarFunctionManager) updateFunctions() { // Check if this is a cluster health error and invoke callback if configured if (IsClusterUnhealthy(err) || IsAuthError(err)) && m.clusterErrorHandler != nil { - go m.clusterErrorHandler(m, err) + go m.clusterErrorHandler(context.Background(), m, err) } return } @@ -205,23 +207,9 @@ func (m *PulsarFunctionManager) updateFunctions() { } if changed { - if m.sessionID != "" { - err := m.mcpServer.DeleteSessionTools(m.sessionID, fnTool.Tool.Name) - if err != nil { - log.Printf("Failed to delete tool %s from session %s: %v", fnTool.Tool.Name, m.sessionID, err) - } - } else { - m.mcpServer.DeleteTools(fnTool.Tool.Name) - } - } - if m.sessionID != "" { - err := m.mcpServer.AddSessionTool(m.sessionID, fnTool.Tool, m.handleToolCall(fnTool)) - if err != nil { - log.Printf("Failed to add tool %s to session %s: %v", fnTool.Tool.Name, m.sessionID, err) - } - } else { - m.mcpServer.AddTool(fnTool.Tool, m.handleToolCall(fnTool)) + m.removeTool(fnTool) } + m.addTool(fnTool) // Add function to map m.mutex.Lock() @@ -239,14 +227,7 @@ func (m *PulsarFunctionManager) updateFunctions() { m.mutex.Lock() for fullName, fnTool := range m.fnToToolMap { if !seenFunctions[fullName] { - if m.sessionID != "" { - err := m.mcpServer.DeleteSessionTools(m.sessionID, fnTool.Tool.Name) - if err != nil { - log.Printf("Failed to delete tool %s from session %s: %v", fnTool.Tool.Name, m.sessionID, err) - } - } else { - m.mcpServer.DeleteTools(fnTool.Tool.Name) - } + m.removeTool(fnTool) delete(m.fnToToolMap, fullName) log.Printf("Removed function %s from MCP tools [%s]", fullName, fnTool.Tool.Name) } @@ -254,6 +235,55 @@ func (m *PulsarFunctionManager) updateFunctions() { m.mutex.Unlock() } +func (m *PulsarFunctionManager) addTool(fnTool *FunctionTool) { + if fnTool == nil || fnTool.Tool == nil { + return + } + + if m.mcpServer != nil 
{ + sdk.AddTool(m.mcpServer, fnTool.Tool, m.handleToolCall(fnTool)) + return + } + + if m.legacyServer == nil { + return + } + + legacyTool := sdkToolToLegacy(fnTool.Tool) + if m.sessionID != "" { + if err := m.legacyServer.AddSessionTool(m.sessionID, legacyTool, m.handleLegacyToolCall(fnTool)); err != nil { + log.Printf("Failed to add tool %s to session %s: %v", legacyTool.Name, m.sessionID, err) + } + return + } + + m.legacyServer.AddTool(legacyTool, m.handleLegacyToolCall(fnTool)) +} + +func (m *PulsarFunctionManager) removeTool(fnTool *FunctionTool) { + if fnTool == nil || fnTool.Tool == nil { + return + } + + if m.mcpServer != nil { + m.mcpServer.RemoveTools(fnTool.Tool.Name) + return + } + + if m.legacyServer == nil { + return + } + + if m.sessionID != "" { + if err := m.legacyServer.DeleteSessionTools(m.sessionID, fnTool.Tool.Name); err != nil { + log.Printf("Failed to delete tool %s from session %s: %v", fnTool.Tool.Name, m.sessionID, err) + } + return + } + + m.legacyServer.DeleteTools(fnTool.Tool.Name) +} + // getFunctionsList retrieves all functions from the specified tenants/namespaces func (m *PulsarFunctionManager) getFunctionsList() ([]*utils.FunctionConfig, error) { var allFunctions []*utils.FunctionConfig @@ -407,23 +437,17 @@ func (m *PulsarFunctionManager) convertFunctionToTool(fn *utils.FunctionConfig) // Create description description := retrieveToolDescription(fn) - schemaConverter, err := schema.ConverterFactory(inputSchema.Type) + toolInputSchema, err := ConvertSchemaToToolInput(inputSchema) if err != nil { - return nil, fmt.Errorf("failed to create schema converter: %w", err) + return nil, fmt.Errorf("failed to convert input schema to MCP tool input schema: %w", err) } - toolInputSchemaProperties, err := schemaConverter.ToMCPToolInputSchemaProperties(inputSchema.PulsarSchemaInfo) - if err != nil { - return nil, fmt.Errorf("failed to convert input schema to MCP tool input schema properties: %w", err) + tool := &sdk.Tool{ + Name: toolName, + Description: description, + InputSchema: toolInputSchema, } - toolInputSchemaProperties = append(toolInputSchemaProperties, mcp.WithDescription(description)) - - // Create the tool - tool := mcp.NewTool(toolName, - toolInputSchemaProperties..., - ) - // Create circuit breaker for this function circuitBreaker := NewCircuitBreaker(5, 60*time.Second) @@ -445,54 +469,71 @@ func (m *PulsarFunctionManager) convertFunctionToTool(fn *utils.FunctionConfig) } // handleToolCall returns a handler function for a specific function tool -func (m *PulsarFunctionManager) handleToolCall(fnTool *FunctionTool) func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - return func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { - // Get the circuit breaker - m.mutex.RLock() - cb, exists := m.circuitBreakers[fnTool.Name] - m.mutex.RUnlock() - - if !exists { - cb = NewCircuitBreaker(5, 60*time.Second) - m.mutex.Lock() - m.circuitBreakers[fnTool.Name] = cb - m.mutex.Unlock() +func (m *PulsarFunctionManager) handleToolCall(fnTool *FunctionTool) sdk.ToolHandlerFor[map[string]any, any] { + return func(ctx context.Context, _ *sdk.CallToolRequest, input map[string]any) (*sdk.CallToolResult, any, error) { + args := input + if args == nil { + args = map[string]any{} } - // Check if the circuit breaker allows the request - if !cb.AllowRequest() { - return mcp.NewToolResultError(fmt.Sprintf("Circuit breaker is open for function %s. 
Too many failures, please try again later.", fnTool.Name)), nil - } + result, err := m.invokeToolCall(ctx, fnTool, args) + return result, nil, err + } +} - // Create function invoker - invoker := NewFunctionInvoker(m) +func (m *PulsarFunctionManager) handleLegacyToolCall(fnTool *FunctionTool) func(ctx context.Context, request legacy.CallToolRequest) (*legacy.CallToolResult, error) { + return func(ctx context.Context, request legacy.CallToolRequest) (*legacy.CallToolResult, error) { + result, err := m.invokeToolCall(ctx, fnTool, request.GetArguments()) + return sdkResultToLegacy(result), err + } +} - // Create context with timeout - timeoutCtx, cancel := context.WithTimeout(ctx, m.defaultTimeout) - defer cancel() +func (m *PulsarFunctionManager) invokeToolCall(ctx context.Context, fnTool *FunctionTool, args map[string]interface{}) (*sdk.CallToolResult, error) { + // Get the circuit breaker + m.mutex.RLock() + cb, exists := m.circuitBreakers[fnTool.Name] + m.mutex.RUnlock() - // Register call + if !exists { + cb = NewCircuitBreaker(5, 60*time.Second) m.mutex.Lock() - m.callInProgressMap[fnTool.Name] = cancel + m.circuitBreakers[fnTool.Name] = cb m.mutex.Unlock() - defer func() { - m.mutex.Lock() - delete(m.callInProgressMap, fnTool.Name) - m.mutex.Unlock() - }() + } - // Invoke function and wait for result - result, err := invoker.InvokeFunctionAndWait(timeoutCtx, fnTool, request.GetArguments()) + // Check if the circuit breaker allows the request + if !cb.AllowRequest() { + return newToolResultError(fmt.Sprintf("Circuit breaker is open for function %s. Too many failures, please try again later.", fnTool.Name)), nil + } - // Record success or failure - if err != nil { - cb.RecordFailure() - } else { - cb.RecordSuccess() - } + // Create function invoker + invoker := NewFunctionInvoker(m) - return result, err + // Create context with timeout + timeoutCtx, cancel := context.WithTimeout(ctx, m.defaultTimeout) + defer cancel() + + // Register call + m.mutex.Lock() + m.callInProgressMap[fnTool.Name] = cancel + m.mutex.Unlock() + defer func() { + m.mutex.Lock() + delete(m.callInProgressMap, fnTool.Name) + m.mutex.Unlock() + }() + + // Invoke function and wait for result + result, err := invoker.InvokeFunctionAndWait(timeoutCtx, fnTool, args) + + // Record success or failure + if err != nil { + cb.RecordFailure() + } else { + cb.RecordSuccess() } + + return result, err } // getFunctionFullName returns the full name of a function @@ -573,3 +614,191 @@ func (m *PulsarFunctionManager) GetProducer(topic string) (pulsarclient.Producer log.Printf("Created and cached producer for topic: %s", topic) return newProducer, nil } + +func legacyToolToSDK(tool legacy.Tool) *sdk.Tool { + inputSchema := any(tool.InputSchema) + if tool.RawInputSchema != nil { + inputSchema = tool.RawInputSchema + } + + var outputSchema any + if tool.RawOutputSchema != nil { + outputSchema = tool.RawOutputSchema + } else if tool.OutputSchema.Type != "" { + outputSchema = tool.OutputSchema + } + + return &sdk.Tool{ + Name: tool.Name, + Description: tool.Description, + InputSchema: inputSchema, + OutputSchema: outputSchema, + Annotations: legacyAnnotationsToSDK(tool.Annotations), + } +} + +func legacyAnnotationsToSDK(annotations legacy.ToolAnnotation) *sdk.ToolAnnotations { + if annotations.Title == "" && + annotations.ReadOnlyHint == nil && + annotations.DestructiveHint == nil && + annotations.IdempotentHint == nil && + annotations.OpenWorldHint == nil { + return nil + } + + converted := &sdk.ToolAnnotations{ + Title: 
annotations.Title, + } + if annotations.ReadOnlyHint != nil { + converted.ReadOnlyHint = *annotations.ReadOnlyHint + } + if annotations.DestructiveHint != nil { + converted.DestructiveHint = annotations.DestructiveHint + } + if annotations.IdempotentHint != nil { + converted.IdempotentHint = *annotations.IdempotentHint + } + if annotations.OpenWorldHint != nil { + converted.OpenWorldHint = annotations.OpenWorldHint + } + return converted +} + +func sdkToolToLegacy(tool *sdk.Tool) legacy.Tool { + if tool == nil { + return legacy.Tool{} + } + + legacyTool := legacy.NewTool(tool.Name) + legacyTool.Description = tool.Description + applyLegacyInputSchema(&legacyTool, tool.InputSchema) + applyLegacyOutputSchema(&legacyTool, tool.OutputSchema) + + if tool.Annotations != nil { + legacyTool.Annotations = legacy.ToolAnnotation{ + Title: tool.Annotations.Title, + ReadOnlyHint: boolPtr(tool.Annotations.ReadOnlyHint), + DestructiveHint: tool.Annotations.DestructiveHint, + IdempotentHint: boolPtr(tool.Annotations.IdempotentHint), + OpenWorldHint: tool.Annotations.OpenWorldHint, + } + } + + return legacyTool +} + +func applyLegacyInputSchema(tool *legacy.Tool, schema any) { + if tool == nil || schema == nil { + return + } + + switch value := schema.(type) { + case legacy.ToolInputSchema: + tool.InputSchema = value + case *legacy.ToolInputSchema: + tool.InputSchema = *value + case json.RawMessage: + tool.RawInputSchema = value + tool.InputSchema = legacy.ToolInputSchema{} + case []byte: + tool.RawInputSchema = json.RawMessage(value) + tool.InputSchema = legacy.ToolInputSchema{} + default: + raw, err := json.Marshal(schema) + if err == nil { + tool.RawInputSchema = raw + tool.InputSchema = legacy.ToolInputSchema{} + } + } +} + +func applyLegacyOutputSchema(tool *legacy.Tool, schema any) { + if tool == nil || schema == nil { + return + } + + switch value := schema.(type) { + case legacy.ToolOutputSchema: + tool.OutputSchema = value + case *legacy.ToolOutputSchema: + tool.OutputSchema = *value + case json.RawMessage: + tool.RawOutputSchema = value + tool.OutputSchema = legacy.ToolOutputSchema{} + case []byte: + tool.RawOutputSchema = json.RawMessage(value) + tool.OutputSchema = legacy.ToolOutputSchema{} + default: + raw, err := json.Marshal(schema) + if err == nil { + tool.RawOutputSchema = raw + tool.OutputSchema = legacy.ToolOutputSchema{} + } + } +} + +func legacyResultToSDK(result *legacy.CallToolResult) *sdk.CallToolResult { + if result == nil { + return nil + } + + converted := &sdk.CallToolResult{ + StructuredContent: result.StructuredContent, + IsError: result.IsError, + } + + if len(result.Content) == 0 { + return converted + } + + converted.Content = make([]sdk.Content, 0, len(result.Content)) + for _, content := range result.Content { + switch value := content.(type) { + case legacy.TextContent: + converted.Content = append(converted.Content, &sdk.TextContent{Text: value.Text}) + case *legacy.TextContent: + converted.Content = append(converted.Content, &sdk.TextContent{Text: value.Text}) + default: + converted.Content = append(converted.Content, &sdk.TextContent{Text: fmt.Sprintf("%v", value)}) + } + } + + return converted +} + +func sdkResultToLegacy(result *sdk.CallToolResult) *legacy.CallToolResult { + if result == nil { + return nil + } + + converted := &legacy.CallToolResult{ + StructuredContent: result.StructuredContent, + IsError: result.IsError, + } + + if len(result.Content) == 0 { + return converted + } + + converted.Content = make([]legacy.Content, 0, len(result.Content)) + for _, 
content := range result.Content { + switch value := content.(type) { + case *sdk.TextContent: + converted.Content = append(converted.Content, legacy.TextContent{ + Type: legacy.ContentTypeText, + Text: value.Text, + }) + default: + converted.Content = append(converted.Content, legacy.TextContent{ + Type: legacy.ContentTypeText, + Text: fmt.Sprintf("%v", value), + }) + } + } + + return converted +} + +func boolPtr(value bool) *bool { + return &value +} diff --git a/pkg/mcp/pftools/manager_conversion_test.go b/pkg/mcp/pftools/manager_conversion_test.go new file mode 100644 index 0000000..0879b90 --- /dev/null +++ b/pkg/mcp/pftools/manager_conversion_test.go @@ -0,0 +1,85 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pftools + +import ( + "encoding/json" + "testing" + + legacy "github.com/mark3labs/mcp-go/mcp" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/require" +) + +func TestLegacyToolToSDKUsesStructuredSchema(t *testing.T) { + legacyTool := legacy.NewTool("test_tool", legacy.WithDescription("desc")) + + sdkTool := legacyToolToSDK(legacyTool) + require.NotNil(t, sdkTool) + require.Equal(t, "test_tool", sdkTool.Name) + require.Equal(t, "desc", sdkTool.Description) + + schema, ok := sdkTool.InputSchema.(legacy.ToolInputSchema) + require.True(t, ok) + require.Equal(t, "object", schema.Type) +} + +func TestSDKToolToLegacyHandlesRawSchema(t *testing.T) { + raw := json.RawMessage(`{"type":"object","properties":{"foo":{"type":"string"}}}`) + sdkTool := &sdk.Tool{ + Name: "test_tool", + Description: "desc", + InputSchema: raw, + } + + legacyTool := sdkToolToLegacy(sdkTool) + require.Equal(t, "test_tool", legacyTool.Name) + require.Equal(t, "desc", legacyTool.Description) + require.Equal(t, raw, legacyTool.RawInputSchema) + require.Equal(t, "", legacyTool.InputSchema.Type) +} + +func TestLegacyResultToSDKTextContent(t *testing.T) { + legacyResult := legacy.NewToolResultText("ok") + + sdkResult := legacyResultToSDK(legacyResult) + require.NotNil(t, sdkResult) + require.Len(t, sdkResult.Content, 1) + + textContent, ok := sdkResult.Content[0].(*sdk.TextContent) + require.True(t, ok) + require.Equal(t, "ok", textContent.Text) +} + +func TestSDKResultToLegacyTextContent(t *testing.T) { + sdkResult := &sdk.CallToolResult{ + Content: []sdk.Content{ + &sdk.TextContent{Text: "ok"}, + }, + IsError: true, + StructuredContent: map[string]any{"status": "ok"}, + } + + legacyResult := sdkResultToLegacy(sdkResult) + require.NotNil(t, legacyResult) + require.True(t, legacyResult.IsError) + require.Equal(t, sdkResult.StructuredContent, legacyResult.StructuredContent) + require.Len(t, legacyResult.Content, 1) + + textContent, ok := legacyResult.Content[0].(legacy.TextContent) + require.True(t, ok) + require.Equal(t, "ok", textContent.Text) + require.Equal(t, legacy.ContentTypeText, textContent.Type) +} diff --git a/pkg/mcp/pftools/schema.go b/pkg/mcp/pftools/schema.go index 62ae155..d5e2240 100644 --- 
a/pkg/mcp/pftools/schema.go +++ b/pkg/mcp/pftools/schema.go @@ -20,17 +20,17 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" + "github.com/google/jsonschema-go/jsonschema" "github.com/streamnative/pulsarctl/pkg/cmdutils" ) // DefaultStringSchema defines the default MCP input schema for string payloads. -var DefaultStringSchema = &mcp.ToolInputSchema{ +var DefaultStringSchema = &jsonschema.Schema{ Type: "object", - Properties: map[string]interface{}{ - "payload": map[string]interface{}{ - "type": "string", - "description": "The payload of the message, in plain text format", + Properties: map[string]*jsonschema.Schema{ + "payload": { + Type: "string", + Description: "The payload of the message, in plain text format", }, }, } @@ -79,7 +79,7 @@ func GetSchemaFromTopic(admin cmdutils.Client, topic string) (*SchemaInfo, error } // ConvertSchemaToToolInput converts a schema to MCP tool input schema -func ConvertSchemaToToolInput(schemaInfo *SchemaInfo) (*mcp.ToolInputSchema, error) { +func ConvertSchemaToToolInput(schemaInfo *SchemaInfo) (*jsonschema.Schema, error) { if schemaInfo == nil { // Default to object with any fields if no schema is provided return DefaultStringSchema, nil @@ -97,7 +97,7 @@ func ConvertSchemaToToolInput(schemaInfo *SchemaInfo) (*mcp.ToolInputSchema, err } // convertComplexSchemaToToolInput handles conversion of complex schema types -func convertComplexSchemaToToolInput(schemaInfo *SchemaInfo) (*mcp.ToolInputSchema, error) { +func convertComplexSchemaToToolInput(schemaInfo *SchemaInfo) (*jsonschema.Schema, error) { if schemaInfo.Definition == nil { return DefaultStringSchema, nil } @@ -113,12 +113,12 @@ func convertComplexSchemaToToolInput(schemaInfo *SchemaInfo) (*mcp.ToolInputSche } // For JSON schemas, use the definition directly - return &mcp.ToolInputSchema{ + return &jsonschema.Schema{ Type: "object", - Properties: map[string]interface{}{ - "payload": map[string]interface{}{ - "type": "string", - "description": "The payload of the message, in JSON String format, the schema of the payload in AVRO format is: " + string(definitionString), + Properties: map[string]*jsonschema.Schema{ + "payload": { + Type: "string", + Description: "The payload of the message, in JSON String format, the schema of the payload in AVRO format is: " + string(definitionString), }, }, }, nil diff --git a/pkg/mcp/pftools/schema_test.go b/pkg/mcp/pftools/schema_test.go new file mode 100644 index 0000000..5d25280 --- /dev/null +++ b/pkg/mcp/pftools/schema_test.go @@ -0,0 +1,58 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pftools + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConvertSchemaToToolInputDefaultsToString(t *testing.T) { + schema, err := ConvertSchemaToToolInput(nil) + require.NoError(t, err) + require.NotNil(t, schema) + require.Equal(t, "object", schema.Type) + + payload, ok := schema.Properties["payload"] + require.True(t, ok) + require.Equal(t, "string", payload.Type) + require.Contains(t, payload.Description, "plain text") +} + +func TestConvertSchemaToToolInputJSONSchema(t *testing.T) { + fields := []any{map[string]any{"name": "value", "type": "string"}} + definition := map[string]interface{}{"fields": fields} + + schema, err := ConvertSchemaToToolInput(&SchemaInfo{Type: "JSON", Definition: definition}) + require.NoError(t, err) + require.NotNil(t, schema) + + payload, ok := schema.Properties["payload"] + require.True(t, ok) + require.Equal(t, "string", payload.Type) + + definitionJSON, err := json.Marshal(fields) + require.NoError(t, err) + expectedDescription := "The payload of the message, in JSON String format, the schema of the payload in AVRO format is: " + string(definitionJSON) + require.Equal(t, expectedDescription, payload.Description) +} + +func TestConvertSchemaToToolInputRejectsAvro(t *testing.T) { + schema, err := ConvertSchemaToToolInput(&SchemaInfo{Type: "AVRO", Definition: map[string]interface{}{}}) + require.Error(t, err) + require.Nil(t, schema) +} diff --git a/pkg/mcp/pftools/types.go b/pkg/mcp/pftools/types.go index 77ad7fe..8c4ab4c 100644 --- a/pkg/mcp/pftools/types.go +++ b/pkg/mcp/pftools/types.go @@ -21,8 +21,8 @@ import ( "github.com/apache/pulsar-client-go/pulsar" "github.com/apache/pulsar-client-go/pulsaradmin/pkg/utils" - "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" + legacyserver "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/pulsarctl/pkg/cmdutils" ) @@ -38,7 +38,8 @@ type PulsarFunctionManager struct { pollInterval time.Duration stopCh chan struct{} callInProgressMap map[string]context.CancelFunc - mcpServer *server.MCPServer + mcpServer *sdk.Server + legacyServer *legacyserver.MCPServer readOnly bool defaultTimeout time.Duration circuitBreakers map[string]*CircuitBreaker @@ -56,7 +57,7 @@ type FunctionTool struct { OutputSchema *SchemaInfo InputTopic string OutputTopic string - Tool mcp.Tool + Tool *sdk.Tool SchemaFetchSuccess bool } @@ -88,7 +89,7 @@ const ( ) // ClusterErrorHandler handles cluster errors for Pulsar function managers. -type ClusterErrorHandler func(*PulsarFunctionManager, error) +type ClusterErrorHandler func(context.Context, *PulsarFunctionManager, error) // ManagerOptions configures PulsarFunctionManager behavior. 
type ManagerOptions struct { diff --git a/pkg/mcp/pulsar_admin_brokers_stats_tools.go b/pkg/mcp/pulsar_admin_brokers_stats_tools.go index d50b398..80f3aee 100644 --- a/pkg/mcp/pulsar_admin_brokers_stats_tools.go +++ b/pkg/mcp/pulsar_admin_brokers_stats_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddBrokerStatsTools adds broker-stats related tools to the MCP server -func PulsarAdminAddBrokerStatsTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddBrokerStatsTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminBrokerStatsToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddBrokerStatsTools(s *server.MCPServer, readOnly bool, features // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_brokers_stats_tools_legacy.go b/pkg/mcp/pulsar_admin_brokers_stats_tools_legacy.go new file mode 100644 index 0000000..78f164b --- /dev/null +++ b/pkg/mcp/pulsar_admin_brokers_stats_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddBrokerStatsToolsLegacy registers Pulsar admin broker stats tools for the legacy server. 
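+// It mirrors PulsarAdminAddBrokerStatsTools but installs handlers on the mark3labs *server.MCPServer.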
+func PulsarAdminAddBrokerStatsToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminBrokerStatsLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_brokers_tools.go b/pkg/mcp/pulsar_admin_brokers_tools.go index 6ce43ed..5222afe 100644 --- a/pkg/mcp/pulsar_admin_brokers_tools.go +++ b/pkg/mcp/pulsar_admin_brokers_tools.go @@ -17,14 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddBrokersTools adds broker-related tools to the MCP server -func PulsarAdminAddBrokersTools(s *server.MCPServer, readOnly bool, features []string) { - // Use the new builder pattern +func PulsarAdminAddBrokersTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarbuilders.NewPulsarAdminBrokersToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,12 +32,10 @@ func PulsarAdminAddBrokersTools(s *server.MCPServer, readOnly bool, features []s tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // Log error but don't fail - this maintains backward compatibility return } - // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_brokers_tools_legacy.go b/pkg/mcp/pulsar_admin_brokers_tools_legacy.go new file mode 100644 index 0000000..4681154 --- /dev/null +++ b/pkg/mcp/pulsar_admin_brokers_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddBrokersToolsLegacy registers Pulsar admin broker tools for the legacy server. 
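+// Tools are built with the same ToolBuildConfig (read-only flag and feature gates) as the go-sdk path.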
+func PulsarAdminAddBrokersToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminBrokersLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_cluster_tools.go b/pkg/mcp/pulsar_admin_cluster_tools.go index 51f1f38..c99f64f 100644 --- a/pkg/mcp/pulsar_admin_cluster_tools.go +++ b/pkg/mcp/pulsar_admin_cluster_tools.go @@ -17,14 +17,14 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" - pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminClusterTools creates Pulsar Admin Cluster tool list using the new builder pattern -func PulsarAdminClusterTools(readOnly bool, features []string) []server.ServerTool { - builder := pulsarBuilders.NewPulsarAdminClusterToolBuilder() +// PulsarAdminAddClusterTools adds cluster-related tools to the MCP server +func PulsarAdminAddClusterTools(s *sdk.Server, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminClusterToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, Features: features, @@ -32,18 +32,10 @@ func PulsarAdminClusterTools(readOnly bool, features []string) []server.ServerTo tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - return nil + return } - return tools -} - -// PulsarAdminAddClusterTools adds cluster-related tools to the MCP server -func PulsarAdminAddClusterTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminClusterTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_cluster_tools_legacy.go b/pkg/mcp/pulsar_admin_cluster_tools_legacy.go new file mode 100644 index 0000000..9e67a34 --- /dev/null +++ b/pkg/mcp/pulsar_admin_cluster_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddClusterToolsLegacy registers Pulsar admin cluster tools for the legacy server. 
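+// If BuildTools fails, registration is skipped silently so legacy startup stays non-fatal.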
+func PulsarAdminAddClusterToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminClusterLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_functions_tools.go b/pkg/mcp/pulsar_admin_functions_tools.go index 2b9203e..3f7b6ba 100644 --- a/pkg/mcp/pulsar_admin_functions_tools.go +++ b/pkg/mcp/pulsar_admin_functions_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddFunctionsTools adds a unified function-related tool to the MCP server -func PulsarAdminAddFunctionsTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddFunctionsTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminFunctionsToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddFunctionsTools(s *server.MCPServer, readOnly bool, features [ // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_functions_tools_legacy.go b/pkg/mcp/pulsar_admin_functions_tools_legacy.go new file mode 100644 index 0000000..3ae4d41 --- /dev/null +++ b/pkg/mcp/pulsar_admin_functions_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddFunctionsToolsLegacy registers Pulsar admin functions tools for the legacy server. 
+func PulsarAdminAddFunctionsToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminFunctionsLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_functions_worker_tools.go b/pkg/mcp/pulsar_admin_functions_worker_tools.go index 1a4a428..7a83252 100644 --- a/pkg/mcp/pulsar_admin_functions_worker_tools.go +++ b/pkg/mcp/pulsar_admin_functions_worker_tools.go @@ -16,16 +16,16 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" - pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminFunctionsWorkerTools creates Pulsar Admin Functions Worker tool list using the new builder pattern -func PulsarAdminFunctionsWorkerTools(readOnly bool, features []string) []server.ServerTool { - builder := pulsarBuilders.NewPulsarAdminFunctionsWorkerToolBuilder() +// PulsarAdminAddFunctionsWorkerTools adds functions worker-related tools to the MCP server. +func PulsarAdminAddFunctionsWorkerTools(s *sdk.Server, readOnly bool, features []string) { + // Use the new builder pattern + builder := pulsarbuilders.NewPulsarAdminFunctionsWorkerToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, Features: features, @@ -33,21 +33,12 @@ func PulsarAdminFunctionsWorkerTools(readOnly bool, features []string) []server. tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Admin Functions Worker tools: %v\n", err) - return nil + // Log error but don't fail - this maintains backward compatibility + return } - return tools -} - -// PulsarAdminAddFunctionsWorkerTools adds functions worker-related tools to the MCP server -func PulsarAdminAddFunctionsWorkerTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminFunctionsWorkerTools(readOnly, features) - + // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } - -// handleFunctionsWorkerTool returns a function to handle functions worker tool requests diff --git a/pkg/mcp/pulsar_admin_functions_worker_tools_legacy.go b/pkg/mcp/pulsar_admin_functions_worker_tools_legacy.go new file mode 100644 index 0000000..d7429a8 --- /dev/null +++ b/pkg/mcp/pulsar_admin_functions_worker_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddFunctionsWorkerToolsLegacy adds functions worker-related tools to the legacy MCP server. +func PulsarAdminAddFunctionsWorkerToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminFunctionsWorkerLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_namespace_policy_tools.go b/pkg/mcp/pulsar_admin_namespace_policy_tools.go index c74cfe0..7c70401 100644 --- a/pkg/mcp/pulsar_admin_namespace_policy_tools.go +++ b/pkg/mcp/pulsar_admin_namespace_policy_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddNamespacePolicyTools adds namespace policy-related tools to the MCP server -func PulsarAdminAddNamespacePolicyTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddNamespacePolicyTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminNamespacePolicyToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddNamespacePolicyTools(s *server.MCPServer, readOnly bool, feat // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_namespace_policy_tools_legacy.go b/pkg/mcp/pulsar_admin_namespace_policy_tools_legacy.go new file mode 100644 index 0000000..9aeae84 --- /dev/null +++ b/pkg/mcp/pulsar_admin_namespace_policy_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddNamespacePolicyToolsLegacy adds namespace policy-related tools to the legacy MCP server. 
+func PulsarAdminAddNamespacePolicyToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminNamespacePolicyLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_namespace_tools.go b/pkg/mcp/pulsar_admin_namespace_tools.go index a31b0e3..a8526a5 100644 --- a/pkg/mcp/pulsar_admin_namespace_tools.go +++ b/pkg/mcp/pulsar_admin_namespace_tools.go @@ -16,16 +16,15 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" - pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminNamespaceTools creates Pulsar Admin Namespace tool list using the new builder pattern -func PulsarAdminNamespaceTools(readOnly bool, features []string) []server.ServerTool { - builder := pulsarBuilders.NewPulsarAdminNamespaceToolBuilder() +// PulsarAdminAddNamespaceTools registers Pulsar admin namespace tools. +func PulsarAdminAddNamespaceTools(s *sdk.Server, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminNamespaceToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, Features: features, @@ -33,19 +32,10 @@ func PulsarAdminNamespaceTools(readOnly bool, features []string) []server.Server tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Admin Namespace tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarAdminAddNamespaceTools adds namespace-related tools to the MCP server -func PulsarAdminAddNamespaceTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminNamespaceTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_namespace_tools_legacy.go b/pkg/mcp/pulsar_admin_namespace_tools_legacy.go new file mode 100644 index 0000000..e985b14 --- /dev/null +++ b/pkg/mcp/pulsar_admin_namespace_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddNamespaceToolsLegacy registers Pulsar admin namespace tools for the legacy server. 
+func PulsarAdminAddNamespaceToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminNamespaceLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_nsisolationpolicy_tools.go b/pkg/mcp/pulsar_admin_nsisolationpolicy_tools.go index f742f6c..c32d3ef 100644 --- a/pkg/mcp/pulsar_admin_nsisolationpolicy_tools.go +++ b/pkg/mcp/pulsar_admin_nsisolationpolicy_tools.go @@ -17,14 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddNsIsolationPolicyTools adds namespace isolation policy related tools to the MCP server -func PulsarAdminAddNsIsolationPolicyTools(s *server.MCPServer, readOnly bool, features []string) { - // Use the new builder pattern +func PulsarAdminAddNsIsolationPolicyTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarbuilders.NewPulsarAdminNsIsolationPolicyToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -37,8 +36,7 @@ func PulsarAdminAddNsIsolationPolicyTools(s *server.MCPServer, readOnly bool, fe return } - // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_nsisolationpolicy_tools_legacy.go b/pkg/mcp/pulsar_admin_nsisolationpolicy_tools_legacy.go new file mode 100644 index 0000000..405945f --- /dev/null +++ b/pkg/mcp/pulsar_admin_nsisolationpolicy_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddNsIsolationPolicyToolsLegacy registers namespace isolation policy tools for the legacy server. 
+func PulsarAdminAddNsIsolationPolicyToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminNsIsolationPolicyLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_packages_tools.go b/pkg/mcp/pulsar_admin_packages_tools.go index afc44cd..70eeed6 100644 --- a/pkg/mcp/pulsar_admin_packages_tools.go +++ b/pkg/mcp/pulsar_admin_packages_tools.go @@ -18,7 +18,7 @@ import ( "context" "strings" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) @@ -54,7 +54,7 @@ func IsPackageURLSupported(functionPkgURL string) bool { } // PulsarAdminAddPackagesTools adds package-related tools to the MCP server -func PulsarAdminAddPackagesTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddPackagesTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminPackagesToolBuilder() config := builders.ToolBuildConfig{ @@ -70,6 +70,6 @@ func PulsarAdminAddPackagesTools(s *server.MCPServer, readOnly bool, features [] // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_packages_tools_legacy.go b/pkg/mcp/pulsar_admin_packages_tools_legacy.go new file mode 100644 index 0000000..da38d94 --- /dev/null +++ b/pkg/mcp/pulsar_admin_packages_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddPackagesToolsLegacy registers Pulsar admin package tools for the legacy server. 
+func PulsarAdminAddPackagesToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminPackagesLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_resourcequotas_tools.go b/pkg/mcp/pulsar_admin_resourcequotas_tools.go index 6382d0c..1d95245 100644 --- a/pkg/mcp/pulsar_admin_resourcequotas_tools.go +++ b/pkg/mcp/pulsar_admin_resourcequotas_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddResourceQuotasTools adds resource quotas-related tools to the MCP server -func PulsarAdminAddResourceQuotasTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddResourceQuotasTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminResourceQuotasToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddResourceQuotasTools(s *server.MCPServer, readOnly bool, featu // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_resourcequotas_tools_legacy.go b/pkg/mcp/pulsar_admin_resourcequotas_tools_legacy.go new file mode 100644 index 0000000..f25bb59 --- /dev/null +++ b/pkg/mcp/pulsar_admin_resourcequotas_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddResourceQuotasToolsLegacy registers Pulsar admin resource quotas tools for the legacy server. 
+func PulsarAdminAddResourceQuotasToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminResourceQuotasLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_schemas_tools.go b/pkg/mcp/pulsar_admin_schemas_tools.go index fd8ea01..0de3a73 100644 --- a/pkg/mcp/pulsar_admin_schemas_tools.go +++ b/pkg/mcp/pulsar_admin_schemas_tools.go @@ -16,15 +16,14 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminSchemaTools creates Pulsar Admin Schema tool list using the new builder pattern -func PulsarAdminSchemaTools(readOnly bool, features []string) []server.ServerTool { +// PulsarAdminAddSchemasTools adds schema-related tools to the MCP server. +func PulsarAdminAddSchemasTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarBuilders.NewPulsarAdminSchemaToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,19 +32,10 @@ func PulsarAdminSchemaTools(readOnly bool, features []string) []server.ServerToo tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Admin Schema tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarAdminAddSchemasTools adds schema-related tools to the MCP server -func PulsarAdminAddSchemasTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminSchemaTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_schemas_tools_legacy.go b/pkg/mcp/pulsar_admin_schemas_tools_legacy.go new file mode 100644 index 0000000..3bfa58f --- /dev/null +++ b/pkg/mcp/pulsar_admin_schemas_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddSchemasToolsLegacy adds schema-related tools to the legacy MCP server. 
+func PulsarAdminAddSchemasToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarBuilders.NewPulsarAdminSchemaLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_sinks_tools.go b/pkg/mcp/pulsar_admin_sinks_tools.go index cc4abac..6f5d07a 100644 --- a/pkg/mcp/pulsar_admin_sinks_tools.go +++ b/pkg/mcp/pulsar_admin_sinks_tools.go @@ -17,14 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminAddSinksTools adds a unified sink-related tool to the MCP server -func PulsarAdminAddSinksTools(s *server.MCPServer, readOnly bool, features []string) { - // Use the new builder pattern +// PulsarAdminAddSinksTools adds sink-related tools to the MCP server +func PulsarAdminAddSinksTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarbuilders.NewPulsarAdminSinksToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,12 +32,10 @@ func PulsarAdminAddSinksTools(s *server.MCPServer, readOnly bool, features []str tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // Log error but don't fail - this maintains backward compatibility return } - // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_sinks_tools_legacy.go b/pkg/mcp/pulsar_admin_sinks_tools_legacy.go new file mode 100644 index 0000000..c8d8057 --- /dev/null +++ b/pkg/mcp/pulsar_admin_sinks_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddSinksToolsLegacy registers Pulsar admin sink tools for the legacy server. 
+func PulsarAdminAddSinksToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminSinksLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_sources_tools.go b/pkg/mcp/pulsar_admin_sources_tools.go index 8abc646..c99944c 100644 --- a/pkg/mcp/pulsar_admin_sources_tools.go +++ b/pkg/mcp/pulsar_admin_sources_tools.go @@ -17,14 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminAddSourcesTools adds a unified source-related tool to the MCP server -func PulsarAdminAddSourcesTools(s *server.MCPServer, readOnly bool, features []string) { - // Use the new builder pattern +// PulsarAdminAddSourcesTools adds source-related tools to the MCP server +func PulsarAdminAddSourcesTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarbuilders.NewPulsarAdminSourcesToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,12 +32,10 @@ func PulsarAdminAddSourcesTools(s *server.MCPServer, readOnly bool, features []s tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // Log error but don't fail - this maintains backward compatibility return } - // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_sources_tools_legacy.go b/pkg/mcp/pulsar_admin_sources_tools_legacy.go new file mode 100644 index 0000000..db80013 --- /dev/null +++ b/pkg/mcp/pulsar_admin_sources_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddSourcesToolsLegacy registers Pulsar admin source tools for the legacy server. 
+func PulsarAdminAddSourcesToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminSourcesLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_subscription_tools.go b/pkg/mcp/pulsar_admin_subscription_tools.go index 2a7d03e..616cfd4 100644 --- a/pkg/mcp/pulsar_admin_subscription_tools.go +++ b/pkg/mcp/pulsar_admin_subscription_tools.go @@ -16,16 +16,15 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" - pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminSubscriptionTools creates Pulsar Admin Subscription tool list using the new builder pattern -func PulsarAdminSubscriptionTools(readOnly bool, features []string) []server.ServerTool { - builder := pulsarBuilders.NewPulsarAdminSubscriptionToolBuilder() +// PulsarAdminAddSubscriptionTools adds subscription-related tools to the MCP server +func PulsarAdminAddSubscriptionTools(s *sdk.Server, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminSubscriptionToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, Features: features, @@ -33,19 +32,10 @@ func PulsarAdminSubscriptionTools(readOnly bool, features []string) []server.Ser tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Admin Subscription tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarAdminAddSubscriptionTools adds subscription-related tools to the MCP server -func PulsarAdminAddSubscriptionTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminSubscriptionTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_subscription_tools_legacy.go b/pkg/mcp/pulsar_admin_subscription_tools_legacy.go new file mode 100644 index 0000000..9262f87 --- /dev/null +++ b/pkg/mcp/pulsar_admin_subscription_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddSubscriptionToolsLegacy registers Pulsar admin subscription tools for the legacy server. 
+func PulsarAdminAddSubscriptionToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminSubscriptionLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_tenant_tools.go b/pkg/mcp/pulsar_admin_tenant_tools.go index 3ab45a4..302e95e 100644 --- a/pkg/mcp/pulsar_admin_tenant_tools.go +++ b/pkg/mcp/pulsar_admin_tenant_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddTenantTools registers Pulsar admin tenant tools. -func PulsarAdminAddTenantTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddTenantTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminTenantToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddTenantTools(s *server.MCPServer, readOnly bool, features []st // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_tenant_tools_legacy.go b/pkg/mcp/pulsar_admin_tenant_tools_legacy.go new file mode 100644 index 0000000..d4c13ee --- /dev/null +++ b/pkg/mcp/pulsar_admin_tenant_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddTenantToolsLegacy registers Pulsar admin tenant tools for the legacy server. 
+func PulsarAdminAddTenantToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminTenantLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_topic_policy_tools.go b/pkg/mcp/pulsar_admin_topic_policy_tools.go index 7eb1b1c..8199a0e 100644 --- a/pkg/mcp/pulsar_admin_topic_policy_tools.go +++ b/pkg/mcp/pulsar_admin_topic_policy_tools.go @@ -17,13 +17,13 @@ package mcp import ( "context" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) // PulsarAdminAddTopicPolicyTools adds topic policy-related tools to the MCP server -func PulsarAdminAddTopicPolicyTools(s *server.MCPServer, readOnly bool, features []string) { +func PulsarAdminAddTopicPolicyTools(s *sdk.Server, readOnly bool, features []string) { // Use the new builder pattern builder := pulsarbuilders.NewPulsarAdminTopicPolicyToolBuilder() config := builders.ToolBuildConfig{ @@ -39,6 +39,6 @@ func PulsarAdminAddTopicPolicyTools(s *server.MCPServer, readOnly bool, features // Add all built tools to the server for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_topic_policy_tools_legacy.go b/pkg/mcp/pulsar_admin_topic_policy_tools_legacy.go new file mode 100644 index 0000000..b4af979 --- /dev/null +++ b/pkg/mcp/pulsar_admin_topic_policy_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarbuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddTopicPolicyToolsLegacy adds topic policy-related tools to the legacy MCP server. 
+func PulsarAdminAddTopicPolicyToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarbuilders.NewPulsarAdminTopicPolicyLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_admin_topic_tools.go b/pkg/mcp/pulsar_admin_topic_tools.go index c02fc64..fafb8ec 100644 --- a/pkg/mcp/pulsar_admin_topic_tools.go +++ b/pkg/mcp/pulsar_admin_topic_tools.go @@ -16,15 +16,14 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarAdminTopicTools creates Pulsar Admin Topic tool list using the new builder pattern -func PulsarAdminTopicTools(readOnly bool, features []string) []server.ServerTool { +// PulsarAdminAddTopicTools registers Pulsar admin topic tools. +func PulsarAdminAddTopicTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarBuilders.NewPulsarAdminTopicToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,19 +32,10 @@ func PulsarAdminTopicTools(readOnly bool, features []string) []server.ServerTool tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Admin Topic tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarAdminAddTopicTools adds topic-related tools to the MCP server -func PulsarAdminAddTopicTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarAdminTopicTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_admin_topic_tools_legacy.go b/pkg/mcp/pulsar_admin_topic_tools_legacy.go new file mode 100644 index 0000000..02b64aa --- /dev/null +++ b/pkg/mcp/pulsar_admin_topic_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarAdminAddTopicToolsLegacy registers Pulsar admin topic tools for the legacy server. 
+func PulsarAdminAddTopicToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarBuilders.NewPulsarAdminTopicLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_client_consume_tools.go b/pkg/mcp/pulsar_client_consume_tools.go index 7e68f33..f96cd46 100644 --- a/pkg/mcp/pulsar_client_consume_tools.go +++ b/pkg/mcp/pulsar_client_consume_tools.go @@ -16,15 +16,14 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarClientConsumeTools creates Pulsar Client Consumer tool list using the new builder pattern -func PulsarClientConsumeTools(readOnly bool, features []string) []server.ServerTool { +// PulsarClientAddConsumerTools adds Pulsar client consumer tools to the MCP server. +func PulsarClientAddConsumerTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarBuilders.NewPulsarClientConsumeToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,19 +32,10 @@ func PulsarClientConsumeTools(readOnly bool, features []string) []server.ServerT tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Client Consumer tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarClientAddConsumerTools adds Pulsar client consumer tools to the MCP server -func PulsarClientAddConsumerTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarClientConsumeTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_client_consume_tools_legacy.go b/pkg/mcp/pulsar_client_consume_tools_legacy.go new file mode 100644 index 0000000..aa53645 --- /dev/null +++ b/pkg/mcp/pulsar_client_consume_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarClientAddConsumerToolsLegacy adds Pulsar client consumer tools to the legacy MCP server. 
+func PulsarClientAddConsumerToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarBuilders.NewPulsarClientConsumeLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_client_produce_tools.go b/pkg/mcp/pulsar_client_produce_tools.go index 15a5a84..f9f4720 100644 --- a/pkg/mcp/pulsar_client_produce_tools.go +++ b/pkg/mcp/pulsar_client_produce_tools.go @@ -16,15 +16,14 @@ package mcp import ( "context" - "fmt" - "github.com/mark3labs/mcp-go/server" + sdk "github.com/modelcontextprotocol/go-sdk/mcp" "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" ) -// PulsarClientProduceTools creates Pulsar Client Producer tool list using the new builder pattern -func PulsarClientProduceTools(readOnly bool, features []string) []server.ServerTool { +// PulsarClientAddProducerTools adds Pulsar client producer tools to the MCP server. +func PulsarClientAddProducerTools(s *sdk.Server, readOnly bool, features []string) { builder := pulsarBuilders.NewPulsarClientProduceToolBuilder() config := builders.ToolBuildConfig{ ReadOnly: readOnly, @@ -33,19 +32,10 @@ func PulsarClientProduceTools(readOnly bool, features []string) []server.ServerT tools, err := builder.BuildTools(context.Background(), config) if err != nil { - // In production environment, this should use proper logging - fmt.Printf("Failed to build Pulsar Client Producer tools: %v\n", err) - return nil + return } - return tools -} - -// PulsarClientAddProducerTools adds Pulsar client producer tools to the MCP server -func PulsarClientAddProducerTools(s *server.MCPServer, readOnly bool, features []string) { - tools := PulsarClientProduceTools(readOnly, features) - for _, tool := range tools { - s.AddTool(tool.Tool, tool.Handler) + tool.Register(s) } } diff --git a/pkg/mcp/pulsar_client_produce_tools_legacy.go b/pkg/mcp/pulsar_client_produce_tools_legacy.go new file mode 100644 index 0000000..72d1a1e --- /dev/null +++ b/pkg/mcp/pulsar_client_produce_tools_legacy.go @@ -0,0 +1,41 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + + "github.com/mark3labs/mcp-go/server" + "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders" + pulsarBuilders "github.com/streamnative/streamnative-mcp-server/pkg/mcp/builders/pulsar" +) + +// PulsarClientAddProducerToolsLegacy adds Pulsar client producer tools to the legacy MCP server. 
+func PulsarClientAddProducerToolsLegacy(s *server.MCPServer, readOnly bool, features []string) { + builder := pulsarBuilders.NewPulsarClientProduceLegacyToolBuilder() + config := builders.ToolBuildConfig{ + ReadOnly: readOnly, + Features: features, + } + + tools, err := builder.BuildTools(context.Background(), config) + if err != nil { + return + } + + for _, tool := range tools { + s.AddTool(tool.Tool, tool.Handler) + } +} diff --git a/pkg/mcp/pulsar_functions_as_tools.go b/pkg/mcp/pulsar_functions_as_tools.go index 1c76afb..9f5beb0 100644 --- a/pkg/mcp/pulsar_functions_as_tools.go +++ b/pkg/mcp/pulsar_functions_as_tools.go @@ -15,6 +15,7 @@ package mcp import ( + "context" "log" "os" "slices" @@ -23,6 +24,7 @@ import ( "sync" "time" + "github.com/streamnative/streamnative-mcp-server/pkg/config" pftools2 "github.com/streamnative/streamnative-mcp-server/pkg/mcp/pftools" ) @@ -49,8 +51,29 @@ func StopAllPulsarFunctionManagers() { log.Println("All Pulsar Function managers stopped") } +// PulsarFunctionManagedMcpTools registers Pulsar Functions-as-tools handlers. +func (s *LegacyServer) PulsarFunctionManagedMcpTools(readOnly bool, features []string, sessionID string) { + pftoolsServer := &pftools2.Server{ + LegacyServer: s.MCPServer, + KafkaSession: s.KafkaSession, + PulsarSession: s.PulsarSession, + Logger: s.logger, + } + registerPulsarFunctionManagedMcpTools(readOnly, features, sessionID, s.SNCloudSession, pftoolsServer) +} + // PulsarFunctionManagedMcpTools registers Pulsar Functions-as-tools handlers. func (s *Server) PulsarFunctionManagedMcpTools(readOnly bool, features []string, sessionID string) { + pftoolsServer := &pftools2.Server{ + MCPServer: s.MCPServer, + KafkaSession: s.KafkaSession, + PulsarSession: s.PulsarSession, + Logger: s.logger, + } + registerPulsarFunctionManagedMcpTools(readOnly, features, sessionID, s.SNCloudSession, pftoolsServer) +} + +func registerPulsarFunctionManagedMcpTools(readOnly bool, features []string, sessionID string, snCloudSession *config.Session, pftoolsServer *pftools2.Server) { if !slices.Contains(features, string(FeatureAll)) && !slices.Contains(features, string(FeatureFunctionsAsTools)) && !slices.Contains(features, string(FeatureStreamNativeCloud)) { @@ -66,14 +89,17 @@ func (s *Server) PulsarFunctionManagedMcpTools(readOnly bool, features []string, options := pftools2.DefaultManagerOptions() // Configure cluster error handler for graceful cleanup - options.ClusterErrorHandler = func(_ *pftools2.PulsarFunctionManager, err error) { + options.ClusterErrorHandler = func(_ context.Context, _ *pftools2.PulsarFunctionManager, err error) { log.Printf("Cluster health error detected: %v", err) log.Printf("Consider implementing cleanup logic here (e.g., stopping manager, notifying monitoring systems)") // The calling service can implement specific cleanup logic here // For example: stop the manager, send alerts, implement backoff strategies } - if s.SNCloudSession.Ctx.Organization == "" || s.SNCloudSession.Ctx.PulsarInstance == "" || s.SNCloudSession.Ctx.PulsarCluster == "" { + if snCloudSession == nil || + snCloudSession.Ctx.Organization == "" || + snCloudSession.Ctx.PulsarInstance == "" || + snCloudSession.Ctx.PulsarCluster == "" { log.Printf("Skipping Pulsar Functions as MCP Tools because both organization, pulsar instance and pulsar cluster are not set") return } @@ -116,14 +142,6 @@ func (s *Server) PulsarFunctionManagedMcpTools(readOnly bool, features []string, log.Printf("Setting Pulsar Functions strict export to %v", options.StrictExport) } - // 
Convert Server to the internal pftools.Server type - pftoolsServer := &pftools2.Server{ - MCPServer: s.MCPServer, - KafkaSession: s.KafkaSession, - PulsarSession: s.PulsarSession, - Logger: s.logger, - } - manager, err := pftools2.NewPulsarFunctionManager(pftoolsServer, readOnly, options, sessionID) if err != nil { log.Printf("Failed to create Pulsar Function manager: %v", err) diff --git a/pkg/mcp/schema.go b/pkg/mcp/schema.go new file mode 100644 index 0000000..97f7017 --- /dev/null +++ b/pkg/mcp/schema.go @@ -0,0 +1,162 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "fmt" + "reflect" + + "github.com/google/jsonschema-go/jsonschema" +) + +// InputSchema infers a JSON Schema for tool inputs. +// JSON tags control property names and required fields (omitempty/omitzero). +// jsonschema tags provide property descriptions (jsonschema:"..."). +func InputSchema[T any]() (*jsonschema.Schema, error) { + if isAnyType[T]() { + return &jsonschema.Schema{Type: "object"}, nil + } + return buildSchema[T]("input") +} + +// OutputSchema infers a JSON Schema for tool outputs. +// When the output type is any, no schema is returned to avoid constraining output. +func OutputSchema[T any]() (*jsonschema.Schema, error) { + if isAnyType[T]() { + return nil, nil + } + return buildSchema[T]("output") +} + +func buildSchema[T any](label string) (*jsonschema.Schema, error) { + schema, err := schemaForType[T]() + if err != nil { + return nil, fmt.Errorf("%s schema: %w", label, err) + } + normalizeAdditionalProperties(schema) + if !isObjectSchema(schema) { + return nil, fmt.Errorf("%s schema must have type \"object\"", label) + } + return schema, nil +} + +func schemaForType[T any]() (*jsonschema.Schema, error) { + t := reflect.TypeFor[T]() + for t.Kind() == reflect.Pointer { + t = t.Elem() + } + return jsonschema.ForType(t, &jsonschema.ForOptions{}) +} + +func isObjectSchema(schema *jsonschema.Schema) bool { + if schema == nil { + return false + } + return schema.Type == "object" +} + +func isAnyType[T any]() bool { + t := reflect.TypeFor[T]() + return t.Kind() == reflect.Interface && t.NumMethod() == 0 +} + +func normalizeAdditionalProperties(schema *jsonschema.Schema) { + visited := map[*jsonschema.Schema]bool{} + var walk func(*jsonschema.Schema) + walk = func(s *jsonschema.Schema) { + if s == nil || visited[s] { + return + } + visited[s] = true + + if s.Type == "object" && s.Properties != nil && isFalseSchema(s.AdditionalProperties) { + s.AdditionalProperties = nil + } + + for _, prop := range s.Properties { + walk(prop) + } + for _, prop := range s.PatternProperties { + walk(prop) + } + for _, def := range s.Defs { + walk(def) + } + for _, def := range s.Definitions { + walk(def) + } + if s.AdditionalProperties != nil && !isFalseSchema(s.AdditionalProperties) { + walk(s.AdditionalProperties) + } + if s.Items != nil { + walk(s.Items) + } + for _, item := range s.PrefixItems { + walk(item) + } + if s.AdditionalItems != nil { + 
walk(s.AdditionalItems) + } + if s.UnevaluatedItems != nil { + walk(s.UnevaluatedItems) + } + if s.UnevaluatedProperties != nil { + walk(s.UnevaluatedProperties) + } + if s.PropertyNames != nil { + walk(s.PropertyNames) + } + if s.Contains != nil { + walk(s.Contains) + } + for _, subschema := range s.AllOf { + walk(subschema) + } + for _, subschema := range s.AnyOf { + walk(subschema) + } + for _, subschema := range s.OneOf { + walk(subschema) + } + if s.Not != nil { + walk(s.Not) + } + if s.If != nil { + walk(s.If) + } + if s.Then != nil { + walk(s.Then) + } + if s.Else != nil { + walk(s.Else) + } + for _, subschema := range s.DependentSchemas { + walk(subschema) + } + } + walk(schema) +} + +func isFalseSchema(schema *jsonschema.Schema) bool { + if schema == nil || schema.Not == nil { + return false + } + if !reflect.ValueOf(*schema.Not).IsZero() { + return false + } + clone := *schema + clone.Not = nil + return reflect.ValueOf(clone).IsZero() +} diff --git a/pkg/mcp/schema_test.go b/pkg/mcp/schema_test.go new file mode 100644 index 0000000..35dcef3 --- /dev/null +++ b/pkg/mcp/schema_test.go @@ -0,0 +1,63 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type sampleInput struct { + Name string `json:"name" jsonschema:"resource name"` + Count int `json:"count,omitempty" jsonschema:"optional count"` +} + +type nestedInput struct { + Meta sampleInput `json:"meta"` +} + +func TestInputSchemaAllowsAdditionalProperties(t *testing.T) { + schema, err := InputSchema[sampleInput]() + require.NoError(t, err) + require.Equal(t, "object", schema.Type) + require.NotNil(t, schema.Properties["name"]) + require.NotNil(t, schema.Properties["count"]) + require.Contains(t, schema.Required, "name") + require.NotContains(t, schema.Required, "count") + require.Nil(t, schema.AdditionalProperties) +} + +func TestInputSchemaAllowsNestedAdditionalProperties(t *testing.T) { + schema, err := InputSchema[nestedInput]() + require.NoError(t, err) + metaSchema := schema.Properties["meta"] + require.NotNil(t, metaSchema) + require.Nil(t, metaSchema.AdditionalProperties) +} + +func TestInputSchemaMapKeepsAdditionalProperties(t *testing.T) { + schema, err := InputSchema[map[string]string]() + require.NoError(t, err) + require.Equal(t, "object", schema.Type) + require.NotNil(t, schema.AdditionalProperties) + require.Equal(t, "string", schema.AdditionalProperties.Type) +} + +func TestOutputSchemaAnyIsNil(t *testing.T) { + schema, err := OutputSchema[any]() + require.NoError(t, err) + require.Nil(t, schema) +} diff --git a/pkg/mcp/server.go b/pkg/mcp/server.go index c922883..5a8a1bb 100644 --- a/pkg/mcp/server.go +++ b/pkg/mcp/server.go @@ -22,8 +22,8 @@ import ( "github.com/streamnative/streamnative-mcp-server/pkg/pulsar" ) -// Server wraps MCP server state and StreamNative sessions. -type Server struct { +// LegacyServer wraps MCP server state and StreamNative sessions for mark3labs/mcp-go. 
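+// The go-sdk-based wrapper, Server, is defined in pkg/mcp/server_new.go; the
+// registration helpers in the *_tools_legacy.go files target this type.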
+type LegacyServer struct { MCPServer *server.MCPServer KafkaSession *kafka.Session PulsarSession *pulsar.Session @@ -31,17 +31,17 @@ type Server struct { logger *logrus.Logger } -// NewServer creates a new MCP server with StreamNative integrations. -func NewServer(name, version string, logger *logrus.Logger, opts ...server.ServerOption) *Server { +// NewLegacyServer creates a new MCP server with StreamNative integrations. +func NewLegacyServer(name, version string, logger *logrus.Logger, opts ...server.ServerOption) *LegacyServer { // Create a new MCP server - opts = AddOpts(opts...) + opts = addLegacyOpts(opts...) s := server.NewMCPServer(name, version, opts...) - mcpserver := CreateSNCloudMCPServer(s, logger) + mcpserver := createSNCloudLegacyServer(s, logger) return mcpserver } -// AddOpts merges default server options with custom options. -func AddOpts(opts ...server.ServerOption) []server.ServerOption { +// addLegacyOpts merges default server options with custom options. +func addLegacyOpts(opts ...server.ServerOption) []server.ServerOption { defaultOpts := []server.ServerOption{ server.WithResourceCapabilities(true, true), server.WithRecovery(), @@ -51,9 +51,9 @@ func AddOpts(opts ...server.ServerOption) []server.ServerOption { return opts } -// CreateSNCloudMCPServer constructs a Server wrapper for StreamNative Cloud. -func CreateSNCloudMCPServer(s *server.MCPServer, logger *logrus.Logger) *Server { - mcpserver := &Server{ +// createSNCloudLegacyServer constructs a LegacyServer wrapper for StreamNative Cloud. +func createSNCloudLegacyServer(s *server.MCPServer, logger *logrus.Logger) *LegacyServer { + mcpserver := &LegacyServer{ MCPServer: s, logger: logger, SNCloudSession: &config.Session{}, diff --git a/pkg/mcp/server_middleware_test.go b/pkg/mcp/server_middleware_test.go new file mode 100644 index 0000000..8c69764 --- /dev/null +++ b/pkg/mcp/server_middleware_test.go @@ -0,0 +1,51 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mcp + +import ( + "context" + "testing" + + sdk "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/stretchr/testify/require" +) + +func TestRecoveryMiddleware_ToolPanicUsesToolName(t *testing.T) { + middleware := recoveryMiddleware(nil) + handler := middleware(func(context.Context, string, sdk.Request) (sdk.Result, error) { + panic("boom") + }) + + req := &sdk.CallToolRequest{ + Params: &sdk.CallToolParamsRaw{ + Name: "kafka.topics.create", + }, + } + result, err := handler(context.Background(), "tools/call", req) + require.Nil(t, result) + require.EqualError(t, err, "panic recovered in kafka.topics.create tool handler: boom") +} + +func TestRecoveryMiddleware_MethodPanicUsesMethodName(t *testing.T) { + middleware := recoveryMiddleware(nil) + handler := middleware(func(context.Context, string, sdk.Request) (sdk.Result, error) { + panic("boom") + }) + + req := &sdk.ListResourcesRequest{} + result, err := handler(context.Background(), "resources/list", req) + require.Nil(t, result) + require.EqualError(t, err, "panic recovered in resources/list request: boom") +} diff --git a/pkg/mcp/server_new.go b/pkg/mcp/server_new.go new file mode 100644 index 0000000..5d1b8a4 --- /dev/null +++ b/pkg/mcp/server_new.go @@ -0,0 +1,215 @@ +// Copyright 2025 StreamNative +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mcp + +import ( + "context" + "fmt" + "log/slog" + "runtime/debug" + "time" + + sdk "github.com/modelcontextprotocol/go-sdk/mcp" + "github.com/sirupsen/logrus" + "github.com/streamnative/streamnative-mcp-server/pkg/config" + "github.com/streamnative/streamnative-mcp-server/pkg/kafka" + "github.com/streamnative/streamnative-mcp-server/pkg/pulsar" +) + +// Server wraps MCP go-sdk server state and StreamNative sessions. +type Server struct { + MCPServer *sdk.Server + KafkaSession *kafka.Session + PulsarSession *pulsar.Session + SNCloudSession *config.Session + logger *logrus.Logger +} + +// ServerOption mutates MCP go-sdk server options. +type ServerOption func(*sdk.ServerOptions) + +// NewServer creates a new MCP go-sdk server with StreamNative integrations. +func NewServer(name, version string, logger *logrus.Logger, opts ...ServerOption) *Server { + serverOptions := defaultServerOptions(logger) + for _, opt := range opts { + if opt != nil { + opt(serverOptions) + } + } + + impl := &sdk.Implementation{Name: name, Version: version} + mcpServer := sdk.NewServer(impl, serverOptions) + addDefaultMiddleware(mcpServer, logger) + + return &Server{ + MCPServer: mcpServer, + logger: logger, + SNCloudSession: &config.Session{}, + KafkaSession: &kafka.Session{}, + PulsarSession: &pulsar.Session{}, + } +} + +// WithInstructions sets server instructions returned in initialize responses. +func WithInstructions(instructions string) ServerOption { + return func(opts *sdk.ServerOptions) { + opts.Instructions = instructions + } +} + +// WithCapabilities overrides the default server capability configuration. 
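+// The provided value replaces the capabilities from defaultServerOptions wholesale rather than merging with them.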
+func WithCapabilities(capabilities *sdk.ServerCapabilities) ServerOption { + return func(opts *sdk.ServerOptions) { + opts.Capabilities = capabilities + } +} + +// WithLogger overrides the default slog logger for the go-sdk server. +func WithLogger(logger *slog.Logger) ServerOption { + return func(opts *sdk.ServerOptions) { + opts.Logger = logger + } +} + +func defaultServerOptions(logger *logrus.Logger) *sdk.ServerOptions { + opts := &sdk.ServerOptions{ + Capabilities: &sdk.ServerCapabilities{ + Logging: &sdk.LoggingCapabilities{}, + Resources: &sdk.ResourceCapabilities{ + Subscribe: true, + ListChanged: true, + }, + }, + } + + if logger != nil { + opts.Logger = slog.New(slog.NewTextHandler(logger.Writer(), &slog.HandlerOptions{ + Level: slogLevelFromLogrus(logger.Level), + })) + } + + return opts +} + +func slogLevelFromLogrus(level logrus.Level) slog.Level { + switch level { + case logrus.TraceLevel, logrus.DebugLevel: + return slog.LevelDebug + case logrus.WarnLevel: + return slog.LevelWarn + case logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel: + return slog.LevelError + default: + return slog.LevelInfo + } +} + +func addDefaultMiddleware(server *sdk.Server, logger *logrus.Logger) { + if server == nil { + return + } + + middlewares := []sdk.Middleware{ + recoveryMiddleware(logger), + } + if logger != nil { + middlewares = append([]sdk.Middleware{loggingMiddleware(logger)}, middlewares...) + } + server.AddReceivingMiddleware(middlewares...) +} + +func loggingMiddleware(logger *logrus.Logger) sdk.Middleware { + return func(next sdk.MethodHandler) sdk.MethodHandler { + return func(ctx context.Context, method string, req sdk.Request) (sdk.Result, error) { + start := time.Now() + sessionID := "" + if req != nil { + if session := req.GetSession(); session != nil { + sessionID = session.ID() + } + } + + entry := logger.WithFields(logrus.Fields{ + "method": method, + "session_id": sessionID, + }) + + if callReq, ok := req.(*sdk.CallToolRequest); ok && callReq != nil && callReq.Params != nil { + entry = entry.WithField("tool", callReq.Params.Name) + entry.Debug("MCP tool call started") + } else { + entry.Debug("MCP request started") + } + + result, err := next(ctx, method, req) + duration := time.Since(start) + + if err != nil { + entry.WithFields(logrus.Fields{ + "duration_ms": duration.Milliseconds(), + }).WithError(err).Error("MCP request failed") + return result, err + } + + entry.WithFields(logrus.Fields{ + "duration_ms": duration.Milliseconds(), + }).Debug("MCP request completed") + return result, nil + } + } +} + +func recoveryMiddleware(logger *logrus.Logger) sdk.Middleware { + return func(next sdk.MethodHandler) sdk.MethodHandler { + return func(ctx context.Context, method string, req sdk.Request) (result sdk.Result, err error) { + defer func() { + if recovered := recover(); recovered != nil { + toolName := "" + if callReq, ok := req.(*sdk.CallToolRequest); ok && callReq != nil && callReq.Params != nil { + toolName = callReq.Params.Name + } + + if logger != nil { + sessionID := "" + if req != nil { + if session := req.GetSession(); session != nil { + sessionID = session.ID() + } + } + fields := logrus.Fields{ + "method": method, + "session_id": sessionID, + "panic": recovered, + } + if toolName != "" { + fields["tool"] = toolName + } + logger.WithFields(fields).Error("MCP request panic recovered") + logger.WithField("stack", string(debug.Stack())).Debug("MCP panic stack") + } + + if toolName != "" { + err = fmt.Errorf("panic recovered in %s tool handler: %v", toolName, 
recovered) + } else { + err = fmt.Errorf("panic recovered in %s request: %v", method, recovered) + } + result = nil + } + }() + + return next(ctx, method, req) + } + } +} diff --git a/sdk/sdk-apiserver/go.mod b/sdk/sdk-apiserver/go.mod index 9bdd735..e2040d2 100644 --- a/sdk/sdk-apiserver/go.mod +++ b/sdk/sdk-apiserver/go.mod @@ -1,9 +1,7 @@ module github.com/streamnative/streamnative-mcp-server/sdk/sdk-apiserver -go 1.23.0 +go 1.24.0 toolchain go1.24.4 -require golang.org/x/oauth2 v0.27.0 - -require github.com/google/go-cmp v0.7.0 // indirect +require golang.org/x/oauth2 v0.34.0 diff --git a/sdk/sdk-apiserver/go.sum b/sdk/sdk-apiserver/go.sum index c55f307..51ef1a2 100644 --- a/sdk/sdk-apiserver/go.sum +++ b/sdk/sdk-apiserver/go.sum @@ -1,4 +1 @@ -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
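A minimal sketch of how the additions above fit together, assuming the go-sdk's generic `AddTool` helper and stdio transport; the `echo` tool, `EchoInput`, and `EchoOutput` are illustrative names, not part of this change:

```go
package main

import (
	"context"
	"log"

	sdk "github.com/modelcontextprotocol/go-sdk/mcp"
	"github.com/sirupsen/logrus"

	"github.com/streamnative/streamnative-mcp-server/pkg/mcp"
)

// EchoInput is a hypothetical tool input; json/jsonschema tags drive the generated schema.
type EchoInput struct {
	Message string `json:"message" jsonschema:"text to echo back"`
}

// EchoOutput is a hypothetical structured output.
type EchoOutput struct {
	Message string `json:"message"`
}

func main() {
	logger := logrus.New()

	// NewServer wires the default capabilities plus the logging/recovery middleware.
	s := mcp.NewServer("example", "0.0.1", logger, mcp.WithInstructions("Demo server"))

	// InputSchema derives the JSON Schema from the struct tags.
	inputSchema, err := mcp.InputSchema[EchoInput]()
	if err != nil {
		log.Fatal(err)
	}

	tool := &sdk.Tool{
		Name:        "echo",
		Description: "Echoes the provided message",
		InputSchema: inputSchema,
	}

	// Assumed go-sdk helper: AddTool installs the tool with a typed handler.
	sdk.AddTool(s.MCPServer, tool, func(_ context.Context, _ *sdk.CallToolRequest, in EchoInput) (*sdk.CallToolResult, EchoOutput, error) {
		return &sdk.CallToolResult{
			Content: []sdk.Content{&sdk.TextContent{Text: in.Message}},
		}, EchoOutput{Message: in.Message}, nil
	})

	// Assumed transport wiring: serve the MCP server over stdio.
	if err := s.MCPServer.Run(context.Background(), &sdk.StdioTransport{}); err != nil {
		log.Fatal(err)
	}
}
```

Fields without `omitempty`/`omitzero` (here `message`) end up in the schema's `required` list, matching the behavior exercised in `schema_test.go`.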