Mirror of https://github.com/openshift/openshift-mcp-server.git, synced 2025-10-17 14:27:48 +03:00
feat: Multi Cluster Support (#348)
* feat: add cluster provider for kubeconfig
* feat: move server to use ClusterProvider interface
* feat: authentication middleware works with cluster provider
* fix: unit tests work after cluster provider changes
* feat: add tool mutator to add cluster parameter
* test: handle cluster parameter
* fix: handle lazy init correctly
* refactor: move to using multi-strategy ManagerProvider
* feat: add contexts_list tool
* refactor: make tool mutator generic between cluster/context naming
* feat: introduce tool filter
* refactor: use new ManagerProvider/mutator/filter within mcp server
* fix(test): tests expect context parameter in tool defs
* feat: auth handles multi-cluster case correctly
* fix: small changes from local testing
* chore: fix enum test
* review: Multi Cluster support (#1)
  * nit: rename contexts_list to configuration_contexts_list. Besides the conventional naming, it helps LLMs understand the context of the tool by providing a certain level of hierarchy.
  * fix(mcp): ToolMutator doesn't rely on magic strings
  * refactor(api): don't expose ManagerProvider to toolsets
  * test(mcp): configuration_contexts_list basic tests
  * test(toolsets): revert edge-case test. This test should not be touched.
  * test(toolsets): add specific metadata tests for multi-cluster
  * fix(mcp): ToolFilter doesn't rely on magic strings (partially)
  * test(api): IsClusterAware and IsTargetListProvider default values
  * test(mcp): revert unneeded changes in mcp_tools_test.go
* fix: always include configuration_contexts_list if contexts > 1
* feat: include server urls in configuration_contexts_list

Signed-off-by: Calum Murray <cmurray@redhat.com>
Signed-off-by: Marc Nuri <marc@marcnuri.com>
Co-authored-by: Marc Nuri <marc@marcnuri.com>
@@ -8,15 +8,10 @@ func KubeConfigFake() *clientcmdapi.Config {
fakeConfig := clientcmdapi.NewConfig()
fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster()
fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443"
fakeConfig.Clusters["additional-cluster"] = clientcmdapi.NewCluster()
fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo()
fakeConfig.AuthInfos["additional-auth"] = clientcmdapi.NewAuthInfo()
fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext()
fakeConfig.Contexts["fake-context"].Cluster = "fake"
fakeConfig.Contexts["fake-context"].AuthInfo = "fake"
fakeConfig.Contexts["additional-context"] = clientcmdapi.NewContext()
fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster"
fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth"
fakeConfig.CurrentContext = "fake-context"
return fakeConfig
}
@@ -73,10 +73,14 @@ func (m *MockServer) Kubeconfig() *api.Config {
}

func (m *MockServer) KubeconfigFile(t *testing.T) string {
kubeconfig := filepath.Join(t.TempDir(), "config")
err := clientcmd.WriteToFile(*m.Kubeconfig(), kubeconfig)
return KubeconfigFile(t, m.Kubeconfig())
}

func KubeconfigFile(t *testing.T, kubeconfig *api.Config) string {
kubeconfigFile := filepath.Join(t.TempDir(), "config")
err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile)
require.NoError(t, err, "Expected no error writing kubeconfig file")
return kubeconfig
return kubeconfigFile
}

func WriteObject(w http.ResponseWriter, obj runtime.Object) {
@@ -10,8 +10,29 @@ import (
)

type ServerTool struct {
Tool Tool
Handler ToolHandlerFunc
ClusterAware *bool
TargetListProvider *bool
}

// IsClusterAware indicates whether the tool can accept a "cluster" or "context" parameter
// to operate on a specific Kubernetes cluster context.
// Defaults to true if not explicitly set
func (s *ServerTool) IsClusterAware() bool {
if s.ClusterAware != nil {
return *s.ClusterAware
}
return true
}

// IsTargetListProvider indicates whether the tool is used to provide a list of targets (clusters/contexts)
// Defaults to false if not explicitly set
func (s *ServerTool) IsTargetListProvider() bool {
if s.TargetListProvider != nil {
return *s.TargetListProvider
}
return false
}

type Toolset interface {
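With these fields in place, a toolset can opt a tool out of the injected cluster/context parameter or mark it as the tool that lists the available targets. A minimal sketch follows; the Tool literal and handler name are illustrative, only the ClusterAware/TargetListProvider wiring comes from this change:

    // Hypothetical declaration; listContextsHandler is an assumed ToolHandlerFunc.
    contextsList := api.ServerTool{
        Tool:               api.Tool{Name: "configuration_contexts_list"},
        Handler:            listContextsHandler,
        ClusterAware:       ptr.To(false), // the mutator will not add a "context" parameter
        TargetListProvider: ptr.To(true),  // only kept when more than one target exists
    }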
pkg/api/toolsets_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package api

import (
"testing"

"github.com/stretchr/testify/suite"
"k8s.io/utils/ptr"
)

type ToolsetsSuite struct {
suite.Suite
}

func (s *ToolsetsSuite) TestServerTool() {
s.Run("IsClusterAware", func() {
s.Run("defaults to true", func() {
tool := &ServerTool{}
s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true by default")
})
s.Run("can be set to false", func() {
tool := &ServerTool{ClusterAware: ptr.To(false)}
s.False(tool.IsClusterAware(), "Expected IsClusterAware to be false when set to false")
})
s.Run("can be set to true", func() {
tool := &ServerTool{ClusterAware: ptr.To(true)}
s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true when set to true")
})
})
s.Run("IsTargetListProvider", func() {
s.Run("defaults to false", func() {
tool := &ServerTool{}
s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false by default")
})
s.Run("can be set to false", func() {
tool := &ServerTool{TargetListProvider: ptr.To(false)}
s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false when set to false")
})
s.Run("can be set to true", func() {
tool := &ServerTool{TargetListProvider: ptr.To(true)}
s.True(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be true when set to true")
})
})
}

func TestToolsets(t *testing.T) {
suite.Run(t, new(ToolsetsSuite))
}
@@ -6,6 +6,11 @@ import (
"github.com/BurntSushi/toml"
)

const (
ClusterProviderKubeConfig = "kubeconfig"
ClusterProviderInCluster = "in-cluster"
)

// StaticConfig is the configuration for the server.
// It allows to configure server specific settings and tools to be enabled or disabled.
type StaticConfig struct {
@@ -49,6 +54,12 @@ type StaticConfig struct {
StsScopes []string `toml:"sts_scopes,omitempty"`
CertificateAuthority string `toml:"certificate_authority,omitempty"`
ServerURL string `toml:"server_url,omitempty"`
// ClusterProviderStrategy is how the server finds clusters.
// If set to "kubeconfig", the clusters will be loaded from those in the kubeconfig.
// If set to "in-cluster", the server will use the in cluster config
ClusterProviderStrategy string `toml:"cluster_provider_strategy,omitempty"`
// ClusterContexts is which context should be used for each cluster
ClusterContexts map[string]string `toml:"cluster_contexts"`
}

func Default() *StaticConfig {
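As a rough illustration of how the new settings are meant to be used together (values invented), a caller building a StaticConfig programmatically could do:

    // Sketch only; Default(), the constants, and the field names come from this file.
    cfg := config.Default()
    cfg.ClusterProviderStrategy = config.ClusterProviderKubeConfig
    cfg.ClusterContexts = map[string]string{"prod": "prod-admin"} // hypothetical cluster -> context mapping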
@@ -1,8 +1,11 @@
package http

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
@@ -20,7 +23,44 @@ import (

type KubernetesApiTokenVerifier interface {
// KubernetesApiVerifyToken TODO: clarify proper implementation
KubernetesApiVerifyToken(ctx context.Context, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
KubernetesApiVerifyToken(ctx context.Context, token, audience, cluster string) (*authenticationapiv1.UserInfo, []string, error)
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
GetTargetParameterName() string
}

// extractTargetFromRequest extracts cluster parameter from MCP request body
func extractTargetFromRequest(r *http.Request, targetName string) (string, error) {
if r.Body == nil {
return "", nil
}

// Read the body
body, err := io.ReadAll(r.Body)
if err != nil {
return "", err
}

// Restore the body for downstream handlers
r.Body = io.NopCloser(bytes.NewBuffer(body))

// Parse the MCP request
var mcpRequest struct {
Params struct {
Arguments map[string]interface{} `json:"arguments"`
} `json:"params"`
}

if err := json.Unmarshal(body, &mcpRequest); err != nil {
// If we can't parse the request, just return empty cluster (will use default)
return "", nil
}

// Extract target parameter
if cluster, ok := mcpRequest.Params.Arguments[targetName].(string); ok {
return cluster, nil
}

return "", nil
}
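For reference, a sketch of the request shape this helper expects; the tool name and argument value are invented, only the params.arguments path matters:

    // Same-package sketch: the middleware reads the target from the MCP arguments.
    body := strings.NewReader(`{"method":"tools/call","params":{"arguments":{"context":"fake-context"}}}`)
    req, _ := http.NewRequest(http.MethodPost, "/mcp", body)
    target, _ := extractTargetFromRequest(req, "context") // target == "fake-context"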
// write401 sends a 401/Unauthorized response with WWW-Authenticate header.
@@ -132,7 +172,12 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi
}
// Kubernetes API Server TokenReview validation
if err == nil && staticConfig.ValidateToken {
err = claims.ValidateWithKubernetesApi(r.Context(), staticConfig.OAuthAudience, verifier)
targetParameterName := verifier.GetTargetParameterName()
cluster, clusterErr := extractTargetFromRequest(r, targetParameterName)
if clusterErr != nil {
klog.V(2).Infof("Failed to extract cluster from request, using default: %v", clusterErr)
}
err = claims.ValidateWithKubernetesApi(r.Context(), staticConfig.OAuthAudience, cluster, verifier)
}
if err != nil {
klog.V(1).Infof("Authentication failed - JWT validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err)
@@ -200,9 +245,9 @@ func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, p
return nil
}

func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience string, verifier KubernetesApiTokenVerifier) error {
func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
if verifier != nil {
_, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience)
_, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience, cluster)
if err != nil {
return fmt.Errorf("kubernetes API token validation error: %v", err)
}
@@ -292,7 +292,7 @@ func TestHealthCheck(t *testing.T) {
})
})
// Health exposed even when require Authorization
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress))
if err != nil {
t.Fatalf("Failed to get health check endpoint with OAuth: %v", err)
@@ -313,7 +313,7 @@ func TestWellKnownReverseProxy(t *testing.T) {
".well-known/openid-configuration",
}
// With No Authorization URL configured
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
for _, path := range cases {
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
t.Cleanup(func() { _ = resp.Body.Close() })
@@ -333,7 +333,12 @@ func TestWellKnownReverseProxy(t *testing.T) {
_, _ = w.Write([]byte(`NOT A JSON PAYLOAD`))
}))
t.Cleanup(invalidPayloadServer.Close)
invalidPayloadConfig := &config.StaticConfig{AuthorizationURL: invalidPayloadServer.URL, RequireOAuth: true, ValidateToken: true}
invalidPayloadConfig := &config.StaticConfig{
AuthorizationURL: invalidPayloadServer.URL,
RequireOAuth: true,
ValidateToken: true,
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
}
testCaseWithContext(t, &httpContext{StaticConfig: invalidPayloadConfig}, func(ctx *httpContext) {
for _, path := range cases {
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
@@ -358,7 +363,12 @@ func TestWellKnownReverseProxy(t *testing.T) {
_, _ = w.Write([]byte(`{"issuer": "https://example.com","scopes_supported":["mcp-server"]}`))
}))
t.Cleanup(testServer.Close)
staticConfig := &config.StaticConfig{AuthorizationURL: testServer.URL, RequireOAuth: true, ValidateToken: true}
staticConfig := &config.StaticConfig{
AuthorizationURL: testServer.URL,
RequireOAuth: true,
ValidateToken: true,
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
}
testCaseWithContext(t, &httpContext{StaticConfig: staticConfig}, func(ctx *httpContext) {
for _, path := range cases {
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
@@ -401,7 +411,12 @@ func TestWellKnownOverrides(t *testing.T) {
}`))
}))
t.Cleanup(testServer.Close)
baseConfig := config.StaticConfig{AuthorizationURL: testServer.URL, RequireOAuth: true, ValidateToken: true}
baseConfig := config.StaticConfig{
AuthorizationURL: testServer.URL,
RequireOAuth: true,
ValidateToken: true,
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
}
// With Dynamic Client Registration disabled
disableDynamicRegistrationConfig := baseConfig
disableDynamicRegistrationConfig.DisableDynamicClientRegistration = true
@@ -488,7 +503,7 @@ func TestMiddlewareLogging(t *testing.T) {

func TestAuthorizationUnauthorized(t *testing.T) {
// Missing Authorization header
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
if err != nil {
t.Fatalf("Failed to get protected endpoint: %v", err)
@@ -513,7 +528,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
})
})
// Authorization header without Bearer prefix
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -538,7 +553,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
})
})
// Invalid Authorization header
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -569,7 +584,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
})
})
// Expired Authorization Bearer token
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -600,7 +615,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
})
})
// Invalid audience claim Bearer token
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -633,7 +648,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
// Failed OIDC validation
oidcTestServer := NewOidcTestServer(t)
t.Cleanup(oidcTestServer.Close)
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -670,7 +685,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
"aud": "mcp-server"
}`
validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
if err != nil {
t.Fatalf("Failed to create request: %v", err)
@@ -703,7 +718,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
}

func TestAuthorizationRequireOAuthFalse(t *testing.T) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
if err != nil {
t.Fatalf("Failed to get protected endpoint: %v", err)
@@ -728,7 +743,7 @@ func TestAuthorizationRawToken(t *testing.T) {
{"mcp-server", true}, // Audience set, validation enabled
}
for _, c := range cases {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken}}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
tokenReviewed := false
ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
@@ -777,7 +792,7 @@ func TestAuthorizationOidcToken(t *testing.T) {
validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
cases := []bool{false, true}
for _, validateToken := range cases {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
tokenReviewed := false
ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
@@ -833,13 +848,14 @@ func TestAuthorizationOidcTokenExchange(t *testing.T) {
cases := []bool{false, true}
for _, validateToken := range cases {
staticConfig := &config.StaticConfig{
RequireOAuth: true,
OAuthAudience: "mcp-server",
ValidateToken: validateToken,
StsClientId: "test-sts-client-id",
StsClientSecret: "test-sts-client-secret",
StsAudience: "backend-audience",
StsScopes: []string{"backend-scope"},
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
}
testCaseWithContext(t, &httpContext{StaticConfig: staticConfig, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
tokenReviewed := false
@@ -8,6 +8,8 @@ import (
"k8s.io/client-go/tools/clientcmd/api/latest"
)

const inClusterKubeConfigDefaultContext = "in-cluster"

// InClusterConfig is a variable that holds the function to get the in-cluster config
// Exposed for testing
var InClusterConfig = func() (*rest.Config, error) {
@@ -81,6 +83,45 @@ func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return m.clientCmdConfig
}

// ConfigurationContextsDefault returns the current context name
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
if k.manager.IsInCluster() {
return inClusterKubeConfigDefaultContext, nil
}
cfg, err := k.manager.clientCmdConfig.RawConfig()
if err != nil {
return "", err
}
return cfg.CurrentContext, nil
}

// ConfigurationContextsList returns the list of available context names
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
if k.manager.IsInCluster() {
return map[string]string{inClusterKubeConfigDefaultContext: ""}, nil
}
cfg, err := k.manager.clientCmdConfig.RawConfig()
if err != nil {
return nil, err
}
contexts := make(map[string]string, len(cfg.Contexts))
for name, context := range cfg.Contexts {
cluster, ok := cfg.Clusters[context.Cluster]
if !ok || cluster.Server == "" {
contexts[name] = "unknown"
} else {
contexts[name] = cluster.Server
}
}
return contexts, nil
}

// ConfigurationView returns the current kubeconfig content as a kubeconfig YAML
// If minify is true, keeps only the current-context and the relevant pieces of the configuration for that context.
// If minify is false, all contexts, clusters, auth-infos, and users are returned in the configuration.
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
var cfg clientcmdapi.Config
var err error
@@ -93,11 +134,11 @@ func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{
Token: k.manager.cfg.BearerToken,
}
cfg.Contexts["context"] = &clientcmdapi.Context{
cfg.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
Cluster: "cluster",
AuthInfo: "user",
}
cfg.CurrentContext = "context"
cfg.CurrentContext = inClusterKubeConfigDefaultContext
} else if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
return nil, err
}
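Against the fake kubeconfig used by the tests in this change, a sketch of the map ConfigurationContextsList is expected to return (shape only, not runnable on its own):

    // k is a *Kubernetes backed by the fake kubeconfig shown earlier in this diff.
    contexts, _ := k.ConfigurationContextsList()
    // contexts would look like map[string]string{
    //   "fake-context":       "https://127.0.0.1:6443",
    //   "additional-context": "unknown", // its cluster has no Server URL set
    // }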
pkg/kubernetes/provider.go (new file, 231 lines)
@@ -0,0 +1,231 @@
package kubernetes

import (
"context"
"fmt"

"github.com/containers/kubernetes-mcp-server/pkg/config"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)

const (
KubeConfigTargetParameterName = "context"
)

type ManagerProvider interface {
GetTargets(ctx context.Context) ([]string, error)
GetManagerFor(ctx context.Context, target string) (*Manager, error)
GetDefaultTarget() string
GetTargetParameterName() string
WatchTargets(func() error)
Close()
}

type kubeConfigClusterProvider struct {
defaultContext string
managers map[string]*Manager
}

var _ ManagerProvider = &kubeConfigClusterProvider{}

type inClusterProvider struct {
manager *Manager
}

var _ ManagerProvider = &inClusterProvider{}

func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
m, err := NewManager(cfg)
if err != nil {
return nil, err
}

switch resolveStrategy(cfg, m) {
case config.ClusterProviderKubeConfig:
return newKubeConfigClusterProvider(m)
case config.ClusterProviderInCluster:
return newInClusterProvider(m)
default:
return nil, fmt.Errorf("invalid ClusterProviderStrategy '%s', must be 'kubeconfig' or 'in-cluster'", cfg.ClusterProviderStrategy)
}
}

func newKubeConfigClusterProvider(m *Manager) (*kubeConfigClusterProvider, error) {
// Handle in-cluster mode
if m.IsInCluster() {
return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
}

rawConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
return nil, err
}

allClusterManagers := map[string]*Manager{
rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
}

for name := range rawConfig.Contexts {
if name == rawConfig.CurrentContext {
continue // already initialized this, don't want to set it to nil
}

allClusterManagers[name] = nil
}

return &kubeConfigClusterProvider{
defaultContext: rawConfig.CurrentContext,
managers: allClusterManagers,
}, nil
}

func newInClusterProvider(m *Manager) (*inClusterProvider, error) {
return &inClusterProvider{
manager: m,
}, nil
}

func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
contextNames := make([]string, 0, len(k.managers))
for cluster := range k.managers {
contextNames = append(contextNames, cluster)
}

return contextNames, nil
}

func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
return KubeConfigTargetParameterName
}

func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
m, ok := k.managers[context]
if ok && m != nil {
return m, nil
}

baseManager := k.managers[k.defaultContext]

if baseManager.IsInCluster() {
// In cluster mode, so context switching is not applicable
return baseManager, nil
}

m, err := baseManager.newForContext(context)
if err != nil {
return nil, err
}

k.managers[context] = m

return m, nil
}

func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
return k.defaultContext
}

func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
m := k.managers[k.defaultContext]

m.WatchKubeConfig(onKubeConfigChanged)
}

func (k *kubeConfigClusterProvider) Close() {
m := k.managers[k.defaultContext]

m.Close()
}

func (i *inClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
return []string{""}, nil
}

func (i *inClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
if target != "" {
return nil, fmt.Errorf("unable to get manager for other context/cluster with in-cluster strategy")
}

return i.manager, nil
}

func (i *inClusterProvider) GetDefaultTarget() string {
return ""
}

func (i *inClusterProvider) GetTargetParameterName() string {
return ""
}

func (i *inClusterProvider) WatchTargets(watch func() error) {
i.manager.WatchKubeConfig(watch)
}

func (i *inClusterProvider) Close() {
i.manager.Close()
}

func (m *Manager) newForContext(context string) (*Manager, error) {
pathOptions := clientcmd.NewDefaultPathOptions()
if m.staticConfig.KubeConfig != "" {
pathOptions.LoadingRules.ExplicitPath = m.staticConfig.KubeConfig
}

clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
pathOptions.LoadingRules,
&clientcmd.ConfigOverrides{
CurrentContext: context,
},
)

cfg, err := clientCmdConfig.ClientConfig()
if err != nil {
return nil, err
}

if cfg.UserAgent == "" {
cfg.UserAgent = rest.DefaultKubernetesUserAgent()
}

manager := &Manager{
cfg: cfg,
clientCmdConfig: clientCmdConfig,
staticConfig: m.staticConfig,
}

// Initialize clients for new manager
manager.accessControlClientSet, err = NewAccessControlClientset(manager.cfg, manager.staticConfig)
if err != nil {
return nil, err
}

manager.discoveryClient = memory.NewMemCacheClient(manager.accessControlClientSet.DiscoveryClient())

manager.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(manager.discoveryClient),
manager.staticConfig,
)

manager.dynamicClient, err = dynamic.NewForConfig(manager.cfg)
if err != nil {
return nil, err
}

return manager, nil
}

func resolveStrategy(cfg *config.StaticConfig, m *Manager) string {
if cfg.ClusterProviderStrategy != "" {
return cfg.ClusterProviderStrategy
}

if m.IsInCluster() {
return config.ClusterProviderInCluster
}

return config.ClusterProviderKubeConfig
}
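Putting the pieces together, a rough same-package usage sketch of the provider (this mirrors what reloadKubernetesClusterProvider does in pkg/mcp further down):

    // Illustrative only; error handling kept minimal.
    func exampleProviderUsage(cfg *config.StaticConfig) error {
        ctx := context.Background()
        provider, err := NewManagerProvider(cfg)
        if err != nil {
            return err
        }
        defer provider.Close()
        targets, err := provider.GetTargets(ctx) // e.g. one entry per kubeconfig context
        if err != nil {
            return err
        }
        _ = targets
        _, err = provider.GetManagerFor(ctx, provider.GetDefaultTarget())
        return err
    }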
@@ -219,7 +219,7 @@ func (c *mcpContext) withKubeConfig(rc *rest.Config) *clientcmdapi.Config {
_ = clientcmd.WriteToFile(*fakeConfig, kubeConfig)
_ = os.Setenv("KUBECONFIG", kubeConfig)
if c.mcpServer != nil {
if err := c.mcpServer.reloadKubernetesClient(); err != nil {
if err := c.mcpServer.reloadKubernetesClusterProvider(); err != nil {
panic(err)
}
}
@@ -1,11 +1,13 @@
package mcp

import (
"fmt"
"testing"

"github.com/mark3labs/mcp-go/mcp"
"github.com/stretchr/testify/suite"
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
v1 "k8s.io/client-go/tools/clientcmd/api/v1"
"sigs.k8s.io/yaml"

@@ -22,7 +24,37 @@ func (s *ConfigurationSuite) SetupTest() {
// Use mock server for predictable kubeconfig content
mockServer := test.NewMockServer()
s.T().Cleanup(mockServer.Close)
s.Cfg.KubeConfig = mockServer.KubeconfigFile(s.T())
kubeconfig := mockServer.Kubeconfig()
for i := 0; i < 10; i++ {
// Add multiple fake contexts to force configuration_contexts_list tool to appear
// and test minification in configuration_view tool
name := fmt.Sprintf("cluster-%d", i)
kubeconfig.Contexts[name] = clientcmdapi.NewContext()
kubeconfig.Clusters[name+"-cluster"] = clientcmdapi.NewCluster()
kubeconfig.AuthInfos[name+"-auth"] = clientcmdapi.NewAuthInfo()
kubeconfig.Contexts[name].Cluster = name + "-cluster"
kubeconfig.Contexts[name].AuthInfo = name + "-auth"
}
s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
}

func (s *ConfigurationSuite) TestContextsList() {
s.InitMcpClient()
s.Run("configuration_contexts_list", func() {
toolResult, err := s.CallTool("configuration_contexts_list", map[string]interface{}{})
s.Run("returns contexts", func() {
s.Nilf(err, "call tool failed %v", err)
})
s.Require().NotNil(toolResult, "Expected tool result from call")
s.Lenf(toolResult.Content, 1, "invalid tool result content length %v", len(toolResult.Content))
s.Run("contains context count", func() {
s.Regexpf(`^Available Kubernetes contexts \(11 total`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool count result content %v", toolResult.Content[0].(mcp.TextContent).Text)
})
s.Run("contains default context name", func() {
s.Regexpf(`^Available Kubernetes contexts \(\d+ total, default: fake-context\)`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
s.Regexpf(`(?m)^\*fake-context -> http:\/\/127\.0\.0\.1:\d*$`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
})
})
}

func (s *ConfigurationSuite) TestConfigurationView() {
@@ -70,19 +102,23 @@ func (s *ConfigurationSuite) TestConfigurationView() {
s.Nilf(err, "invalid tool result content %v", err)
})
s.Run("returns additional context info", func() {
s.Lenf(decoded.Contexts, 2, "invalid context count, expected 2, got %v", len(decoded.Contexts))
s.Equalf("additional-context", decoded.Contexts[0].Name, "additional-context not found: %v", decoded.Contexts)
s.Equalf("additional-cluster", decoded.Contexts[0].Context.Cluster, "additional-cluster not found: %v", decoded.Contexts)
s.Equalf("additional-auth", decoded.Contexts[0].Context.AuthInfo, "additional-auth not found: %v", decoded.Contexts)
s.Equalf("fake-context", decoded.Contexts[1].Name, "fake-context not found: %v", decoded.Contexts)
s.Lenf(decoded.Contexts, 11, "invalid context count, expected 12, got %v", len(decoded.Contexts))
s.Equalf("cluster-0", decoded.Contexts[0].Name, "cluster-0 not found: %v", decoded.Contexts)
s.Equalf("cluster-0-cluster", decoded.Contexts[0].Context.Cluster, "cluster-0-cluster not found: %v", decoded.Contexts)
s.Equalf("cluster-0-auth", decoded.Contexts[0].Context.AuthInfo, "cluster-0-auth not found: %v", decoded.Contexts)
s.Equalf("fake", decoded.Contexts[10].Context.Cluster, "fake not found: %v", decoded.Contexts)
s.Equalf("fake", decoded.Contexts[10].Context.AuthInfo, "fake not found: %v", decoded.Contexts)
s.Equalf("fake-context", decoded.Contexts[10].Name, "fake-context not found: %v", decoded.Contexts)
})
s.Run("returns cluster info", func() {
s.Lenf(decoded.Clusters, 2, "invalid cluster count, expected 2, got %v", len(decoded.Clusters))
s.Equalf("additional-cluster", decoded.Clusters[0].Name, "additional-cluster not found: %v", decoded.Clusters)
s.Lenf(decoded.Clusters, 11, "invalid cluster count, expected 2, got %v", len(decoded.Clusters))
s.Equalf("cluster-0-cluster", decoded.Clusters[0].Name, "cluster-0-cluster not found: %v", decoded.Clusters)
s.Equalf("fake", decoded.Clusters[10].Name, "fake not found: %v", decoded.Clusters)
})
s.Run("configuration_view with minified=false returns auth info", func() {
s.Lenf(decoded.AuthInfos, 2, "invalid auth info count, expected 2, got %v", len(decoded.AuthInfos))
s.Equalf("additional-auth", decoded.AuthInfos[0].Name, "additional-auth not found: %v", decoded.AuthInfos)
s.Lenf(decoded.AuthInfos, 11, "invalid auth info count, expected 2, got %v", len(decoded.AuthInfos))
s.Equalf("cluster-0-auth", decoded.AuthInfos[0].Name, "cluster-0-auth not found: %v", decoded.AuthInfos)
s.Equalf("fake", decoded.AuthInfos[10].Name, "fake not found: %v", decoded.AuthInfos)
})
})
}
@@ -109,11 +145,11 @@ func (s *ConfigurationSuite) TestConfigurationViewInCluster() {
s.Nilf(err, "invalid tool result content %v", err)
})
s.Run("returns current-context", func() {
s.Equalf("context", decoded.CurrentContext, "context not found: %v", decoded.CurrentContext)
s.Equalf("in-cluster", decoded.CurrentContext, "context not found: %v", decoded.CurrentContext)
})
s.Run("returns context info", func() {
s.Lenf(decoded.Contexts, 1, "invalid context count, expected 1, got %v", len(decoded.Contexts))
s.Equalf("context", decoded.Contexts[0].Name, "context not found: %v", decoded.Contexts)
s.Equalf("in-cluster", decoded.Contexts[0].Name, "context not found: %v", decoded.Contexts)
s.Equalf("cluster", decoded.Contexts[0].Context.Cluster, "cluster not found: %v", decoded.Contexts)
s.Equalf("user", decoded.Contexts[0].Context.AuthInfo, "user not found: %v", decoded.Contexts)
})
@@ -39,10 +39,19 @@ func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.S
m3labTool.RawInputSchema = schema
}
m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
k, err := s.k.Derived(ctx)
// get the correct internalk8s.Manager for the target specified in the request
cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget())
m, err := s.p.GetManagerFor(ctx, cluster)
if err != nil {
return nil, err
}

// derive the manager based on auth on top of the settings for the cluster
k, err := m.Derived(ctx)
if err != nil {
return nil, err
}

result, err := tool.Handler(api.ToolHandlerParams{
Context: ctx,
Kubernetes: k,
@@ -67,7 +67,7 @@ type Server struct {
configuration *Configuration
server *server.MCPServer
enabledTools []string
k *internalk8s.Manager
p internalk8s.ManagerProvider
}

func NewServer(configuration Configuration) (*Server, error) {
@@ -91,26 +91,57 @@ func NewServer(configuration Configuration) (*Server, error) {
serverOptions...,
),
}
if err := s.reloadKubernetesClient(); err != nil {
if err := s.reloadKubernetesClusterProvider(); err != nil {
return nil, err
}
s.k.WatchKubeConfig(s.reloadKubernetesClient)
s.p.WatchTargets(s.reloadKubernetesClusterProvider)

return s, nil
}

func (s *Server) reloadKubernetesClient() error {
k, err := internalk8s.NewManager(s.configuration.StaticConfig)
func (s *Server) reloadKubernetesClusterProvider() error {
ctx := context.Background()
p, err := internalk8s.NewManagerProvider(s.configuration.StaticConfig)
if err != nil {
return err
}
s.k = k

// close the old provider
if s.p != nil {
s.p.Close()
}

s.p = p

k, err := s.p.GetManagerFor(ctx, s.p.GetDefaultTarget())
if err != nil {
return err
}

targets, err := p.GetTargets(ctx)
if err != nil {
return err
}

filter := CompositeFilter(
s.configuration.isToolApplicable,
ShouldIncludeTargetListTool(p.GetTargetParameterName(), targets),
)

mutator := WithTargetParameter(
p.GetDefaultTarget(),
p.GetTargetParameterName(),
targets,
)

applicableTools := make([]api.ServerTool, 0)
for _, toolset := range s.configuration.Toolsets() {
for _, tool := range toolset.GetTools(s.k) {
if !s.configuration.isToolApplicable(tool) {
for _, tool := range toolset.GetTools(k) {
tool := mutator(tool)
if !filter(tool) {
continue
}

applicableTools = append(applicableTools, tool)
s.enabledTools = append(s.enabledTools, tool.Tool.Name)
}
@@ -119,7 +150,11 @@ func (s *Server) reloadKubernetesClient() error {
if err != nil {
return fmt.Errorf("failed to convert tools: %v", err)
}

s.server.SetTools(m3labsServerTools...)

// start new watch
s.p.WatchTargets(s.reloadKubernetesClusterProvider)
return nil
}

@@ -146,20 +181,32 @@ func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer
}

// KubernetesApiVerifyToken verifies the given token with the audience by
// sending an TokenReview request to API Server.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string) (*authenticationapiv1.UserInfo, []string, error) {
if s.k == nil {
return nil, nil, fmt.Errorf("kubernetes manager is not initialized")
// sending an TokenReview request to API Server for the specified cluster.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string, cluster string) (*authenticationapiv1.UserInfo, []string, error) {
if s.p == nil {
return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
}
return s.k.VerifyToken(ctx, token, audience)

// Use provided cluster or default
if cluster == "" {
cluster = s.p.GetDefaultTarget()
}

// Get the cluster manager for the specified cluster
m, err := s.p.GetManagerFor(ctx, cluster)
if err != nil {
return nil, nil, err
}

return m.VerifyToken(ctx, token, audience)
}

// GetKubernetesAPIServerHost returns the Kubernetes API server host from the configuration.
func (s *Server) GetKubernetesAPIServerHost() string {
if s.k == nil {
return ""
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
func (s *Server) GetTargetParameterName() string {
if s.p == nil {
return "" // fallback for uninitialized provider
}
return s.k.GetAPIServerHost()
return s.p.GetTargetParameterName()
}

func (s *Server) GetEnabledTools() []string {
@@ -167,8 +214,8 @@ func (s *Server) GetEnabledTools() []string {
}

func (s *Server) Close() {
if s.k != nil {
s.k.Close()
if s.p != nil {
s.p.Close()
}
}
pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json (new vendored file, 680 lines)
@@ -0,0 +1,680 @@
[
{
"annotations": {
"title": "Configuration: Contexts List",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": true,
"openWorldHint": false
},
"description": "List all available context names and associated server urls from the kubeconfig file",
"inputSchema": {
"type": "object"
},
"name": "configuration_contexts_list"
},
{
"annotations": {
"title": "Configuration: View",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
"inputSchema": {
"type": "object",
"properties": {
"minified": {
"description": "Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)",
"type": "boolean"
}
}
},
"name": "configuration_view"
},
{
"annotations": {
"title": "Events: List",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes events in the current cluster from all namespaces",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"namespace": {
"description": "Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces",
"type": "string"
}
}
},
"name": "events_list"
},
{
"annotations": {
"title": "Helm: Install",
"readOnlyHint": false,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "Install a Helm chart in the current or provided namespace",
"inputSchema": {
"type": "object",
"properties": {
"chart": {
"description": "Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress)",
"type": "string"
},
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Helm release (Optional, random name if not provided)",
"type": "string"
},
"namespace": {
"description": "Namespace to install the Helm chart in (Optional, current namespace if not provided)",
"type": "string"
},
"values": {
"description": "Values to pass to the Helm chart (Optional)",
"type": "object"
}
},
"required": [
"chart"
]
},
"name": "helm_install"
},
{
"annotations": {
"title": "Helm: List",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
"inputSchema": {
"type": "object",
"properties": {
"all_namespaces": {
"description": "If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional)",
"type": "boolean"
},
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"namespace": {
"description": "Namespace to list Helm releases from (Optional, all namespaces if not provided)",
"type": "string"
}
}
},
"name": "helm_list"
},
{
"annotations": {
"title": "Helm: Uninstall",
"readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
},
"description": "Uninstall a Helm release in the current or provided namespace",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Helm release to uninstall",
"type": "string"
},
"namespace": {
"description": "Namespace to uninstall the Helm release from (Optional, current namespace if not provided)",
"type": "string"
}
},
"required": [
"name"
]
},
"name": "helm_uninstall"
},
{
"annotations": {
"title": "Namespaces: List",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes namespaces in the current cluster",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
}
}
},
"name": "namespaces_list"
},
{
"annotations": {
"title": "Pods: Delete",
"readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": true,
"openWorldHint": true
},
"description": "Delete a Kubernetes Pod in the current or provided namespace with the provided name",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Pod to delete",
"type": "string"
},
"namespace": {
"description": "Namespace to delete the Pod from",
"type": "string"
}
},
"required": [
"name"
]
},
"name": "pods_delete"
},
{
"annotations": {
"title": "Pods: Exec",
"readOnlyHint": false,
"destructiveHint": true,
"idempotentHint": false,
"openWorldHint": true
},
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
"inputSchema": {
"type": "object",
"properties": {
"command": {
"description": "Command to execute in the Pod container. The first item is the command to be run, and the rest are the arguments to that command. Example: [\"ls\", \"-l\", \"/tmp\"]",
"items": {
"type": "string"
},
"type": "array"
},
"container": {
"description": "Name of the Pod container where the command will be executed (Optional)",
"type": "string"
},
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Pod where the command will be executed",
"type": "string"
},
"namespace": {
"description": "Namespace of the Pod where the command will be executed",
"type": "string"
}
},
"required": [
"name",
"command"
]
},
"name": "pods_exec"
},
{
"annotations": {
"title": "Pods: Get",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Pod",
"type": "string"
},
"namespace": {
"description": "Namespace to get the Pod from",
"type": "string"
}
},
"required": [
"name"
]
},
"name": "pods_get"
},
{
"annotations": {
"title": "Pods: List",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"labelSelector": {
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
"type": "string"
}
}
},
"name": "pods_list"
},
{
"annotations": {
"title": "Pods: List in Namespace",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
"inputSchema": {
"type": "object",
"properties": {
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"labelSelector": {
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
"type": "string"
},
"namespace": {
"description": "Namespace to list pods from",
"type": "string"
}
},
"required": [
"namespace"
]
},
"name": "pods_list_in_namespace"
},
{
"annotations": {
"title": "Pods: Log",
"readOnlyHint": true,
"destructiveHint": false,
"idempotentHint": false,
"openWorldHint": true
},
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
"inputSchema": {
"type": "object",
"properties": {
"container": {
"description": "Name of the Pod container to get the logs from (Optional)",
"type": "string"
},
"context": {
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
"enum": [
"extra-cluster",
"fake-context"
],
"type": "string"
},
"name": {
"description": "Name of the Pod to get the logs from",
"type": "string"
},
"namespace": {
"description": "Namespace to get the Pod logs from",
"type": "string"
},
"previous": {
"description": "Return previous terminated container logs (Optional)",
"type": "boolean"
},
"tail": {
"default": 100,
"description": "Number of lines to retrieve from the end of the logs (Optional, default: 100)",
"minimum": 0,
"type": "integer"
}
},
"required": [
"name"
]
},
"name": "pods_log"
},
{
"annotations": {
"title": "Pods: Run",
"readOnlyHint": false,
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"image": {
|
||||
"description": "Container Image to run in the Pod",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod (Optional, random name if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to run the Pod in",
|
||||
"type": "string"
|
||||
},
|
||||
"port": {
|
||||
"description": "TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided)",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"image"
|
||||
]
|
||||
},
|
||||
"name": "pods_run"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Top",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"all_namespaces": {
|
||||
"default": true,
|
||||
"description": "If true, list the resource consumption for all Pods in all namespaces. If false, list the resource consumption for Pods in the provided namespace or the current namespace",
|
||||
"type": "boolean"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"label_selector": {
|
||||
"description": "Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided)",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false)",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "pods_top"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Create or Update",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"resource": {
|
||||
"description": "A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"resource"
|
||||
]
|
||||
},
|
||||
"name": "resources_create_or_update"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Delete",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the resource",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will delete resource from configured namespace",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "resources_delete"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Get",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the resource",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will get resource from configured namespace",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "resources_get"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"enum": [
|
||||
"extra-cluster",
|
||||
"fake-context"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind"
|
||||
]
|
||||
},
|
||||
"name": "resources_list"
|
||||
}
|
||||
]
|
||||
612
pkg/mcp/testdata/toolsets-full-tools-multicluster.json
vendored
Normal file
@@ -0,0 +1,612 @@
|
||||
[
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Configuration: Contexts List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": false
|
||||
},
|
||||
"description": "List all available context names and associated server urls from the kubeconfig file",
|
||||
"inputSchema": {
|
||||
"type": "object"
|
||||
},
|
||||
"name": "configuration_contexts_list"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Configuration: View",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"minified": {
|
||||
"description": "Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)",
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "configuration_view"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Events: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List all the Kubernetes events in the current cluster from all namespaces",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "events_list"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Helm: Install",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Install a Helm chart in the current or provided namespace",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"chart": {
|
||||
"description": "Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Helm release (Optional, random name if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to install the Helm chart in (Optional, current namespace if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"values": {
|
||||
"description": "Values to pass to the Helm chart (Optional)",
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"chart"
|
||||
]
|
||||
},
|
||||
"name": "helm_install"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Helm: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"all_namespaces": {
|
||||
"description": "If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to list Helm releases from (Optional, all namespaces if not provided)",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "helm_list"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Helm: Uninstall",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Uninstall a Helm release in the current or provided namespace",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Helm release to uninstall",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to uninstall the Helm release from (Optional, current namespace if not provided)",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "helm_uninstall"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Namespaces: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List all the Kubernetes namespaces in the current cluster",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "namespaces_list"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Delete",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Delete a Kubernetes Pod in the current or provided namespace with the provided name",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod to delete",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to delete the Pod from",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "pods_delete"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Exec",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"command": {
|
||||
"description": "Command to execute in the Pod container. The first item is the command to be run, and the rest are the arguments to that command. Example: [\"ls\", \"-l\", \"/tmp\"]",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"container": {
|
||||
"description": "Name of the Pod container where the command will be executed (Optional)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod where the command will be executed",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace of the Pod where the command will be executed",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name",
|
||||
"command"
|
||||
]
|
||||
},
|
||||
"name": "pods_exec"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Get",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to get the Pod from",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "pods_get"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List all the Kubernetes pods in the current cluster from all namespaces",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "pods_list"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: List in Namespace",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List all the Kubernetes pods in the specified namespace in the current cluster",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to list pods from",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"namespace"
|
||||
]
|
||||
},
|
||||
"name": "pods_list_in_namespace"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Log",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"container": {
|
||||
"description": "Name of the Pod container to get the logs from (Optional)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod to get the logs from",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to get the Pod logs from",
|
||||
"type": "string"
|
||||
},
|
||||
"previous": {
|
||||
"description": "Return previous terminated container logs (Optional)",
|
||||
"type": "boolean"
|
||||
},
|
||||
"tail": {
|
||||
"default": 100,
|
||||
"description": "Number of lines to retrieve from the end of the logs (Optional, default: 100)",
|
||||
"minimum": 0,
|
||||
"type": "integer"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "pods_log"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Run",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"image": {
|
||||
"description": "Container Image to run in the Pod",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod (Optional, random name if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to run the Pod in",
|
||||
"type": "string"
|
||||
},
|
||||
"port": {
|
||||
"description": "TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided)",
|
||||
"type": "number"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"image"
|
||||
]
|
||||
},
|
||||
"name": "pods_run"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Pods: Top",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"all_namespaces": {
|
||||
"default": true,
|
||||
"description": "If true, list the resource consumption for all Pods in all namespaces. If false, list the resource consumption for Pods in the provided namespace or the current namespace",
|
||||
"type": "boolean"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"label_selector": {
|
||||
"description": "Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided)",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided)",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false)",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"name": "pods_top"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Create or Update",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"resource": {
|
||||
"description": "A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"resource"
|
||||
]
|
||||
},
|
||||
"name": "resources_create_or_update"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Delete",
|
||||
"readOnlyHint": false,
|
||||
"destructiveHint": true,
|
||||
"idempotentHint": true,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the resource",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will delete resource from configured namespace",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "resources_delete"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: Get",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"description": "Name of the resource",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will get resource from configured namespace",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "resources_get"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind"
|
||||
]
|
||||
},
|
||||
"name": "resources_list"
|
||||
}
|
||||
]
|
||||
41
pkg/mcp/tool_filter.go
Normal file
@@ -0,0 +1,41 @@
package mcp

import (
    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// ToolFilter is a function that takes a ServerTool and returns a boolean indicating whether to include the tool
type ToolFilter func(tool api.ServerTool) bool

func CompositeFilter(filters ...ToolFilter) ToolFilter {
    return func(tool api.ServerTool) bool {
        for _, f := range filters {
            if !f(tool) {
                return false
            }
        }

        return true
    }
}

func ShouldIncludeTargetListTool(targetName string, targets []string) ToolFilter {
    return func(tool api.ServerTool) bool {
        if !tool.IsTargetListProvider() {
            return true
        }
        if len(targets) <= 1 {
            // there is no need to provide a tool to list the single available target
            return false
        }

        // TODO: this check should be removed or make more generic when we have other
        if tool.Tool.Name == "configuration_contexts_list" && targetName != kubernetes.KubeConfigTargetParameterName {
            // let's not include configuration_contexts_list if we aren't targeting contexts in our ManagerProvider
            return false
        }

        return true
    }
}
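A minimal sketch of how these filters might be wired together when assembling the served tool list; the availableContexts values, allTools slice, and register helper below are illustrative assumptions, not part of this change:

// Hypothetical wiring: keep only the tools that survive every filter.
availableContexts := []string{"fake-context", "extra-cluster"} // assumed values
filter := CompositeFilter(
    ShouldIncludeTargetListTool(kubernetes.KubeConfigTargetParameterName, availableContexts),
    func(tool api.ServerTool) bool { return tool.Tool.Name != "" }, // any extra predicate composes the same way
)
for _, tool := range allTools { // allTools is an assumed []api.ServerTool
    if filter(tool) {
        register(tool) // assumed registration helper
    }
}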
84
pkg/mcp/tool_filter_test.go
Normal file
@@ -0,0 +1,84 @@
package mcp

import (
    "testing"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/stretchr/testify/suite"
    "k8s.io/utils/ptr"
)

type ToolFilterSuite struct {
    suite.Suite
}

func (s *ToolFilterSuite) TestToolFilterType() {
    s.Run("ToolFilter type can be used as function", func() {
        var mutator ToolFilter = func(tool api.ServerTool) bool {
            return tool.Tool.Name == "included"
        }
        s.Run("returns true for included tool", func() {
            tool := api.ServerTool{Tool: api.Tool{Name: "included"}}
            s.True(mutator(tool))
        })
        s.Run("returns false for excluded tool", func() {
            tool := api.ServerTool{Tool: api.Tool{Name: "excluded"}}
            s.False(mutator(tool))
        })
    })
}

func (s *ToolFilterSuite) TestCompositeFilter() {
    s.Run("returns true if all filters return true", func() {
        filter := CompositeFilter(
            func(tool api.ServerTool) bool { return true },
            func(tool api.ServerTool) bool { return true },
        )
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}}
        s.True(filter(tool))
    })
    s.Run("returns false if any filter returns false", func() {
        filter := CompositeFilter(
            func(tool api.ServerTool) bool { return true },
            func(tool api.ServerTool) bool { return false },
        )
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}}
        s.False(filter(tool))
    })
}

func (s *ToolFilterSuite) TestShouldIncludeTargetListTool() {
    s.Run("non-target-list-provider tools: returns true ", func() {
        filter := ShouldIncludeTargetListTool("any", []string{"a", "b", "c", "d", "e", "f"})
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}, TargetListProvider: ptr.To(false)}
        s.True(filter(tool))
    })
    s.Run("target-list-provider tools", func() {
        s.Run("with targets == 1: returns false", func() {
            filter := ShouldIncludeTargetListTool("any", []string{"1"})
            tool := api.ServerTool{Tool: api.Tool{Name: "test"}, TargetListProvider: ptr.To(true)}
            s.False(filter(tool))
        })
        s.Run("with targets == 1", func() {
            s.Run("and tool is configuration_contexts_list and targetName is not context: returns false", func() {
                filter := ShouldIncludeTargetListTool("not_context", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "configuration_contexts_list"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
            s.Run("and tool is configuration_contexts_list and targetName is context: returns false", func() {
                filter := ShouldIncludeTargetListTool("context", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "configuration_contexts_list"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
            s.Run("and tool is not configuration_contexts_list: returns false", func() {
                filter := ShouldIncludeTargetListTool("any", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "other_tool"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
        })
    })
}

func TestToolFilter(t *testing.T) {
    suite.Run(t, new(ToolFilterSuite))
}
64
pkg/mcp/tool_mutator.go
Normal file
@@ -0,0 +1,64 @@
package mcp

import (
    "fmt"
    "sort"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/google/jsonschema-go/jsonschema"
)

type ToolMutator func(tool api.ServerTool) api.ServerTool

const maxTargetsInEnum = 5 // TODO: test and validate that this is a reasonable cutoff

// WithTargetParameter adds a target selection parameter to the tool's input schema if the tool is cluster-aware
func WithTargetParameter(defaultCluster, targetParameterName string, targets []string) ToolMutator {
    return func(tool api.ServerTool) api.ServerTool {
        if !tool.IsClusterAware() {
            return tool
        }

        if tool.Tool.InputSchema == nil {
            tool.Tool.InputSchema = &jsonschema.Schema{Type: "object"}
        }

        if tool.Tool.InputSchema.Properties == nil {
            tool.Tool.InputSchema.Properties = make(map[string]*jsonschema.Schema)
        }

        if len(targets) > 1 {
            tool.Tool.InputSchema.Properties[targetParameterName] = createTargetProperty(
                defaultCluster,
                targetParameterName,
                targets,
            )
        }

        return tool
    }
}

func createTargetProperty(defaultCluster, targetName string, targets []string) *jsonschema.Schema {
    baseSchema := &jsonschema.Schema{
        Type: "string",
        Description: fmt.Sprintf(
            "Optional parameter selecting which %s to run the tool in. Defaults to %s if not set",
            targetName,
            defaultCluster,
        ),
    }

    if len(targets) <= maxTargetsInEnum {
        // Sort clusters to ensure consistent enum ordering
        sort.Strings(targets)

        enumValues := make([]any, 0, len(targets))
        for _, c := range targets {
            enumValues = append(enumValues, c)
        }
        baseSchema.Enum = enumValues
    }

    return baseSchema
}
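A minimal sketch of how this mutator might be applied, reusing the default/extra context names from the test fixtures; the tools slice is an illustrative assumption:

// Hypothetical usage: add the optional "context" parameter to every cluster-aware tool.
mutate := WithTargetParameter("fake-context", "context", []string{"extra-cluster", "fake-context"})
for i, tool := range tools { // tools is an assumed []api.ServerTool
    tools[i] = mutate(tool)
}
// With two targets (<= maxTargetsInEnum) the injected property also carries an enum of the
// context names, as captured in toolsets-full-tools-multicluster-enum.json; with more than
// five targets only the free-form string parameter is added.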
347
pkg/mcp/tool_mutator_test.go
Normal file
@@ -0,0 +1,347 @@
|
||||
package mcp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/api"
|
||||
"github.com/google/jsonschema-go/jsonschema"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// createTestTool creates a basic ServerTool for testing
|
||||
func createTestTool(name string) api.ServerTool {
|
||||
return api.ServerTool{
|
||||
Tool: api.Tool{
|
||||
Name: name,
|
||||
Description: "A test tool",
|
||||
InputSchema: &jsonschema.Schema{
|
||||
Type: "object",
|
||||
Properties: make(map[string]*jsonschema.Schema),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// createTestToolWithNilSchema creates a ServerTool with nil InputSchema for testing
|
||||
func createTestToolWithNilSchema(name string) api.ServerTool {
|
||||
return api.ServerTool{
|
||||
Tool: api.Tool{
|
||||
Name: name,
|
||||
Description: "A test tool",
|
||||
InputSchema: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// createTestToolWithNilProperties creates a ServerTool with nil Properties for testing
|
||||
func createTestToolWithNilProperties(name string) api.ServerTool {
|
||||
return api.ServerTool{
|
||||
Tool: api.Tool{
|
||||
Name: name,
|
||||
Description: "A test tool",
|
||||
InputSchema: &jsonschema.Schema{
|
||||
Type: "object",
|
||||
Properties: nil,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// createTestToolWithExistingProperties creates a ServerTool with existing properties for testing
|
||||
func createTestToolWithExistingProperties(name string) api.ServerTool {
|
||||
return api.ServerTool{
|
||||
Tool: api.Tool{
|
||||
Name: name,
|
||||
Description: "A test tool",
|
||||
InputSchema: &jsonschema.Schema{
|
||||
Type: "object",
|
||||
Properties: map[string]*jsonschema.Schema{
|
||||
"existing-prop": {Type: "string"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithClusterParameter(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
defaultCluster string
|
||||
targetParameterName string
|
||||
clusters []string
|
||||
toolName string
|
||||
toolFactory func(string) api.ServerTool
|
||||
expectCluster bool
|
||||
expectEnum bool
|
||||
enumCount int
|
||||
}{
|
||||
{
|
||||
name: "adds cluster parameter when multiple clusters provided",
|
||||
defaultCluster: "default-cluster",
|
||||
clusters: []string{"cluster1", "cluster2", "cluster3"},
|
||||
toolName: "test-tool",
|
||||
toolFactory: createTestTool,
|
||||
expectCluster: true,
|
||||
expectEnum: true,
|
||||
enumCount: 3,
|
||||
},
|
||||
{
|
||||
name: "does not add cluster parameter when single cluster provided",
|
||||
defaultCluster: "default-cluster",
|
||||
clusters: []string{"single-cluster"},
|
||||
toolName: "test-tool",
|
||||
toolFactory: createTestTool,
|
||||
expectCluster: false,
|
||||
expectEnum: false,
|
||||
enumCount: 0,
|
||||
},
|
||||
{
|
||||
name: "creates InputSchema when nil",
|
||||
defaultCluster: "default-cluster",
|
||||
clusters: []string{"cluster1", "cluster2"},
|
||||
toolName: "test-tool",
|
||||
toolFactory: createTestToolWithNilSchema,
|
||||
expectCluster: true,
|
||||
expectEnum: true,
|
||||
enumCount: 2,
|
||||
},
|
||||
{
|
||||
name: "creates Properties map when nil",
|
||||
defaultCluster: "default-cluster",
|
||||
clusters: []string{"cluster1", "cluster2"},
|
||||
toolName: "test-tool",
|
||||
toolFactory: createTestToolWithNilProperties,
|
||||
expectCluster: true,
|
||||
expectEnum: true,
|
||||
enumCount: 2,
|
||||
},
|
||||
{
|
||||
name: "preserves existing properties",
|
||||
defaultCluster: "default-cluster",
|
||||
clusters: []string{"cluster1", "cluster2"},
|
||||
toolName: "test-tool",
|
||||
toolFactory: createTestToolWithExistingProperties,
|
||||
expectCluster: true,
|
||||
expectEnum: true,
|
||||
enumCount: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.targetParameterName == "" {
|
||||
tt.targetParameterName = "cluster"
|
||||
}
|
||||
mutator := WithTargetParameter(tt.defaultCluster, tt.targetParameterName, tt.clusters)
|
||||
tool := tt.toolFactory(tt.toolName)
|
||||
originalTool := tool // Keep reference to check if tool was unchanged
|
||||
|
||||
result := mutator(tool)
|
||||
|
||||
if !tt.expectCluster {
|
||||
if tt.toolName == "skip-this-tool" {
|
||||
// For skipped tools, the entire tool should be unchanged
|
||||
assert.Equal(t, originalTool, result)
|
||||
} else {
|
||||
// For single cluster, schema should exist but no cluster property
|
||||
require.NotNil(t, result.Tool.InputSchema)
|
||||
require.NotNil(t, result.Tool.InputSchema.Properties)
|
||||
_, exists := result.Tool.InputSchema.Properties["cluster"]
|
||||
assert.False(t, exists, "cluster property should not exist")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Common assertions for cases where cluster parameter should be added
|
||||
require.NotNil(t, result.Tool.InputSchema)
|
||||
assert.Equal(t, "object", result.Tool.InputSchema.Type)
|
||||
require.NotNil(t, result.Tool.InputSchema.Properties)
|
||||
|
||||
clusterProperty, exists := result.Tool.InputSchema.Properties["cluster"]
|
||||
assert.True(t, exists, "cluster property should exist")
|
||||
assert.NotNil(t, clusterProperty)
|
||||
assert.Equal(t, "string", clusterProperty.Type)
|
||||
assert.Contains(t, clusterProperty.Description, tt.defaultCluster)
|
||||
|
||||
if tt.expectEnum {
|
||||
assert.NotNil(t, clusterProperty.Enum)
|
||||
assert.Equal(t, tt.enumCount, len(clusterProperty.Enum))
|
||||
for _, cluster := range tt.clusters {
|
||||
assert.Contains(t, clusterProperty.Enum, cluster)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateClusterProperty(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
defaultCluster string
|
||||
targetName string
|
||||
clusters []string
|
||||
expectEnum bool
|
||||
expectedCount int
|
||||
}{
|
||||
{
|
||||
name: "creates property with enum when clusters <= maxClustersInEnum",
|
||||
defaultCluster: "default",
|
||||
targetName: "cluster",
|
||||
clusters: []string{"cluster1", "cluster2", "cluster3"},
|
||||
expectEnum: true,
|
||||
expectedCount: 3,
|
||||
},
|
||||
{
|
||||
name: "creates property without enum when clusters > maxClustersInEnum",
|
||||
defaultCluster: "default",
|
||||
targetName: "cluster",
|
||||
clusters: make([]string, maxTargetsInEnum+5), // 20 clusters
|
||||
expectEnum: false,
|
||||
expectedCount: 0,
|
||||
},
|
||||
{
|
||||
name: "creates property with exact maxClustersInEnum clusters",
|
||||
defaultCluster: "default",
|
||||
targetName: "cluster",
|
||||
clusters: make([]string, maxTargetsInEnum),
|
||||
expectEnum: true,
|
||||
expectedCount: maxTargetsInEnum,
|
||||
},
|
||||
{
|
||||
name: "handles single cluster",
|
||||
defaultCluster: "default",
|
||||
targetName: "cluster",
|
||||
clusters: []string{"single-cluster"},
|
||||
expectEnum: true,
|
||||
expectedCount: 1,
|
||||
},
|
||||
{
|
||||
name: "handles empty clusters list",
|
||||
defaultCluster: "default",
|
||||
targetName: "cluster",
|
||||
clusters: []string{},
|
||||
expectEnum: true,
|
||||
expectedCount: 0,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Initialize clusters with names if they were created with make()
|
||||
if len(tt.clusters) > 3 && tt.clusters[0] == "" {
|
||||
for i := range tt.clusters {
|
||||
tt.clusters[i] = "cluster" + string(rune('A'+i))
|
||||
}
|
||||
}
|
||||
|
||||
property := createTargetProperty(tt.defaultCluster, tt.targetName, tt.clusters)
|
||||
|
||||
assert.Equal(t, "string", property.Type)
|
||||
assert.Contains(t, property.Description, tt.defaultCluster)
|
||||
assert.Contains(t, property.Description, "Defaults to "+tt.defaultCluster+" if not set")
|
||||
|
||||
if tt.expectEnum {
|
||||
assert.NotNil(t, property.Enum, "enum should be created")
|
||||
assert.Equal(t, tt.expectedCount, len(property.Enum))
|
||||
if tt.expectedCount > 0 && tt.expectedCount <= 3 {
|
||||
// Only check specific values for small, predefined lists
|
||||
for _, cluster := range tt.clusters {
|
||||
assert.Contains(t, property.Enum, cluster)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
assert.Nil(t, property.Enum, "enum should not be created for too many clusters")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestToolMutatorType(t *testing.T) {
|
||||
t.Run("ToolMutator type can be used as function", func(t *testing.T) {
|
||||
var mutator ToolMutator = func(tool api.ServerTool) api.ServerTool {
|
||||
tool.Tool.Name = "modified-" + tool.Tool.Name
|
||||
return tool
|
||||
}
|
||||
|
||||
originalTool := createTestTool("original")
|
||||
result := mutator(originalTool)
|
||||
assert.Equal(t, "modified-original", result.Tool.Name)
|
||||
})
|
||||
}
|
||||
|
||||
func TestMaxClustersInEnumConstant(t *testing.T) {
|
||||
t.Run("maxClustersInEnum has expected value", func(t *testing.T) {
|
||||
assert.Equal(t, 5, maxTargetsInEnum, "maxClustersInEnum should be 5")
|
||||
})
|
||||
}
|
||||
|
||||
type TargetParameterToolMutatorSuite struct {
|
||||
suite.Suite
|
||||
}
|
||||
|
||||
func (s *TargetParameterToolMutatorSuite) TestClusterAwareTool() {
|
||||
tm := WithTargetParameter("default-cluster", "cluster", []string{"cluster-1", "cluster-2", "cluster-3"})
|
||||
tool := createTestTool("cluster-aware-tool")
|
||||
// Tools are cluster-aware by default
|
||||
tm(tool)
|
||||
s.Require().NotNil(tool.Tool.InputSchema.Properties)
|
||||
s.Run("adds cluster parameter", func() {
|
||||
s.NotNil(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to be added")
|
||||
})
|
||||
s.Run("adds correct description", func() {
|
||||
desc := tool.Tool.InputSchema.Properties["cluster"].Description
|
||||
s.Contains(desc, "Optional parameter selecting which cluster to run the tool in", "Expected description to mention cluster selection")
|
||||
s.Contains(desc, "Defaults to default-cluster if not set", "Expected description to mention default cluster")
|
||||
})
|
||||
s.Run("adds enum with clusters", func() {
|
||||
s.Require().NotNil(tool.Tool.InputSchema.Properties["cluster"])
|
||||
enum := tool.Tool.InputSchema.Properties["cluster"].Enum
|
||||
s.NotNilf(enum, "Expected enum to be set")
|
||||
s.Equal(3, len(enum), "Expected enum to have 3 entries")
|
||||
s.Contains(enum, "cluster-1", "Expected enum to contain cluster-1")
|
||||
s.Contains(enum, "cluster-2", "Expected enum to contain cluster-2")
|
||||
s.Contains(enum, "cluster-3", "Expected enum to contain cluster-3")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TargetParameterToolMutatorSuite) TestClusterAwareToolSingleCluster() {
|
||||
tm := WithTargetParameter("default", "cluster", []string{"only-cluster"})
|
||||
tool := createTestTool("cluster-aware-tool-single-cluster")
|
||||
// Tools are cluster-aware by default
|
||||
tm(tool)
|
||||
s.Run("does not add cluster parameter for single cluster", func() {
|
||||
s.Nilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to not be added for single cluster")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TargetParameterToolMutatorSuite) TestClusterAwareToolMultipleClusters() {
|
||||
tm := WithTargetParameter("default", "cluster", []string{"cluster-1", "cluster-2", "cluster-3", "cluster-4", "cluster-5", "cluster-6"})
|
||||
tool := createTestTool("cluster-aware-tool-multiple-clusters")
|
||||
// Tools are cluster-aware by default
|
||||
tm(tool)
|
||||
s.Run("adds cluster parameter", func() {
|
||||
s.NotNilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to be added")
|
||||
})
|
||||
s.Run("does not add enum when list of clusters is > 5", func() {
|
||||
s.Require().NotNil(tool.Tool.InputSchema.Properties["cluster"])
|
||||
enum := tool.Tool.InputSchema.Properties["cluster"].Enum
|
||||
s.Nilf(enum, "Expected enum to not be set for too many clusters")
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TargetParameterToolMutatorSuite) TestNonClusterAwareTool() {
|
||||
tm := WithTargetParameter("default", "cluster", []string{"cluster-1", "cluster-2"})
|
||||
tool := createTestTool("non-cluster-aware-tool")
|
||||
tool.ClusterAware = ptr.To(false)
|
||||
tm(tool)
|
||||
s.Run("does not add cluster parameter", func() {
|
||||
s.Nilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to not be added")
|
||||
})
|
||||
}
|
||||
|
||||
func TestTargetParameterToolMutator(t *testing.T) {
|
||||
suite.Run(t, new(TargetParameterToolMutatorSuite))
|
||||
}
|
||||
@@ -2,11 +2,9 @@ package mcp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/containers/kubernetes-mcp-server/internal/test"
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/api"
|
||||
configuration "github.com/containers/kubernetes-mcp-server/pkg/config"
|
||||
@@ -14,6 +12,9 @@ import (
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/config"
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/core"
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/toolsets/helm"
|
||||
"github.com/mark3labs/mcp-go/mcp"
|
||||
"github.com/stretchr/testify/suite"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
|
||||
type ToolsetsSuite struct {
|
||||
@@ -98,6 +99,50 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() {
|
||||
})
|
||||
}
|
||||
|
||||
func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() {
|
||||
s.Run("Default configuration toolsets in multi-cluster (with 11 clusters)", func() {
|
||||
kubeconfig := s.Kubeconfig()
|
||||
for i := 0; i < 10; i++ {
|
||||
// Add multiple fake contexts to force multi-cluster behavior
|
||||
kubeconfig.Contexts[strconv.Itoa(i)] = clientcmdapi.NewContext()
|
||||
}
|
||||
s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
|
||||
s.InitMcpClient()
|
||||
tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
|
||||
s.Run("ListTools returns tools", func() {
|
||||
s.NotNil(tools, "Expected tools from ListTools")
|
||||
s.NoError(err, "Expected no error from ListTools")
|
||||
})
|
||||
s.Run("ListTools returns correct Tool metadata", func() {
|
||||
expectedMetadata := test.ReadFile("testdata", "toolsets-full-tools-multicluster.json")
|
||||
metadata, err := json.MarshalIndent(tools.Tools, "", " ")
|
||||
s.Require().NoErrorf(err, "failed to marshal tools metadata: %v", err)
|
||||
s.JSONEq(expectedMetadata, string(metadata), "tools metadata does not match expected")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiClusterEnum() {
	s.Run("Default configuration toolsets in multi-cluster (with 2 clusters)", func() {
		kubeconfig := s.Kubeconfig()
		// Add additional cluster to force multi-cluster behavior with enum parameter
		kubeconfig.Contexts["extra-cluster"] = clientcmdapi.NewContext()
		s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
		s.InitMcpClient()
		tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
		s.Run("ListTools returns tools", func() {
			s.NotNil(tools, "Expected tools from ListTools")
			s.NoError(err, "Expected no error from ListTools")
		})
		s.Run("ListTools returns correct Tool metadata", func() {
			expectedMetadata := test.ReadFile("testdata", "toolsets-full-tools-multicluster-enum.json")
			metadata, err := json.MarshalIndent(tools.Tools, "", " ")
			s.Require().NoErrorf(err, "failed to marshal tools metadata: %v", err)
			s.JSONEq(expectedMetadata, string(metadata), "tools metadata does not match expected")
		})
	})
}

func (s *ToolsetsSuite) TestGranularToolsetsTools() {
	testCases := []api.Toolset{
		&core.Toolset{},

@@ -12,33 +12,91 @@ import (

func initConfiguration() []api.ServerTool {
	tools := []api.ServerTool{
		{Tool: api.Tool{
			Name: "configuration_view",
			Description: "Get the current Kubernetes configuration content as a kubeconfig YAML",
			InputSchema: &jsonschema.Schema{
				Type: "object",
				Properties: map[string]*jsonschema.Schema{
					"minified": {
						Type: "boolean",
						Description: "Return a minified version of the configuration. " +
							"If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. " +
							"If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. " +
							"(Optional, default true)",
					},
		{
			Tool: api.Tool{
				Name: "configuration_contexts_list",
				Description: "List all available context names and associated server urls from the kubeconfig file",
				InputSchema: &jsonschema.Schema{
					Type: "object",
				},
				Annotations: api.ToolAnnotations{
					Title: "Configuration: Contexts List",
					ReadOnlyHint: ptr.To(true),
					DestructiveHint: ptr.To(false),
					IdempotentHint: ptr.To(true),
					OpenWorldHint: ptr.To(false),
				},
			},
			Annotations: api.ToolAnnotations{
				Title: "Configuration: View",
				ReadOnlyHint: ptr.To(true),
				DestructiveHint: ptr.To(false),
				IdempotentHint: ptr.To(false),
				OpenWorldHint: ptr.To(true),
			ClusterAware: ptr.To(false),
			TargetListProvider: ptr.To(true),
			Handler: contextsList,
		},
		{
			Tool: api.Tool{
				Name: "configuration_view",
				Description: "Get the current Kubernetes configuration content as a kubeconfig YAML",
				InputSchema: &jsonschema.Schema{
					Type: "object",
					Properties: map[string]*jsonschema.Schema{
						"minified": {
							Type: "boolean",
							Description: "Return a minified version of the configuration. " +
								"If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. " +
								"If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. " +
								"(Optional, default true)",
						},
					},
				},
				Annotations: api.ToolAnnotations{
					Title: "Configuration: View",
					ReadOnlyHint: ptr.To(true),
					DestructiveHint: ptr.To(false),
					IdempotentHint: ptr.To(false),
					OpenWorldHint: ptr.To(true),
				},
			},
		}, Handler: configurationView},
			ClusterAware: ptr.To(false),
			Handler: configurationView,
		},
	}
	return tools
}
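As a point of reference, a minimal sketch of the ServerTool shape used above; the tool name and the inline handler are hypothetical, and only the field layout is taken from the definitions in this hunk. Setting ClusterAware to false marks a tool as operating on the kubeconfig itself, so it is not expected to receive the per-call context/cluster parameter that cluster-aware tools get from the mutator:

var exampleTool = api.ServerTool{
	Tool: api.Tool{
		Name:        "configuration_example", // hypothetical name for illustration
		Description: "Illustrative kubeconfig-scoped tool",
		InputSchema: &jsonschema.Schema{Type: "object"},
		Annotations: api.ToolAnnotations{
			Title:        "Configuration: Example",
			ReadOnlyHint: ptr.To(true),
		},
	},
	// Operates on the kubeconfig itself rather than a single cluster.
	ClusterAware: ptr.To(false),
	Handler: func(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
		return api.NewToolCallResult("example output", nil), nil
	},
}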
func contextsList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
	contexts, err := params.ConfigurationContextsList()
	if err != nil {
		return api.NewToolCallResult("", fmt.Errorf("failed to list contexts: %v", err)), nil
	}

	if len(contexts) == 0 {
		return api.NewToolCallResult("No contexts found in kubeconfig", nil), nil
	}

	defaultContext, err := params.ConfigurationContextsDefault()
	if err != nil {
		return api.NewToolCallResult("", fmt.Errorf("failed to get default context: %v", err)), nil
	}

	result := fmt.Sprintf("Available Kubernetes contexts (%d total, default: %s):\n\n", len(contexts), defaultContext)
	result += "Format: [*] CONTEXT_NAME -> SERVER_URL\n"
	result += " (* indicates the default context used in tools if context is not set)\n\n"
	result += "Contexts:\n---------\n"
	for context, server := range contexts {
		marker := " "
		if context == defaultContext {
			marker = "*"
		}

		result += fmt.Sprintf("%s%s -> %s\n", marker, context, server)
	}
	result += "---------\n\n"

	result += "To use a specific context with any tool, set the 'context' parameter in the tool call arguments"

	// TODO: Review output format, current is not parseable and might not be ideal for LLM consumption
	return api.NewToolCallResult(result, nil), nil
}
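For illustration, with two contexts in the kubeconfig and "fake" as the default context (the names and server URLs below are hypothetical), the handler above would emit output along these lines; note that the per-context lines appear in no guaranteed order, since contexts is a map:

Available Kubernetes contexts (2 total, default: fake):

Format: [*] CONTEXT_NAME -> SERVER_URL
 (* indicates the default context used in tools if context is not set)

Contexts:
---------
*fake -> https://127.0.0.1:6443
 extra-cluster -> https://127.0.0.2:6443
---------

To use a specific context with any tool, set the 'context' parameter in the tool call arguments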
func configurationView(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
	minify := true
	minified := params.GetArguments()["minified"]