refactor(kubernetes): keep Provider as only external Kubernetes interface (#372)

* refactor(kubernetes): keep Provider as only external Kubernetes interface

Initial phase of unifying the Provider interface with the Manager struct.

- Renamed ManagerProvider to Provider (i.e. kubernetes.Provider)
- Moved Manager-related logic into dedicated files
- Exposed the relevant methods through the Provider interface (GetDerivedKubernetes, IsOpenShift, VerifyToken)
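For reviewers, a minimal sketch of the call pattern this refactor aims for; the wrapper function, import block, and error handling are illustrative assumptions, not code from this PR:

import (
	"context"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// newDerivedClient is a hypothetical consumer: it only ever touches the
// Provider interface; Manager is no longer part of the external surface.
func newDerivedClient(ctx context.Context, cfg *config.StaticConfig) (*kubernetes.Kubernetes, kubernetes.Provider, error) {
	provider, err := kubernetes.NewProvider(cfg)
	if err != nil {
		return nil, nil, err
	}
	// Replaces the old two-step GetManagerFor(...) + Manager.Derived(...).
	k8s, err := provider.GetDerivedKubernetes(ctx, provider.GetDefaultTarget())
	if err != nil {
		provider.Close()
		return nil, nil, err
	}
	return k8s, provider, nil // caller owns provider.Close()
}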

Signed-off-by: Marc Nuri <marc@marcnuri.com>

* Update pkg/kubernetes/provider_kubeconfig.go

Co-authored-by: Calum Murray <cmurray@redhat.com>
Signed-off-by: Marc Nuri <marc@marcnuri.com>

---------

Signed-off-by: Marc Nuri <marc@marcnuri.com>
Co-authored-by: Calum Murray <cmurray@redhat.com>
Commit f3a446676f (parent 1e154d7587)
Author: Marc Nuri
Date: 2025-10-14 15:25:49 +02:00
Committed by: GitHub
14 changed files with 361 additions and 336 deletions


@@ -23,7 +23,7 @@ import (
type KubernetesApiTokenVerifier interface {
// KubernetesApiVerifyToken TODO: clarify proper implementation
-KubernetesApiVerifyToken(ctx context.Context, token, audience, cluster string) (*authenticationapiv1.UserInfo, []string, error)
+KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
GetTargetParameterName() string
}
@@ -247,7 +247,7 @@ func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, p
func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
if verifier != nil {
-_, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience, cluster)
+_, _, err := verifier.KubernetesApiVerifyToken(ctx, cluster, c.Token, audience)
if err != nil {
return fmt.Errorf("kubernetes API token validation error: %v", err)
}
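Both hunks are the same mechanical change: the cluster parameter moves to the front of the positional string arguments so call sites line up with the new kubernetes.TokenVerifier ordering. A hedged illustration; the context name, token variable, and audience are placeholders:

// Old order: (ctx, token, audience, cluster)
// New order: (ctx, cluster, token, audience)
userInfo, audiences, err := verifier.KubernetesApiVerifyToken(ctx, "prod-context", rawToken, "mcp-server")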


@@ -47,42 +47,10 @@ func resolveKubernetesConfigurations(kubernetes *Manager) error {
return err
}
func (m *Manager) IsInCluster() bool {
if m.staticConfig.KubeConfig != "" {
return false
}
cfg, err := InClusterConfig()
return err == nil && cfg != nil
}
func (m *Manager) configuredNamespace() string {
if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
return ns
}
return ""
}
func (m *Manager) NamespaceOrDefault(namespace string) string {
if namespace == "" {
return m.configuredNamespace()
}
return namespace
}
func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
return k.manager.NamespaceOrDefault(namespace)
}
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
return m.cfg, nil
}
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return m.clientCmdConfig
}
// ConfigurationContextsDefault returns the current context name
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {


@@ -1,27 +1,10 @@
package kubernetes
import (
"context"
"errors"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"github.com/fsnotify/fsnotify"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
"k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -47,174 +30,9 @@ func (k *Kubernetes) AccessControlClientset() *AccessControlClientset {
return k.manager.accessControlClientSet
}
type Manager struct {
cfg *rest.Config
clientCmdConfig clientcmd.ClientConfig
discoveryClient discovery.CachedDiscoveryInterface
accessControlClientSet *AccessControlClientset
accessControlRESTMapper *AccessControlRESTMapper
dynamicClient *dynamic.DynamicClient
staticConfig *config.StaticConfig
CloseWatchKubeConfig CloseWatchKubeConfig
}
var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)
var Scheme = scheme.Scheme
var ParameterCodec = runtime.NewParameterCodec(Scheme)
func NewManager(config *config.StaticConfig) (*Manager, error) {
k8s := &Manager{
staticConfig: config,
}
if err := resolveKubernetesConfigurations(k8s); err != nil {
return nil, err
}
// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
// return &impersonateRoundTripper{original}
//})
var err error
k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
if err != nil {
return nil, err
}
k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
k8s.staticConfig,
)
k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
if err != nil {
return nil, err
}
return k8s, nil
}
func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
if m.clientCmdConfig == nil {
return
}
kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
if len(kubeConfigFiles) == 0 {
return
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return
}
for _, file := range kubeConfigFiles {
_ = watcher.Add(file)
}
go func() {
for {
select {
case _, ok := <-watcher.Events:
if !ok {
return
}
_ = onKubeConfigChange()
case _, ok := <-watcher.Errors:
if !ok {
return
}
}
}
}()
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
m.CloseWatchKubeConfig = watcher.Close
}
func (m *Manager) Close() {
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
}
func (m *Manager) GetAPIServerHost() string {
if m.cfg == nil {
return ""
}
return m.cfg.Host
}
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return m.discoveryClient, nil
}
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
return m.accessControlRESTMapper, nil
}
func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
if !ok || !strings.HasPrefix(authorization, "Bearer ") {
if m.staticConfig.RequireOAuth {
return nil, errors.New("oauth token required")
}
return &Kubernetes{manager: m}, nil
}
klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
derivedCfg := &rest.Config{
Host: m.cfg.Host,
APIPath: m.cfg.APIPath,
// Copy only server verification TLS settings (CA bundle and server name)
TLSClientConfig: rest.TLSClientConfig{
Insecure: m.cfg.Insecure,
ServerName: m.cfg.ServerName,
CAFile: m.cfg.CAFile,
CAData: m.cfg.CAData,
},
BearerToken: strings.TrimPrefix(authorization, "Bearer "),
// pass custom UserAgent to identify the client
UserAgent: CustomUserAgent,
QPS: m.cfg.QPS,
Burst: m.cfg.Burst,
Timeout: m.cfg.Timeout,
Impersonate: rest.ImpersonationConfig{},
}
clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
derived := &Kubernetes{manager: &Manager{
clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
cfg: derivedCfg,
staticConfig: m.staticConfig,
}}
derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
derived.manager.staticConfig,
)
derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to initialize dynamic client: %v", err)
return nil, errors.New("failed to initialize dynamic client")
}
return &Kubernetes{manager: m}, nil
}
return derived, nil
}
func (k *Kubernetes) NewHelm() *helm.Helm {
// This is a derived Kubernetes, so it already has the Helm initialized
return helm.NewHelm(k.manager)

pkg/kubernetes/manager.go (new file, 251 lines)

@@ -0,0 +1,251 @@
package kubernetes
import (
"context"
"errors"
"fmt"
"strings"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
"github.com/fsnotify/fsnotify"
authenticationv1api "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
)
type Manager struct {
cfg *rest.Config
clientCmdConfig clientcmd.ClientConfig
discoveryClient discovery.CachedDiscoveryInterface
accessControlClientSet *AccessControlClientset
accessControlRESTMapper *AccessControlRESTMapper
dynamicClient *dynamic.DynamicClient
staticConfig *config.StaticConfig
CloseWatchKubeConfig CloseWatchKubeConfig
}
var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)
func NewManager(config *config.StaticConfig) (*Manager, error) {
k8s := &Manager{
staticConfig: config,
}
if err := resolveKubernetesConfigurations(k8s); err != nil {
return nil, err
}
// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
// return &impersonateRoundTripper{original}
//})
var err error
k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
if err != nil {
return nil, err
}
k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
k8s.staticConfig,
)
k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
if err != nil {
return nil, err
}
return k8s, nil
}
func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
if m.clientCmdConfig == nil {
return
}
kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
if len(kubeConfigFiles) == 0 {
return
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return
}
for _, file := range kubeConfigFiles {
_ = watcher.Add(file)
}
go func() {
for {
select {
case _, ok := <-watcher.Events:
if !ok {
return
}
_ = onKubeConfigChange()
case _, ok := <-watcher.Errors:
if !ok {
return
}
}
}
}()
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
m.CloseWatchKubeConfig = watcher.Close
}
func (m *Manager) Close() {
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
}
func (m *Manager) GetAPIServerHost() string {
if m.cfg == nil {
return ""
}
return m.cfg.Host
}
func (m *Manager) IsInCluster() bool {
if m.staticConfig.KubeConfig != "" {
return false
}
cfg, err := InClusterConfig()
return err == nil && cfg != nil
}
func (m *Manager) configuredNamespace() string {
if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
return ns
}
return ""
}
func (m *Manager) NamespaceOrDefault(namespace string) string {
if namespace == "" {
return m.configuredNamespace()
}
return namespace
}
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return m.discoveryClient, nil
}
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
return m.accessControlRESTMapper, nil
}
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
return m.cfg, nil
}
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return m.clientCmdConfig
}
func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
tokenReviewClient, err := m.accessControlClientSet.TokenReview()
if err != nil {
return nil, nil, err
}
tokenReview := &authenticationv1api.TokenReview{
TypeMeta: metav1.TypeMeta{
APIVersion: "authentication.k8s.io/v1",
Kind: "TokenReview",
},
Spec: authenticationv1api.TokenReviewSpec{
Token: token,
Audiences: []string{audience},
},
}
result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
if err != nil {
return nil, nil, fmt.Errorf("failed to create token review: %v", err)
}
if !result.Status.Authenticated {
if result.Status.Error != "" {
return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
}
return nil, nil, fmt.Errorf("token authentication failed")
}
return &result.Status.User, result.Status.Audiences, nil
}
func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
if !ok || !strings.HasPrefix(authorization, "Bearer ") {
if m.staticConfig.RequireOAuth {
return nil, errors.New("oauth token required")
}
return &Kubernetes{manager: m}, nil
}
klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
derivedCfg := &rest.Config{
Host: m.cfg.Host,
APIPath: m.cfg.APIPath,
// Copy only server verification TLS settings (CA bundle and server name)
TLSClientConfig: rest.TLSClientConfig{
Insecure: m.cfg.Insecure,
ServerName: m.cfg.ServerName,
CAFile: m.cfg.CAFile,
CAData: m.cfg.CAData,
},
BearerToken: strings.TrimPrefix(authorization, "Bearer "),
// pass custom UserAgent to identify the client
UserAgent: CustomUserAgent,
QPS: m.cfg.QPS,
Burst: m.cfg.Burst,
Timeout: m.cfg.Timeout,
Impersonate: rest.ImpersonationConfig{},
}
clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
derived := &Kubernetes{manager: &Manager{
clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
cfg: derivedCfg,
staticConfig: m.staticConfig,
}}
derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
derived.manager.staticConfig,
)
derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to initialize dynamic client: %v", err)
return nil, errors.New("failed to initialize dynamic client")
}
return &Kubernetes{manager: m}, nil
}
return derived, nil
}
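Derived keys off a bearer token carried in the request context; a sketch of how an HTTP layer might thread it through. The handler shape is an assumption; OAuthAuthorizationHeader is the context key Derived reads above, assumed usable with context.WithValue:

func withPerRequestClient(m *kubernetes.Manager) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Assumption: the raw "Bearer ..." header value is stored under this key.
		ctx := context.WithValue(r.Context(), kubernetes.OAuthAuthorizationHeader, r.Header.Get("Authorization"))
		k8s, err := m.Derived(ctx)
		if err != nil { // with RequireOAuth set, a missing or invalid token fails here
			http.Error(w, err.Error(), http.StatusUnauthorized)
			return
		}
		_ = k8s // token-scoped client; kubeconfig AuthInfos were stripped above
	}
}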


@@ -11,16 +11,23 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
-type ManagerProvider interface {
+type Provider interface {
+// Openshift extends the Openshift interface to provide OpenShift specific functionality to toolset providers
+// TODO: with the configurable toolset implementation and especially the multi-cluster approach
+// extending this interface might not be a good idea anymore.
+// For the kubecontext case, a user might be targeting both an OpenShift flavored cluster and a vanilla Kubernetes cluster.
+// See: https://github.com/containers/kubernetes-mcp-server/pull/372#discussion_r2421592315
Openshift
+TokenVerifier
GetTargets(ctx context.Context) ([]string, error)
-GetManagerFor(ctx context.Context, target string) (*Manager, error)
+GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error)
GetDefaultTarget() string
GetTargetParameterName() string
WatchTargets(func() error)
Close()
}
-func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
+func NewProvider(cfg *config.StaticConfig) (Provider, error) {
m, err := NewManager(cfg)
if err != nil {
return nil, err


@@ -5,13 +5,14 @@ import (
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
+authenticationv1api "k8s.io/api/authentication/v1"
)
// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"
-// kubeConfigClusterProvider implements ManagerProvider for managing multiple
+// kubeConfigClusterProvider implements Provider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
@@ -19,7 +20,7 @@ type kubeConfigClusterProvider struct {
managers map[string]*Manager
}
-var _ ManagerProvider = &kubeConfigClusterProvider{}
+var _ Provider = &kubeConfigClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
@@ -27,7 +28,7 @@ func init() {
// newKubeConfigClusterProvider creates a provider that manages multiple clusters
// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
-func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) {
// Handle in-cluster mode
if m.IsInCluster() {
return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
@@ -56,26 +57,13 @@ func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Manager
}, nil
}
-func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
-contextNames := make([]string, 0, len(k.managers))
-for cluster := range k.managers {
-contextNames = append(contextNames, cluster)
-}
-return contextNames, nil
-}
-func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
-return KubeConfigTargetParameterName
-}
-func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
-m, ok := k.managers[context]
+func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, error) {
+m, ok := p.managers[context]
if ok && m != nil {
return m, nil
}
-baseManager := k.managers[k.defaultContext]
+baseManager := p.managers[p.defaultContext]
if baseManager.IsInCluster() {
// In cluster mode, so context switching is not applicable
@@ -87,23 +75,56 @@ func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context s
return nil, err
}
-k.managers[context] = m
+p.managers[context] = m
return m, nil
}
-func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
-return k.defaultContext
+func (p *kubeConfigClusterProvider) IsOpenShift(ctx context.Context) bool {
+return p.managers[p.defaultContext].IsOpenShift(ctx)
}
-func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
-m := k.managers[k.defaultContext]
+func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+m, err := p.managerForContext(context)
+if err != nil {
+return nil, nil, err
+}
+return m.VerifyToken(ctx, token, audience)
+}
+func (p *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
+contextNames := make([]string, 0, len(p.managers))
+for contextName := range p.managers {
+contextNames = append(contextNames, contextName)
+}
+return contextNames, nil
+}
+func (p *kubeConfigClusterProvider) GetTargetParameterName() string {
+return KubeConfigTargetParameterName
+}
+func (p *kubeConfigClusterProvider) GetDerivedKubernetes(ctx context.Context, context string) (*Kubernetes, error) {
+m, err := p.managerForContext(context)
+if err != nil {
+return nil, err
+}
+return m.Derived(ctx)
+}
+func (p *kubeConfigClusterProvider) GetDefaultTarget() string {
+return p.defaultContext
+}
+func (p *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
+m := p.managers[p.defaultContext]
m.WatchKubeConfig(onKubeConfigChanged)
}
-func (k *kubeConfigClusterProvider) Close() {
-m := k.managers[k.defaultContext]
+func (p *kubeConfigClusterProvider) Close() {
+m := p.managers[p.defaultContext]
m.Close()
}
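Managers are created lazily per kubeconfig context, so the first VerifyToken or GetDerivedKubernetes call for a context pays the construction cost and later calls reuse the cached Manager. A sketch; the context name and audience are placeholders:

func verifyAgainstContext(ctx context.Context, p kubernetes.Provider, rawToken string) error {
	// First call builds and caches the Manager for "staging"; later calls hit the map.
	_, _, err := p.VerifyToken(ctx, "staging", rawToken, "mcp-server")
	return err
}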


@@ -7,10 +7,10 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
-// ProviderFactory creates a new ManagerProvider instance for a given strategy.
+// ProviderFactory creates a new Provider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
-type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error)
+type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (Provider, error)
var providerFactories = make(map[string]ProviderFactory)


@@ -13,18 +13,18 @@ type ProviderRegistryTestSuite struct {
func (s *ProviderRegistryTestSuite) TestRegisterProvider() {
s.Run("With no pre-existing provider, registers the provider", func() {
RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
_, exists := providerFactories["test-strategy"]
s.True(exists, "Provider should be registered")
})
s.Run("With pre-existing provider, panics", func() {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
s.Panics(func() {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
}, "Registering a provider with an existing strategy should panic")
@@ -39,10 +39,10 @@ func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() {
})
s.Run("With multiple registered providers, returns sorted list", func() {
providerFactories = make(map[string]ProviderFactory)
RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
strategies := GetRegisteredStrategies()


@@ -5,9 +5,10 @@ import (
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
+authenticationv1api "k8s.io/api/authentication/v1"
)
-// singleClusterProvider implements ManagerProvider for managing a single
+// singleClusterProvider implements Provider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
@@ -15,7 +16,7 @@ type singleClusterProvider struct {
manager *Manager
}
-var _ ManagerProvider = &singleClusterProvider{}
+var _ Provider = &singleClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
@@ -25,7 +26,7 @@ func init() {
// newSingleClusterProvider creates a provider that manages a single cluster.
// Validates that the manager is in-cluster when the in-cluster strategy is used.
func newSingleClusterProvider(strategy string) ProviderFactory {
-return func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
+return func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
}
@@ -37,30 +38,41 @@ func newSingleClusterProvider(strategy string) ProviderFactory {
}
}
-func (s *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
+func (p *singleClusterProvider) IsOpenShift(ctx context.Context) bool {
+return p.manager.IsOpenShift(ctx)
+}
+func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+if target != "" {
+return nil, nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
+}
+return p.manager.VerifyToken(ctx, token, audience)
+}
+func (p *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
return []string{""}, nil
}
-func (s *singleClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
+func (p *singleClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) {
if target != "" {
-return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", s.strategy)
+return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
}
-return s.manager, nil
+return p.manager.Derived(ctx)
}
-func (s *singleClusterProvider) GetDefaultTarget() string {
+func (p *singleClusterProvider) GetDefaultTarget() string {
return ""
}
-func (s *singleClusterProvider) GetTargetParameterName() string {
+func (p *singleClusterProvider) GetTargetParameterName() string {
return ""
}
-func (s *singleClusterProvider) WatchTargets(watch func() error) {
-s.manager.WatchKubeConfig(watch)
+func (p *singleClusterProvider) WatchTargets(watch func() error) {
+p.manager.WatchKubeConfig(watch)
}
-func (s *singleClusterProvider) Close() {
-s.manager.Close()
+func (p *singleClusterProvider) Close() {
+p.manager.Close()
}
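The single-cluster provider deliberately rejects any non-empty target, so a mis-routed multi-cluster request fails loudly instead of silently hitting the only cluster. A sketch; the target name is a placeholder:

func rejectsOtherTargets(ctx context.Context, p kubernetes.Provider) error {
	// Only the empty default target is accepted by the single-cluster strategies.
	_, err := p.GetDerivedKubernetes(ctx, "another-cluster")
	return err // "unable to get manager for other context/cluster with in-cluster strategy"
}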


@@ -43,7 +43,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
}
s.Run("With no cluster_provider_strategy, returns single-cluster provider", func() {
cfg := test.Must(config.ReadToml([]byte{}))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for in-cluster provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
@@ -52,7 +52,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "in-cluster"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for single-cluster strategy")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
@@ -61,7 +61,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "kubeconfig"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for kubeconfig strategy")
s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
@@ -70,7 +70,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "i-do-not-exist"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
@@ -85,7 +85,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
cfg := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + kubeconfigPath + `"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
@@ -95,7 +95,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "kubeconfig"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
@@ -105,7 +105,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "in-cluster"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for in-cluster strategy")
s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
@@ -115,7 +115,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "i-do-not-exist"
`)))
-provider, err := NewManagerProvider(cfg)
+provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)


@@ -2,39 +2,10 @@ package kubernetes
import (
"context"
"fmt"
authenticationv1api "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
-tokenReviewClient, err := m.accessControlClientSet.TokenReview()
-if err != nil {
-return nil, nil, err
-}
-tokenReview := &authenticationv1api.TokenReview{
-TypeMeta: metav1.TypeMeta{
-APIVersion: "authentication.k8s.io/v1",
-Kind: "TokenReview",
-},
-Spec: authenticationv1api.TokenReviewSpec{
-Token: token,
-Audiences: []string{audience},
-},
-}
-result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
-if err != nil {
-return nil, nil, fmt.Errorf("failed to create token review: %v", err)
-}
-if !result.Status.Authenticated {
-if result.Status.Error != "" {
-return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
-}
-return nil, nil, fmt.Errorf("token authentication failed")
-}
-return &result.Status.User, result.Status.Audiences, nil
+type TokenVerifier interface {
+VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error)
}
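A consumer that only needs token review can depend on the narrow TokenVerifier contract rather than the full Provider; a hedged sketch, with an illustrative audience value:

func authenticate(ctx context.Context, v kubernetes.TokenVerifier, cluster, token string) error {
	userInfo, _, err := v.VerifyToken(ctx, cluster, token, "kubernetes-mcp-server")
	if err != nil {
		return fmt.Errorf("token rejected: %w", err)
	}
	klog.V(2).Infof("token verified for user %q", userInfo.Username)
	return nil
}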


@@ -39,15 +39,9 @@ func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.S
m3labTool.RawInputSchema = schema
}
m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
-// get the correct internalk8s.Manager for the target specified in the request
+// get the correct derived Kubernetes client for the target specified in the request
cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget())
-m, err := s.p.GetManagerFor(ctx, cluster)
-if err != nil {
-return nil, err
-}
-// derive the manager based on auth on top of the settings for the cluster
-k, err := m.Derived(ctx)
+k, err := s.p.GetDerivedKubernetes(ctx, cluster)
if err != nil {
return nil, err
}


@@ -67,7 +67,7 @@ type Server struct {
configuration *Configuration
server *server.MCPServer
enabledTools []string
-p internalk8s.ManagerProvider
+p internalk8s.Provider
}
func NewServer(configuration Configuration) (*Server, error) {
@@ -101,7 +101,7 @@ func NewServer(configuration Configuration) (*Server, error) {
func (s *Server) reloadKubernetesClusterProvider() error {
ctx := context.Background()
-p, err := internalk8s.NewManagerProvider(s.configuration.StaticConfig)
+p, err := internalk8s.NewProvider(s.configuration.StaticConfig)
if err != nil {
return err
}
@@ -113,11 +113,6 @@ func (s *Server) reloadKubernetesClusterProvider() error {
s.p = p
-k, err := s.p.GetManagerFor(ctx, s.p.GetDefaultTarget())
-if err != nil {
-return err
-}
targets, err := p.GetTargets(ctx)
if err != nil {
return err
@@ -136,7 +131,7 @@ func (s *Server) reloadKubernetesClusterProvider() error {
applicableTools := make([]api.ServerTool, 0)
for _, toolset := range s.configuration.Toolsets() {
-for _, tool := range toolset.GetTools(k) {
+for _, tool := range toolset.GetTools(p) {
tool := mutator(tool)
if !filter(tool) {
continue
@@ -182,23 +177,11 @@ func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer
// KubernetesApiVerifyToken verifies the given token with the audience by
// sending a TokenReview request to the API Server for the specified cluster.
-func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string, cluster string) (*authenticationapiv1.UserInfo, []string, error) {
+func (s *Server) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
if s.p == nil {
return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
}
-// Use provided cluster or default
-if cluster == "" {
-cluster = s.p.GetDefaultTarget()
-}
-// Get the cluster manager for the specified cluster
-m, err := s.p.GetManagerFor(ctx, cluster)
-if err != nil {
-return nil, nil, err
-}
-return m.VerifyToken(ctx, token, audience)
+return s.p.VerifyToken(ctx, cluster, token, audience)
}
// GetTargetParameterName returns the parameter name used for target identification in MCP requests


@@ -32,7 +32,7 @@ func ShouldIncludeTargetListTool(targetName string, targets []string) ToolFilter
// TODO: this check should be removed or made more generic when we have other
if tool.Tool.Name == "configuration_contexts_list" && targetName != kubernetes.KubeConfigTargetParameterName {
-// let's not include configuration_contexts_list if we aren't targeting contexts in our ManagerProvider
+// let's not include configuration_contexts_list if we aren't targeting contexts in our Provider
return false
}