refactor(kubernetes): move provider initialization into factory (#365)

Signed-off-by: Calum Murray <cmurray@redhat.com>
This commit is contained in:
Calum Murray
2025-10-08 22:25:34 -04:00
committed by GitHub
parent 99e954304c
commit 31e90fbece
4 changed files with 224 additions and 145 deletions

View File

@@ -2,7 +2,6 @@ package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"k8s.io/client-go/discovery/cached/memory"
@@ -12,10 +11,6 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
const (
KubeConfigTargetParameterName = "context"
)
type ManagerProvider interface {
GetTargets(ctx context.Context) ([]string, error)
GetManagerFor(ctx context.Context, target string) (*Manager, error)
@@ -25,20 +20,6 @@ type ManagerProvider interface {
Close()
}
type kubeConfigClusterProvider struct {
defaultContext string
managers map[string]*Manager
}
var _ ManagerProvider = &kubeConfigClusterProvider{}
type singleClusterProvider struct {
strategy string
manager *Manager
}
var _ ManagerProvider = &singleClusterProvider{}
func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
m, err := NewManager(cfg)
if err != nil {
@@ -46,137 +27,13 @@ func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
}
strategy := resolveStrategy(cfg, m)
switch strategy {
case config.ClusterProviderKubeConfig:
return newKubeConfigClusterProvider(m)
case config.ClusterProviderInCluster, config.ClusterProviderDisabled:
return newSingleClusterProvider(m, strategy)
default:
return nil, fmt.Errorf(
"invalid ClusterProviderStrategy '%s', must be 'kubeconfig', 'in-cluster', or 'disabled'",
strategy,
)
}
}
func newKubeConfigClusterProvider(m *Manager) (*kubeConfigClusterProvider, error) {
// Handle in-cluster mode
if m.IsInCluster() {
return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
}
rawConfig, err := m.clientCmdConfig.RawConfig()
factory, err := getProviderFactory(strategy)
if err != nil {
return nil, err
}
allClusterManagers := map[string]*Manager{
rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
}
for name := range rawConfig.Contexts {
if name == rawConfig.CurrentContext {
continue // already initialized this, don't want to set it to nil
}
allClusterManagers[name] = nil
}
return &kubeConfigClusterProvider{
defaultContext: rawConfig.CurrentContext,
managers: allClusterManagers,
}, nil
}
func newSingleClusterProvider(m *Manager, strategy string) (*singleClusterProvider, error) {
if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
}
return &singleClusterProvider{
manager: m,
strategy: strategy,
}, nil
}
func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
contextNames := make([]string, 0, len(k.managers))
for cluster := range k.managers {
contextNames = append(contextNames, cluster)
}
return contextNames, nil
}
func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
return KubeConfigTargetParameterName
}
func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
m, ok := k.managers[context]
if ok && m != nil {
return m, nil
}
baseManager := k.managers[k.defaultContext]
if baseManager.IsInCluster() {
// In cluster mode, so context switching is not applicable
return baseManager, nil
}
m, err := baseManager.newForContext(context)
if err != nil {
return nil, err
}
k.managers[context] = m
return m, nil
}
func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
return k.defaultContext
}
func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
m := k.managers[k.defaultContext]
m.WatchKubeConfig(onKubeConfigChanged)
}
func (k *kubeConfigClusterProvider) Close() {
m := k.managers[k.defaultContext]
m.Close()
}
func (s *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
return []string{""}, nil
}
func (s *singleClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
if target != "" {
return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", s.strategy)
}
return s.manager, nil
}
func (s *singleClusterProvider) GetDefaultTarget() string {
return ""
}
func (s *singleClusterProvider) GetTargetParameterName() string {
return ""
}
func (s *singleClusterProvider) WatchTargets(watch func() error) {
s.manager.WatchKubeConfig(watch)
}
func (s *singleClusterProvider) Close() {
s.manager.Close()
return factory(m, cfg)
}
func (m *Manager) newForContext(context string) (*Manager, error) {

View File

@@ -0,0 +1,109 @@
package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"
// kubeConfigClusterProvider implements ManagerProvider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
	// defaultContext is the kubeconfig's current context; its manager is
	// initialized eagerly and used as the base for derived contexts.
	defaultContext string
	// managers maps context name -> Manager. A nil value means the context is
	// known but its manager has not been created yet (see GetManagerFor).
	managers map[string]*Manager
}

// Compile-time assertion that kubeConfigClusterProvider satisfies ManagerProvider.
var _ ManagerProvider = &kubeConfigClusterProvider{}
// init registers the kubeconfig provider factory so NewManagerProvider can
// resolve the kubeconfig strategy by name.
func init() {
	RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}
// newKubeConfigClusterProvider creates a provider that manages multiple clusters
// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
	// kubeconfig-based context switching cannot work for in-cluster deployments.
	if m.IsInCluster() {
		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
	}

	raw, err := m.clientCmdConfig.RawConfig()
	if err != nil {
		return nil, err
	}

	// Seed an entry for every known context; all start uninitialized (nil)
	// and are created lazily, except the current context, which reuses the
	// manager that was already built for it.
	managers := make(map[string]*Manager, len(raw.Contexts))
	for name := range raw.Contexts {
		managers[name] = nil
	}
	managers[raw.CurrentContext] = m

	return &kubeConfigClusterProvider{
		defaultContext: raw.CurrentContext,
		managers:       managers,
	}, nil
}
// GetTargets returns the names of all contexts known from the kubeconfig.
// Order is unspecified (map iteration order).
func (k *kubeConfigClusterProvider) GetTargets(_ context.Context) ([]string, error) {
	names := make([]string, 0, len(k.managers))
	for name := range k.managers {
		names = append(names, name)
	}
	return names, nil
}
// GetTargetParameterName returns the tool parameter name ("context") used to
// select a kubeconfig context.
func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
	return KubeConfigTargetParameterName
}
// GetManagerFor returns the Manager for the given kubeconfig context, creating
// and caching one on first use. When the base manager is in-cluster, context
// switching does not apply and the base manager is returned unconditionally.
//
// Fix: the second parameter was named `context`, shadowing the imported
// `context` package inside the function body; renamed to contextName.
func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, contextName string) (*Manager, error) {
	// Fast path: a manager for this context was already initialized.
	m, ok := k.managers[contextName]
	if ok && m != nil {
		return m, nil
	}
	baseManager := k.managers[k.defaultContext]
	if baseManager.IsInCluster() {
		// In cluster mode, so context switching is not applicable
		return baseManager, nil
	}
	m, err := baseManager.newForContext(contextName)
	if err != nil {
		return nil, err
	}
	// NOTE(review): k.managers is mutated without synchronization; this
	// assumes single-goroutine access — confirm with callers.
	k.managers[contextName] = m
	return m, nil
}
// GetDefaultTarget returns the kubeconfig's current context name.
func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
	return k.defaultContext
}
// WatchTargets registers a callback to be invoked when the kubeconfig changes,
// delegating to the default context's manager.
func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
	k.managers[k.defaultContext].WatchKubeConfig(onKubeConfigChanged)
}
// Close releases resources held by the default context's manager.
func (k *kubeConfigClusterProvider) Close() {
	k.managers[k.defaultContext].Close()
}

View File

@@ -0,0 +1,47 @@
package kubernetes
import (
"fmt"
"sort"
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
// ProviderFactory creates a new ManagerProvider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error)

// providerFactories maps strategy name -> factory. It is populated by
// RegisterProvider calls from provider init() functions and read afterwards.
var providerFactories = make(map[string]ProviderFactory)
// RegisterProvider registers a provider factory for a given strategy name.
// This should be called from init() functions in provider implementation files.
// Panics if a provider is already registered for the given strategy.
func RegisterProvider(strategy string, factory ProviderFactory) {
	if _, dup := providerFactories[strategy]; dup {
		panic(fmt.Sprintf("provider already registered for strategy '%s'", strategy))
	}
	providerFactories[strategy] = factory
}
// getProviderFactory retrieves a registered provider factory by strategy name.
// Returns an error if no provider is registered for the given strategy.
func getProviderFactory(strategy string) (ProviderFactory, error) {
	if factory, ok := providerFactories[strategy]; ok {
		return factory, nil
	}
	return nil, fmt.Errorf("no provider registered for strategy '%s', available strategies: %v", strategy, GetRegisteredStrategies())
}
// GetRegisteredStrategies returns a sorted list of all registered strategy names.
// This is useful for error messages and debugging.
func GetRegisteredStrategies() []string {
	names := make([]string, 0, len(providerFactories))
	for name := range providerFactories {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}

View File

@@ -0,0 +1,66 @@
package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
// singleClusterProvider implements ManagerProvider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
	// strategy is the ClusterProviderStrategy this provider was created for
	// (in-cluster or disabled); used in error messages.
	strategy string
	// manager is the single Manager served for every request.
	manager *Manager
}

// Compile-time assertion that singleClusterProvider satisfies ManagerProvider.
var _ ManagerProvider = &singleClusterProvider{}
// init registers the single-cluster factory for both strategies that use it:
// in-cluster and disabled.
func init() {
	RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
	RegisterProvider(config.ClusterProviderDisabled, newSingleClusterProvider(config.ClusterProviderDisabled))
}
// newSingleClusterProvider creates a provider that manages a single cluster.
// Validates that the manager is in-cluster when the in-cluster strategy is used.
func newSingleClusterProvider(strategy string) ProviderFactory {
	return func(m *Manager, cfg *config.StaticConfig) (ManagerProvider, error) {
		// The in-cluster strategy only makes sense when actually running in a cluster.
		if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
			return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
		}
		provider := &singleClusterProvider{
			strategy: strategy,
			manager:  m,
		}
		return provider, nil
	}
}
// GetTargets returns a single empty target name, since exactly one cluster
// is managed and it has no name.
func (s *singleClusterProvider) GetTargets(_ context.Context) ([]string, error) {
	targets := []string{""}
	return targets, nil
}
// GetManagerFor returns the single manager. Any non-empty target is an error
// because this provider cannot switch clusters.
func (s *singleClusterProvider) GetManagerFor(_ context.Context, target string) (*Manager, error) {
	if target == "" {
		return s.manager, nil
	}
	return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", s.strategy)
}
// GetDefaultTarget returns the empty string: the single cluster has no
// named target.
func (s *singleClusterProvider) GetDefaultTarget() string {
	return ""
}
// GetTargetParameterName returns the empty string because this provider
// exposes no target-selection parameter.
func (s *singleClusterProvider) GetTargetParameterName() string {
	return ""
}
// WatchTargets registers a callback to be invoked when the kubeconfig changes.
func (s *singleClusterProvider) WatchTargets(onChange func() error) {
	s.manager.WatchKubeConfig(onChange)
}
// Close releases resources held by the underlying manager.
func (s *singleClusterProvider) Close() {
	s.manager.Close()
}