Mirror of https://github.com/openshift/openshift-mcp-server.git, synced 2025-10-17 14:27:48 +03:00
* feat: add cluster provider for kubeconfig
* feat: move server to use ClusterProvider interface
* feat: authentication middleware works with cluster provider
* fix: unit tests work after cluster provider changes
* feat: add tool mutator to add cluster parameter
* test: handle cluster parameter
* fix: handle lazy init correctly
* refactor: move to using multi-strategy ManagerProvider
* feat: add contexts_list tool
* refactor: make tool mutator generic between cluster/context naming
* feat: introduce tool filter
* refactor: use new ManagerProvider/mutator/filter within mcp server
* fix(test): tests expect context parameter in tool defs
* feat: auth handles multi-cluster case correctly
* fix: small changes from local testing
* chore: fix enum test
* review: Multi Cluster support (#1)
  * nit: rename contexts_list to configuration_contexts_list. Besides the conventional naming, it helps LLMs understand the context of the tool by providing a certain level of hierarchy.
  * fix(mcp): ToolMutator doesn't rely on magic strings
  * refactor(api): don't expose ManagerProvider to toolsets
  * test(mcp): configuration_contexts_list basic tests
  * test(toolsets): revert edge-case test; this test should not be touched
  * test(toolsets): add specific metadata tests for multi-cluster
  * fix(mcp): ToolFilter doesn't rely on magic strings (partially)
  * test(api): IsClusterAware and IsTargetListProvider default values
  * test(mcp): revert unneeded changes in mcp_tools_test.go
* fix: always include configuration_contexts_list if contexts > 1
* feat: include server urls in configuration_contexts_list

Signed-off-by: Calum Murray <cmurray@redhat.com>
Signed-off-by: Marc Nuri <marc@marcnuri.com>
Co-authored-by: Marc Nuri <marc@marcnuri.com>
232 lines
5.5 KiB
Go
package kubernetes

import (
	"context"
	"fmt"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
)
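
// KubeConfigTargetParameterName is the name of the parameter used to select
// which kubeconfig context a request should target.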
const (
	KubeConfigTargetParameterName = "context"
)
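
// ManagerProvider resolves the Manager to use for a given target, where a
// target is either a kubeconfig context or the single in-cluster configuration.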
type ManagerProvider interface {
	GetTargets(ctx context.Context) ([]string, error)
	GetManagerFor(ctx context.Context, target string) (*Manager, error)
	GetDefaultTarget() string
	GetTargetParameterName() string
	WatchTargets(func() error)
	Close()
}
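
// kubeConfigClusterProvider maintains one Manager per kubeconfig context,
// initializing each of them lazily on first use.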
type kubeConfigClusterProvider struct {
	defaultContext string
	managers       map[string]*Manager
}

var _ ManagerProvider = &kubeConfigClusterProvider{}
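
// inClusterProvider wraps the single Manager available when running inside a
// cluster; it exposes exactly one, unnamed target.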
type inClusterProvider struct {
	manager *Manager
}

var _ ManagerProvider = &inClusterProvider{}
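
// NewManagerProvider builds the ManagerProvider matching the configured (or
// auto-detected) cluster provider strategy.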
func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
	m, err := NewManager(cfg)
	if err != nil {
		return nil, err
	}

	switch resolveStrategy(cfg, m) {
	case config.ClusterProviderKubeConfig:
		return newKubeConfigClusterProvider(m)
	case config.ClusterProviderInCluster:
		return newInClusterProvider(m)
	default:
		return nil, fmt.Errorf("invalid ClusterProviderStrategy '%s', must be 'kubeconfig' or 'in-cluster'", cfg.ClusterProviderStrategy)
	}
}
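
// newKubeConfigClusterProvider seeds the provider with the already-initialized
// Manager for the current kubeconfig context and registers every other context
// for lazy initialization.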
func newKubeConfigClusterProvider(m *Manager) (*kubeConfigClusterProvider, error) {
	// Handle in-cluster mode
	if m.IsInCluster() {
		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
	}

	rawConfig, err := m.clientCmdConfig.RawConfig()
	if err != nil {
		return nil, err
	}

	allClusterManagers := map[string]*Manager{
		rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
	}

	for name := range rawConfig.Contexts {
		if name == rawConfig.CurrentContext {
			continue // already initialized this, don't want to set it to nil
		}

		allClusterManagers[name] = nil
	}

	return &kubeConfigClusterProvider{
		defaultContext: rawConfig.CurrentContext,
		managers:       allClusterManagers,
	}, nil
}
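
// newInClusterProvider wraps the already-initialized in-cluster Manager.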
func newInClusterProvider(m *Manager) (*inClusterProvider, error) {
	return &inClusterProvider{
		manager: m,
	}, nil
}

func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
	contextNames := make([]string, 0, len(k.managers))
	for cluster := range k.managers {
		contextNames = append(contextNames, cluster)
	}

	return contextNames, nil
}

func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
	return KubeConfigTargetParameterName
}
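
// GetManagerFor returns the Manager for the requested kubeconfig context,
// creating and caching it on first use.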
func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
	m, ok := k.managers[context]
	if ok && m != nil {
		return m, nil
	}

	baseManager := k.managers[k.defaultContext]

	if baseManager.IsInCluster() {
		// In cluster mode, so context switching is not applicable
		return baseManager, nil
	}

	m, err := baseManager.newForContext(context)
	if err != nil {
		return nil, err
	}

	k.managers[context] = m

	return m, nil
}

func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
	return k.defaultContext
}

func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
	m := k.managers[k.defaultContext]

	m.WatchKubeConfig(onKubeConfigChanged)
}

func (k *kubeConfigClusterProvider) Close() {
	m := k.managers[k.defaultContext]

	m.Close()
}

func (i *inClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
	return []string{""}, nil
}

func (i *inClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
	if target != "" {
		return nil, fmt.Errorf("unable to get manager for other context/cluster with in-cluster strategy")
	}

	return i.manager, nil
}

func (i *inClusterProvider) GetDefaultTarget() string {
	return ""
}

func (i *inClusterProvider) GetTargetParameterName() string {
	return ""
}

func (i *inClusterProvider) WatchTargets(watch func() error) {
	i.manager.WatchKubeConfig(watch)
}

func (i *inClusterProvider) Close() {
	i.manager.Close()
}
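
// newForContext builds a new Manager bound to the given kubeconfig context,
// reusing the parent's static configuration and initializing its own clients.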
func (m *Manager) newForContext(context string) (*Manager, error) {
	pathOptions := clientcmd.NewDefaultPathOptions()
	if m.staticConfig.KubeConfig != "" {
		pathOptions.LoadingRules.ExplicitPath = m.staticConfig.KubeConfig
	}

	clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		pathOptions.LoadingRules,
		&clientcmd.ConfigOverrides{
			CurrentContext: context,
		},
	)

	cfg, err := clientCmdConfig.ClientConfig()
	if err != nil {
		return nil, err
	}

	if cfg.UserAgent == "" {
		cfg.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	manager := &Manager{
		cfg:             cfg,
		clientCmdConfig: clientCmdConfig,
		staticConfig:    m.staticConfig,
	}

	// Initialize clients for new manager
	manager.accessControlClientSet, err = NewAccessControlClientset(manager.cfg, manager.staticConfig)
	if err != nil {
		return nil, err
	}

	manager.discoveryClient = memory.NewMemCacheClient(manager.accessControlClientSet.DiscoveryClient())

	manager.accessControlRESTMapper = NewAccessControlRESTMapper(
		restmapper.NewDeferredDiscoveryRESTMapper(manager.discoveryClient),
		manager.staticConfig,
	)

	manager.dynamicClient, err = dynamic.NewForConfig(manager.cfg)
	if err != nil {
		return nil, err
	}

	return manager, nil
}
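
// resolveStrategy honors an explicitly configured strategy, then falls back to
// in-cluster detection, and finally to kubeconfig.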
func resolveStrategy(cfg *config.StaticConfig, m *Manager) string {
	if cfg.ClusterProviderStrategy != "" {
		return cfg.ClusterProviderStrategy
	}

	if m.IsInCluster() {
		return config.ClusterProviderInCluster
	}

	return config.ClusterProviderKubeConfig
}