Mirror of https://github.com/containers/kubernetes-mcp-server.git (synced 2025-10-23 01:22:57 +03:00)
refactor(kubernetes): Provider implementations deal with Manager instantiations (#379)
* refactor(kubernetes): Provider implementations deal with Manager instantiations

  Removed the `*Manager` parameter from `ProviderFactory`. Provider implementations should deal with the appropriate (base) Manager instantiation, if needed at all.

  The Manager creation function is divided into two explicit functions:
  - NewKubeconfigManager: to be used when using kubeconfig files
  - NewInClusterManager: to be used inside a cluster

  The new functions contain validations to ensure they are used in the expected places. This ensures that the right manager is used by the provider implementation.

  The fake kubeconfig for the in-cluster Manager is now generated when the Manager is created. This kubeconfig has the "magic" strings (inClusterKubeConfigDefaultContext) that are used by the MCP server and tool mutators.

  Signed-off-by: Marc Nuri <marc@marcnuri.com>

* review: Provider implementation refactor

  Signed-off-by: Marc Nuri <marc@marcnuri.com>

---------

Signed-off-by: Marc Nuri <marc@marcnuri.com>
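For orientation before the diff: a minimal sketch of how a caller might choose between the two new constructors. `NewKubeconfigManager`, `NewInClusterManager`, `IsInCluster`, `Close`, and `config.StaticConfig` all appear in this commit; the `buildManager` helper, the `main` wiring, and the `pkg/kubernetes` import path are assumptions for illustration only.

```go
package main

import (
	"log"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	// Assumed import path for the package touched by this commit.
	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// buildManager is a hypothetical helper showing the selection logic the
// providers in this commit perform themselves.
func buildManager(cfg *config.StaticConfig) (*kubernetes.Manager, error) {
	if kubernetes.IsInCluster(cfg) {
		// Inside a cluster: the manager synthesizes a fake kubeconfig whose
		// current context is the in-cluster "magic" context name, so code
		// that needs a clientcmd.ClientConfig keeps working.
		return kubernetes.NewInClusterManager(cfg)
	}
	// Outside a cluster: load kubeconfig files; "" selects the kubeconfig's
	// current context.
	return kubernetes.NewKubeconfigManager(cfg, "")
}

func main() {
	m, err := buildManager(&config.StaticConfig{})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()
}
```

Each constructor validates its own precondition (a kubeconfig manager refuses to run in-cluster, and vice versa), so a provider can no longer be handed a base Manager of the wrong kind.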
@@ -38,9 +38,6 @@ func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
 // ConfigurationContextsDefault returns the current context name
 // TODO: Should be moved to the Provider level ?
 func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
-	if k.manager.inCluster {
-		return inClusterKubeConfigDefaultContext, nil
-	}
 	cfg, err := k.manager.clientCmdConfig.RawConfig()
 	if err != nil {
 		return "", err
@@ -51,9 +48,6 @@ func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
 // ConfigurationContextsList returns the list of available context names
 // TODO: Should be moved to the Provider level ?
 func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
-	if k.manager.inCluster {
-		return map[string]string{inClusterKubeConfigDefaultContext: ""}, nil
-	}
 	cfg, err := k.manager.clientCmdConfig.RawConfig()
 	if err != nil {
 		return nil, err
@@ -77,21 +71,7 @@ func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
 func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
 	var cfg clientcmdapi.Config
 	var err error
-	if k.manager.inCluster {
-		cfg = *clientcmdapi.NewConfig()
-		cfg.Clusters["cluster"] = &clientcmdapi.Cluster{
-			Server:                k.manager.cfg.Host,
-			InsecureSkipTLSVerify: k.manager.cfg.Insecure,
-		}
-		cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{
-			Token: k.manager.cfg.BearerToken,
-		}
-		cfg.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
-			Cluster:  "cluster",
-			AuthInfo: "user",
-		}
-		cfg.CurrentContext = inClusterKubeConfigDefaultContext
-	} else if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
+	if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
 		return nil, err
 	}
 	if minify {
@@ -47,7 +47,7 @@ users:
 	kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
 	`)))
 	s.Run("without authorization header returns original manager", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
 
@@ -58,7 +58,7 @@ users:
 	})
 
 	s.Run("with invalid authorization header returns original manager", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
 
@@ -70,7 +70,7 @@ users:
 	})
 
 	s.Run("with valid bearer token creates derived manager with correct configuration", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
 
@@ -138,7 +138,7 @@ users:
 	`)))
 
 	s.Run("with no authorization header returns oauth token required error", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
 
@@ -149,7 +149,7 @@ users:
 	})
 
 	s.Run("with invalid authorization header returns oauth token required error", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
 
@@ -161,7 +161,7 @@ users:
 	})
 
 	s.Run("with valid bearer token creates derived manager", func() {
-		testManager, err := NewManager(testStaticConfig, "")
+		testManager, err := NewKubeconfigManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -25,7 +25,6 @@ import (
 type Manager struct {
 	cfg                     *rest.Config
 	clientCmdConfig         clientcmd.ClientConfig
-	inCluster               bool
 	discoveryClient         discovery.CachedDiscoveryInterface
 	accessControlClientSet  *AccessControlClientset
 	accessControlRESTMapper *AccessControlRESTMapper
@@ -38,33 +37,77 @@ type Manager struct {
 var _ helm.Kubernetes = (*Manager)(nil)
 var _ Openshift = (*Manager)(nil)
 
-func NewManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) {
-	k8s := &Manager{
-		staticConfig: config,
+var (
+	ErrorKubeconfigInClusterNotAllowed = errors.New("kubeconfig manager cannot be used in in-cluster deployments")
+	ErrorInClusterNotInCluster         = errors.New("in-cluster manager cannot be used outside of a cluster")
+)
+
+func NewKubeconfigManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) {
+	if IsInCluster(config) {
+		return nil, ErrorKubeconfigInClusterNotAllowed
 	}
 
 	pathOptions := clientcmd.NewDefaultPathOptions()
-	if k8s.staticConfig.KubeConfig != "" {
-		pathOptions.LoadingRules.ExplicitPath = k8s.staticConfig.KubeConfig
+	if config.KubeConfig != "" {
+		pathOptions.LoadingRules.ExplicitPath = config.KubeConfig
 	}
-	k8s.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+	clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
 		pathOptions.LoadingRules,
 		&clientcmd.ConfigOverrides{
 			ClusterInfo:    clientcmdapi.Cluster{Server: ""},
 			CurrentContext: kubeconfigContext,
 		})
-	var err error
-	if IsInCluster(k8s.staticConfig) {
-		k8s.cfg, err = InClusterConfig()
-		k8s.inCluster = true
-	} else {
-		k8s.cfg, err = k8s.clientCmdConfig.ClientConfig()
-	}
-	if err != nil || k8s.cfg == nil {
-		return nil, fmt.Errorf("failed to create kubernetes rest config: %v", err)
+
+	restConfig, err := clientCmdConfig.ClientConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create kubernetes rest config from kubeconfig: %v", err)
 	}
+
+	return newManager(config, restConfig, clientCmdConfig)
+}
+
+func NewInClusterManager(config *config.StaticConfig) (*Manager, error) {
+	if config.KubeConfig != "" {
+		return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster deployments: %v", config.KubeConfig, ErrorKubeconfigInClusterNotAllowed)
+	}
+
+	if !IsInCluster(config) {
+		return nil, ErrorInClusterNotInCluster
+	}
+
+	restConfig, err := InClusterConfig()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create in-cluster kubernetes rest config: %v", err)
+	}
+
+	// Create a dummy kubeconfig clientcmdapi.Config for in-cluster config to be used in places where clientcmd.ClientConfig is required
+	clientCmdConfig := clientcmdapi.NewConfig()
+	clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{
+		Server:                restConfig.Host,
+		InsecureSkipTLSVerify: restConfig.Insecure,
+	}
+	clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{
+		Token: restConfig.BearerToken,
+	}
+	clientCmdConfig.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
+		Cluster:  "cluster",
+		AuthInfo: "user",
+	}
+	clientCmdConfig.CurrentContext = inClusterKubeConfigDefaultContext
+
+	return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil))
+}
+
+func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) {
+	k8s := &Manager{
+		staticConfig:    config,
+		cfg:             restConfig,
+		clientCmdConfig: clientCmdConfig,
+	}
 	if k8s.cfg.UserAgent == "" {
 		k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
 	}
+	var err error
 	// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
 	//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
 	//	return &impersonateRoundTripper{original}
@@ -229,7 +272,6 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
 	derived := &Kubernetes{
 		manager: &Manager{
 			clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
-			inCluster:       m.inCluster,
 			cfg:             derivedCfg,
 			staticConfig:    m.staticConfig,
 		},
@@ -34,126 +34,165 @@ func (s *ManagerTestSuite) TearDownTest() {
 	}
 }
 
-func (s *ManagerTestSuite) TestNewManagerInCluster() {
-	InClusterConfig = func() (*rest.Config, error) {
-		return &rest.Config{}, nil
-	}
-	s.Run("with default StaticConfig (empty kubeconfig)", func() {
-		manager, err := NewManager(&config.StaticConfig{}, "")
-		s.Require().NoError(err)
-		s.Require().NotNil(manager)
-		s.Run("behaves as in cluster", func() {
-			s.True(manager.inCluster, "expected in cluster, got not in cluster")
-		})
-		s.Run("sets default user-agent", func() {
-			s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
-		})
-	})
-	s.Run("with explicit kubeconfig", func() {
-		manager, err := NewManager(&config.StaticConfig{
-			KubeConfig: s.mockServer.KubeconfigFile(s.T()),
-		}, "")
-		s.Require().NoError(err)
-		s.Require().NotNil(manager)
-		s.Run("behaves as NOT in cluster", func() {
-			s.False(manager.inCluster, "expected not in cluster, got in cluster")
-		})
-	})
+func (s *ManagerTestSuite) TestNewInClusterManager() {
+	s.Run("In cluster", func() {
+		InClusterConfig = func() (*rest.Config, error) {
+			return &rest.Config{}, nil
+		}
+		s.Run("with default StaticConfig (empty kubeconfig)", func() {
+			manager, err := NewInClusterManager(&config.StaticConfig{})
+			s.Require().NoError(err)
+			s.Require().NotNil(manager)
+			s.Run("behaves as in cluster", func() {
+				rawConfig, err := manager.clientCmdConfig.RawConfig()
+				s.Require().NoError(err)
+				s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'")
+			})
+			s.Run("sets default user-agent", func() {
+				s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+			})
+		})
+		s.Run("with explicit kubeconfig", func() {
+			manager, err := NewInClusterManager(&config.StaticConfig{
+				KubeConfig: s.mockServer.KubeconfigFile(s.T()),
+			})
+			s.Run("returns error", func() {
+				s.Error(err)
+				s.Nil(manager)
+				s.Regexp("kubeconfig file .+ cannot be used with the in-cluster deployments", err.Error())
+			})
+		})
+	})
+	s.Run("Out of cluster", func() {
+		InClusterConfig = func() (*rest.Config, error) {
+			return nil, rest.ErrNotInCluster
+		}
+		manager, err := NewInClusterManager(&config.StaticConfig{})
+		s.Run("returns error", func() {
+			s.Error(err)
+			s.Nil(manager)
+			s.ErrorIs(err, ErrorInClusterNotInCluster)
+			s.ErrorContains(err, "in-cluster manager cannot be used outside of a cluster")
+		})
+	})
 }
 
-func (s *ManagerTestSuite) TestNewManagerLocal() {
-	InClusterConfig = func() (*rest.Config, error) {
-		return nil, rest.ErrNotInCluster
-	}
-	s.Run("with valid kubeconfig in env", func() {
-		kubeconfig := s.mockServer.KubeconfigFile(s.T())
-		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig))
-		manager, err := NewManager(&config.StaticConfig{}, "")
-		s.Require().NoError(err)
-		s.Require().NotNil(manager)
-		s.Run("behaves as NOT in cluster", func() {
-			s.False(manager.inCluster, "expected not in cluster, got in cluster")
-		})
-		s.Run("loads correct config", func() {
-			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
-		})
-		s.Run("sets default user-agent", func() {
-			s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
-		})
-		s.Run("rest config host points to mock server", func() {
-			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
-		})
-	})
-	s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
-		kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
-		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
-		kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T())
-		manager, err := NewManager(&config.StaticConfig{
-			KubeConfig: kubeconfigExplicit,
-		}, "")
-		s.Require().NoError(err)
-		s.Require().NotNil(manager)
-		s.Run("behaves as NOT in cluster", func() {
-			s.False(manager.inCluster, "expected not in cluster, got in cluster")
-		})
-		s.Run("loads correct config (explicit)", func() {
-			s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env")
-			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
-		})
-		s.Run("rest config host points to mock server", func() {
-			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
-		})
-	})
-	s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
-		kubeconfig := s.mockServer.Kubeconfig()
-		kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext()
-		kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server"
-		kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster()
-		kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this
-		kubeconfig.CurrentContext = "not-the-mock-server"
-		kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig)
-		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile))
-		manager, err := NewManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves
-		s.Require().NoError(err)
-		s.Require().NotNil(manager)
-		s.Run("behaves as NOT in cluster", func() {
-			s.False(manager.inCluster, "expected not in cluster, got in cluster")
-		})
-		s.Run("loads correct config", func() {
-			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
-		})
-		s.Run("rest config host points to mock server", func() {
-			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
-		})
-	})
-	s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
-		kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
-		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
-		manager, err := NewManager(&config.StaticConfig{}, "i-do-not-exist")
-		s.Run("returns error", func() {
-			s.Error(err)
-			s.Nil(manager)
-			s.ErrorContains(err, `failed to create kubernetes rest config: context "i-do-not-exist" does not exist`)
-		})
-	})
-	s.Run("with invalid path kubeconfig in env", func() {
-		s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist"))
-		manager, err := NewManager(&config.StaticConfig{}, "")
-		s.Run("returns error", func() {
-			s.Error(err)
-			s.Nil(manager)
-			s.ErrorContains(err, "failed to create kubernetes rest config")
-		})
-	})
-	s.Run("with empty kubeconfig in env", func() {
-		kubeconfigPath := filepath.Join(s.T().TempDir(), "config")
-		s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644))
-		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath))
-		manager, err := NewManager(&config.StaticConfig{}, "")
-		s.Run("returns error", func() {
-			s.Error(err)
-			s.Nil(manager)
-			s.ErrorContains(err, "no configuration has been provided")
-		})
-	})
+func (s *ManagerTestSuite) TestNewKubeconfigManager() {
+	s.Run("Out of cluster", func() {
+		InClusterConfig = func() (*rest.Config, error) {
+			return nil, rest.ErrNotInCluster
+		}
+		s.Run("with valid kubeconfig in env", func() {
+			kubeconfig := s.mockServer.KubeconfigFile(s.T())
+			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig))
+			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+			s.Require().NoError(err)
+			s.Require().NotNil(manager)
+			s.Run("behaves as NOT in cluster", func() {
+				rawConfig, err := manager.clientCmdConfig.RawConfig()
+				s.Require().NoError(err)
+				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+				s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
+			})
+			s.Run("loads correct config", func() {
+				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
+			})
+			s.Run("sets default user-agent", func() {
+				s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+			})
+			s.Run("rest config host points to mock server", func() {
+				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+			})
+		})
+		s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
+			kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
+			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
+			kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T())
+			manager, err := NewKubeconfigManager(&config.StaticConfig{
+				KubeConfig: kubeconfigExplicit,
+			}, "")
+			s.Require().NoError(err)
+			s.Require().NotNil(manager)
+			s.Run("behaves as NOT in cluster", func() {
+				rawConfig, err := manager.clientCmdConfig.RawConfig()
+				s.Require().NoError(err)
+				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+				s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
+			})
+			s.Run("loads correct config (explicit)", func() {
+				s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env")
+				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
+			})
+			s.Run("rest config host points to mock server", func() {
+				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+			})
+		})
+		s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
+			kubeconfig := s.mockServer.Kubeconfig()
+			kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext()
+			kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server"
+			kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster()
+			kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this
+			kubeconfig.CurrentContext = "not-the-mock-server"
+			kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig)
+			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile))
+			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves
+			s.Require().NoError(err)
+			s.Require().NotNil(manager)
+			s.Run("behaves as NOT in cluster", func() {
+				rawConfig, err := manager.clientCmdConfig.RawConfig()
+				s.Require().NoError(err)
+				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
+				s.Equal("not-the-mock-server", rawConfig.CurrentContext, "expected current context to be 'not-the-mock-server' as in explicit context")
+			})
+			s.Run("loads correct config", func() {
+				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
+			})
+			s.Run("rest config host points to mock server", func() {
+				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+			})
+		})
+		s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
+			kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
+			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
+			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "i-do-not-exist")
+			s.Run("returns error", func() {
+				s.Error(err)
+				s.Nil(manager)
+				s.ErrorContains(err, `failed to create kubernetes rest config from kubeconfig: context "i-do-not-exist" does not exist`)
+			})
+		})
+		s.Run("with invalid path kubeconfig in env", func() {
+			s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist"))
+			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+			s.Run("returns error", func() {
+				s.Error(err)
+				s.Nil(manager)
+				s.ErrorContains(err, "failed to create kubernetes rest config")
+			})
+		})
+		s.Run("with empty kubeconfig in env", func() {
+			kubeconfigPath := filepath.Join(s.T().TempDir(), "config")
+			s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644))
+			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath))
+			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+			s.Run("returns error", func() {
+				s.Error(err)
+				s.Nil(manager)
+				s.ErrorContains(err, "no configuration has been provided")
+			})
+		})
+	})
+	s.Run("In cluster", func() {
+		InClusterConfig = func() (*rest.Config, error) {
+			return &rest.Config{}, nil
+		}
+		manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
+		s.Run("returns error", func() {
+			s.Error(err)
+			s.Nil(manager)
+			s.ErrorIs(err, ErrorKubeconfigInClusterNotAllowed)
+			s.ErrorContains(err, "kubeconfig manager cannot be used in in-cluster deployments")
+		})
+	})
 }
@@ -30,12 +30,7 @@ func NewProvider(cfg *config.StaticConfig) (Provider, error) {
 		return nil, err
 	}
 
-	m, err := NewManager(cfg, "")
-	if err != nil {
-		return nil, err
-	}
-
-	return factory(m, cfg)
+	return factory(cfg)
 }
 
 func resolveStrategy(cfg *config.StaticConfig) string {
@@ -2,6 +2,7 @@ package kubernetes
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	"github.com/containers/kubernetes-mcp-server/pkg/config"
@@ -27,11 +28,16 @@ func init() {
 }
 
 // newKubeConfigClusterProvider creates a provider that manages multiple clusters
-// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
-func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) {
-	// Handle in-cluster mode
-	if IsInCluster(cfg) {
-		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
+// via kubeconfig contexts.
+// Internally, it leverages a KubeconfigManager for each context, initializing them
+// lazily when requested.
+func newKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) {
+	m, err := NewKubeconfigManager(cfg, "")
+	if err != nil {
+		if errors.Is(err, ErrorKubeconfigInClusterNotAllowed) {
+			return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments: %v", err)
+		}
+		return nil, err
 	}
 
 	rawConfig, err := m.clientCmdConfig.RawConfig()
@@ -65,7 +71,7 @@ func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager,
 
 	baseManager := p.managers[p.defaultContext]
 
-	m, err := NewManager(baseManager.staticConfig, context)
+	m, err := NewKubeconfigManager(baseManager.staticConfig, context)
 	if err != nil {
 		return nil, err
 	}
@@ -10,7 +10,7 @@ import (
 // ProviderFactory creates a new Provider instance for a given strategy.
 // Implementations should validate that the Manager is compatible with their strategy
 // (e.g., kubeconfig provider should reject in-cluster managers).
-type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (Provider, error)
+type ProviderFactory func(cfg *config.StaticConfig) (Provider, error)
 
 var providerFactories = make(map[string]ProviderFactory)
 
@@ -13,18 +13,18 @@ type ProviderRegistryTestSuite struct {
 
 func (s *ProviderRegistryTestSuite) TestRegisterProvider() {
 	s.Run("With no pre-existing provider, registers the provider", func() {
-		RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+		RegisterProvider("test-strategy", func(cfg *config.StaticConfig) (Provider, error) {
 			return nil, nil
 		})
 		_, exists := providerFactories["test-strategy"]
 		s.True(exists, "Provider should be registered")
 	})
 	s.Run("With pre-existing provider, panics", func() {
-		RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+		RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
 			return nil, nil
 		})
 		s.Panics(func() {
-			RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+			RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
 				return nil, nil
 			})
 		}, "Registering a provider with an existing strategy should panic")

@@ -39,10 +39,10 @@ func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() {
 	})
 	s.Run("With multiple registered providers, returns sorted list", func() {
 		providerFactories = make(map[string]ProviderFactory)
-		RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+		RegisterProvider("foo-strategy", func(cfg *config.StaticConfig) (Provider, error) {
 			return nil, nil
 		})
-		RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+		RegisterProvider("bar-strategy", func(cfg *config.StaticConfig) (Provider, error) {
 			return nil, nil
 		})
 		strategies := GetRegisteredStrategies()
@@ -2,6 +2,7 @@ package kubernetes
 
 import (
 	"context"
+	"errors"
 	"fmt"
 
 	"github.com/containers/kubernetes-mcp-server/pkg/config"
@@ -24,14 +25,26 @@ func init() {
 }
 
 // newSingleClusterProvider creates a provider that manages a single cluster.
-// Validates that the manager is in-cluster when the in-cluster strategy is used.
+// When used within a cluster or with an 'in-cluster' strategy, it uses an InClusterManager.
+// Otherwise, it uses a KubeconfigManager.
 func newSingleClusterProvider(strategy string) ProviderFactory {
-	return func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
+	return func(cfg *config.StaticConfig) (Provider, error) {
 		if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster {
 			return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig)
 		}
-		if strategy == config.ClusterProviderInCluster && !IsInCluster(cfg) {
-			return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
+
+		var m *Manager
+		var err error
+		if strategy == config.ClusterProviderInCluster || IsInCluster(cfg) {
+			m, err = NewInClusterManager(cfg)
+		} else {
+			m, err = NewKubeconfigManager(cfg, "")
 		}
+		if err != nil {
+			if errors.Is(err, ErrorInClusterNotInCluster) {
+				return nil, fmt.Errorf("server must be deployed in cluster for the %s ClusterProviderStrategy: %v", strategy, err)
+			}
+			return nil, err
+		}
 
 		return &singleClusterProvider{
@@ -126,6 +126,15 @@ func (s *ProviderTestSuite) TestNewProviderLocal() {
 		s.NotNil(provider, "Expected provider instance")
 		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
 	})
+	s.Run("With cluster_provider_strategy=disabled, returns single-cluster provider", func() {
+		cfg := test.Must(config.ReadToml([]byte(`
+			cluster_provider_strategy = "disabled"
+		`)))
+		provider, err := NewProvider(cfg)
+		s.Require().NoError(err, "Expected no error for disabled strategy")
+		s.NotNil(provider, "Expected provider instance")
+		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
+	})
 	s.Run("With cluster_provider_strategy=in-cluster, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
 			cluster_provider_strategy = "in-cluster"

@@ -145,7 +154,7 @@ func (s *ProviderTestSuite) TestNewProviderLocal() {
 		s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error())
 		s.Nilf(provider, "Expected no provider instance, got %v", provider)
 	})
-	s.Run("With configured cluster_provider_strategy=non-existent, returns error", func() {
+	s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
 			cluster_provider_strategy = "i-do-not-exist"
 		`)))