refactor(kubernetes): streamline provider configuration and in-cluster detection (#378)

* refactor(kubernetes): streamline provider configuration and in-cluster detection

- Removed the IsInCluster method from Manager and replaced it with a package-level function scoped to the runtime environment (see the sketch after this list).
  As a method, the implementation was not correct.
  Also removed the GetAPIServerHost method from Manager, which is no longer used.
- **Temporarily** added an `inCluster` field to the Manager struct. It should eventually be removed, since it doesn't make sense for a Manager to be in-cluster or out-of-cluster in the multi-cluster scenario.
- Made provider resolution (resolveStrategy) clearer and added complete test coverage for all of its scenarios.
- Added additional test coverage for the provider and the manager.
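
For reference, the in-cluster check is now a package-level function keyed off the static configuration rather than Manager state. A condensed sketch of the function as introduced in the diff below:

```go
// IsInCluster reports whether the server should behave as an in-cluster
// deployment. An explicitly configured kubeconfig always forces
// out-of-cluster behavior, even when the process runs inside a Pod.
func IsInCluster(cfg *config.StaticConfig) bool {
	if cfg != nil && cfg.KubeConfig != "" {
		return false
	}
	restConfig, err := InClusterConfig()
	return err == nil && restConfig != nil
}
```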

Signed-off-by: Marc Nuri <marc@marcnuri.com>

* refactor(kubernetes): update NewManager to accept kubeconfig context and simplify manager creation

- Removed the Manager.newForContext(context string) method; callers now pass the kubeconfig context directly to NewManager (see the sketch below).
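
A minimal sketch of the new call pattern, condensed from the kubeconfig provider changes in the diff below (error handling as in the source):

```go
// Before: contexts were switched through a dedicated helper:
//   m, err := baseManager.newForContext(contextName)
// After: the target kubeconfig context is passed straight to NewManager,
// which applies it as a clientcmd CurrentContext override.
m, err := NewManager(baseManager.staticConfig, contextName)
if err != nil {
	return nil, err
}
```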

Signed-off-by: Marc Nuri <marc@marcnuri.com>

---------

Signed-off-by: Marc Nuri <marc@marcnuri.com>
Author: Marc Nuri
Date: 2025-10-16 21:35:21 +02:00 (committed by GitHub)
Parent: b66719ed8e
Commit: 9da29f4505
11 changed files with 306 additions and 302 deletions

internal/test/env.go (new file)

@@ -0,0 +1,15 @@
package test

import (
	"os"
	"strings"
)

func RestoreEnv(originalEnv []string) {
	os.Clearenv()
	for _, env := range originalEnv {
		if key, value, found := strings.Cut(env, "="); found {
			_ = os.Setenv(key, value)
		}
	}
}
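
A typical use of this helper, mirroring the manager test suite below: snapshot os.Environ() in setup, mutate the environment freely in the test, and restore the snapshot on cleanup (the test name here is hypothetical):

```go
func TestWithScratchEnv(t *testing.T) {
	originalEnv := os.Environ()
	// Whatever the test changes is undone by RestoreEnv on cleanup.
	t.Cleanup(func() { test.RestoreEnv(originalEnv) })

	_ = os.Setenv("KUBECONFIG", "/tmp/config")
}
```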


@@ -1,9 +1,9 @@
 package kubernetes
 
 import (
+	"github.com/containers/kubernetes-mcp-server/pkg/config"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/tools/clientcmd/api/latest"
 )
@@ -22,29 +22,13 @@ var InClusterConfig = func() (*rest.Config, error) {
 	return inClusterConfig, err
 }
 
-// resolveKubernetesConfigurations resolves the required kubernetes configurations and sets them in the Kubernetes struct
-func resolveKubernetesConfigurations(kubernetes *Manager) error {
-	// Always set clientCmdConfig
-	pathOptions := clientcmd.NewDefaultPathOptions()
-	if kubernetes.staticConfig.KubeConfig != "" {
-		pathOptions.LoadingRules.ExplicitPath = kubernetes.staticConfig.KubeConfig
+func IsInCluster(cfg *config.StaticConfig) bool {
+	// Even if running in-cluster, if a kubeconfig is provided, we consider it as out-of-cluster
+	if cfg != nil && cfg.KubeConfig != "" {
+		return false
 	}
-	kubernetes.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		pathOptions.LoadingRules,
-		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}})
-	var err error
-	if kubernetes.IsInCluster() {
-		kubernetes.cfg, err = InClusterConfig()
-		if err == nil && kubernetes.cfg != nil {
-			return nil
-		}
-	}
-	// Out of cluster
-	kubernetes.cfg, err = kubernetes.clientCmdConfig.ClientConfig()
-	if kubernetes.cfg != nil && kubernetes.cfg.UserAgent == "" {
-		kubernetes.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
-	}
-	return err
+	restConfig, err := InClusterConfig()
+	return err == nil && restConfig != nil
 }
 
 func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
@@ -54,7 +38,7 @@ func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
 // ConfigurationContextsDefault returns the current context name
 // TODO: Should be moved to the Provider level ?
 func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
-	if k.manager.IsInCluster() {
+	if k.manager.inCluster {
 		return inClusterKubeConfigDefaultContext, nil
 	}
 	cfg, err := k.manager.clientCmdConfig.RawConfig()
@@ -67,7 +51,7 @@ func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
 // ConfigurationContextsList returns the list of available context names
 // TODO: Should be moved to the Provider level ?
 func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
-	if k.manager.IsInCluster() {
+	if k.manager.inCluster {
 		return map[string]string{inClusterKubeConfigDefaultContext: ""}, nil
 	}
 	cfg, err := k.manager.clientCmdConfig.RawConfig()
@@ -93,7 +77,7 @@ func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
 func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
 	var cfg clientcmdapi.Config
 	var err error
-	if k.manager.IsInCluster() {
+	if k.manager.inCluster {
 		cfg = *clientcmdapi.NewConfig()
 		cfg.Clusters["cluster"] = &clientcmdapi.Cluster{
 			Server: k.manager.cfg.Host,


@@ -1,155 +0,0 @@
package kubernetes

import (
	"errors"
	"os"
	"path"
	"runtime"
	"strings"
	"testing"

	"k8s.io/client-go/rest"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

func TestKubernetes_IsInCluster(t *testing.T) {
	t.Run("with explicit kubeconfig", func(t *testing.T) {
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "kubeconfig",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
	t.Run("with empty kubeconfig and in cluster", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return &rest.Config{}, nil
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if !m.IsInCluster() {
			t.Errorf("expected in cluster, got not in cluster")
		}
	})
	t.Run("with empty kubeconfig and not in cluster (empty)", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return nil, nil
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
	t.Run("with empty kubeconfig and not in cluster (error)", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return nil, errors.New("error")
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
}

func TestKubernetes_ResolveKubernetesConfigurations_Explicit(t *testing.T) {
	t.Run("with missing file", func(t *testing.T) {
		if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
			t.Skip("Skipping test on non-linux platforms")
		}
		tempDir := t.TempDir()
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: path.Join(tempDir, "config"),
		}}
		err := resolveKubernetesConfigurations(&m)
		if err == nil {
			t.Errorf("expected error, got nil")
		}
		if !errors.Is(err, os.ErrNotExist) {
			t.Errorf("expected file not found error, got %v", err)
		}
		if !strings.HasSuffix(err.Error(), ": no such file or directory") {
			t.Errorf("expected file not found error, got %v", err)
		}
	})
	t.Run("with empty file", func(t *testing.T) {
		tempDir := t.TempDir()
		kubeconfigPath := path.Join(tempDir, "config")
		if err := os.WriteFile(kubeconfigPath, []byte(""), 0644); err != nil {
			t.Fatalf("failed to create kubeconfig file: %v", err)
		}
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: kubeconfigPath,
		}}
		err := resolveKubernetesConfigurations(&m)
		if err == nil {
			t.Errorf("expected error, got nil")
		}
		if !strings.Contains(err.Error(), "no configuration has been provided") {
			t.Errorf("expected no kubeconfig error, got %v", err)
		}
	})
	t.Run("with valid file", func(t *testing.T) {
		tempDir := t.TempDir()
		kubeconfigPath := path.Join(tempDir, "config")
		kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://example.com
  name: example-cluster
contexts:
- context:
    cluster: example-cluster
    user: example-user
  name: example-context
current-context: example-context
users:
- name: example-user
  user:
    token: example-token
`
		if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
			t.Fatalf("failed to create kubeconfig file: %v", err)
		}
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: kubeconfigPath,
		}}
		err := resolveKubernetesConfigurations(&m)
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
		if m.cfg == nil {
			t.Errorf("expected non-nil config, got nil")
		}
		if m.cfg.Host != "https://example.com" {
			t.Errorf("expected host https://example.com, got %s", m.cfg.Host)
		}
	})
}


@@ -47,7 +47,7 @@ users:
 kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
 `)))
 
 	s.Run("without authorization header returns original manager", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -58,7 +58,7 @@ users:
 	})
 
 	s.Run("with invalid authorization header returns original manager", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -70,7 +70,7 @@ users:
 	})
 
 	s.Run("with valid bearer token creates derived manager with correct configuration", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -138,7 +138,7 @@ users:
 `)))
 	s.Run("with no authorization header returns oauth token required error", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -149,7 +149,7 @@ users:
 	})
 
 	s.Run("with invalid authorization header returns oauth token required error", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)
@@ -161,7 +161,7 @@ users:
 	})
 
 	s.Run("with valid bearer token creates derived manager", func() {
-		testManager, err := NewManager(testStaticConfig)
+		testManager, err := NewManager(testStaticConfig, "")
 		s.Require().NoErrorf(err, "failed to create test manager: %v", err)
 		s.T().Cleanup(testManager.Close)


@@ -25,6 +25,7 @@ import (
 type Manager struct {
 	cfg                      *rest.Config
 	clientCmdConfig          clientcmd.ClientConfig
+	inCluster                bool
 	discoveryClient          discovery.CachedDiscoveryInterface
 	accessControlClientSet   *AccessControlClientset
 	accessControlRESTMapper  *AccessControlRESTMapper
@@ -37,18 +38,37 @@ type Manager struct {
 var _ helm.Kubernetes = (*Manager)(nil)
 var _ Openshift = (*Manager)(nil)
 
-func NewManager(config *config.StaticConfig) (*Manager, error) {
+func NewManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) {
 	k8s := &Manager{
 		staticConfig: config,
 	}
-	if err := resolveKubernetesConfigurations(k8s); err != nil {
-		return nil, err
+	pathOptions := clientcmd.NewDefaultPathOptions()
+	if k8s.staticConfig.KubeConfig != "" {
+		pathOptions.LoadingRules.ExplicitPath = k8s.staticConfig.KubeConfig
 	}
+	k8s.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		pathOptions.LoadingRules,
+		&clientcmd.ConfigOverrides{
+			ClusterInfo:    clientcmdapi.Cluster{Server: ""},
+			CurrentContext: kubeconfigContext,
+		})
+	var err error
+	if IsInCluster(k8s.staticConfig) {
+		k8s.cfg, err = InClusterConfig()
+		k8s.inCluster = true
+	} else {
+		k8s.cfg, err = k8s.clientCmdConfig.ClientConfig()
+	}
+	if err != nil || k8s.cfg == nil {
+		return nil, fmt.Errorf("failed to create kubernetes rest config: %v", err)
+	}
+	if k8s.cfg.UserAgent == "" {
+		k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
 	// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
 	//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
 	//	return &impersonateRoundTripper{original}
 	//})
-	var err error
 	k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
 	if err != nil {
 		return nil, err
@@ -107,21 +127,6 @@ func (m *Manager) Close() {
 	}
 }
 
-func (m *Manager) GetAPIServerHost() string {
-	if m.cfg == nil {
-		return ""
-	}
-	return m.cfg.Host
-}
-
-func (m *Manager) IsInCluster() bool {
-	if m.staticConfig.KubeConfig != "" {
-		return false
-	}
-	cfg, err := InClusterConfig()
-	return err == nil && cfg != nil
-}
-
 func (m *Manager) configuredNamespace() string {
 	if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
 		return ns
@@ -221,11 +226,14 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
 		return &Kubernetes{manager: m}, nil
 	}
 	clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
-	derived := &Kubernetes{manager: &Manager{
-		clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
-		cfg:             derivedCfg,
-		staticConfig:    m.staticConfig,
-	}}
+	derived := &Kubernetes{
+		manager: &Manager{
+			clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
+			inCluster:       m.inCluster,
+			cfg:             derivedCfg,
+			staticConfig:    m.staticConfig,
+		},
+	}
 	derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
 	if err != nil {
 		if m.staticConfig.RequireOAuth {


@@ -0,0 +1,163 @@
package kubernetes

import (
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/containers/kubernetes-mcp-server/internal/test"
	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/stretchr/testify/suite"
	"k8s.io/client-go/rest"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

type ManagerTestSuite struct {
	suite.Suite
	originalEnv             []string
	originalInClusterConfig func() (*rest.Config, error)
	mockServer              *test.MockServer
}

func (s *ManagerTestSuite) SetupTest() {
	s.originalEnv = os.Environ()
	s.originalInClusterConfig = InClusterConfig
	s.mockServer = test.NewMockServer()
}

func (s *ManagerTestSuite) TearDownTest() {
	test.RestoreEnv(s.originalEnv)
	InClusterConfig = s.originalInClusterConfig
	if s.mockServer != nil {
		s.mockServer.Close()
	}
}

func (s *ManagerTestSuite) TestNewManagerInCluster() {
	InClusterConfig = func() (*rest.Config, error) {
		return &rest.Config{}, nil
	}
	s.Run("with default StaticConfig (empty kubeconfig)", func() {
		manager, err := NewManager(&config.StaticConfig{}, "")
		s.Require().NoError(err)
		s.Require().NotNil(manager)
		s.Run("behaves as in cluster", func() {
			s.True(manager.inCluster, "expected in cluster, got not in cluster")
		})
		s.Run("sets default user-agent", func() {
			s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
		})
	})
	s.Run("with explicit kubeconfig", func() {
		manager, err := NewManager(&config.StaticConfig{
			KubeConfig: s.mockServer.KubeconfigFile(s.T()),
		}, "")
		s.Require().NoError(err)
		s.Require().NotNil(manager)
		s.Run("behaves as NOT in cluster", func() {
			s.False(manager.inCluster, "expected not in cluster, got in cluster")
		})
	})
}

func (s *ManagerTestSuite) TestNewManagerLocal() {
	InClusterConfig = func() (*rest.Config, error) {
		return nil, rest.ErrNotInCluster
	}
	s.Run("with valid kubeconfig in env", func() {
		kubeconfig := s.mockServer.KubeconfigFile(s.T())
		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig))
		manager, err := NewManager(&config.StaticConfig{}, "")
		s.Require().NoError(err)
		s.Require().NotNil(manager)
		s.Run("behaves as NOT in cluster", func() {
			s.False(manager.inCluster, "expected not in cluster, got in cluster")
		})
		s.Run("loads correct config", func() {
			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
		})
		s.Run("sets default user-agent", func() {
			s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
		})
		s.Run("rest config host points to mock server", func() {
			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
		})
	})
	s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
		kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
		kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T())
		manager, err := NewManager(&config.StaticConfig{
			KubeConfig: kubeconfigExplicit,
		}, "")
		s.Require().NoError(err)
		s.Require().NotNil(manager)
		s.Run("behaves as NOT in cluster", func() {
			s.False(manager.inCluster, "expected not in cluster, got in cluster")
		})
		s.Run("loads correct config (explicit)", func() {
			s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env")
			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
		})
		s.Run("rest config host points to mock server", func() {
			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
		})
	})
	s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
		kubeconfig := s.mockServer.Kubeconfig()
		kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext()
		kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server"
		kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster()
		kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this
		kubeconfig.CurrentContext = "not-the-mock-server"
		kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig)
		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile))
		manager, err := NewManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves
		s.Require().NoError(err)
		s.Require().NotNil(manager)
		s.Run("behaves as NOT in cluster", func() {
			s.False(manager.inCluster, "expected not in cluster, got in cluster")
		})
		s.Run("loads correct config", func() {
			s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
		})
		s.Run("rest config host points to mock server", func() {
			s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
		})
	})
	s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
		kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
		manager, err := NewManager(&config.StaticConfig{}, "i-do-not-exist")
		s.Run("returns error", func() {
			s.Error(err)
			s.Nil(manager)
			s.ErrorContains(err, `failed to create kubernetes rest config: context "i-do-not-exist" does not exist`)
		})
	})
	s.Run("with invalid path kubeconfig in env", func() {
		s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist"))
		manager, err := NewManager(&config.StaticConfig{}, "")
		s.Run("returns error", func() {
			s.Error(err)
			s.Nil(manager)
			s.ErrorContains(err, "failed to create kubernetes rest config")
		})
	})
	s.Run("with empty kubeconfig in env", func() {
		kubeconfigPath := filepath.Join(s.T().TempDir(), "config")
		s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644))
		s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath))
		manager, err := NewManager(&config.StaticConfig{}, "")
		s.Run("returns error", func() {
			s.Error(err)
			s.Nil(manager)
			s.ErrorContains(err, "no configuration has been provided")
		})
	})
}

func TestManager(t *testing.T) {
	suite.Run(t, new(ManagerTestSuite))
}


@@ -4,11 +4,6 @@ import (
 	"context"
 
 	"github.com/containers/kubernetes-mcp-server/pkg/config"
-	"k8s.io/client-go/discovery/cached/memory"
-	"k8s.io/client-go/dynamic"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/restmapper"
-	"k8s.io/client-go/tools/clientcmd"
 )
 
 type Provider interface {
@@ -28,14 +23,14 @@ type Provider interface {
 }
 
 func NewProvider(cfg *config.StaticConfig) (Provider, error) {
-	m, err := NewManager(cfg)
+	strategy := resolveStrategy(cfg)
+	factory, err := getProviderFactory(strategy)
 	if err != nil {
 		return nil, err
 	}
-	strategy := resolveStrategy(cfg, m)
-	factory, err := getProviderFactory(strategy)
+	m, err := NewManager(cfg, "")
 	if err != nil {
 		return nil, err
 	}
@@ -43,61 +38,16 @@ func NewProvider(cfg *config.StaticConfig) (Provider, error) {
 	return factory(m, cfg)
 }
 
-func (m *Manager) newForContext(context string) (*Manager, error) {
-	pathOptions := clientcmd.NewDefaultPathOptions()
-	if m.staticConfig.KubeConfig != "" {
-		pathOptions.LoadingRules.ExplicitPath = m.staticConfig.KubeConfig
-	}
-	clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-		pathOptions.LoadingRules,
-		&clientcmd.ConfigOverrides{
-			CurrentContext: context,
-		},
-	)
-	cfg, err := clientCmdConfig.ClientConfig()
-	if err != nil {
-		return nil, err
-	}
-	if cfg.UserAgent == "" {
-		cfg.UserAgent = rest.DefaultKubernetesUserAgent()
-	}
-	manager := &Manager{
-		cfg:             cfg,
-		clientCmdConfig: clientCmdConfig,
-		staticConfig:    m.staticConfig,
-	}
-	// Initialize clients for new manager
-	manager.accessControlClientSet, err = NewAccessControlClientset(manager.cfg, manager.staticConfig)
-	if err != nil {
-		return nil, err
-	}
-	manager.discoveryClient = memory.NewMemCacheClient(manager.accessControlClientSet.DiscoveryClient())
-	manager.accessControlRESTMapper = NewAccessControlRESTMapper(
-		restmapper.NewDeferredDiscoveryRESTMapper(manager.discoveryClient),
-		manager.staticConfig,
-	)
-	manager.dynamicClient, err = dynamic.NewForConfig(manager.cfg)
-	if err != nil {
-		return nil, err
-	}
-	return manager, nil
-}
-
-func resolveStrategy(cfg *config.StaticConfig, m *Manager) string {
+func resolveStrategy(cfg *config.StaticConfig) string {
 	if cfg.ClusterProviderStrategy != "" {
 		return cfg.ClusterProviderStrategy
 	}
-	if m.IsInCluster() {
+	if cfg.KubeConfig != "" {
+		return config.ClusterProviderKubeConfig
+	}
+	if _, inClusterConfigErr := InClusterConfig(); inClusterConfigErr == nil {
 		return config.ClusterProviderInCluster
 	}


@@ -30,7 +30,7 @@ func init() {
 // via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
 func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) {
 	// Handle in-cluster mode
-	if m.IsInCluster() {
+	if IsInCluster(cfg) {
 		return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
 	}
@@ -65,12 +65,7 @@ func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager,
 	baseManager := p.managers[p.defaultContext]
 
-	if baseManager.IsInCluster() {
-		// In cluster mode, so context switching is not applicable
-		return baseManager, nil
-	}
-
-	m, err := baseManager.newForContext(context)
+	m, err := NewManager(baseManager.staticConfig, context)
 	if err != nil {
 		return nil, err
 	}
@@ -92,7 +87,7 @@ func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, to
 	return m.VerifyToken(ctx, token, audience)
 }
 
-func (p *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
+func (p *kubeConfigClusterProvider) GetTargets(_ context.Context) ([]string, error) {
 	contextNames := make([]string, 0, len(p.managers))
 	for contextName := range p.managers {
 		contextNames = append(contextNames, contextName)


@@ -96,6 +96,13 @@ func (s *ProviderKubeconfigTestSuite) TestVerifyToken() {
 		s.Len(audiences, 1, "Expected audiences from VerifyToken with empty target")
 		s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
 	})
+	s.Run("VerifyToken returns error for invalid context", func() {
+		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "invalid-context", "some-token", "the-audience")
+		s.Require().Error(err, "Expected error from VerifyToken with invalid target")
+		s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
+		s.Nil(userInfo, "Expected no UserInfo from VerifyToken with invalid target")
+		s.Nil(audiences, "Expected no audiences from VerifyToken with invalid target")
+	})
 }
 
 func (s *ProviderKubeconfigTestSuite) TestGetTargets() {


@@ -27,7 +27,10 @@ func init() {
 // Validates that the manager is in-cluster when the in-cluster strategy is used.
 func newSingleClusterProvider(strategy string) ProviderFactory {
 	return func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
-		if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
+		if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster {
+			return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig)
+		}
+		if strategy == config.ClusterProviderInCluster && !IsInCluster(cfg) {
 			return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
 		}
@@ -49,7 +52,7 @@ func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token,
 	return p.manager.VerifyToken(ctx, token, audience)
 }
 
-func (p *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
+func (p *singleClusterProvider) GetTargets(_ context.Context) ([]string, error) {
 	return []string{""}, nil
 }


@@ -1,6 +1,7 @@
 package kubernetes
 
 import (
+	"os"
 	"strings"
 	"testing"
@@ -31,13 +32,30 @@ func (s *BaseProviderSuite) TearDownTest() {
 type ProviderTestSuite struct {
 	BaseProviderSuite
+	originalEnv             []string
+	originalInClusterConfig func() (*rest.Config, error)
+	mockServer              *test.MockServer
+	kubeconfigPath          string
 }
 
-func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
-	originalIsInClusterConfig := InClusterConfig
-	s.T().Cleanup(func() {
-		InClusterConfig = originalIsInClusterConfig
-	})
+func (s *ProviderTestSuite) SetupTest() {
+	s.BaseProviderSuite.SetupTest()
+	s.originalEnv = os.Environ()
+	s.originalInClusterConfig = InClusterConfig
+	s.mockServer = test.NewMockServer()
+	s.kubeconfigPath = strings.ReplaceAll(s.mockServer.KubeconfigFile(s.T()), `\`, `\\`)
+}
+
+func (s *ProviderTestSuite) TearDownTest() {
+	s.BaseProviderSuite.TearDownTest()
+	test.RestoreEnv(s.originalEnv)
+	InClusterConfig = s.originalInClusterConfig
+	if s.mockServer != nil {
+		s.mockServer.Close()
+	}
+}
+
+func (s *ProviderTestSuite) TestNewProviderInCluster() {
 	InClusterConfig = func() (*rest.Config, error) {
 		return &rest.Config{}, nil
 	}
@@ -48,7 +66,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
 		s.NotNil(provider, "Expected provider instance")
 		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
 	})
-	s.Run("With configured in-cluster cluster_provider_strategy, returns single-cluster provider", func() {
+	s.Run("With cluster_provider_strategy=in-cluster, returns single-cluster provider", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
 cluster_provider_strategy = "in-cluster"
 `)))
@@ -57,7 +75,7 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
 		s.NotNil(provider, "Expected provider instance")
 		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
 	})
-	s.Run("With configured kubeconfig cluster_provider_strategy, returns error", func() {
+	s.Run("With cluster_provider_strategy=kubeconfig, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
 cluster_provider_strategy = "kubeconfig"
 `)))
@@ -66,7 +84,17 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
 		s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
 		s.Nilf(provider, "Expected no provider instance, got %v", provider)
 	})
-	s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
+	s.Run("With cluster_provider_strategy=kubeconfig and kubeconfig set to valid path, returns kubeconfig provider", func() {
+		cfg := test.Must(config.ReadToml([]byte(`
+cluster_provider_strategy = "kubeconfig"
+kubeconfig = "` + s.kubeconfigPath + `"
+`)))
+		provider, err := NewProvider(cfg)
+		s.Require().NoError(err, "Expected no error for kubeconfig strategy")
+		s.NotNil(provider, "Expected provider instance")
+		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
+	})
+	s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
 cluster_provider_strategy = "i-do-not-exist"
 `)))
@@ -77,22 +105,20 @@ func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
 	})
 }
 
-func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
-	mockServer := test.NewMockServer()
-	s.T().Cleanup(mockServer.Close)
-	kubeconfigPath := strings.ReplaceAll(mockServer.KubeconfigFile(s.T()), `\`, `\\`)
+func (s *ProviderTestSuite) TestNewProviderLocal() {
 	InClusterConfig = func() (*rest.Config, error) {
 		return nil, rest.ErrNotInCluster
 	}
+	s.Require().NoError(os.Setenv("KUBECONFIG", s.kubeconfigPath))
 	s.Run("With no cluster_provider_strategy, returns kubeconfig provider", func() {
-		cfg := test.Must(config.ReadToml([]byte(`
-kubeconfig = "` + kubeconfigPath + `"
-`)))
+		cfg := test.Must(config.ReadToml([]byte{}))
 		provider, err := NewProvider(cfg)
 		s.Require().NoError(err, "Expected no error for kubeconfig provider")
 		s.NotNil(provider, "Expected provider instance")
 		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
 	})
-	s.Run("With configured kubeconfig cluster_provider_strategy, returns kubeconfig provider", func() {
+	s.Run("With cluster_provider_strategy=kubeconfig, returns kubeconfig provider", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
-kubeconfig = "` + kubeconfigPath + `"
 cluster_provider_strategy = "kubeconfig"
 `)))
 		provider, err := NewProvider(cfg)
@@ -100,9 +126,8 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
 		s.NotNil(provider, "Expected provider instance")
 		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
 	})
-	s.Run("With configured in-cluster cluster_provider_strategy, returns error", func() {
+	s.Run("With cluster_provider_strategy=in-cluster, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
-kubeconfig = "` + kubeconfigPath + `"
 cluster_provider_strategy = "in-cluster"
 `)))
 		provider, err := NewProvider(cfg)
@@ -110,9 +135,18 @@ func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
 		s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
 		s.Nilf(provider, "Expected no provider instance, got %v", provider)
 	})
-	s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
+	s.Run("With cluster_provider_strategy=in-cluster and kubeconfig set to valid path, returns error", func() {
+		cfg := test.Must(config.ReadToml([]byte(`
+kubeconfig = "` + s.kubeconfigPath + `"
+cluster_provider_strategy = "in-cluster"
+`)))
+		provider, err := NewProvider(cfg)
+		s.Require().Error(err, "Expected error for in-cluster strategy")
+		s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error())
+		s.Nilf(provider, "Expected no provider instance, got %v", provider)
+	})
+	s.Run("With configured cluster_provider_strategy=non-existent, returns error", func() {
 		cfg := test.Must(config.ReadToml([]byte(`
-kubeconfig = "` + kubeconfigPath + `"
 cluster_provider_strategy = "i-do-not-exist"
 `)))
 		provider, err := NewProvider(cfg)