Merge pull request #54 from Cali0707/sync-downstream

NO-JIRA: Sync downstream to include provider changes
openshift-merge-bot[bot] committed (via GitHub) on 2025-10-15 04:03:59 +00:00
53 changed files with 1554 additions and 962 deletions


@@ -43,6 +43,7 @@ If you're using the native binaries you don't need to have Node or Python instal
- **✅ Lightweight**: The server is distributed as a single native binary for Linux, macOS, and Windows.
- **✅ High-Performance / Low-Latency**: Directly interacts with the Kubernetes API server without the overhead of calling and waiting for external commands.
- **✅ Multi-Cluster**: Can interact with multiple Kubernetes clusters simultaneously (as defined in your kubeconfig files).
- **✅ Cross-Platform**: Available as a native binary for Linux, macOS, and Windows, as well as an npm package, a Python package, and a container/Docker image.
- **✅ Configurable**: Supports [command-line arguments](#configuration) to configure the server behavior.
- **✅ Well tested**: The server has an extensive test suite to ensure its reliability and correctness across different Kubernetes environments.
@@ -175,15 +176,16 @@ uvx kubernetes-mcp-server@latest --help
### Configuration Options
| Option | Description |
|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--port`                | Starts the MCP server in Streamable HTTP (path /mcp) and Server-Sent Events (SSE, path /sse) mode and listens on the specified port.                                                                                                                                                           |
| `--log-level` | Sets the logging level (values [from 0-9](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)). Similar to [kubectl logging levels](https://kubernetes.io/docs/reference/kubectl/quick-reference/#kubectl-output-verbosity-and-debugging). |
| `--kubeconfig` | Path to the Kubernetes configuration file. If not provided, it will try to resolve the configuration (in-cluster, default location, etc.). |
| `--list-output` | Output format for resource list operations (one of: yaml, table) (default "table") |
| `--read-only` | If set, the MCP server will run in read-only mode, meaning it will not allow any write operations (create, update, delete) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without making changes. |
| `--disable-destructive` | If set, the MCP server will disable all destructive operations (delete, update, etc.) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without accidentally making changes. This option has no effect when `--read-only` is used. |
| `--toolsets` | Comma-separated list of toolsets to enable. Check the [🛠️ Tools and Functionalities](#tools-and-functionalities) section for more information. |
| Option | Description |
|---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `--port`                  | Starts the MCP server in Streamable HTTP (path /mcp) and Server-Sent Events (SSE, path /sse) mode and listens on the specified port.                                                                                                                                                             |
| `--log-level` | Sets the logging level (values [from 0-9](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)). Similar to [kubectl logging levels](https://kubernetes.io/docs/reference/kubectl/quick-reference/#kubectl-output-verbosity-and-debugging). |
| `--kubeconfig` | Path to the Kubernetes configuration file. If not provided, it will try to resolve the configuration (in-cluster, default location, etc.). |
| `--list-output` | Output format for resource list operations (one of: yaml, table) (default "table") |
| `--read-only` | If set, the MCP server will run in read-only mode, meaning it will not allow any write operations (create, update, delete) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without making changes. |
| `--disable-destructive` | If set, the MCP server will disable all destructive operations (delete, update, etc.) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without accidentally making changes. This option has no effect when `--read-only` is used. |
| `--toolsets` | Comma-separated list of toolsets to enable. Check the [🛠️ Tools and Functionalities](#tools-and-functionalities) section for more information. |
| `--disable-multi-cluster` | If set, the MCP server will disable multi-cluster support and will only use the current context from the kubeconfig file. This is useful if you want to restrict the MCP server to a single cluster. |
## 🛠️ Tools and Functionalities <a id="tools-and-functionalities"></a>
@@ -207,12 +209,16 @@ The following sets of tools are available (all on by default):
### Tools
If multi-cluster support is enabled (the default) and you have access to multiple clusters, all applicable tools include an additional `context` argument that specifies the Kubernetes context (cluster) to use for that operation.
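For illustration, a minimal sketch of the arguments an MCP client might send in multi-cluster mode follows; only the `context` key is documented above, and the other argument names are assumptions:

```go
package main

import "fmt"

func main() {
	// Hypothetical arguments payload for a tool call when multi-cluster
	// support is enabled. "context" selects the kubeconfig context (cluster)
	// to target; omitting it uses the current context. "namespace" is a
	// made-up example of an ordinary tool argument.
	args := map[string]any{
		"namespace": "default",
		"context":   "test-context",
	}
	fmt.Println(args)
}
```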
<!-- AVAILABLE-TOOLSETS-TOOLS-START -->
<details>
<summary>config</summary>
- **configuration_contexts_list** - List all available context names and associated server URLs from the kubeconfig file
- **configuration_view** - Get the current Kubernetes configuration content as a kubeconfig YAML
- `minified` (`boolean`) - Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)

go.mod

@@ -14,8 +14,8 @@ require (
github.com/spf13/cobra v1.10.1
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
golang.org/x/net v0.44.0
golang.org/x/oauth2 v0.31.0
golang.org/x/net v0.46.0
golang.org/x/oauth2 v0.32.0
golang.org/x/sync v0.17.0
helm.sh/helm/v3 v3.19.0
k8s.io/api v0.34.1
@@ -27,7 +27,7 @@ require (
k8s.io/kubectl v0.34.1
k8s.io/metrics v0.34.1
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
sigs.k8s.io/controller-runtime v0.22.1
sigs.k8s.io/controller-runtime v0.22.3
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664
sigs.k8s.io/yaml v1.6.0
)
@@ -122,10 +122,10 @@ require (
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
go.yaml.in/yaml/v2 v2.4.2 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.42.0 // indirect
golang.org/x/sys v0.36.0 // indirect
golang.org/x/term v0.35.0 // indirect
golang.org/x/text v0.29.0 // indirect
golang.org/x/crypto v0.43.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/term v0.36.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect
google.golang.org/grpc v1.72.1 // indirect

go.sum

@@ -357,20 +357,20 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -382,22 +382,22 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -453,8 +453,8 @@ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 h1:xC7x7FsPURJYhZnWHsWFd7nkdD/WRtQVWPC28FWt85Y=
sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664/go.mod h1:Cq9jUhwSYol5tNB0O/1vLYxNV9KqnhpvEa6HvJ1w0wY=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=


@@ -9,6 +9,7 @@ import (
const (
ClusterProviderKubeConfig = "kubeconfig"
ClusterProviderInCluster = "in-cluster"
ClusterProviderDisabled = "disabled"
)
// StaticConfig is the configuration for the server.
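As a sketch of what each strategy value is expected to mean (the `switch` below is illustrative and not part of this diff; the constants are copied from the hunk above, and the default branch reflects the auto-detect fallback logged in cmd further below):

```go
package main

import "fmt"

// Mirrored from the pkg/config constants above.
const (
	ClusterProviderKubeConfig = "kubeconfig"
	ClusterProviderInCluster  = "in-cluster"
	ClusterProviderDisabled   = "disabled"
)

// describe is illustrative only; the real provider selection lives elsewhere
// in the server.
func describe(strategy string) string {
	switch strategy {
	case ClusterProviderKubeConfig:
		return "multi-cluster, contexts resolved from the kubeconfig file"
	case ClusterProviderInCluster:
		return "single cluster, in-cluster service account configuration"
	case ClusterProviderDisabled:
		return "multi-cluster tools disabled, only the current context is used"
	default:
		return "auto-detect"
	}
}

func main() {
	fmt.Println(describe(ClusterProviderDisabled))
}
```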


@@ -23,7 +23,7 @@ import (
type KubernetesApiTokenVerifier interface {
// KubernetesApiVerifyToken TODO: clarify proper implementation
KubernetesApiVerifyToken(ctx context.Context, token, audience, cluster string) (*authenticationapiv1.UserInfo, []string, error)
KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
GetTargetParameterName() string
}
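A minimal fake that satisfies the reordered interface makes the new parameter order explicit; this mock is an assumption for illustration, and only the method signatures come from the diff:

```go
package auth

import (
	"context"

	authenticationapiv1 "k8s.io/api/authentication/v1"
)

// fakeVerifier is an illustrative stand-in, not part of this PR. It satisfies
// KubernetesApiTokenVerifier with the new (ctx, cluster, token, audience)
// argument order.
type fakeVerifier struct{}

func (fakeVerifier) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
	// A real implementation runs a TokenReview against the named cluster;
	// see Manager.VerifyToken in pkg/kubernetes/manager.go further below.
	return &authenticationapiv1.UserInfo{Username: "system:test"}, []string{audience}, nil
}

// GetTargetParameterName reports the MCP request parameter used to select the
// target cluster ("context" per the tools documentation above).
func (fakeVerifier) GetTargetParameterName() string { return "context" }
```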
@@ -247,7 +247,7 @@ func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, p
func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
if verifier != nil {
_, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience, cluster)
_, _, err := verifier.KubernetesApiVerifyToken(ctx, cluster, c.Token, audience)
if err != nil {
return fmt.Errorf("kubernetes API token validation error: %v", err)
}


@@ -47,9 +47,34 @@ kubernetes-mcp-server --port 8080
# start a SSE server on port 8443 with a public HTTPS host of example.com
kubernetes-mcp-server --port 8443 --sse-base-url https://example.com:8443
# start a SSE server on port 8080 with multi-cluster tools disabled
kubernetes-mcp-server --port 8080 --disable-multi-cluster
`))
)
const (
flagVersion = "version"
flagLogLevel = "log-level"
flagConfig = "config"
flagSSEPort = "sse-port"
flagHttpPort = "http-port"
flagPort = "port"
flagSSEBaseUrl = "sse-base-url"
flagKubeconfig = "kubeconfig"
flagToolsets = "toolsets"
flagListOutput = "list-output"
flagReadOnly = "read-only"
flagDisableDestructive = "disable-destructive"
flagRequireOAuth = "require-oauth"
flagOAuthAudience = "oauth-audience"
flagValidateToken = "validate-token"
flagAuthorizationURL = "authorization-url"
flagServerUrl = "server-url"
flagCertificateAuthority = "certificate-authority"
flagDisableMultiCluster = "disable-multi-cluster"
)
type MCPServerOptions struct {
Version bool
LogLevel int
@@ -68,6 +93,7 @@ type MCPServerOptions struct {
AuthorizationURL string
CertificateAuthority string
ServerURL string
DisableMultiCluster bool
ConfigPath string
StaticConfig *config.StaticConfig
@@ -104,32 +130,33 @@ func NewMCPServer(streams genericiooptions.IOStreams) *cobra.Command {
},
}
cmd.Flags().BoolVar(&o.Version, "version", o.Version, "Print version information and quit")
cmd.Flags().IntVar(&o.LogLevel, "log-level", o.LogLevel, "Set the log level (from 0 to 9)")
cmd.Flags().StringVar(&o.ConfigPath, "config", o.ConfigPath, "Path of the config file.")
cmd.Flags().IntVar(&o.SSEPort, "sse-port", o.SSEPort, "Start a SSE server on the specified port")
cmd.Flag("sse-port").Deprecated = "Use --port instead"
cmd.Flags().IntVar(&o.HttpPort, "http-port", o.HttpPort, "Start a streamable HTTP server on the specified port")
cmd.Flag("http-port").Deprecated = "Use --port instead"
cmd.Flags().StringVar(&o.Port, "port", o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)")
cmd.Flags().StringVar(&o.SSEBaseUrl, "sse-base-url", o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)")
cmd.Flags().StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to the kubeconfig file to use for authentication")
cmd.Flags().StringSliceVar(&o.Toolsets, "toolsets", o.Toolsets, "Comma-separated list of MCP toolsets to use (available toolsets: "+strings.Join(toolsets.ToolsetNames(), ", ")+"). Defaults to "+strings.Join(o.StaticConfig.Toolsets, ", ")+".")
cmd.Flags().StringVar(&o.ListOutput, "list-output", o.ListOutput, "Output format for resource list operations (one of: "+strings.Join(output.Names, ", ")+"). Defaults to "+o.StaticConfig.ListOutput+".")
cmd.Flags().BoolVar(&o.ReadOnly, "read-only", o.ReadOnly, "If true, only tools annotated with readOnlyHint=true are exposed")
cmd.Flags().BoolVar(&o.DisableDestructive, "disable-destructive", o.DisableDestructive, "If true, tools annotated with destructiveHint=true are disabled")
cmd.Flags().BoolVar(&o.RequireOAuth, "require-oauth", o.RequireOAuth, "If true, requires OAuth authorization as defined in the Model Context Protocol (MCP) specification. This flag is ignored if transport type is stdio")
_ = cmd.Flags().MarkHidden("require-oauth")
cmd.Flags().StringVar(&o.OAuthAudience, "oauth-audience", o.OAuthAudience, "OAuth audience for token claims validation. Optional. If not set, the audience is not validated. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden("oauth-audience")
cmd.Flags().BoolVar(&o.ValidateToken, "validate-token", o.ValidateToken, "If true, validates the token against the Kubernetes API Server using TokenReview. Optional. If not set, the token is not validated. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden("validate-token")
cmd.Flags().StringVar(&o.AuthorizationURL, "authorization-url", o.AuthorizationURL, "OAuth authorization server URL for protected resource endpoint. If not provided, the Kubernetes API server host will be used. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden("authorization-url")
cmd.Flags().StringVar(&o.ServerURL, "server-url", o.ServerURL, "Server URL of this application. Optional. If set, this url will be served in protected resource metadata endpoint and tokens will be validated with this audience. If not set, expected audience is kubernetes-mcp-server. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden("server-url")
cmd.Flags().StringVar(&o.CertificateAuthority, "certificate-authority", o.CertificateAuthority, "Certificate authority path to verify certificates. Optional. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden("certificate-authority")
cmd.Flags().BoolVar(&o.Version, flagVersion, o.Version, "Print version information and quit")
cmd.Flags().IntVar(&o.LogLevel, flagLogLevel, o.LogLevel, "Set the log level (from 0 to 9)")
cmd.Flags().StringVar(&o.ConfigPath, flagConfig, o.ConfigPath, "Path of the config file.")
cmd.Flags().IntVar(&o.SSEPort, flagSSEPort, o.SSEPort, "Start a SSE server on the specified port")
cmd.Flag(flagSSEPort).Deprecated = "Use --port instead"
cmd.Flags().IntVar(&o.HttpPort, flagHttpPort, o.HttpPort, "Start a streamable HTTP server on the specified port")
cmd.Flag(flagHttpPort).Deprecated = "Use --port instead"
cmd.Flags().StringVar(&o.Port, flagPort, o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)")
cmd.Flags().StringVar(&o.SSEBaseUrl, flagSSEBaseUrl, o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)")
cmd.Flags().StringVar(&o.Kubeconfig, flagKubeconfig, o.Kubeconfig, "Path to the kubeconfig file to use for authentication")
cmd.Flags().StringSliceVar(&o.Toolsets, flagToolsets, o.Toolsets, "Comma-separated list of MCP toolsets to use (available toolsets: "+strings.Join(toolsets.ToolsetNames(), ", ")+"). Defaults to "+strings.Join(o.StaticConfig.Toolsets, ", ")+".")
cmd.Flags().StringVar(&o.ListOutput, flagListOutput, o.ListOutput, "Output format for resource list operations (one of: "+strings.Join(output.Names, ", ")+"). Defaults to "+o.StaticConfig.ListOutput+".")
cmd.Flags().BoolVar(&o.ReadOnly, flagReadOnly, o.ReadOnly, "If true, only tools annotated with readOnlyHint=true are exposed")
cmd.Flags().BoolVar(&o.DisableDestructive, flagDisableDestructive, o.DisableDestructive, "If true, tools annotated with destructiveHint=true are disabled")
cmd.Flags().BoolVar(&o.RequireOAuth, flagRequireOAuth, o.RequireOAuth, "If true, requires OAuth authorization as defined in the Model Context Protocol (MCP) specification. This flag is ignored if transport type is stdio")
_ = cmd.Flags().MarkHidden(flagRequireOAuth)
cmd.Flags().StringVar(&o.OAuthAudience, flagOAuthAudience, o.OAuthAudience, "OAuth audience for token claims validation. Optional. If not set, the audience is not validated. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden(flagOAuthAudience)
cmd.Flags().BoolVar(&o.ValidateToken, flagValidateToken, o.ValidateToken, "If true, validates the token against the Kubernetes API Server using TokenReview. Optional. If not set, the token is not validated. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden(flagValidateToken)
cmd.Flags().StringVar(&o.AuthorizationURL, flagAuthorizationURL, o.AuthorizationURL, "OAuth authorization server URL for protected resource endpoint. If not provided, the Kubernetes API server host will be used. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden(flagAuthorizationURL)
cmd.Flags().StringVar(&o.ServerURL, flagServerUrl, o.ServerURL, "Server URL of this application. Optional. If set, this url will be served in protected resource metadata endpoint and tokens will be validated with this audience. If not set, expected audience is kubernetes-mcp-server. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden(flagServerUrl)
cmd.Flags().StringVar(&o.CertificateAuthority, flagCertificateAuthority, o.CertificateAuthority, "Certificate authority path to verify certificates. Optional. Only valid if require-oauth is enabled.")
_ = cmd.Flags().MarkHidden(flagCertificateAuthority)
cmd.Flags().BoolVar(&o.DisableMultiCluster, flagDisableMultiCluster, o.DisableMultiCluster, "Disable multi cluster tools. Optional. If true, all tools will be run against the default cluster/context.")
return cmd
}
@@ -156,52 +183,55 @@ func (m *MCPServerOptions) Complete(cmd *cobra.Command) error {
}
func (m *MCPServerOptions) loadFlags(cmd *cobra.Command) {
if cmd.Flag("log-level").Changed {
if cmd.Flag(flagLogLevel).Changed {
m.StaticConfig.LogLevel = m.LogLevel
}
if cmd.Flag("port").Changed {
if cmd.Flag(flagPort).Changed {
m.StaticConfig.Port = m.Port
} else if cmd.Flag("sse-port").Changed {
} else if cmd.Flag(flagSSEPort).Changed {
m.StaticConfig.Port = strconv.Itoa(m.SSEPort)
} else if cmd.Flag("http-port").Changed {
} else if cmd.Flag(flagHttpPort).Changed {
m.StaticConfig.Port = strconv.Itoa(m.HttpPort)
}
if cmd.Flag("sse-base-url").Changed {
if cmd.Flag(flagSSEBaseUrl).Changed {
m.StaticConfig.SSEBaseURL = m.SSEBaseUrl
}
if cmd.Flag("kubeconfig").Changed {
if cmd.Flag(flagKubeconfig).Changed {
m.StaticConfig.KubeConfig = m.Kubeconfig
}
if cmd.Flag("list-output").Changed {
if cmd.Flag(flagListOutput).Changed {
m.StaticConfig.ListOutput = m.ListOutput
}
if cmd.Flag("read-only").Changed {
if cmd.Flag(flagReadOnly).Changed {
m.StaticConfig.ReadOnly = m.ReadOnly
}
if cmd.Flag("disable-destructive").Changed {
if cmd.Flag(flagDisableDestructive).Changed {
m.StaticConfig.DisableDestructive = m.DisableDestructive
}
if cmd.Flag("toolsets").Changed {
if cmd.Flag(flagToolsets).Changed {
m.StaticConfig.Toolsets = m.Toolsets
}
if cmd.Flag("require-oauth").Changed {
if cmd.Flag(flagRequireOAuth).Changed {
m.StaticConfig.RequireOAuth = m.RequireOAuth
}
if cmd.Flag("oauth-audience").Changed {
if cmd.Flag(flagOAuthAudience).Changed {
m.StaticConfig.OAuthAudience = m.OAuthAudience
}
if cmd.Flag("validate-token").Changed {
if cmd.Flag(flagValidateToken).Changed {
m.StaticConfig.ValidateToken = m.ValidateToken
}
if cmd.Flag("authorization-url").Changed {
if cmd.Flag(flagAuthorizationURL).Changed {
m.StaticConfig.AuthorizationURL = m.AuthorizationURL
}
if cmd.Flag("server-url").Changed {
if cmd.Flag(flagServerUrl).Changed {
m.StaticConfig.ServerURL = m.ServerURL
}
if cmd.Flag("certificate-authority").Changed {
if cmd.Flag(flagCertificateAuthority).Changed {
m.StaticConfig.CertificateAuthority = m.CertificateAuthority
}
if cmd.Flag(flagDisableMultiCluster).Changed && m.DisableMultiCluster {
m.StaticConfig.ClusterProviderStrategy = config.ClusterProviderDisabled
}
}
func (m *MCPServerOptions) initializeLogging() {
@@ -258,6 +288,13 @@ func (m *MCPServerOptions) Run() error {
klog.V(1).Infof(" - Read-only mode: %t", m.StaticConfig.ReadOnly)
klog.V(1).Infof(" - Disable destructive tools: %t", m.StaticConfig.DisableDestructive)
strategy := m.StaticConfig.ClusterProviderStrategy
if strategy == "" {
strategy = "auto-detect (it is recommended to set this explicitly in your Config)"
}
klog.V(1).Infof(" - ClusterProviderStrategy: %s", strategy)
if m.Version {
_, _ = fmt.Fprintf(m.Out, "%s\n", version.Version)
return nil


@@ -276,3 +276,24 @@ func TestStdioLogging(t *testing.T) {
assert.Containsf(t, out.String(), "Starting kubernetes-mcp-server", "Expected klog output, got %s", out.String())
})
}
func TestDisableMultiCluster(t *testing.T) {
t.Run("defaults to false", func(t *testing.T) {
ioStreams, out := testStream()
rootCmd := NewMCPServer(ioStreams)
rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"})
if err := rootCmd.Execute(); !strings.Contains(out.String(), " - ClusterProviderStrategy: auto-detect (it is recommended to set this explicitly in your Config)") {
t.Fatalf("Expected ClusterProviderStrategy kubeconfig, got %s %v", out, err)
}
})
t.Run("set with --disable-multi-cluster", func(t *testing.T) {
ioStreams, out := testStream()
rootCmd := NewMCPServer(ioStreams)
rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--disable-multi-cluster"})
_ = rootCmd.Execute()
expected := `(?m)\" - ClusterProviderStrategy\: disabled\"`
if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
t.Fatalf("Expected ClusterProviderStrategy %s, got %s %v", expected, out.String(), err)
}
})
}


@@ -47,42 +47,10 @@ func resolveKubernetesConfigurations(kubernetes *Manager) error {
return err
}
func (m *Manager) IsInCluster() bool {
if m.staticConfig.KubeConfig != "" {
return false
}
cfg, err := InClusterConfig()
return err == nil && cfg != nil
}
func (m *Manager) configuredNamespace() string {
if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
return ns
}
return ""
}
func (m *Manager) NamespaceOrDefault(namespace string) string {
if namespace == "" {
return m.configuredNamespace()
}
return namespace
}
func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
return k.manager.NamespaceOrDefault(namespace)
}
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
return m.cfg, nil
}
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return m.clientCmdConfig
}
// ConfigurationContextsDefault returns the current context name
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {


@@ -1,27 +1,10 @@
package kubernetes
import (
"context"
"errors"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"github.com/fsnotify/fsnotify"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
"k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -41,174 +24,15 @@ type Kubernetes struct {
manager *Manager
}
type Manager struct {
cfg *rest.Config
clientCmdConfig clientcmd.ClientConfig
discoveryClient discovery.CachedDiscoveryInterface
accessControlClientSet *AccessControlClientset
accessControlRESTMapper *AccessControlRESTMapper
dynamicClient *dynamic.DynamicClient
staticConfig *config.StaticConfig
CloseWatchKubeConfig CloseWatchKubeConfig
// AccessControlClientset returns the access-controlled clientset
// This ensures that any denied resources configured in the system are properly enforced
func (k *Kubernetes) AccessControlClientset() *AccessControlClientset {
return k.manager.accessControlClientSet
}
var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)
var Scheme = scheme.Scheme
var ParameterCodec = runtime.NewParameterCodec(Scheme)
func NewManager(config *config.StaticConfig) (*Manager, error) {
k8s := &Manager{
staticConfig: config,
}
if err := resolveKubernetesConfigurations(k8s); err != nil {
return nil, err
}
// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
// return &impersonateRoundTripper{original}
//})
var err error
k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
if err != nil {
return nil, err
}
k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
k8s.staticConfig,
)
k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
if err != nil {
return nil, err
}
return k8s, nil
}
func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
if m.clientCmdConfig == nil {
return
}
kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
if len(kubeConfigFiles) == 0 {
return
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return
}
for _, file := range kubeConfigFiles {
_ = watcher.Add(file)
}
go func() {
for {
select {
case _, ok := <-watcher.Events:
if !ok {
return
}
_ = onKubeConfigChange()
case _, ok := <-watcher.Errors:
if !ok {
return
}
}
}
}()
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
m.CloseWatchKubeConfig = watcher.Close
}
func (m *Manager) Close() {
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
}
func (m *Manager) GetAPIServerHost() string {
if m.cfg == nil {
return ""
}
return m.cfg.Host
}
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return m.discoveryClient, nil
}
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
return m.accessControlRESTMapper, nil
}
func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
if !ok || !strings.HasPrefix(authorization, "Bearer ") {
if m.staticConfig.RequireOAuth {
return nil, errors.New("oauth token required")
}
return &Kubernetes{manager: m}, nil
}
klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
derivedCfg := &rest.Config{
Host: m.cfg.Host,
APIPath: m.cfg.APIPath,
// Copy only server verification TLS settings (CA bundle and server name)
TLSClientConfig: rest.TLSClientConfig{
Insecure: m.cfg.Insecure,
ServerName: m.cfg.ServerName,
CAFile: m.cfg.CAFile,
CAData: m.cfg.CAData,
},
BearerToken: strings.TrimPrefix(authorization, "Bearer "),
// pass custom UserAgent to identify the client
UserAgent: CustomUserAgent,
QPS: m.cfg.QPS,
Burst: m.cfg.Burst,
Timeout: m.cfg.Timeout,
Impersonate: rest.ImpersonationConfig{},
}
clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
derived := &Kubernetes{manager: &Manager{
clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
cfg: derivedCfg,
staticConfig: m.staticConfig,
}}
derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
derived.manager.staticConfig,
)
derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to initialize dynamic client: %v", err)
return nil, errors.New("failed to initialize dynamic client")
}
return &Kubernetes{manager: m}, nil
}
return derived, nil
}
func (k *Kubernetes) NewHelm() *helm.Helm {
// This is a derived Kubernetes, so it already has the Helm initialized
return helm.NewHelm(k.manager)


@@ -0,0 +1,185 @@
package kubernetes
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/containers/kubernetes-mcp-server/internal/test"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/stretchr/testify/suite"
)
type DerivedTestSuite struct {
suite.Suite
}
func (s *DerivedTestSuite) TestKubeConfig() {
// Create a temporary kubeconfig file for testing
tempDir := s.T().TempDir()
kubeconfigPath := filepath.Join(tempDir, "config")
kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
server: https://test-cluster.example.com
name: test-cluster
contexts:
- context:
cluster: test-cluster
user: test-user
name: test-context
current-context: test-context
users:
- name: test-user
user:
username: test-username
password: test-password
`
err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644)
s.Require().NoError(err, "failed to create kubeconfig file")
s.Run("with no RequireOAuth (default) config", func() {
testStaticConfig := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
`)))
s.Run("without authorization header returns original manager", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
derived, err := testManager.Derived(s.T().Context())
s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
s.Equal(derived.manager, testManager, "expected original manager, got different manager")
})
s.Run("with invalid authorization header returns original manager", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
derived, err := testManager.Derived(ctx)
s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
s.Equal(derived.manager, testManager, "expected original manager, got different manager")
})
s.Run("with valid bearer token creates derived manager with correct configuration", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
derived, err := testManager.Derived(ctx)
s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
s.Run("RestConfig is correctly copied and sensitive fields are omitted", func() {
derivedCfg := derived.manager.cfg
s.Require().NotNil(derivedCfg, "derived config is nil")
originalCfg := testManager.cfg
s.Equalf(originalCfg.Host, derivedCfg.Host, "expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
s.Equalf(originalCfg.APIPath, derivedCfg.APIPath, "expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
s.Equalf(originalCfg.QPS, derivedCfg.QPS, "expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
s.Equalf(originalCfg.Burst, derivedCfg.Burst, "expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
s.Equalf(originalCfg.Timeout, derivedCfg.Timeout, "expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)
s.Equalf(originalCfg.Insecure, derivedCfg.Insecure, "expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
s.Equalf(originalCfg.ServerName, derivedCfg.ServerName, "expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
s.Equalf(originalCfg.CAFile, derivedCfg.CAFile, "expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
s.Equalf(string(originalCfg.CAData), string(derivedCfg.CAData), "expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))
s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
s.Equalf("kubernetes-mcp-server/bearer-token-auth", derivedCfg.UserAgent, "expected UserAgent \"kubernetes-mcp-server/bearer-token-auth\", got %s", derivedCfg.UserAgent)
// Verify that sensitive fields are NOT copied to prevent credential leakage
// The derived config should only use the bearer token from the Authorization header
// and not inherit any authentication credentials from the original kubeconfig
s.Emptyf(derivedCfg.CertFile, "expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
s.Emptyf(derivedCfg.KeyFile, "expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
s.Emptyf(len(derivedCfg.CertData), "expected TLS CertData to be empty, got %v", derivedCfg.CertData)
s.Emptyf(len(derivedCfg.KeyData), "expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)
s.Emptyf(derivedCfg.Username, "expected Username to be empty, got %s", derivedCfg.Username)
s.Emptyf(derivedCfg.Password, "expected Password to be empty, got %s", derivedCfg.Password)
s.Nilf(derivedCfg.AuthProvider, "expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
s.Nilf(derivedCfg.ExecProvider, "expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
s.Emptyf(derivedCfg.BearerTokenFile, "expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
s.Emptyf(derivedCfg.Impersonate.UserName, "expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)
// Verify that the original manager still has the sensitive data
s.Falsef(originalCfg.Username == "" && originalCfg.Password == "", "original kubeconfig shouldn't be modified")
})
s.Run("derived manager has initialized clients", func() {
// Verify that the derived manager has proper clients initialized
s.NotNilf(derived.manager.accessControlClientSet, "expected accessControlClientSet to be initialized")
s.Equalf(testStaticConfig, derived.manager.accessControlClientSet.staticConfig, "staticConfig not properly wired to derived manager")
s.NotNilf(derived.manager.discoveryClient, "expected discoveryClient to be initialized")
s.NotNilf(derived.manager.accessControlRESTMapper, "expected accessControlRESTMapper to be initialized")
s.Equalf(testStaticConfig, derived.manager.accessControlRESTMapper.staticConfig, "staticConfig not properly wired to derived manager")
s.NotNilf(derived.manager.dynamicClient, "expected dynamicClient to be initialized")
})
})
})
s.Run("with RequireOAuth=true", func() {
testStaticConfig := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
require_oauth = true
`)))
s.Run("with no authorization header returns oauth token required error", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
derived, err := testManager.Derived(s.T().Context())
s.Require().Error(err, "expected error for missing oauth token, got nil")
s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
s.Nil(derived, "expected nil derived manager when oauth token required")
})
s.Run("with invalid authorization header returns oauth token required error", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
derived, err := testManager.Derived(ctx)
s.Require().Error(err, "expected error for invalid oauth token, got nil")
s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
s.Nil(derived, "expected nil derived manager when oauth token required")
})
s.Run("with valid bearer token creates derived manager", func() {
testManager, err := NewManager(testStaticConfig)
s.Require().NoErrorf(err, "failed to create test manager: %v", err)
s.T().Cleanup(testManager.Close)
ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
derived, err := testManager.Derived(ctx)
s.Require().NoErrorf(err, "failed to create derived manager: %v", err)
s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
derivedCfg := derived.manager.cfg
s.Require().NotNil(derivedCfg, "derived config is nil")
s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
})
})
}
func TestDerived(t *testing.T) {
suite.Run(t, new(DerivedTestSuite))
}


@@ -1,316 +0,0 @@
package kubernetes
import (
"context"
"os"
"path"
"testing"
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
func TestManager_Derived(t *testing.T) {
// Create a temporary kubeconfig file for testing
tempDir := t.TempDir()
kubeconfigPath := path.Join(tempDir, "config")
kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
server: https://test-cluster.example.com
name: test-cluster
contexts:
- context:
cluster: test-cluster
user: test-user
name: test-context
current-context: test-context
users:
- name: test-user
user:
username: test-username
password: test-password
`
if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
t.Fatalf("failed to create kubeconfig file: %v", err)
}
t.Run("without authorization header returns original manager", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
ctx := context.Background()
derived, err := testManager.Derived(ctx)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
if derived.manager != testManager {
t.Errorf("expected original manager, got different manager")
}
})
t.Run("with invalid authorization header returns original manager", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
derived, err := testManager.Derived(ctx)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
if derived.manager != testManager {
t.Errorf("expected original manager, got different manager")
}
})
t.Run("with valid bearer token creates derived manager with correct configuration", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
testBearerToken := "test-bearer-token-123"
ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
derived, err := testManager.Derived(ctx)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
if derived.manager == testManager {
t.Errorf("expected new derived manager, got original manager")
}
if derived.manager.staticConfig != testStaticConfig {
t.Errorf("staticConfig not properly wired to derived manager")
}
derivedCfg := derived.manager.cfg
if derivedCfg == nil {
t.Fatalf("derived config is nil")
}
originalCfg := testManager.cfg
if derivedCfg.Host != originalCfg.Host {
t.Errorf("expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
}
if derivedCfg.APIPath != originalCfg.APIPath {
t.Errorf("expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
}
if derivedCfg.QPS != originalCfg.QPS {
t.Errorf("expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
}
if derivedCfg.Burst != originalCfg.Burst {
t.Errorf("expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
}
if derivedCfg.Timeout != originalCfg.Timeout {
t.Errorf("expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)
}
if derivedCfg.Insecure != originalCfg.Insecure {
t.Errorf("expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
}
if derivedCfg.ServerName != originalCfg.ServerName {
t.Errorf("expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
}
if derivedCfg.CAFile != originalCfg.CAFile {
t.Errorf("expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
}
if string(derivedCfg.CAData) != string(originalCfg.CAData) {
t.Errorf("expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))
}
if derivedCfg.BearerToken != testBearerToken {
t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
}
if derivedCfg.UserAgent != CustomUserAgent {
t.Errorf("expected UserAgent %s, got %s", CustomUserAgent, derivedCfg.UserAgent)
}
// Verify that sensitive fields are NOT copied to prevent credential leakage
// The derived config should only use the bearer token from the Authorization header
// and not inherit any authentication credentials from the original kubeconfig
if derivedCfg.CertFile != "" {
t.Errorf("expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
}
if derivedCfg.KeyFile != "" {
t.Errorf("expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
}
if len(derivedCfg.CertData) != 0 {
t.Errorf("expected TLS CertData to be empty, got %v", derivedCfg.CertData)
}
if len(derivedCfg.KeyData) != 0 {
t.Errorf("expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)
}
if derivedCfg.Username != "" {
t.Errorf("expected Username to be empty, got %s", derivedCfg.Username)
}
if derivedCfg.Password != "" {
t.Errorf("expected Password to be empty, got %s", derivedCfg.Password)
}
if derivedCfg.AuthProvider != nil {
t.Errorf("expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
}
if derivedCfg.ExecProvider != nil {
t.Errorf("expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
}
if derivedCfg.BearerTokenFile != "" {
t.Errorf("expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
}
if derivedCfg.Impersonate.UserName != "" {
t.Errorf("expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)
}
// Verify that the original manager still has the sensitive data
if originalCfg.Username == "" && originalCfg.Password == "" {
t.Logf("original kubeconfig shouldn't be modified")
}
// Verify that the derived manager has proper clients initialized
if derived.manager.accessControlClientSet == nil {
t.Error("expected accessControlClientSet to be initialized")
}
if derived.manager.accessControlClientSet.staticConfig != testStaticConfig {
t.Errorf("staticConfig not properly wired to derived manager")
}
if derived.manager.discoveryClient == nil {
t.Error("expected discoveryClient to be initialized")
}
if derived.manager.accessControlRESTMapper == nil {
t.Error("expected accessControlRESTMapper to be initialized")
}
if derived.manager.accessControlRESTMapper.staticConfig != testStaticConfig {
t.Errorf("staticConfig not properly wired to derived manager")
}
if derived.manager.dynamicClient == nil {
t.Error("expected dynamicClient to be initialized")
}
})
t.Run("with RequireOAuth=true and no authorization header returns oauth token required error", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
RequireOAuth: true,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
ctx := context.Background()
derived, err := testManager.Derived(ctx)
if err == nil {
t.Fatal("expected error for missing oauth token, got nil")
}
if err.Error() != "oauth token required" {
t.Fatalf("expected error 'oauth token required', got %s", err.Error())
}
if derived != nil {
t.Error("expected nil derived manager when oauth token required")
}
})
t.Run("with RequireOAuth=true and invalid authorization header returns oauth token required error", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
RequireOAuth: true,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
derived, err := testManager.Derived(ctx)
if err == nil {
t.Fatal("expected error for invalid oauth token, got nil")
}
if err.Error() != "oauth token required" {
t.Fatalf("expected error 'oauth token required', got %s", err.Error())
}
if derived != nil {
t.Error("expected nil derived manager when oauth token required")
}
})
t.Run("with RequireOAuth=true and valid bearer token creates derived manager", func(t *testing.T) {
testStaticConfig := &config.StaticConfig{
KubeConfig: kubeconfigPath,
RequireOAuth: true,
DisabledTools: []string{"configuration_view"},
DeniedResources: []config.GroupVersionKind{
{Group: "apps", Version: "v1", Kind: "Deployment"},
},
}
testManager, err := NewManager(testStaticConfig)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
defer testManager.Close()
testBearerToken := "test-bearer-token-123"
ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
derived, err := testManager.Derived(ctx)
if err != nil {
t.Fatalf("failed to create manager: %v", err)
}
if derived.manager == testManager {
t.Error("expected new derived manager, got original manager")
}
if derived.manager.staticConfig != testStaticConfig {
t.Error("staticConfig not properly wired to derived manager")
}
derivedCfg := derived.manager.cfg
if derivedCfg == nil {
t.Fatal("derived config is nil")
}
if derivedCfg.BearerToken != testBearerToken {
t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
}
})
}

pkg/kubernetes/manager.go (new file)

@@ -0,0 +1,251 @@
package kubernetes
import (
"context"
"errors"
"fmt"
"strings"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
"github.com/fsnotify/fsnotify"
authenticationv1api "k8s.io/api/authentication/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
)
type Manager struct {
cfg *rest.Config
clientCmdConfig clientcmd.ClientConfig
discoveryClient discovery.CachedDiscoveryInterface
accessControlClientSet *AccessControlClientset
accessControlRESTMapper *AccessControlRESTMapper
dynamicClient *dynamic.DynamicClient
staticConfig *config.StaticConfig
CloseWatchKubeConfig CloseWatchKubeConfig
}
var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)
func NewManager(config *config.StaticConfig) (*Manager, error) {
k8s := &Manager{
staticConfig: config,
}
if err := resolveKubernetesConfigurations(k8s); err != nil {
return nil, err
}
// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
// return &impersonateRoundTripper{original}
//})
var err error
k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
if err != nil {
return nil, err
}
k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
k8s.staticConfig,
)
k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
if err != nil {
return nil, err
}
return k8s, nil
}
func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
if m.clientCmdConfig == nil {
return
}
kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
if len(kubeConfigFiles) == 0 {
return
}
watcher, err := fsnotify.NewWatcher()
if err != nil {
return
}
for _, file := range kubeConfigFiles {
_ = watcher.Add(file)
}
go func() {
for {
select {
case _, ok := <-watcher.Events:
if !ok {
return
}
_ = onKubeConfigChange()
case _, ok := <-watcher.Errors:
if !ok {
return
}
}
}
}()
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
m.CloseWatchKubeConfig = watcher.Close
}
func (m *Manager) Close() {
if m.CloseWatchKubeConfig != nil {
_ = m.CloseWatchKubeConfig()
}
}
func (m *Manager) GetAPIServerHost() string {
if m.cfg == nil {
return ""
}
return m.cfg.Host
}
func (m *Manager) IsInCluster() bool {
if m.staticConfig.KubeConfig != "" {
return false
}
cfg, err := InClusterConfig()
return err == nil && cfg != nil
}
func (m *Manager) configuredNamespace() string {
if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
return ns
}
return ""
}
func (m *Manager) NamespaceOrDefault(namespace string) string {
if namespace == "" {
return m.configuredNamespace()
}
return namespace
}
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
return m.discoveryClient, nil
}
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
return m.accessControlRESTMapper, nil
}
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
return m.cfg, nil
}
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return m.clientCmdConfig
}
func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
tokenReviewClient, err := m.accessControlClientSet.TokenReview()
if err != nil {
return nil, nil, err
}
tokenReview := &authenticationv1api.TokenReview{
TypeMeta: metav1.TypeMeta{
APIVersion: "authentication.k8s.io/v1",
Kind: "TokenReview",
},
Spec: authenticationv1api.TokenReviewSpec{
Token: token,
Audiences: []string{audience},
},
}
result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
if err != nil {
return nil, nil, fmt.Errorf("failed to create token review: %v", err)
}
if !result.Status.Authenticated {
if result.Status.Error != "" {
return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
}
return nil, nil, fmt.Errorf("token authentication failed")
}
return &result.Status.User, result.Status.Audiences, nil
}
func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
if !ok || !strings.HasPrefix(authorization, "Bearer ") {
if m.staticConfig.RequireOAuth {
return nil, errors.New("oauth token required")
}
return &Kubernetes{manager: m}, nil
}
klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
derivedCfg := &rest.Config{
Host: m.cfg.Host,
APIPath: m.cfg.APIPath,
// Copy only server verification TLS settings (CA bundle and server name)
TLSClientConfig: rest.TLSClientConfig{
Insecure: m.cfg.Insecure,
ServerName: m.cfg.ServerName,
CAFile: m.cfg.CAFile,
CAData: m.cfg.CAData,
},
BearerToken: strings.TrimPrefix(authorization, "Bearer "),
// pass custom UserAgent to identify the client
UserAgent: CustomUserAgent,
QPS: m.cfg.QPS,
Burst: m.cfg.Burst,
Timeout: m.cfg.Timeout,
Impersonate: rest.ImpersonationConfig{},
}
clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
derived := &Kubernetes{manager: &Manager{
clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
cfg: derivedCfg,
staticConfig: m.staticConfig,
}}
derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
return nil, errors.New("failed to get kubeconfig")
}
return &Kubernetes{manager: m}, nil
}
derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
derived.manager.staticConfig,
)
derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to initialize dynamic client: %v", err)
return nil, errors.New("failed to initialize dynamic client")
}
return &Kubernetes{manager: m}, nil
}
return derived, nil
}
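Taken together, `Derived` turns a per-request bearer token into a scoped client: it copies only the server-verification TLS settings from the base config, swaps in the caller's token, and strips every kubeconfig auth info. A minimal sketch of driving it from outside the package, assuming `OAuthAuthorizationHeader` can be used directly as the context key (matching the `ctx.Value` lookup above); the helper name and kubeconfig path handling are invented for illustration:

```go
package example

import (
	"context"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// derivedForToken is a hypothetical helper: build a base Manager from a
// kubeconfig, then derive a per-request client bound to the caller's token.
func derivedForToken(kubeconfigPath, bearerToken string) (*kubernetes.Kubernetes, error) {
	m, err := kubernetes.NewManager(&config.StaticConfig{KubeConfig: kubeconfigPath})
	if err != nil {
		return nil, err
	}
	// Derived reads the Authorization value from the context, as in the code above.
	ctx := context.WithValue(context.Background(), kubernetes.OAuthAuthorizationHeader, "Bearer "+bearerToken)
	return m.Derived(ctx)
}
```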

View File

@@ -2,7 +2,6 @@ package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"k8s.io/client-go/discovery/cached/memory"
@@ -12,161 +11,36 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
const (
KubeConfigTargetParameterName = "context"
)
type ManagerProvider interface {
type Provider interface {
// Openshift extends the Openshift interface to provide OpenShift specific functionality to toolset providers
// TODO: with the configurable toolset implementation and especially the multi-cluster approach
// extending this interface might not be a good idea anymore.
// For the kubecontext case, a user might be targeting both an OpenShift flavored cluster and a vanilla Kubernetes cluster.
// See: https://github.com/containers/kubernetes-mcp-server/pull/372#discussion_r2421592315
Openshift
TokenVerifier
GetTargets(ctx context.Context) ([]string, error)
GetManagerFor(ctx context.Context, target string) (*Manager, error)
GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error)
GetDefaultTarget() string
GetTargetParameterName() string
WatchTargets(func() error)
Close()
}
type kubeConfigClusterProvider struct {
defaultContext string
managers map[string]*Manager
}
var _ ManagerProvider = &kubeConfigClusterProvider{}
type inClusterProvider struct {
manager *Manager
}
var _ ManagerProvider = &inClusterProvider{}
func NewManagerProvider(cfg *config.StaticConfig) (ManagerProvider, error) {
func NewProvider(cfg *config.StaticConfig) (Provider, error) {
m, err := NewManager(cfg)
if err != nil {
return nil, err
}
switch resolveStrategy(cfg, m) {
case config.ClusterProviderKubeConfig:
return newKubeConfigClusterProvider(m)
case config.ClusterProviderInCluster:
return newInClusterProvider(m)
default:
return nil, fmt.Errorf("invalid ClusterProviderStrategy '%s', must be 'kubeconfig' or 'in-cluster'", cfg.ClusterProviderStrategy)
}
}
strategy := resolveStrategy(cfg, m)
func newKubeConfigClusterProvider(m *Manager) (*kubeConfigClusterProvider, error) {
// Handle in-cluster mode
if m.IsInCluster() {
return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
}
rawConfig, err := m.clientCmdConfig.RawConfig()
factory, err := getProviderFactory(strategy)
if err != nil {
return nil, err
}
allClusterManagers := map[string]*Manager{
rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
}
for name := range rawConfig.Contexts {
if name == rawConfig.CurrentContext {
continue // already initialized this, don't want to set it to nil
}
allClusterManagers[name] = nil
}
return &kubeConfigClusterProvider{
defaultContext: rawConfig.CurrentContext,
managers: allClusterManagers,
}, nil
}
func newInClusterProvider(m *Manager) (*inClusterProvider, error) {
return &inClusterProvider{
manager: m,
}, nil
}
func (k *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
contextNames := make([]string, 0, len(k.managers))
for cluster := range k.managers {
contextNames = append(contextNames, cluster)
}
return contextNames, nil
}
func (k *kubeConfigClusterProvider) GetTargetParameterName() string {
return KubeConfigTargetParameterName
}
func (k *kubeConfigClusterProvider) GetManagerFor(ctx context.Context, context string) (*Manager, error) {
m, ok := k.managers[context]
if ok && m != nil {
return m, nil
}
baseManager := k.managers[k.defaultContext]
if baseManager.IsInCluster() {
// In cluster mode, so context switching is not applicable
return baseManager, nil
}
m, err := baseManager.newForContext(context)
if err != nil {
return nil, err
}
k.managers[context] = m
return m, nil
}
func (k *kubeConfigClusterProvider) GetDefaultTarget() string {
return k.defaultContext
}
func (k *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
m := k.managers[k.defaultContext]
m.WatchKubeConfig(onKubeConfigChanged)
}
func (k *kubeConfigClusterProvider) Close() {
m := k.managers[k.defaultContext]
m.Close()
}
func (i *inClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
return []string{""}, nil
}
func (i *inClusterProvider) GetManagerFor(ctx context.Context, target string) (*Manager, error) {
if target != "" {
return nil, fmt.Errorf("unable to get manager for other context/cluster with in-cluster strategy")
}
return i.manager, nil
}
func (i *inClusterProvider) GetDefaultTarget() string {
return ""
}
func (i *inClusterProvider) GetTargetParameterName() string {
return ""
}
func (i *inClusterProvider) WatchTargets(watch func() error) {
i.manager.WatchKubeConfig(watch)
}
func (i *inClusterProvider) Close() {
i.manager.Close()
return factory(m, cfg)
}
func (m *Manager) newForContext(context string) (*Manager, error) {

View File

@@ -0,0 +1,130 @@
package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
authenticationv1api "k8s.io/api/authentication/v1"
)
// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"
// kubeConfigClusterProvider implements Provider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
defaultContext string
managers map[string]*Manager
}
var _ Provider = &kubeConfigClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}
// newKubeConfigClusterProvider creates a provider that manages multiple clusters
// via kubeconfig contexts. Returns an error if the manager is in-cluster mode.
func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) {
// Handle in-cluster mode
if m.IsInCluster() {
return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
}
rawConfig, err := m.clientCmdConfig.RawConfig()
if err != nil {
return nil, err
}
allClusterManagers := map[string]*Manager{
rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
}
for name := range rawConfig.Contexts {
if name == rawConfig.CurrentContext {
continue // already initialized this, don't want to set it to nil
}
allClusterManagers[name] = nil
}
return &kubeConfigClusterProvider{
defaultContext: rawConfig.CurrentContext,
managers: allClusterManagers,
}, nil
}
func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, error) {
m, ok := p.managers[context]
if ok && m != nil {
return m, nil
}
baseManager := p.managers[p.defaultContext]
if baseManager.IsInCluster() {
// In cluster mode, so context switching is not applicable
return baseManager, nil
}
m, err := baseManager.newForContext(context)
if err != nil {
return nil, err
}
p.managers[context] = m
return m, nil
}
func (p *kubeConfigClusterProvider) IsOpenShift(ctx context.Context) bool {
return p.managers[p.defaultContext].IsOpenShift(ctx)
}
func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
m, err := p.managerForContext(context)
if err != nil {
return nil, nil, err
}
return m.VerifyToken(ctx, token, audience)
}
func (p *kubeConfigClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
contextNames := make([]string, 0, len(p.managers))
for contextName := range p.managers {
contextNames = append(contextNames, contextName)
}
return contextNames, nil
}
func (p *kubeConfigClusterProvider) GetTargetParameterName() string {
return KubeConfigTargetParameterName
}
func (p *kubeConfigClusterProvider) GetDerivedKubernetes(ctx context.Context, context string) (*Kubernetes, error) {
m, err := p.managerForContext(context)
if err != nil {
return nil, err
}
return m.Derived(ctx)
}
func (p *kubeConfigClusterProvider) GetDefaultTarget() string {
return p.defaultContext
}
func (p *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
m := p.managers[p.defaultContext]
m.WatchKubeConfig(onKubeConfigChanged)
}
func (p *kubeConfigClusterProvider) Close() {
m := p.managers[p.defaultContext]
m.Close()
}
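The kubeconfig provider initializes a `Manager` eagerly only for the current context and fills the rest of the map lazily through `managerForContext`. A short consumer-side sketch, assuming a `*config.StaticConfig` that points at a multi-context kubeconfig:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// listTargets is a hypothetical walk over every target: only the default
// context's Manager exists up front; the others are built on first use.
func listTargets(cfg *config.StaticConfig) error {
	p, err := kubernetes.NewProvider(cfg)
	if err != nil {
		return err
	}
	defer p.Close()

	ctx := context.Background()
	targets, err := p.GetTargets(ctx) // context names, or [""] for single-cluster strategies
	if err != nil {
		return err
	}
	for _, target := range targets {
		k, err := p.GetDerivedKubernetes(ctx, target)
		if err != nil {
			return err
		}
		fmt.Printf("initialized client for target %q: %T\n", target, k)
	}
	return nil
}
```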

View File

@@ -0,0 +1,47 @@
package kubernetes
import (
"fmt"
"sort"
"github.com/containers/kubernetes-mcp-server/pkg/config"
)
// ProviderFactory creates a new Provider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (Provider, error)
var providerFactories = make(map[string]ProviderFactory)
// RegisterProvider registers a provider factory for a given strategy name.
// This should be called from init() functions in provider implementation files.
// Panics if a provider is already registered for the given strategy.
func RegisterProvider(strategy string, factory ProviderFactory) {
if _, exists := providerFactories[strategy]; exists {
panic(fmt.Sprintf("provider already registered for strategy '%s'", strategy))
}
providerFactories[strategy] = factory
}
// getProviderFactory retrieves a registered provider factory by strategy name.
// Returns an error if no provider is registered for the given strategy.
func getProviderFactory(strategy string) (ProviderFactory, error) {
factory, ok := providerFactories[strategy]
if !ok {
available := GetRegisteredStrategies()
return nil, fmt.Errorf("no provider registered for strategy '%s', available strategies: %v", strategy, available)
}
return factory, nil
}
// GetRegisteredStrategies returns a sorted list of all registered strategy names.
// This is useful for error messages and debugging.
func GetRegisteredStrategies() []string {
strategies := make([]string, 0, len(providerFactories))
for strategy := range providerFactories {
strategies = append(strategies, strategy)
}
sort.Strings(strategies)
return strategies
}
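Because the registry is keyed by strategy name and populated from `init()` functions, a new strategy needs no change to `NewProvider` itself. A hypothetical registration that reuses `newKubeConfigClusterProvider` as its backing implementation (the `"my-strategy"` name and its validation rule are invented for illustration):

```go
package kubernetes

import (
	"fmt"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

func init() {
	// "my-strategy" is a made-up name; registering a duplicate name panics,
	// per RegisterProvider above.
	RegisterProvider("my-strategy", newMyProvider)
}

// newMyProvider validates the Manager, as the ProviderFactory contract asks,
// then delegates to the existing kubeconfig implementation for this sketch.
func newMyProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) {
	if m.IsInCluster() {
		return nil, fmt.Errorf("my-strategy is not valid for in-cluster deployments")
	}
	return newKubeConfigClusterProvider(m, cfg)
}
```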

View File

@@ -0,0 +1,56 @@
package kubernetes
import (
"testing"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/stretchr/testify/suite"
)
type ProviderRegistryTestSuite struct {
BaseProviderSuite
}
func (s *ProviderRegistryTestSuite) TestRegisterProvider() {
s.Run("With no pre-existing provider, registers the provider", func() {
RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
_, exists := providerFactories["test-strategy"]
s.True(exists, "Provider should be registered")
})
s.Run("With pre-existing provider, panics", func() {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
s.Panics(func() {
RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
}, "Registering a provider with an existing strategy should panic")
})
}
func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() {
s.Run("With no registered providers, returns empty list", func() {
providerFactories = make(map[string]ProviderFactory)
strategies := GetRegisteredStrategies()
s.Empty(strategies, "No strategies should be registered")
})
s.Run("With multiple registered providers, returns sorted list", func() {
providerFactories = make(map[string]ProviderFactory)
RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
return nil, nil
})
strategies := GetRegisteredStrategies()
expected := []string{"bar-strategy", "foo-strategy"}
s.Equal(expected, strategies, "Strategies should be sorted alphabetically")
})
}
func TestProviderRegistry(t *testing.T) {
suite.Run(t, new(ProviderRegistryTestSuite))
}

View File

@@ -0,0 +1,78 @@
package kubernetes
import (
"context"
"fmt"
"github.com/containers/kubernetes-mcp-server/pkg/config"
authenticationv1api "k8s.io/api/authentication/v1"
)
// singleClusterProvider implements Provider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
strategy string
manager *Manager
}
var _ Provider = &singleClusterProvider{}
func init() {
RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
RegisterProvider(config.ClusterProviderDisabled, newSingleClusterProvider(config.ClusterProviderDisabled))
}
// newSingleClusterProvider creates a provider that manages a single cluster.
// Validates that the manager is in-cluster when the in-cluster strategy is used.
func newSingleClusterProvider(strategy string) ProviderFactory {
return func(m *Manager, cfg *config.StaticConfig) (Provider, error) {
if strategy == config.ClusterProviderInCluster && !m.IsInCluster() {
return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
}
return &singleClusterProvider{
manager: m,
strategy: strategy,
}, nil
}
}
func (p *singleClusterProvider) IsOpenShift(ctx context.Context) bool {
return p.manager.IsOpenShift(ctx)
}
func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
if target != "" {
return nil, nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
}
return p.manager.VerifyToken(ctx, token, audience)
}
func (p *singleClusterProvider) GetTargets(ctx context.Context) ([]string, error) {
return []string{""}, nil
}
func (p *singleClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) {
if target != "" {
return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
}
return p.manager.Derived(ctx)
}
func (p *singleClusterProvider) GetDefaultTarget() string {
return ""
}
func (p *singleClusterProvider) GetTargetParameterName() string {
return ""
}
func (p *singleClusterProvider) WatchTargets(watch func() error) {
p.manager.WatchKubeConfig(watch)
}
func (p *singleClusterProvider) Close() {
p.manager.Close()
}

View File

@@ -0,0 +1,127 @@
package kubernetes
import (
"strings"
"testing"
"github.com/containers/kubernetes-mcp-server/internal/test"
"github.com/containers/kubernetes-mcp-server/pkg/config"
"github.com/stretchr/testify/suite"
"k8s.io/client-go/rest"
)
type BaseProviderSuite struct {
suite.Suite
originalProviderFactories map[string]ProviderFactory
}
func (s *BaseProviderSuite) SetupTest() {
s.originalProviderFactories = make(map[string]ProviderFactory)
for k, v := range providerFactories {
s.originalProviderFactories[k] = v
}
}
func (s *BaseProviderSuite) TearDownTest() {
providerFactories = make(map[string]ProviderFactory)
for k, v := range s.originalProviderFactories {
providerFactories[k] = v
}
}
type ProviderTestSuite struct {
BaseProviderSuite
}
func (s *ProviderTestSuite) TestNewManagerProviderInCluster() {
originalIsInClusterConfig := InClusterConfig
s.T().Cleanup(func() {
InClusterConfig = originalIsInClusterConfig
})
InClusterConfig = func() (*rest.Config, error) {
return &rest.Config{}, nil
}
s.Run("With no cluster_provider_strategy, returns single-cluster provider", func() {
cfg := test.Must(config.ReadToml([]byte{}))
provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for in-cluster provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
})
s.Run("With configured in-cluster cluster_provider_strategy, returns single-cluster provider", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "in-cluster"
`)))
provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for single-cluster strategy")
s.NotNil(provider, "Expected provider instance")
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
})
s.Run("With configured kubeconfig cluster_provider_strategy, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "kubeconfig"
`)))
provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for kubeconfig strategy")
s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
cluster_provider_strategy = "i-do-not-exist"
`)))
provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
}
func (s *ProviderTestSuite) TestNewManagerProviderLocal() {
mockServer := test.NewMockServer()
s.T().Cleanup(mockServer.Close)
kubeconfigPath := strings.ReplaceAll(mockServer.KubeconfigFile(s.T()), `\`, `\\`)
s.Run("With no cluster_provider_strategy, returns kubeconfig provider", func() {
cfg := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + kubeconfigPath + `"
`)))
provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
})
s.Run("With configured kubeconfig cluster_provider_strategy, returns kubeconfig provider", func() {
cfg := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "kubeconfig"
`)))
provider, err := NewProvider(cfg)
s.Require().NoError(err, "Expected no error for kubeconfig provider")
s.NotNil(provider, "Expected provider instance")
s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
})
s.Run("With configured in-cluster cluster_provider_strategy, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "in-cluster"
`)))
provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for in-cluster strategy")
s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
s.Run("With configured non-existent cluster_provider_strategy, returns error", func() {
cfg := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + kubeconfigPath + `"
cluster_provider_strategy = "i-do-not-exist"
`)))
provider, err := NewProvider(cfg)
s.Require().Error(err, "Expected error for non-existent strategy")
s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
s.Nilf(provider, "Expected no provider instance, got %v", provider)
})
}
func TestProvider(t *testing.T) {
suite.Run(t, new(ProviderTestSuite))
}

View File

@@ -2,39 +2,10 @@ package kubernetes
import (
"context"
"fmt"
authenticationv1api "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
tokenReviewClient, err := m.accessControlClientSet.TokenReview()
if err != nil {
return nil, nil, err
}
tokenReview := &authenticationv1api.TokenReview{
TypeMeta: metav1.TypeMeta{
APIVersion: "authentication.k8s.io/v1",
Kind: "TokenReview",
},
Spec: authenticationv1api.TokenReviewSpec{
Token: token,
Audiences: []string{audience},
},
}
result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
if err != nil {
return nil, nil, fmt.Errorf("failed to create token review: %v", err)
}
if !result.Status.Authenticated {
if result.Status.Error != "" {
return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
}
return nil, nil, fmt.Errorf("token authentication failed")
}
return &result.Status.User, result.Status.Audiences, nil
type TokenVerifier interface {
VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error)
}

View File

@@ -39,15 +39,9 @@ func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.S
m3labTool.RawInputSchema = schema
}
m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
// get the correct internalk8s.Manager for the target specified in the request
// get the correct derived Kubernetes client for the target specified in the request
cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget())
m, err := s.p.GetManagerFor(ctx, cluster)
if err != nil {
return nil, err
}
// derive the manager based on auth on top of the settings for the cluster
k, err := m.Derived(ctx)
k, err := s.p.GetDerivedKubernetes(ctx, cluster)
if err != nil {
return nil, err
}

View File

@@ -67,7 +67,7 @@ type Server struct {
configuration *Configuration
server *server.MCPServer
enabledTools []string
p internalk8s.ManagerProvider
p internalk8s.Provider
}
func NewServer(configuration Configuration) (*Server, error) {
@@ -101,7 +101,7 @@ func NewServer(configuration Configuration) (*Server, error) {
func (s *Server) reloadKubernetesClusterProvider() error {
ctx := context.Background()
p, err := internalk8s.NewManagerProvider(s.configuration.StaticConfig)
p, err := internalk8s.NewProvider(s.configuration.StaticConfig)
if err != nil {
return err
}
@@ -113,11 +113,6 @@ func (s *Server) reloadKubernetesClusterProvider() error {
s.p = p
k, err := s.p.GetManagerFor(ctx, s.p.GetDefaultTarget())
if err != nil {
return err
}
targets, err := p.GetTargets(ctx)
if err != nil {
return err
@@ -136,7 +131,7 @@ func (s *Server) reloadKubernetesClusterProvider() error {
applicableTools := make([]api.ServerTool, 0)
for _, toolset := range s.configuration.Toolsets() {
for _, tool := range toolset.GetTools(k) {
for _, tool := range toolset.GetTools(p) {
tool := mutator(tool)
if !filter(tool) {
continue
@@ -182,23 +177,11 @@ func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer
// KubernetesApiVerifyToken verifies the given token with the audience by
// sending an TokenReview request to API Server for the specified cluster.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string, cluster string) (*authenticationapiv1.UserInfo, []string, error) {
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
if s.p == nil {
return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
}
// Use provided cluster or default
if cluster == "" {
cluster = s.p.GetDefaultTarget()
}
// Get the cluster manager for the specified cluster
m, err := s.p.GetManagerFor(ctx, cluster)
if err != nil {
return nil, nil, err
}
return m.VerifyToken(ctx, token, audience)
return s.p.VerifyToken(ctx, cluster, token, audience)
}
// GetTargetParameterName returns the parameter name used for target identification in MCP requests

View File

@@ -32,7 +32,7 @@ func ShouldIncludeTargetListTool(targetName string, targets []string) ToolFilter
// TODO: this check should be removed or make more generic when we have other
if tool.Tool.Name == "configuration_contexts_list" && targetName != kubernetes.KubeConfigTargetParameterName {
// let's not include configuration_contexts_list if we aren't targeting contexts in our ManagerProvider
// let's not include configuration_contexts_list if we aren't targeting contexts in our Provider
return false
}

View File

@@ -299,7 +299,7 @@ func escape(w writer, s string) error {
case '\r':
esc = "&#13;"
default:
panic("unrecognized escape character")
panic("html: unrecognized escape character")
}
s = s[i+1:]
if _, err := w.WriteString(esc); err != nil {

View File

@@ -136,7 +136,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int {
return -1
}
default:
panic("unreachable")
panic(fmt.Sprintf("html: internal error: indexOfElementInScope unknown scope: %d", s))
}
}
switch s {
@@ -179,7 +179,7 @@ func (p *parser) clearStackToContext(s scope) {
return
}
default:
panic("unreachable")
panic(fmt.Sprintf("html: internal error: clearStackToContext unknown scope: %d", s))
}
}
}
@@ -231,7 +231,14 @@ func (p *parser) addChild(n *Node) {
}
if n.Type == ElementNode {
p.oe = append(p.oe, n)
p.insertOpenElement(n)
}
}
func (p *parser) insertOpenElement(n *Node) {
p.oe = append(p.oe, n)
if len(p.oe) > 512 {
panic("html: open stack of elements exceeds 512 nodes")
}
}
@@ -810,7 +817,7 @@ func afterHeadIM(p *parser) bool {
p.im = inFramesetIM
return true
case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
p.oe = append(p.oe, p.head)
p.insertOpenElement(p.head)
defer p.oe.remove(p.head)
return inHeadIM(p)
case a.Head:
@@ -1678,7 +1685,7 @@ func inTableBodyIM(p *parser) bool {
return inTableIM(p)
}
// Section 12.2.6.4.14.
// Section 13.2.6.4.14.
func inRowIM(p *parser) bool {
switch p.tok.Type {
case StartTagToken:
@@ -1690,7 +1697,9 @@ func inRowIM(p *parser) bool {
p.im = inCellIM
return true
case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Tfoot, a.Thead, a.Tr:
if p.popUntil(tableScope, a.Tr) {
if p.elementInScope(tableScope, a.Tr) {
p.clearStackToContext(tableRowScope)
p.oe.pop()
p.im = inTableBodyIM
return false
}
@@ -1700,22 +1709,28 @@ func inRowIM(p *parser) bool {
case EndTagToken:
switch p.tok.DataAtom {
case a.Tr:
if p.popUntil(tableScope, a.Tr) {
if p.elementInScope(tableScope, a.Tr) {
p.clearStackToContext(tableRowScope)
p.oe.pop()
p.im = inTableBodyIM
return true
}
// Ignore the token.
return true
case a.Table:
if p.popUntil(tableScope, a.Tr) {
if p.elementInScope(tableScope, a.Tr) {
p.clearStackToContext(tableRowScope)
p.oe.pop()
p.im = inTableBodyIM
return false
}
// Ignore the token.
return true
case a.Tbody, a.Tfoot, a.Thead:
if p.elementInScope(tableScope, p.tok.DataAtom) {
p.parseImpliedToken(EndTagToken, a.Tr, a.Tr.String())
if p.elementInScope(tableScope, p.tok.DataAtom) && p.elementInScope(tableScope, a.Tr) {
p.clearStackToContext(tableRowScope)
p.oe.pop()
p.im = inTableBodyIM
return false
}
// Ignore the token.
@@ -2222,16 +2237,20 @@ func parseForeignContent(p *parser) bool {
p.acknowledgeSelfClosingTag()
}
case EndTagToken:
if strings.EqualFold(p.oe[len(p.oe)-1].Data, p.tok.Data) {
p.oe = p.oe[:len(p.oe)-1]
return true
}
for i := len(p.oe) - 1; i >= 0; i-- {
if p.oe[i].Namespace == "" {
return p.im(p)
}
if strings.EqualFold(p.oe[i].Data, p.tok.Data) {
p.oe = p.oe[:i]
return true
}
if i > 0 && p.oe[i-1].Namespace == "" {
break
}
}
return true
return p.im(p)
default:
// Ignore the token.
}
@@ -2312,9 +2331,13 @@ func (p *parser) parseCurrentToken() {
}
}
func (p *parser) parse() error {
func (p *parser) parse() (err error) {
defer func() {
if panicErr := recover(); panicErr != nil {
err = fmt.Errorf("%s", panicErr)
}
}()
// Iterate until EOF. Any other error will cause an early return.
var err error
for err != io.EOF {
// CDATA sections are allowed only in foreign content.
n := p.oe.top()
@@ -2343,6 +2366,8 @@ func (p *parser) parse() error {
// <tag>s. Conversely, explicit <tag>s in r's data can be silently dropped,
// with no corresponding node in the resulting tree.
//
// Parse will reject HTML that is nested deeper than 512 elements.
//
// The input is assumed to be UTF-8 encoded.
func Parse(r io.Reader) (*Node, error) {
return ParseWithOptions(r)
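The panic in `insertOpenElement` and the `recover` added to `parse` combine so that the new depth limit surfaces as an ordinary error from `Parse`. A small sketch of the expected behavior; the error text comes from the panic message above:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// 600 nested <div>s: deeper than the 512-element open-stack limit.
	deep := strings.Repeat("<div>", 600)
	_, err := html.Parse(strings.NewReader(deep))
	fmt.Println(err) // expected: html: open stack of elements exceeds 512 nodes
}
```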

View File

@@ -184,7 +184,7 @@ func render1(w writer, n *Node) error {
return err
}
// Add initial newline where there is danger of a newline beging ignored.
// Add initial newline where there is danger of a newline being ignored.
if c := n.FirstChild; c != nil && c.Type == TextNode && strings.HasPrefix(c.Data, "\n") {
switch n.Data {
case "pre", "listing", "textarea":

View File

@@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -128,6 +130,9 @@ func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if http2ConfigStrictMaxConcurrentRequests(h2) {
conf.StrictMaxConcurrentRequests = true
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}

15
vendor/golang.org/x/net/http2/config_go125.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return false
}

15
vendor/golang.org/x/net/http2/config_go126.go generated vendored Normal file
View File

@@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return h2.StrictMaxConcurrentRequests
}

View File

@@ -347,7 +347,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
0, // 3 bytes of length, filled in in endWrite
0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -1152,6 +1152,15 @@ type PriorityFrame struct {
PriorityParam
}
var defaultRFC9218Priority = PriorityParam{
incremental: 0,
urgency: 3,
}
// Note that HTTP/2 has had two different prioritization schemes, and
// the PriorityParam struct below is a superset of both schemes. The exported
// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
@@ -1167,6 +1176,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
// priority. The default is 3."
urgency uint8
// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
// incrementally, i.e., provide some meaningful output as chunks of the
// response arrive."
//
// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
// avoid unnecessary type conversions and because either type takes 1 byte.
incremental uint8
}
func (p PriorityParam) IsZero() bool {

View File

@@ -34,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
// Enabling extended CONNECT by default causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket

View File

@@ -181,6 +181,10 @@ type Server struct {
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
// Pool of error channels. This is per-Server rather than global
// because channels can't be reused across synctest bubbles.
errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -212,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
// Global error channel pool used for uninitialized Servers.
// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
var errChanPool = sync.Pool{
New: func() any { return make(chan error, 1) },
}
func (s *serverInternalState) getErrChan() chan error {
if s == nil {
return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
}
return s.errChanPool.Get().(chan error)
}
func (s *serverInternalState) putErrChan(ch chan error) {
if s == nil {
errChanPool.Put(ch) // Server used without calling ConfigureServer
return
}
s.errChanPool.Put(ch)
}
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -224,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
conf.state = &serverInternalState{
activeConns: make(map[*serverConn]struct{}),
errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
}
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -1124,25 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
var errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
func getErrChan() chan error {
if inTests {
// Channels cannot be reused across synctest tests.
return make(chan error, 1)
} else {
return errChanPool.Get().(chan error)
}
}
func putErrChan(ch chan error) {
if !inTests {
errChanPool.Put(ch)
}
}
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1150,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
ch := getErrChan()
ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1182,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
putErrChan(ch)
sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -2436,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
errc = getErrChan()
errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2448,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
putErrChan(errc)
sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -3129,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
done: getErrChan(),
done: sc.srv.state.getErrChan(),
}
select {
@@ -3146,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
putErrChan(msg.done)
sc.srv.state.putErrChan(msg.done)
return err
}
}

View File

@@ -355,6 +355,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -784,7 +785,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -1018,7 +1020,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
if cc.t.StrictMaxConcurrentStreams {
if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before

View File

@@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
// priority is used to set the priority of the newly opened stream.
priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.

View File

@@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1
const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
ws := &priorityWriteScheduler{
nodes: make(map[uint32]*priorityNode),
ws := &priorityWriteSchedulerRFC7540{
nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
type priorityNodeState int
type priorityNodeStateRFC7540 int
const (
priorityNodeOpen priorityNodeState = iota
priorityNodeClosed
priorityNodeIdle
priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
priorityNodeClosedRFC7540
priorityNodeIdleRFC7540
)
// priorityNode is a node in an HTTP/2 priority tree.
// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeState // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
type priorityNodeRFC7540 struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeStateRFC7540 // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
parent *priorityNode
kids *priorityNode // start of the kids list
prev, next *priorityNode // doubly-linked list of siblings
parent *priorityNodeRFC7540
kids *priorityNodeRFC7540 // start of the kids list
prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
func (n *priorityNode) setParent(parent *priorityNode) {
func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
func (n *priorityNode) addBytes(b int64) {
func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == priorityNodeOpen)
openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
sort.Sort(sortPriorityNodeSiblings(*tmp))
sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@@ -207,11 +207,11 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
type sortPriorityNodeSiblings []*priorityNode
type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
@@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
type priorityWriteScheduler struct {
type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
root priorityNode
root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
nodes map[uint32]*priorityNode
nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*priorityNode
closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*priorityNode
tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != priorityNodeIdle {
if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
curr.state = priorityNodeOpen
curr.state = priorityNodeOpenRFC7540
return
}
@@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
n := &priorityNode{
n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeOpen,
weight: priorityDefaultWeightRFC7540,
state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@@ -285,19 +285,19 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
if ws.nodes[streamID].state != priorityNodeOpen {
if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
n.state = priorityNodeClosed
n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
@@ -310,7 +310,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@@ -324,11 +324,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
n = &priorityNode{
n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeIdle,
weight: priorityDefaultWeightRFC7540,
state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@@ -340,7 +340,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
n.weight = priorityDefaultWeight
n.weight = priorityDefaultWeightRFC7540
return
}
@@ -381,8 +381,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
var n *priorityNode
func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@@ -401,8 +401,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@@ -428,7 +428,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@@ -442,7 +442,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}

View File

@@ -0,0 +1,209 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
type streamMetadata struct {
location *writeQueue
priority PriorityParam
}
type priorityWriteSchedulerRFC9218 struct {
// control contains control frames (SETTINGS, PING, etc.).
control writeQueue
// heads contain the head of a circular list of streams.
// We put these heads within a nested array that represents urgency and
// incremental, as defined in
// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
heads [8][2]*writeQueue
// streams contains a mapping between each stream ID and their metadata, so
// we can quickly locate them when needing to, for example, adjust their
// priority.
streams map[uint32]streamMetadata
// queuePool are empty queues for reuse.
queuePool writeQueuePool
// prioritizeIncremental is used to determine whether we should prioritize
// incremental streams or not, when urgency is the same in a given Pop()
// call.
prioritizeIncremental bool
}
func newPriorityWriteSchedulerRFC9128() WriteScheduler {
ws := &priorityWriteSchedulerRFC9218{
streams: make(map[uint32]streamMetadata),
}
return ws
}
func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
if ws.streams[streamID].location != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
q := ws.queuePool.get()
ws.streams[streamID] = streamMetadata{
location: q,
priority: opt.priority,
}
u, i := opt.priority.urgency, opt.priority.incremental
if ws.heads[u][i] == nil {
ws.heads[u][i] = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.heads[u][i].prev
q.next = ws.heads[u][i]
q.prev.next = q
q.next.prev = q
}
}
func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
metadata := ws.streams[streamID]
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
if q == nil {
return
}
if q.next == q {
// This was the only open stream.
ws.heads[u][i] = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.heads[u][i] == q {
ws.heads[u][i] = q.next
}
}
delete(ws.streams, streamID)
ws.queuePool.put(q)
}
func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
metadata := ws.streams[streamID]
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
if q == nil {
return
}
// Remove stream from current location.
if q.next == q {
// This was the only open stream.
ws.heads[u][i] = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.heads[u][i] == q {
ws.heads[u][i] = q.next
}
}
// Insert stream to the new queue.
u, i = priority.urgency, priority.incremental
if ws.heads[u][i] == nil {
ws.heads[u][i] = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.heads[u][i].prev
q.next = ws.heads[u][i]
q.prev.next = q
q.next.prev = q
}
// Update the metadata.
ws.streams[streamID] = streamMetadata{
location: q,
priority: priority,
}
}
func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.control.push(wr)
return
}
q := ws.streams[wr.StreamID()].location
if q == nil {
// This is a closed stream.
// wr should not be a HEADERS or DATA frame.
// We push the request onto the control queue.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
ws.control.push(wr)
return
}
q.push(wr)
}
func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.control.empty() {
return ws.control.shift(), true
}
// On the next Pop(), we want to prioritize incremental if we prioritized
// non-incremental request of the same urgency this time. Vice-versa.
// i.e. when there are incremental and non-incremental requests at the same
// priority, we give 50% of our bandwidth to the incremental ones in
// aggregate and 50% to the first non-incremental one (since
// non-incremental streams do not use round-robin writes).
ws.prioritizeIncremental = !ws.prioritizeIncremental
// Always prioritize lowest u (i.e. highest urgency level).
for u := range ws.heads {
for i := range ws.heads[u] {
// When we want to prioritize incremental, we try to pop i=true
// first before i=false when u is the same.
if ws.prioritizeIncremental {
i = (i + 1) % 2
}
q := ws.heads[u][i]
if q == nil {
continue
}
for {
if wr, ok := q.consume(math.MaxInt32); ok {
if i == 1 {
// For incremental streams, we update head to q.next so
// we can round-robin between multiple streams that can
// immediately benefit from partial writes.
ws.heads[u][i] = q.next
} else {
// For non-incremental streams, we try to finish one to
// completion rather than doing round-robin. However,
// we update head here so that if q.consume() is !ok
// (e.g. the stream has no more frames to consume), head
// is updated to the next q that has frames to consume
// on future iterations. This way, we do not prioritize
// writing to an unavailable stream on subsequent Pop() calls,
// preventing head-of-line blocking.
ws.heads[u][i] = q
}
return wr, true
}
q = q.next
if q == ws.heads[u][i] {
break
}
}
}
}
return FrameWriteRequest{}, false
}
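Push/Pop above maintain one circular doubly linked list ("ring") of queues per (urgency, incremental) pair, advancing the head after each write to incremental streams so they share bandwidth. A minimal, runnable sketch of that ring round-robin — names like demoQueue and ringInsert are hypothetical, not the vendored code:
package main

import "fmt"

type demoQueue struct {
	id         int
	frames     int // pending frames for this stream
	next, prev *demoQueue
}

// ringInsert places q at the tail of the ring whose head is *head,
// mirroring the "insert before head" logic in OpenStream/AdjustStream.
func ringInsert(head **demoQueue, q *demoQueue) {
	if *head == nil {
		*head, q.next, q.prev = q, q, q
		return
	}
	q.prev, q.next = (*head).prev, *head
	q.prev.next, q.next.prev = q, q
}

func main() {
	var head *demoQueue
	total := 0
	for id := 1; id <= 3; id++ {
		ringInsert(&head, &demoQueue{id: id, frames: 2})
		total += 2
	}
	// Round-robin pop, as done for incremental streams: advance to the
	// next stream after every write so bandwidth is shared in aggregate.
	for q := head; total > 0; q = q.next {
		if q.frames > 0 {
			fmt.Printf("write frame for stream %d\n", q.id)
			q.frames--
			total--
		}
	}
}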

View File

@@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler priorizes control frames
// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.

View File

@@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
// EncodeHeadersParam is the result of EncodeHeaders.
// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
// It might be a bit odd to return errors this way rather than returing an error,
// It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}

View File

@@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
b = append(b, up.Username...)
b = append(b, byte(len(up.Password)))
b = append(b, up.Password...)
// TODO(mikio): handle IO deadlines and cancelation if
// TODO(mikio): handle IO deadlines and cancellation if
// necessary
if _, err := rw.Write(b); err != nil {
return err

View File

@@ -5,7 +5,6 @@
package externalaccount
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
@@ -148,13 +147,13 @@ func canonicalHeaders(req *http.Request) (string, string) {
}
sort.Strings(headers)
var fullHeaders bytes.Buffer
var fullHeaders strings.Builder
for _, header := range headers {
headerValue := strings.Join(lowerCaseHeaders[header], ",")
fullHeaders.WriteString(header)
fullHeaders.WriteRune(':')
fullHeaders.WriteByte(':')
fullHeaders.WriteString(headerValue)
fullHeaders.WriteRune('\n')
fullHeaders.WriteByte('\n')
}
return strings.Join(headers, ";"), fullHeaders.String()
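A note on this refactor: strings.Builder assembles the same bytes as bytes.Buffer, but its String() returns the result without a final copy, and WriteByte is the precise call for a single ASCII separator (WriteRune only adds an unnecessary rune-width check). A tiny runnable illustration:
package main

import (
	"fmt"
	"strings"
)

func main() {
	var b strings.Builder
	b.WriteString("host")
	b.WriteByte(':') // one byte; no UTF-8 encoding path
	b.WriteString("example.com")
	b.WriteByte('\n')
	fmt.Print(b.String()) // returns the built string without copying
}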

View File

@@ -9,7 +9,6 @@
package oauth2 // import "golang.org/x/oauth2"
import (
"bytes"
"context"
"errors"
"net/http"
@@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and
// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches)
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
var buf strings.Builder
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},

View File

@@ -41,6 +41,15 @@ func (s *CPUSet) Zero() {
clear(s[:])
}
// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
// will silently ignore any invalid CPU bits in [CPUSet], so this is an
// efficient way of resetting the CPU affinity of a process.
func (s *CPUSet) Fill() {
for i := range s {
s[i] = ^cpuMask(0)
}
}
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
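A short sketch of how the new Fill is meant to be used, per its doc comment — resetting a process's affinity back to all CPUs (Linux-only; assumes a vendored x/sys version that includes Fill):
//go:build linux

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	set.Fill() // set every bit; the kernel ignores bits for CPUs that don't exist
	if err := unix.SchedSetaffinity(0, &set); err != nil { // pid 0 = calling thread
		log.Fatal(err)
	}
}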

View File

@@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
for i := range fds.Bits {
fds.Bits[i] = 0
}
clear(fds.Bits[:])
}
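This hunk (and the Ifreq and SockaddrPPPoE hunks below) swaps a hand-written zeroing loop for the clear builtin (Go 1.21+), which zeroes every element of a slice. A tiny illustration:
package main

import "fmt"

func main() {
	bits := [4]int64{7, 7, 7, 7}
	clear(bits[:]) // zeroes all elements; arrays are cleared via a full slice
	fmt.Println(bits) // [0 0 0 0]
}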

View File

@@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
for i := range ifr.raw.Ifru {
ifr.raw.Ifru[i] = 0
}
clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as

View File

@@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctrl-C the build or run)
set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit

View File

@@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
for i := 14; i < 14+IFNAMSIZ; i++ {
sa.raw[i] = 0
}
clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}

View File

@@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
var (
_p0 unsafe.Pointer
bufsize uintptr
)
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
}
r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
/*
* Exposed directly
*/
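The new Getvfsstat wrapper follows NetBSD's getvfsstat(2) convention: called with a nil slice it only reports how many filesystems are mounted, so callers size the buffer in two passes. A hedged sketch (the Mntonname field name is assumed from the NetBSD Statvfs_t layout):
//go:build netbsd

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Pass 1: nil buffer, just count the mounted filesystems.
	n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
	if err != nil {
		log.Fatal(err)
	}
	// Pass 2: fetch the entries into a correctly sized buffer.
	fss := make([]unix.Statvfs_t, n)
	if _, err := unix.Getvfsstat(fss, unix.ST_WAIT); err != nil {
		log.Fatal(err)
	}
	for _, fs := range fss {
		fmt.Println(unix.ByteSliceToString(fs.Mntonname[:]))
	}
}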

View File

@@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
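The two new //sys lines generate wrappers for the Win32 console APIs of the same names. A hedged usage sketch, counting and then discarding pending console input on stdin:
//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	h, err := windows.GetStdHandle(windows.STD_INPUT_HANDLE)
	if err != nil {
		log.Fatal(err)
	}
	var n uint32
	if err := windows.GetNumberOfConsoleInputEvents(h, &n); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d pending input events\n", n)
	if err := windows.FlushConsoleInputBuffer(h); err != nil { // discard them
		log.Fatal(err)
	}
}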

View File

@@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
// File flags for [os.OpenFile]. The O_ prefix is used to indicate
// that these flags are specific to the OpenFile function.
const (
O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
)
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
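Per the doc comment, the O_-prefixed aliases exist so CreateFile-style FILE_FLAG_ bits can be passed through [os.OpenFile]'s flag argument. A hedged sketch, assuming a Go toolchain whose os.OpenFile forwards these bits as that comment indicates (FILE_FLAG_BACKUP_SEMANTICS is what permits opening a directory handle):
//go:build windows

package main

import (
	"log"
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	f, err := os.OpenFile(`C:\Windows`, os.O_RDONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	log.Println("opened directory handle:", f.Name())
}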

View File

@@ -238,6 +238,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +285,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -2111,6 +2113,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
return
}
func FlushConsoleInputBuffer(console Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func FlushFileBuffers(handle Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
@@ -2481,6 +2491,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro
return
}
func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
var _p0 uint32
if wait {

View File

@@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
func maxLevel(a, b level) level {
if a > b {
return a
}
return b
}
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
sos: typeForLevel(maxLevel(prevLevel, level)),
eos: typeForLevel(maxLevel(succLevel, level)),
sos: typeForLevel(max(prevLevel, level)),
eos: typeForLevel(max(succLevel, level)),
}
}
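This hunk deletes the hand-rolled maxLevel helper in favor of Go 1.21's built-in generic max, which works for any ordered type, including named integer types like the bidi package's level. A tiny illustration (the level definition here mirrors, but is not, the package's own):
package main

import "fmt"

type level int8 // stand-in for the bidi package's level type

func main() {
	a, b := level(2), level(5)
	fmt.Println(max(a, b)) // 5 — no helper function needed
}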

14  vendor/modules.txt vendored
View File

@@ -366,7 +366,7 @@ go.yaml.in/yaml/v2
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
# golang.org/x/crypto v0.42.0
# golang.org/x/crypto v0.43.0
## explicit; go 1.24.0
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
@@ -380,7 +380,7 @@ golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/scrypt
# golang.org/x/net v0.44.0
# golang.org/x/net v0.46.0
## explicit; go 1.24.0
golang.org/x/net/context
golang.org/x/net/html
@@ -393,7 +393,7 @@ golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/socks
golang.org/x/net/proxy
golang.org/x/net/websocket
# golang.org/x/oauth2 v0.31.0
# golang.org/x/oauth2 v0.32.0
## explicit; go 1.24.0
golang.org/x/oauth2
golang.org/x/oauth2/google/externalaccount
@@ -404,15 +404,15 @@ golang.org/x/oauth2/internal
## explicit; go 1.24.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.36.0
# golang.org/x/sys v0.37.0
## explicit; go 1.24.0
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/term v0.35.0
# golang.org/x/term v0.36.0
## explicit; go 1.24.0
golang.org/x/term
# golang.org/x/text v0.29.0
# golang.org/x/text v0.30.0
## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/internal
@@ -1067,7 +1067,7 @@ oras.land/oras-go/v2/registry/remote/credentials/trace
oras.land/oras-go/v2/registry/remote/errcode
oras.land/oras-go/v2/registry/remote/internal/errutil
oras.land/oras-go/v2/registry/remote/retry
# sigs.k8s.io/controller-runtime v0.22.1
# sigs.k8s.io/controller-runtime v0.22.3
## explicit; go 1.24.0
sigs.k8s.io/controller-runtime/pkg/client
sigs.k8s.io/controller-runtime/pkg/client/apiutil

View File

@@ -151,8 +151,7 @@ func newClient(config *rest.Config, options Options) (*client, error) {
mapper: options.Mapper,
codecs: serializer.NewCodecFactory(options.Scheme),
structuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
resourceByType: make(map[cacheKey]*resourceMeta),
}
rawMetaClient, err := metadata.NewForConfigAndClient(metadata.ConfigFor(config), options.HTTPClient)

View File

@@ -48,11 +48,15 @@ type clientRestResources struct {
// codecs are used to create a REST client for a gvk
codecs serializer.CodecFactory
// structuredResourceByType stores structured type metadata
structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
// unstructuredResourceByType stores unstructured type metadata
unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
mu sync.RWMutex
// resourceByType stores type metadata
resourceByType map[cacheKey]*resourceMeta
mu sync.RWMutex
}
type cacheKey struct {
gvk schema.GroupVersionKind
forceDisableProtoBuf bool
}
// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
@@ -117,11 +121,11 @@ func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) {
// It's better to do creation work twice than to not let multiple
// people make requests at once
c.mu.RLock()
resourceByType := c.structuredResourceByType
if isUnstructured {
resourceByType = c.unstructuredResourceByType
}
r, known := resourceByType[gvk]
cacheKey := cacheKey{gvk: gvk, forceDisableProtoBuf: forceDisableProtoBuf}
r, known := c.resourceByType[cacheKey]
c.mu.RUnlock()
if known {
@@ -140,7 +144,7 @@ func (c *clientRestResources) getResource(obj any) (*resourceMeta, error) {
if err != nil {
return nil, err
}
resourceByType[gvk] = r
c.resourceByType[cacheKey] = r
return r, err
}
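The upstream controller-runtime change collapses the two per-GVK maps into one map keyed by a comparable struct, so the new forceDisableProtoBuf dimension doesn't require a third map. A minimal sketch of the pattern (string stands in for schema.GroupVersionKind):
package main

import "fmt"

type cacheKey struct {
	gvk                  string
	forceDisableProtoBuf bool
}

func main() {
	cache := map[cacheKey]string{
		{gvk: "v1/Pod", forceDisableProtoBuf: false}: "protobuf-capable client",
		{gvk: "v1/Pod", forceDisableProtoBuf: true}:  "JSON-only client",
	}
	fmt.Println(cache[cacheKey{gvk: "v1/Pod", forceDisableProtoBuf: true}])
}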