Mirror of https://github.com/containers/kubernetes-mcp-server.git, synced 2025-10-23 01:22:57 +03:00

Compare commits

33 Commits
| SHA1 |
|------|
| 7fe604e61d |
| 0c78a1e89d |
| c3bc991237 |
| ffc7b6c08d |
| 49afbad502 |
| 7f4edfd075 |
| 9da29f4505 |
| b66719ed8e |
| 86628bb1bf |
| 25032699db |
| dfddf23823 |
| f3a446676f |
| 1e154d7587 |
| 68619b57ad |
| 086afefc75 |
| 672b8a5d13 |
| 65cc304c3c |
| 3d3eb64582 |
| 3d5fba8813 |
| 31e90fbece |
| 99e954304c |
| a056981f53 |
| 61eaecc38f |
| 028c6b08c2 |
| e8ba1fa0bf |
| e86d314ae2 |
| a2d16e9f41 |
| c447bf819f |
| 07b1ebc05e |
| adc1044615 |
| b55f28b36e |
| d3723804ed |
| 792d2f5b80 |
.github/workflows/release.yaml (vendored) · 8 changed lines

@@ -12,11 +12,11 @@ concurrency:
 
 env:
   GO_VERSION: 1.23
-  NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
   UV_PUBLISH_TOKEN: ${{ secrets.UV_PUBLISH_TOKEN }}
 
 permissions:
   contents: write
+  id-token: write # Required for npmjs OIDC
   discussions: write
 
 jobs:
@@ -39,6 +39,12 @@ jobs:
           files: |
             LICENSE
             kubernetes-mcp-server-*
+      # Ensure npm 11.5.1 or later is installed (required for https://docs.npmjs.com/trusted-publishers)
+      - name: Setup node
+        uses: actions/setup-node@v6
+        with:
+          node-version: 24
+          registry-url: 'https://registry.npmjs.org'
       - name: Publish npm
         run:
           make npm-publish
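Taken together, these hunks appear to move the npm release from token-based auth to npm trusted publishing: the `NPM_TOKEN` secret goes away, the job carries `id-token: write`, and a dedicated setup-node step guarantees an npm new enough for OIDC. A minimal sketch of what the publish step now relies on (CI-only; the version check is an illustration, not part of the workflow):

    # Runs inside the release job, after actions/setup-node@v6:
    npm --version              # trusted publishing requires npm >= 11.5.1
    npm publish --tag latest   # authenticates via the GitHub Actions OIDC token; no .npmrc auth token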
Makefile · 52 changed lines

@@ -16,7 +16,7 @@ LD_FLAGS = -s -w \
 COMMON_BUILD_ARGS = -ldflags "$(LD_FLAGS)"
 
 GOLANGCI_LINT = $(shell pwd)/_output/tools/bin/golangci-lint
-GOLANGCI_LINT_VERSION ?= v2.2.2
+GOLANGCI_LINT_VERSION ?= v2.5.0
 
 # NPM version should not append the -dirty flag
 NPM_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//')
@@ -47,12 +47,12 @@ clean: ## Clean up all build artifacts
 	rm -rf $(CLEAN_TARGETS)
 
 .PHONY: build
-build: clean tidy format ## Build the project
+build: clean tidy format lint ## Build the project
 	go build $(COMMON_BUILD_ARGS) -o $(BINARY_NAME) ./cmd/kubernetes-mcp-server
 
 .PHONY: build-all-platforms
-build-all-platforms: clean tidy format ## Build the project for all platforms
+build-all-platforms: clean tidy format lint ## Build the project for all platforms
 	$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \
 		GOOS=$(os) GOARCH=$(arch) go build $(COMMON_BUILD_ARGS) -o $(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,) ./cmd/kubernetes-mcp-server; \
 	))
@@ -71,16 +71,14 @@ npm-publish: npm-copy-binaries ## Publish the npm packages
 	$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \
 		DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \
 		cd npm/$$DIRNAME; \
-		echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> .npmrc; \
 		jq '.version = "$(NPM_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \
-		npm publish; \
+		npm publish --tag latest; \
 		cd ../..; \
 	))
 	cp README.md LICENSE ./npm/kubernetes-mcp-server/
-	echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> ./npm/kubernetes-mcp-server/.npmrc
 	jq '.version = "$(NPM_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \
 	jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \
-	cd npm/kubernetes-mcp-server && npm publish
+	cd npm/kubernetes-mcp-server && npm publish --tag latest
 
 .PHONY: python-publish
 python-publish: ## Publish the python packages
@@ -115,3 +113,43 @@ lint: golangci-lint ## Lint the code
 .PHONY: update-readme-tools
 update-readme-tools: ## Update the README.md file with the latest toolsets
 	go run ./internal/tools/update-readme/main.go README.md
+
+##@ Tools
+
+.PHONY: tools
+tools: ## Install all required tools (kind) to ./_output/bin/
+	@echo "Checking and installing required tools to ./_output/bin/ ..."
+	@if [ -f _output/bin/kind ]; then echo "[OK] kind already installed"; else echo "Installing kind..."; $(MAKE) -s kind; fi
+	@echo "All tools ready!"
+
+##@ Local Development
+
+.PHONY: local-env-setup
+local-env-setup: ## Setup complete local development environment with Kind cluster
+	@echo "========================================="
+	@echo "Kubernetes MCP Server - Local Setup"
+	@echo "========================================="
+	$(MAKE) tools
+	$(MAKE) kind-create-cluster
+	$(MAKE) keycloak-install
+	$(MAKE) build
+	@echo ""
+	@echo "========================================="
+	@echo "Local environment ready!"
+	@echo "========================================="
+	@echo ""
+	@echo "Configuration file generated:"
+	@echo "  _output/config.toml"
+	@echo ""
+	@echo "Run the MCP server with:"
+	@echo "  ./$(BINARY_NAME) --port 8080 --config _output/config.toml"
+	@echo ""
+	@echo "Or run with MCP inspector:"
+	@echo "  npx @modelcontextprotocol/inspector@latest \$$(pwd)/$(BINARY_NAME) --config _output/config.toml"
+
+.PHONY: local-env-teardown
+local-env-teardown: ## Tear down the local Kind cluster
+	$(MAKE) kind-delete-cluster
+
+# Include build configuration files
+-include build/*.mk
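For reference, the intended flow for the new local-development targets, assembled from the echoed instructions above:

    make local-env-setup        # kind cluster + Keycloak + build + _output/config.toml
    ./kubernetes-mcp-server --port 8080 --config _output/config.toml
    # or explore it with the MCP inspector:
    npx @modelcontextprotocol/inspector@latest "$(pwd)/kubernetes-mcp-server" --config _output/config.toml
    make local-env-teardown     # delete the kind cluster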
README.md · 26 changed lines

@@ -43,6 +43,7 @@ If you're using the native binaries you don't need to have Node or Python installed
 
 - **✅ Lightweight**: The server is distributed as a single native binary for Linux, macOS, and Windows.
 - **✅ High-Performance / Low-Latency**: Directly interacts with the Kubernetes API server without the overhead of calling and waiting for external commands.
+- **✅ Multi-Cluster**: Can interact with multiple Kubernetes clusters simultaneously (as defined in your kubeconfig files).
 - **✅ Cross-Platform**: Available as a native binary for Linux, macOS, and Windows, as well as an npm package, a Python package, and container/Docker image.
 - **✅ Configurable**: Supports [command-line arguments](#configuration) to configure the server behavior.
 - **✅ Well tested**: The server has an extensive test suite to ensure its reliability and correctness across different Kubernetes environments.
@@ -175,15 +176,16 @@ uvx kubernetes-mcp-server@latest --help
 
 ### Configuration Options
 
 | Option                    | Description |
 |---------------------------|-------------|
 | `--port`                  | Starts the MCP server in Streamable HTTP mode (path /mcp) and Server-Sent Events (SSE) mode (path /sse) and listens on the specified port. |
 | `--log-level`             | Sets the logging level (values [from 0-9](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)). Similar to [kubectl logging levels](https://kubernetes.io/docs/reference/kubectl/quick-reference/#kubectl-output-verbosity-and-debugging). |
 | `--kubeconfig`            | Path to the Kubernetes configuration file. If not provided, it will try to resolve the configuration (in-cluster, default location, etc.). |
 | `--list-output`           | Output format for resource list operations (one of: yaml, table) (default "table"). |
 | `--read-only`             | If set, the MCP server will run in read-only mode, meaning it will not allow any write operations (create, update, delete) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without making changes. |
 | `--disable-destructive`   | If set, the MCP server will disable all destructive operations (delete, update, etc.) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without accidentally making changes. This option has no effect when `--read-only` is used. |
 | `--toolsets`              | Comma-separated list of toolsets to enable. Check the [🛠️ Tools and Functionalities](#tools-and-functionalities) section for more information. |
+| `--disable-multi-cluster` | If set, the MCP server will disable multi-cluster support and will only use the current context from the kubeconfig file. This is useful if you want to restrict the MCP server to a single cluster. |
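A sketch combining several of the options above into one invocation (flags taken from the table; a cluster reachable through the kubeconfig is assumed):

    npx kubernetes-mcp-server@latest \
      --port 8080 \
      --list-output yaml \
      --read-only \
      --disable-multi-cluster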
## 🛠️ Tools and Functionalities <a id="tools-and-functionalities"></a>
@@ -207,12 +209,16 @@ The following sets of tools are available (all on by default):
 
 ### Tools
 
+In case multi-cluster support is enabled (default) and you have access to multiple clusters, all applicable tools will include an additional `context` argument to specify the Kubernetes context (cluster) to use for that operation.
+
 <!-- AVAILABLE-TOOLSETS-TOOLS-START -->
 
 <details>
 
 <summary>config</summary>
 
+- **configuration_contexts_list** - List all available context names and associated server urls from the kubeconfig file
+
 - **configuration_view** - Get the current Kubernetes configuration content as a kubeconfig YAML
   - `minified` (`boolean`) - Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)
 
@@ -261,7 +267,7 @@ The following sets of tools are available (all on by default):
   - `name` (`string`) **(required)** - Name of the Pod to get the logs from
   - `namespace` (`string`) - Namespace to get the Pod logs from
   - `previous` (`boolean`) - Return previous terminated container logs (Optional)
-  - `tail` (`number`) - Number of lines to retrieve from the end of the logs (Optional, default: 100)
+  - `tail` (`integer`) - Number of lines to retrieve from the end of the logs (Optional, default: 100)
 
 - **pods_run** - Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name
   - `image` (`string`) **(required)** - Container Image to run in the Pod
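A sketch of calling one of these tools over the Streamable HTTP endpoint, assuming the server was started with `--port 8080` and the usual MCP `initialize` handshake has already happened (session headers omitted); `my-pod` is a hypothetical Pod name:

    curl -s http://localhost:8080/mcp \
      -H 'Content-Type: application/json' \
      -H 'Accept: application/json, text/event-stream' \
      -d '{
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
          "name": "pods_log",
          "arguments": { "name": "my-pod", "namespace": "default", "tail": 100 }
        }
      }'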
build/keycloak.mk · 448 lines (new file)

@@ -0,0 +1,448 @@
# Keycloak IdP for development and testing

KEYCLOAK_NAMESPACE = keycloak
KEYCLOAK_ADMIN_USER = admin
KEYCLOAK_ADMIN_PASSWORD = admin

.PHONY: keycloak-install
keycloak-install:
	@echo "Installing Keycloak (dev mode using official image)..."
	@kubectl apply -f dev/config/keycloak/deployment.yaml
	@echo "Applying Keycloak ingress (cert-manager will create TLS certificate)..."
	@kubectl apply -f dev/config/keycloak/ingress.yaml
	@echo "Extracting cert-manager CA certificate..."
	@mkdir -p _output/cert-manager-ca
	@kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | base64 -d > _output/cert-manager-ca/ca.crt
	@echo "✅ cert-manager CA certificate extracted to _output/cert-manager-ca/ca.crt (bind-mounted to API server)"
	@echo "Restarting Kubernetes API server to pick up new CA..."
	@docker exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver || \
	  podman exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver
	@echo "Waiting for API server to restart..."
	@sleep 5
	@echo "Waiting for API server to be ready..."
	@for i in $$(seq 1 30); do \
	  if kubectl get --raw /healthz >/dev/null 2>&1; then \
	    echo "✅ Kubernetes API server updated with cert-manager CA"; \
	    break; \
	  fi; \
	  sleep 2; \
	done
	@echo "Waiting for Keycloak to be ready..."
	@kubectl wait --for=condition=ready pod -l app=keycloak -n $(KEYCLOAK_NAMESPACE) --timeout=120s || true
	@echo "Waiting for Keycloak HTTP endpoint to be available..."
	@for i in $$(seq 1 30); do \
	  STATUS=$$(curl -sk -o /dev/null -w "%{http_code}" https://keycloak.127-0-0-1.sslip.io:8443/realms/master 2>/dev/null || echo "000"); \
	  if [ "$$STATUS" = "200" ]; then \
	    echo "✅ Keycloak HTTP endpoint ready"; \
	    break; \
	  fi; \
	  echo " Attempt $$i/30: Waiting for Keycloak (status: $$STATUS)..."; \
	  sleep 3; \
	done
	@echo ""
	@echo "Setting up OpenShift realm..."
	@$(MAKE) -s keycloak-setup-realm
	@echo ""
	@echo "✅ Keycloak installed and configured!"
	@echo "Access at: https://keycloak.127-0-0-1.sslip.io:8443"

.PHONY: keycloak-uninstall
keycloak-uninstall:
	@kubectl delete -f dev/config/keycloak/deployment.yaml 2>/dev/null || true

.PHONY: keycloak-status
keycloak-status: ## Show Keycloak status and connection info
	@if kubectl get svc -n $(KEYCLOAK_NAMESPACE) keycloak >/dev/null 2>&1; then \
	  echo "========================================"; \
	  echo "Keycloak Status"; \
	  echo "========================================"; \
	  echo ""; \
	  echo "Status: Installed"; \
	  echo ""; \
	  echo "Admin Console:"; \
	  echo " URL: https://keycloak.127-0-0-1.sslip.io:8443"; \
	  echo " Username: $(KEYCLOAK_ADMIN_USER)"; \
	  echo " Password: $(KEYCLOAK_ADMIN_PASSWORD)"; \
	  echo ""; \
	  echo "OIDC Endpoints (openshift realm):"; \
	  echo " Discovery: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/.well-known/openid-configuration"; \
	  echo " Token: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token"; \
	  echo " Authorize: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/auth"; \
	  echo " UserInfo: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/userinfo"; \
	  echo " JWKS: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/certs"; \
	  echo ""; \
	  echo "========================================"; \
	else \
	  echo "Keycloak is not installed. Run: make keycloak-install"; \
	fi

.PHONY: keycloak-logs
keycloak-logs: ## Tail Keycloak logs
	@kubectl logs -n $(KEYCLOAK_NAMESPACE) -l app=keycloak -f --tail=100

.PHONY: keycloak-setup-realm
keycloak-setup-realm:
	@echo "========================================="
	@echo "Setting up OpenShift Realm for Token Exchange"
	@echo "========================================="
	@echo "Using Keycloak at https://keycloak.127-0-0-1.sslip.io:8443"
	@echo ""
	@echo "Getting admin access token..."
	@RESPONSE=$$(curl -sk -X POST "https://keycloak.127-0-0-1.sslip.io:8443/realms/master/protocol/openid-connect/token" \
	  -H "Content-Type: application/x-www-form-urlencoded" \
	  -d "username=$(KEYCLOAK_ADMIN_USER)" \
	  -d "password=$(KEYCLOAK_ADMIN_PASSWORD)" \
	  -d "grant_type=password" \
	  -d "client_id=admin-cli"); \
	TOKEN=$$(echo "$$RESPONSE" | jq -r '.access_token // empty' 2>/dev/null); \
	if [ -z "$$TOKEN" ] || [ "$$TOKEN" = "null" ]; then \
	  echo "❌ Failed to get access token"; \
	  echo "Response was: $$RESPONSE" | head -c 200; \
	  echo ""; \
	  echo "Check if:"; \
	  echo " - Keycloak is running (make keycloak-install)"; \
	  echo " - Keycloak is accessible at https://keycloak.127-0-0-1.sslip.io:8443"; \
	  echo " - Admin credentials are correct: $(KEYCLOAK_ADMIN_USER)/$(KEYCLOAK_ADMIN_PASSWORD)"; \
	  exit 1; \
	fi; \
	echo "✅ Successfully obtained access token"; \
	echo ""; \
	echo "Creating OpenShift realm..."; \
	REALM_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"realm":"openshift","enabled":true}'); \
	REALM_CODE=$$(echo "$$REALM_RESPONSE" | tail -c 4); \
	if [ "$$REALM_CODE" = "201" ] || [ "$$REALM_CODE" = "409" ]; then \
	  if [ "$$REALM_CODE" = "201" ]; then echo "✅ OpenShift realm created"; \
	  else echo "✅ OpenShift realm already exists"; fi; \
	else \
	  echo "❌ Failed to create OpenShift realm (HTTP $$REALM_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Configuring realm events..."; \
	EVENT_CONFIG_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"realm":"openshift","enabled":true,"eventsEnabled":true,"eventsListeners":["jboss-logging"],"adminEventsEnabled":true,"adminEventsDetailsEnabled":true}'); \
	EVENT_CONFIG_CODE=$$(echo "$$EVENT_CONFIG_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$EVENT_CONFIG_CODE" = "204" ]; then \
	  echo "✅ User and admin event logging enabled"; \
	else \
	  echo "⚠️ Could not configure event logging (HTTP $$EVENT_CONFIG_CODE)"; \
	fi; \
	echo ""; \
	echo "Creating mcp:openshift client scope..."; \
	SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"mcp:openshift","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	SCOPE_CODE=$$(echo "$$SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$SCOPE_CODE" = "201" ] || [ "$$SCOPE_CODE" = "409" ]; then \
	  if [ "$$SCOPE_CODE" = "201" ]; then echo "✅ mcp:openshift client scope created"; \
	  else echo "✅ mcp:openshift client scope already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp:openshift scope (HTTP $$SCOPE_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Adding audience mapper to mcp:openshift scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp:openshift") | .id // empty' 2>/dev/null); \
	if [ -z "$$SCOPE_ID" ]; then \
	  echo "❌ Failed to find mcp:openshift scope"; \
	  exit 1; \
	fi; \
	MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$SCOPE_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"openshift-audience","protocol":"openid-connect","protocolMapper":"oidc-audience-mapper","config":{"included.client.audience":"openshift","id.token.claim":"true","access.token.claim":"true"}}'); \
	MAPPER_CODE=$$(echo "$$MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MAPPER_CODE" = "201" ] || [ "$$MAPPER_CODE" = "409" ]; then \
	  if [ "$$MAPPER_CODE" = "201" ]; then echo "✅ Audience mapper added"; \
	  else echo "✅ Audience mapper already exists"; fi; \
	else \
	  echo "❌ Failed to create audience mapper (HTTP $$MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating groups client scope..."; \
	GROUPS_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"groups","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	GROUPS_SCOPE_CODE=$$(echo "$$GROUPS_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$GROUPS_SCOPE_CODE" = "201" ] || [ "$$GROUPS_SCOPE_CODE" = "409" ]; then \
	  if [ "$$GROUPS_SCOPE_CODE" = "201" ]; then echo "✅ groups client scope created"; \
	  else echo "✅ groups client scope already exists"; fi; \
	else \
	  echo "❌ Failed to create groups scope (HTTP $$GROUPS_SCOPE_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Adding group membership mapper to groups scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	GROUPS_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "groups") | .id // empty' 2>/dev/null); \
	if [ -z "$$GROUPS_SCOPE_ID" ]; then \
	  echo "❌ Failed to find groups scope"; \
	  exit 1; \
	fi; \
	GROUPS_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$GROUPS_SCOPE_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"groups","protocol":"openid-connect","protocolMapper":"oidc-group-membership-mapper","config":{"claim.name":"groups","full.path":"false","id.token.claim":"true","access.token.claim":"true","userinfo.token.claim":"true"}}'); \
	GROUPS_MAPPER_CODE=$$(echo "$$GROUPS_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$GROUPS_MAPPER_CODE" = "201" ] || [ "$$GROUPS_MAPPER_CODE" = "409" ]; then \
	  if [ "$$GROUPS_MAPPER_CODE" = "201" ]; then echo "✅ Group membership mapper added"; \
	  else echo "✅ Group membership mapper already exists"; fi; \
	else \
	  echo "❌ Failed to create group mapper (HTTP $$GROUPS_MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-server client scope..."; \
	MCP_SERVER_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"mcp-server","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	MCP_SERVER_SCOPE_CODE=$$(echo "$$MCP_SERVER_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ] || [ "$$MCP_SERVER_SCOPE_CODE" = "409" ]; then \
	  if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ]; then echo "✅ mcp-server client scope created"; \
	  else echo "✅ mcp-server client scope already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp-server scope (HTTP $$MCP_SERVER_SCOPE_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Adding audience mapper to mcp-server scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	MCP_SERVER_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp-server") | .id // empty' 2>/dev/null); \
	if [ -z "$$MCP_SERVER_SCOPE_ID" ]; then \
	  echo "❌ Failed to find mcp-server scope"; \
	  exit 1; \
	fi; \
	MCP_SERVER_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$MCP_SERVER_SCOPE_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"mcp-server-audience","protocol":"openid-connect","protocolMapper":"oidc-audience-mapper","config":{"included.client.audience":"mcp-server","id.token.claim":"true","access.token.claim":"true"}}'); \
	MCP_SERVER_MAPPER_CODE=$$(echo "$$MCP_SERVER_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ] || [ "$$MCP_SERVER_MAPPER_CODE" = "409" ]; then \
	  if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ]; then echo "✅ mcp-server audience mapper added"; \
	  else echo "✅ mcp-server audience mapper already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp-server audience mapper (HTTP $$MCP_SERVER_MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating openshift service client..."; \
	OPENSHIFT_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"clientId":"openshift","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups"],"optionalClientScopes":[]}'); \
	OPENSHIFT_CLIENT_CODE=$$(echo "$$OPENSHIFT_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ] || [ "$$OPENSHIFT_CLIENT_CODE" = "409" ]; then \
	  if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ]; then echo "✅ openshift client created"; \
	  else echo "✅ openshift client already exists"; fi; \
	else \
	  echo "❌ Failed to create openshift client (HTTP $$OPENSHIFT_CLIENT_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Adding username mapper to openshift client..."; \
	OPENSHIFT_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	OPENSHIFT_CLIENT_ID=$$(echo "$$OPENSHIFT_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "openshift") | .id // empty' 2>/dev/null); \
	OPENSHIFT_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$OPENSHIFT_CLIENT_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{ "name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	OPENSHIFT_USERNAME_MAPPER_CODE=$$(echo "$$OPENSHIFT_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ] || [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "409" ]; then \
	  if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to openshift client"; \
	  else echo "✅ Username mapper already exists on openshift client"; fi; \
	else \
	  echo "❌ Failed to create username mapper (HTTP $$OPENSHIFT_USERNAME_MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-client public client..."; \
	MCP_PUBLIC_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"clientId":"mcp-client","enabled":true,"publicClient":true,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":false,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email"],"optionalClientScopes":["mcp-server"]}'); \
	MCP_PUBLIC_CLIENT_CODE=$$(echo "$$MCP_PUBLIC_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ] || [ "$$MCP_PUBLIC_CLIENT_CODE" = "409" ]; then \
	  if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ]; then echo "✅ mcp-client public client created"; \
	  else echo "✅ mcp-client public client already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp-client public client (HTTP $$MCP_PUBLIC_CLIENT_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Adding username mapper to mcp-client..."; \
	MCP_PUBLIC_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	MCP_PUBLIC_CLIENT_ID=$$(echo "$$MCP_PUBLIC_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-client") | .id // empty' 2>/dev/null); \
	MCP_PUBLIC_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_PUBLIC_CLIENT_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	MCP_PUBLIC_USERNAME_MAPPER_CODE=$$(echo "$$MCP_PUBLIC_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "409" ]; then \
	  if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-client"; \
	  else echo "✅ Username mapper already exists on mcp-client"; fi; \
	else \
	  echo "❌ Failed to create username mapper (HTTP $$MCP_PUBLIC_USERNAME_MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-server client with token exchange..."; \
	MCP_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"clientId":"mcp-server","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups","mcp-server"],"optionalClientScopes":["mcp:openshift"],"attributes":{"oauth2.device.authorization.grant.enabled":"false","oidc.ciba.grant.enabled":"false","backchannel.logout.session.required":"true","backchannel.logout.revoke.offline.tokens":"false"}}'); \
	MCP_CLIENT_CODE=$$(echo "$$MCP_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_CLIENT_CODE" = "201" ] || [ "$$MCP_CLIENT_CODE" = "409" ]; then \
	  if [ "$$MCP_CLIENT_CODE" = "201" ]; then echo "✅ mcp-server client created"; \
	  else echo "✅ mcp-server client already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp-server client (HTTP $$MCP_CLIENT_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Enabling standard token exchange for mcp-server..."; \
	CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	MCP_CLIENT_ID=$$(echo "$$CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-server") | .id // empty' 2>/dev/null); \
	if [ -z "$$MCP_CLIENT_ID" ]; then \
	  echo "❌ Failed to find mcp-server client"; \
	  exit 1; \
	fi; \
	UPDATE_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"clientId":"mcp-server","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups","mcp-server"],"optionalClientScopes":["mcp:openshift"],"attributes":{"oauth2.device.authorization.grant.enabled":"false","oidc.ciba.grant.enabled":"false","backchannel.logout.session.required":"true","backchannel.logout.revoke.offline.tokens":"false","standard.token.exchange.enabled":"true"}}'); \
	UPDATE_CLIENT_CODE=$$(echo "$$UPDATE_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$UPDATE_CLIENT_CODE" = "204" ]; then \
	  echo "✅ Standard token exchange enabled for mcp-server client"; \
	else \
	  echo "⚠️ Could not enable token exchange (HTTP $$UPDATE_CLIENT_CODE)"; \
	fi; \
	echo ""; \
	echo "Getting mcp-server client secret..."; \
	SECRET_RESPONSE=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/client-secret" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Accept: application/json"); \
	CLIENT_SECRET=$$(echo "$$SECRET_RESPONSE" | jq -r '.value // empty' 2>/dev/null); \
	if [ -z "$$CLIENT_SECRET" ]; then \
	  echo "❌ Failed to get client secret"; \
	else \
	  echo "✅ Client secret retrieved"; \
	fi; \
	echo ""; \
	echo "Adding username mapper to mcp-server client..."; \
	MCP_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/protocol-mappers/models" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	MCP_USERNAME_MAPPER_CODE=$$(echo "$$MCP_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_USERNAME_MAPPER_CODE" = "409" ]; then \
	  if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-server client"; \
	  else echo "✅ Username mapper already exists on mcp-server client"; fi; \
	else \
	  echo "❌ Failed to create username mapper (HTTP $$MCP_USERNAME_MAPPER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Creating test user mcp/mcp..."; \
	USER_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/users" \
	  -H "Authorization: Bearer $$TOKEN" \
	  -H "Content-Type: application/json" \
	  -d '{"username":"mcp","email":"mcp@example.com","firstName":"MCP","lastName":"User","enabled":true,"emailVerified":true,"credentials":[{"type":"password","value":"mcp","temporary":false}]}'); \
	USER_CODE=$$(echo "$$USER_RESPONSE" | tail -c 4); \
	if [ "$$USER_CODE" = "201" ] || [ "$$USER_CODE" = "409" ]; then \
	  if [ "$$USER_CODE" = "201" ]; then echo "✅ mcp user created"; \
	  else echo "✅ mcp user already exists"; fi; \
	else \
	  echo "❌ Failed to create mcp user (HTTP $$USER_CODE)"; \
	  exit 1; \
	fi; \
	echo ""; \
	echo "Setting up RBAC for mcp user..."; \
	kubectl apply -f dev/config/keycloak/rbac.yaml; \
	echo "✅ RBAC binding created for mcp user"; \
	echo ""; \
	echo "🎉 OpenShift realm setup complete!"; \
	echo ""; \
	echo "========================================"; \
	echo "Configuration Summary"; \
	echo "========================================"; \
	echo "Realm: openshift"; \
	echo "Authorization URL: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo "Issuer URL (for config.toml): https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo ""; \
	echo "Test User:"; \
	echo " Username: mcp"; \
	echo " Password: mcp"; \
	echo " Email: mcp@example.com"; \
	echo " RBAC: cluster-admin (full cluster access)"; \
	echo ""; \
	echo "Clients:"; \
	echo " mcp-client (public, for browser-based auth)"; \
	echo " Client ID: mcp-client"; \
	echo " Optional Scopes: mcp-server"; \
	echo " mcp-server (confidential, token exchange enabled)"; \
	echo " Client ID: mcp-server"; \
	echo " Client Secret: $$CLIENT_SECRET"; \
	echo " openshift (service account)"; \
	echo " Client ID: openshift"; \
	echo ""; \
	echo "Client Scopes:"; \
	echo " mcp-server (default) - Audience: mcp-server"; \
	echo " mcp:openshift (optional) - Audience: openshift"; \
	echo " groups (default) - Group membership mapper"; \
	echo ""; \
	echo "TOML Configuration (config.toml):"; \
	echo " require_oauth = true"; \
	echo " oauth_audience = \"mcp-server\""; \
	echo " oauth_scopes = [\"openid\", \"mcp-server\"]"; \
	echo " validate_token = false"; \
	echo " authorization_url = \"https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift\""; \
	echo " sts_client_id = \"mcp-server\""; \
	echo " sts_client_secret = \"$$CLIENT_SECRET\""; \
	echo " sts_audience = \"openshift\""; \
	echo " sts_scopes = [\"mcp:openshift\"]"; \
	echo " certificate_authority = \"_output/cert-manager-ca/ca.crt\""; \
	echo "========================================"; \
	echo ""; \
	echo "Note: The Kubernetes API server is configured with:"; \
	echo " --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo ""; \
	echo "Important: The cert-manager CA certificate was extracted to:"; \
	echo " _output/cert-manager-ca/ca.crt"; \
	echo ""; \
	echo "Writing configuration to _output/config.toml..."; \
	mkdir -p _output; \
	printf '%s\n' \
	  'require_oauth = true' \
	  'oauth_audience = "mcp-server"' \
	  'oauth_scopes = ["openid", "mcp-server"]' \
	  'validate_token = false' \
	  'authorization_url = "https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"' \
	  'sts_client_id = "mcp-server"' \
	  "sts_client_secret = \"$$CLIENT_SECRET\"" \
	  'sts_audience = "openshift"' \
	  'sts_scopes = ["mcp:openshift"]' \
	  'certificate_authority = "_output/cert-manager-ca/ca.crt"' \
	  > _output/config.toml; \
	echo "✅ Configuration written to _output/config.toml"
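A sketch of exercising the realm this target builds, using only endpoints and credentials from the summary above; $CLIENT_SECRET stands for the mcp-server secret printed by `make keycloak-setup-realm`:

    TOKEN_URL=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token

    # 1. Log in as the test user through the public mcp-client, requesting the mcp-server audience:
    SUBJECT_TOKEN=$(curl -sk "$TOKEN_URL" \
      -d grant_type=password -d client_id=mcp-client \
      -d username=mcp -d password=mcp \
      -d scope="openid mcp-server" | jq -r .access_token)

    # 2. Exchange it at the confidential mcp-server client for a token audienced to "openshift"
    #    (RFC 8693 token exchange, enabled above via standard.token.exchange.enabled):
    curl -sk "$TOKEN_URL" \
      -d grant_type=urn:ietf:params:oauth:grant-type:token-exchange \
      -d client_id=mcp-server -d client_secret="$CLIENT_SECRET" \
      -d subject_token="$SUBJECT_TOKEN" \
      -d subject_token_type=urn:ietf:params:oauth:token-type:access_token \
      -d audience=openshift -d scope="mcp:openshift" | jq .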
build/kind.mk · 61 lines (new file)

@@ -0,0 +1,61 @@
# Kind cluster management

KIND_CLUSTER_NAME ?= kubernetes-mcp-server

# Detect container engine (docker or podman)
CONTAINER_ENGINE ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)

.PHONY: kind-create-certs
kind-create-certs:
	@if [ ! -f _output/cert-manager-ca/ca.crt ]; then \
	  echo "Creating placeholder CA certificate for bind mount..."; \
	  ./hack/generate-placeholder-ca.sh; \
	else \
	  echo "✅ Placeholder CA already exists"; \
	fi

.PHONY: kind-create-cluster
kind-create-cluster: kind kind-create-certs
	@# Set KIND provider for podman on Linux
	@if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \
	  export KIND_EXPERIMENTAL_PROVIDER=podman; \
	fi; \
	if $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
	  echo "Kind cluster '$(KIND_CLUSTER_NAME)' already exists, skipping creation"; \
	else \
	  echo "Creating Kind cluster '$(KIND_CLUSTER_NAME)'..."; \
	  $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config dev/config/kind/cluster.yaml; \
	  echo "Adding ingress-ready label to control-plane node..."; \
	  kubectl label node $(KIND_CLUSTER_NAME)-control-plane ingress-ready=true --overwrite; \
	  echo "Installing nginx ingress controller..."; \
	  kubectl apply -f dev/config/ingress/nginx-ingress.yaml; \
	  echo "Waiting for ingress controller to be ready..."; \
	  kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s; \
	  echo "✅ Ingress controller ready"; \
	  echo "Installing cert-manager..."; \
	  kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml; \
	  echo "Waiting for cert-manager to be ready..."; \
	  kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager --timeout=120s; \
	  kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-cainjector --timeout=120s; \
	  kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-webhook --timeout=120s; \
	  echo "✅ cert-manager ready"; \
	  echo "Creating cert-manager ClusterIssuer..."; \
	  sleep 5; \
	  kubectl apply -f dev/config/cert-manager/selfsigned-issuer.yaml; \
	  echo "✅ ClusterIssuer created"; \
	  echo "Adding /etc/hosts entry for Keycloak in control plane..."; \
	  if command -v docker >/dev/null 2>&1 && docker ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \
	    docker exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \
	  elif command -v podman >/dev/null 2>&1 && podman ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \
	    podman exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \
	  fi; \
	  echo "✅ /etc/hosts entry added"; \
	fi

.PHONY: kind-delete-cluster
kind-delete-cluster: kind
	@# Set KIND provider for podman on Linux
	@if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \
	  export KIND_EXPERIMENTAL_PROVIDER=podman; \
	fi; \
	$(KIND) delete cluster --name $(KIND_CLUSTER_NAME)
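Typical usage of these targets; kind names the kubeconfig context `kind-<cluster>`, so the default cluster is reachable as shown (the podman override relies on the `?=` defaults above and is an assumption):

    make kind-create-cluster
    kubectl cluster-info --context kind-kubernetes-mcp-server
    CONTAINER_ENGINE=podman make kind-create-cluster   # force the podman path on Linux
    make kind-delete-cluster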
build/tools.mk · 20 lines (new file)

@@ -0,0 +1,20 @@
# Tools

# Platform detection
OS := $(shell uname -s | tr '[:upper:]' '[:lower:]')
ARCH := $(shell uname -m | tr '[:upper:]' '[:lower:]')
ifeq ($(ARCH),x86_64)
  ARCH = amd64
endif
ifeq ($(ARCH),aarch64)
  ARCH = arm64
endif

KIND = _output/bin/kind
KIND_VERSION = v0.30.0
$(KIND):
	@mkdir -p _output/bin
	GOBIN=$(PWD)/_output/bin go install sigs.k8s.io/kind@$(KIND_VERSION)

.PHONY: kind
kind: $(KIND) ## Download kind locally if necessary
dev/config/cert-manager/selfsigned-issuer.yaml · 31 lines (new file)

@@ -0,0 +1,31 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-issuer
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: selfsigned-ca
  namespace: cert-manager
spec:
  isCA: true
  commonName: selfsigned-ca
  secretName: selfsigned-ca-secret
  privateKey:
    algorithm: ECDSA
    size: 256
  issuerRef:
    name: selfsigned-issuer
    kind: ClusterIssuer
    group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-ca-issuer
spec:
  ca:
    secretName: selfsigned-ca-secret
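The three objects form a two-tier chain: the bare self-signed ClusterIssuer only bootstraps the `selfsigned-ca` Certificate, whose secret then backs `selfsigned-ca-issuer`, the issuer that actually signs leaf certificates (e.g. the Keycloak ingress TLS cert). A quick check once cert-manager has reconciled:

    kubectl get clusterissuer selfsigned-issuer selfsigned-ca-issuer
    kubectl -n cert-manager get certificate selfsigned-ca
    kubectl -n cert-manager get secret selfsigned-ca-secret -o jsonpath='{.data.ca\.crt}' | base64 -d \
      | openssl x509 -noout -subject -issuer   # subject == issuer for a self-signed CA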
dev/config/ingress/nginx-ingress.yaml · 386 lines (new file)

@@ -0,0 +1,386 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
---
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
data:
  allow-snippet-annotations: "true"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
  name: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
      - namespaces
    verbs:
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
  name: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - endpoints
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    resourceNames:
      - ingress-nginx-leader
    verbs:
      - get
      - update
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - list
      - watch
      - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: ingress-nginx
subjects:
  - kind: ServiceAccount
    name: ingress-nginx
    namespace: ingress-nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: http
      appProtocol: http
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
      appProtocol: https
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: ingress-nginx-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/instance: ingress-nginx
      app.kubernetes.io/component: controller
  replicas: 1
  revisionHistoryLimit: 10
  minReadySeconds: 0
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/instance: ingress-nginx
        app.kubernetes.io/component: controller
    spec:
      dnsPolicy: ClusterFirst
      containers:
        - name: controller
          image: registry.k8s.io/ingress-nginx/controller:v1.11.1
          imagePullPolicy: IfNotPresent
          lifecycle:
            preStop:
              exec:
                command:
                  - /wait-shutdown
          args:
            - /nginx-ingress-controller
            - --election-id=ingress-nginx-leader
            - --controller-class=k8s.io/ingress-nginx
            - --ingress-class=nginx
            - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
            - --watch-ingress-without-class=true
          securityContext:
            runAsNonRoot: true
            runAsUser: 101
            allowPrivilegeEscalation: false
            seccompProfile:
              type: RuntimeDefault
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: LD_PRELOAD
              value: /usr/local/lib/libmimalloc.so
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 1
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
              hostPort: 80
            - name: https
              containerPort: 443
              protocol: TCP
              hostPort: 443
            - name: https-alt
              containerPort: 443
              protocol: TCP
              hostPort: 8443
            - name: webhook
              containerPort: 8443
              protocol: TCP
          resources:
            requests:
              cpu: 100m
              memory: 90Mi
      nodeSelector:
        ingress-ready: "true"
        kubernetes.io/os: linux
      serviceAccountName: ingress-nginx
      terminationGracePeriodSeconds: 0
      tolerations:
        - effect: NoSchedule
          key: node-role.kubernetes.io/master
          operator: Equal
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
          operator: Equal
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/component: controller
  name: nginx
spec:
  controller: k8s.io/ingress-nginx
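Note the `https-alt` entry: the controller's HTTPS container port 443 is also published on host port 8443, which is what makes Keycloak reachable at https://keycloak.127-0-0-1.sslip.io:8443 from the host. A quick smoke test (the 200 expectation assumes Keycloak and its ingress have been applied):

    kubectl -n ingress-nginx wait --for=condition=ready pod \
      -l app.kubernetes.io/component=controller --timeout=90s
    curl -ks -o /dev/null -w '%{http_code}\n' https://keycloak.127-0-0-1.sslip.io:8443/realms/master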
dev/config/keycloak/deployment.yaml · 71 lines (new file)

@@ -0,0 +1,71 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: keycloak
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: keycloak
  namespace: keycloak
  labels:
    app: keycloak
spec:
  replicas: 1
  selector:
    matchLabels:
      app: keycloak
  template:
    metadata:
      labels:
        app: keycloak
    spec:
      containers:
        - name: keycloak
          image: quay.io/keycloak/keycloak:26.4
          args: ["start-dev"]
          env:
            - name: KC_BOOTSTRAP_ADMIN_USERNAME
              value: "admin"
            - name: KC_BOOTSTRAP_ADMIN_PASSWORD
              value: "admin"
            - name: KC_HOSTNAME
              value: "https://keycloak.127-0-0-1.sslip.io:8443"
            - name: KC_HTTP_ENABLED
              value: "true"
            - name: KC_HEALTH_ENABLED
              value: "true"
            - name: KC_PROXY_HEADERS
              value: "xforwarded"
          ports:
            - name: http
              containerPort: 8080
          readinessProbe:
            httpGet:
              path: /health/ready
              port: 9000
            initialDelaySeconds: 30
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /health/live
              port: 9000
            initialDelaySeconds: 60
            periodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  name: keycloak
  namespace: keycloak
  labels:
    app: keycloak
spec:
  ports:
    - name: http
      port: 80
      targetPort: 8080
  selector:
    app: keycloak
  type: ClusterIP
34
dev/config/keycloak/ingress.yaml
Normal file
@@ -0,0 +1,34 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: keycloak
  namespace: keycloak
  labels:
    app: keycloak
  annotations:
    cert-manager.io/cluster-issuer: "selfsigned-ca-issuer"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    # Required for Keycloak 26.2.0+ to include port in issuer URLs
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header X-Forwarded-Proto https;
      proxy_set_header X-Forwarded-Port 8443;
      proxy_set_header X-Forwarded-Host $host:8443;
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - keycloak.127-0-0-1.sslip.io
      secretName: keycloak-tls-cert
  rules:
    - host: keycloak.127-0-0-1.sslip.io
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: keycloak
                port:
                  number: 80
20
dev/config/keycloak/rbac.yaml
Normal file
@@ -0,0 +1,20 @@
# RBAC ClusterRoleBinding for mcp user with OIDC authentication
#
# IMPORTANT: This requires Kubernetes API server to be configured with OIDC:
#   --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift
#   --oidc-username-claim=preferred_username
#
# Without OIDC configuration, this binding will not work.
#
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: oidc-mcp-cluster-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift#mcp
30
dev/config/kind/cluster.yaml
Normal file
@@ -0,0 +1,30 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    extraMounts:
      - hostPath: ./_output/cert-manager-ca/ca.crt
        containerPath: /etc/kubernetes/pki/keycloak-ca.crt
        readOnly: true
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            node-labels: "ingress-ready=true"
      - |
        kind: ClusterConfiguration
        apiServer:
          extraArgs:
            oidc-issuer-url: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift
            oidc-client-id: openshift
            oidc-username-claim: preferred_username
            oidc-groups-claim: groups
            oidc-ca-file: /etc/kubernetes/pki/keycloak-ca.crt
    extraPortMappings:
      - containerPort: 80
        hostPort: 8080
        protocol: TCP
      - containerPort: 443
        hostPort: 8443
        protocol: TCP
20
go.mod
@@ -4,18 +4,18 @@ go 1.24.1
 
 require (
 	github.com/BurntSushi/toml v1.5.0
-	github.com/coreos/go-oidc/v3 v3.15.0
+	github.com/coreos/go-oidc/v3 v3.16.0
 	github.com/fsnotify/fsnotify v1.9.0
-	github.com/go-jose/go-jose/v4 v4.1.2
+	github.com/go-jose/go-jose/v4 v4.1.3
 	github.com/google/jsonschema-go v0.3.0
-	github.com/mark3labs/mcp-go v0.40.0
+	github.com/mark3labs/mcp-go v0.42.0
 	github.com/pkg/errors v0.9.1
 	github.com/spf13/afero v1.15.0
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
-	golang.org/x/net v0.44.0
-	golang.org/x/oauth2 v0.31.0
+	golang.org/x/net v0.46.0
+	golang.org/x/oauth2 v0.32.0
 	golang.org/x/sync v0.17.0
 	helm.sh/helm/v3 v3.19.0
 	k8s.io/api v0.34.1
@@ -27,7 +27,7 @@ require (
 	k8s.io/kubectl v0.34.1
 	k8s.io/metrics v0.34.1
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
-	sigs.k8s.io/controller-runtime v0.22.1
+	sigs.k8s.io/controller-runtime v0.22.3
 	sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664
 	sigs.k8s.io/yaml v1.6.0
 )
@@ -122,10 +122,10 @@ require (
 	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
-	golang.org/x/crypto v0.42.0 // indirect
-	golang.org/x/sys v0.36.0 // indirect
-	golang.org/x/term v0.35.0 // indirect
-	golang.org/x/text v0.29.0 // indirect
+	golang.org/x/crypto v0.43.0 // indirect
+	golang.org/x/sys v0.37.0 // indirect
+	golang.org/x/term v0.36.0 // indirect
+	golang.org/x/text v0.30.0 // indirect
 	golang.org/x/time v0.12.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect
 	google.golang.org/grpc v1.72.1 // indirect
48
go.sum
@@ -48,8 +48,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
 github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg=
-github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
+github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
+github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -99,8 +99,8 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
 github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
-github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI=
-github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
 github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
 github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -187,8 +187,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
 github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
-github.com/mark3labs/mcp-go v0.40.0 h1:M0oqK412OHBKut9JwXSsj4KanSmEKpzoW8TcxoPOkAU=
-github.com/mark3labs/mcp-go v0.40.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
+github.com/mark3labs/mcp-go v0.42.0 h1:gk/8nYJh8t3yroCAOBhNbYsM9TCKvkM13I5t5Hfu6Ls=
+github.com/mark3labs/mcp-go v0.42.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw=
 github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
 github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -357,20 +357,20 @@ go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
-golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
+golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
-golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
-golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
-golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -382,22 +382,22 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
-golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
-golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
-golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
 golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
 golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -453,8 +453,8 @@ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8
 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
 oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
-sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
-sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
+sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y=
+sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 h1:xC7x7FsPURJYhZnWHsWFd7nkdD/WRtQVWPC28FWt85Y=
 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664/go.mod h1:Cq9jUhwSYol5tNB0O/1vLYxNV9KqnhpvEa6HvJ1w0wY=
 sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
22
hack/generate-placeholder-ca.sh
Executable file
@@ -0,0 +1,22 @@
#!/bin/bash
set -e

# Generate a placeholder self-signed CA certificate for KIND cluster startup
# This will be replaced with the real cert-manager CA after the cluster is created

CERT_DIR="_output/cert-manager-ca"
CA_CERT="$CERT_DIR/ca.crt"
CA_KEY="$CERT_DIR/ca.key"

mkdir -p "$CERT_DIR"

# Generate a self-signed CA certificate (valid placeholder)
openssl req -x509 -newkey rsa:2048 -nodes \
  -keyout "$CA_KEY" \
  -out "$CA_CERT" \
  -days 365 \
  -subj "/CN=placeholder-ca" \
  2>/dev/null

echo "✅ Placeholder CA certificate created at $CA_CERT"
echo "⚠️  This will be replaced with cert-manager CA after cluster creation"
15
internal/test/env.go
Normal file
@@ -0,0 +1,15 @@
package test

import (
	"os"
	"strings"
)

func RestoreEnv(originalEnv []string) {
	os.Clearenv()
	for _, env := range originalEnv {
		if key, value, found := strings.Cut(env, "="); found {
			_ = os.Setenv(key, value)
		}
	}
}
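RestoreEnv pairs with a snapshot taken before a test mutates the process environment. A minimal usage sketch (hypothetical test, not part of the diff):

package test

import (
	"os"
	"testing"
)

// Hypothetical usage: snapshot the environment at test start, mutate it
// freely, and restore the snapshot on cleanup so later tests are unaffected.
func TestWithScratchEnv(t *testing.T) {
	originalEnv := os.Environ() // capture before mutating
	t.Cleanup(func() { RestoreEnv(originalEnv) })

	_ = os.Setenv("KUBECONFIG", "/tmp/scratch-kubeconfig")
	// ... test body that depends on the modified environment ...
}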
@@ -8,15 +8,10 @@ func KubeConfigFake() *clientcmdapi.Config {
 	fakeConfig := clientcmdapi.NewConfig()
 	fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster()
 	fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443"
-	fakeConfig.Clusters["additional-cluster"] = clientcmdapi.NewCluster()
 	fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo()
-	fakeConfig.AuthInfos["additional-auth"] = clientcmdapi.NewAuthInfo()
 	fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext()
 	fakeConfig.Contexts["fake-context"].Cluster = "fake"
 	fakeConfig.Contexts["fake-context"].AuthInfo = "fake"
-	fakeConfig.Contexts["additional-context"] = clientcmdapi.NewContext()
-	fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster"
-	fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth"
 	fakeConfig.CurrentContext = "fake-context"
 	return fakeConfig
 }
@@ -73,10 +73,14 @@ func (m *MockServer) Kubeconfig() *api.Config {
 }
 
 func (m *MockServer) KubeconfigFile(t *testing.T) string {
-	kubeconfig := filepath.Join(t.TempDir(), "config")
-	err := clientcmd.WriteToFile(*m.Kubeconfig(), kubeconfig)
+	return KubeconfigFile(t, m.Kubeconfig())
+}
+
+func KubeconfigFile(t *testing.T, kubeconfig *api.Config) string {
+	kubeconfigFile := filepath.Join(t.TempDir(), "config")
+	err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile)
 	require.NoError(t, err, "Expected no error writing kubeconfig file")
-	return kubeconfig
+	return kubeconfigFile
 }
 
 func WriteObject(w http.ResponseWriter, obj runtime.Object) {
@@ -5,6 +5,7 @@
 	"fmt"
 	"maps"
 	"os"
+	"path/filepath"
 	"slices"
 	"strings"
 
@@ -25,7 +26,14 @@ func (o *OpenShift) IsOpenShift(ctx context.Context) bool {
 
 var _ internalk8s.Openshift = (*OpenShift)(nil)
 
 func main() {
-	readme, err := os.ReadFile(os.Args[1])
+	// Snyk reports false positive unless we flow the args through filepath.Clean and filepath.Localize in this specific order
+	var err error
+	localReadmePath := filepath.Clean(os.Args[1])
+	localReadmePath, err = filepath.Localize(localReadmePath)
+	if err != nil {
+		panic(err)
+	}
+	readme, err := os.ReadFile(localReadmePath)
 	if err != nil {
 		panic(err)
 	}
@@ -81,7 +89,7 @@ func main() {
 		toolsetTools.String(),
 	)
 
-	if err := os.WriteFile(os.Args[1], []byte(updated), 0o644); err != nil {
+	if err := os.WriteFile(localReadmePath, []byte(updated), 0o644); err != nil {
 		panic(err)
 	}
 }
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-darwin-amd64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"darwin"
 	],
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-darwin-arm64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"darwin"
 	],
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-linux-amd64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"linux"
 	],
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-linux-arm64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"linux"
 	],
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-windows-amd64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"win32"
 	],
@@ -2,6 +2,10 @@
 	"name": "kubernetes-mcp-server-windows-arm64",
 	"version": "0.0.0",
 	"description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift",
+	"repository": {
+		"type": "git",
+		"url": "git+https://github.com/containers/kubernetes-mcp-server.git"
+	},
 	"os": [
 		"win32"
 	],
@@ -10,8 +10,29 @@ import (
 )
 
 type ServerTool struct {
-	Tool    Tool
-	Handler ToolHandlerFunc
+	Tool               Tool
+	Handler            ToolHandlerFunc
+	ClusterAware       *bool
+	TargetListProvider *bool
 }
 
+// IsClusterAware indicates whether the tool can accept a "cluster" or "context" parameter
+// to operate on a specific Kubernetes cluster context.
+// Defaults to true if not explicitly set
+func (s *ServerTool) IsClusterAware() bool {
+	if s.ClusterAware != nil {
+		return *s.ClusterAware
+	}
+	return true
+}
+
+// IsTargetListProvider indicates whether the tool is used to provide a list of targets (clusters/contexts)
+// Defaults to false if not explicitly set
+func (s *ServerTool) IsTargetListProvider() bool {
+	if s.TargetListProvider != nil {
+		return *s.TargetListProvider
+	}
+	return false
+}
+
 type Toolset interface {
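The pointer fields give tri-state semantics: nil means "use the default". A minimal same-package sketch of the defaults and explicit overrides, using k8s.io/utils/ptr (the test file below exercises the same behavior):

package api

import (
	"fmt"

	"k8s.io/utils/ptr"
)

// Hypothetical sketch: the zero-value ServerTool is cluster-aware and not a
// target-list provider; both defaults can be overridden via the pointers.
func ExampleServerToolDefaults() {
	def := &ServerTool{}
	custom := &ServerTool{ClusterAware: ptr.To(false), TargetListProvider: ptr.To(true)}
	fmt.Println(def.IsClusterAware(), def.IsTargetListProvider())       // true false
	fmt.Println(custom.IsClusterAware(), custom.IsTargetListProvider()) // false true
}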
47
pkg/api/toolsets_test.go
Normal file
@@ -0,0 +1,47 @@
package api

import (
	"testing"

	"github.com/stretchr/testify/suite"
	"k8s.io/utils/ptr"
)

type ToolsetsSuite struct {
	suite.Suite
}

func (s *ToolsetsSuite) TestServerTool() {
	s.Run("IsClusterAware", func() {
		s.Run("defaults to true", func() {
			tool := &ServerTool{}
			s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true by default")
		})
		s.Run("can be set to false", func() {
			tool := &ServerTool{ClusterAware: ptr.To(false)}
			s.False(tool.IsClusterAware(), "Expected IsClusterAware to be false when set to false")
		})
		s.Run("can be set to true", func() {
			tool := &ServerTool{ClusterAware: ptr.To(true)}
			s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true when set to true")
		})
	})
	s.Run("IsTargetListProvider", func() {
		s.Run("defaults to false", func() {
			tool := &ServerTool{}
			s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false by default")
		})
		s.Run("can be set to false", func() {
			tool := &ServerTool{TargetListProvider: ptr.To(false)}
			s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false when set to false")
		})
		s.Run("can be set to true", func() {
			tool := &ServerTool{TargetListProvider: ptr.To(true)}
			s.True(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be true when set to true")
		})
	})
}

func TestToolsets(t *testing.T) {
	suite.Run(t, new(ToolsetsSuite))
}
@@ -1,11 +1,19 @@
 package config
 
 import (
+	"bytes"
+	"fmt"
 	"os"
 
 	"github.com/BurntSushi/toml"
 )
 
+const (
+	ClusterProviderKubeConfig = "kubeconfig"
+	ClusterProviderInCluster  = "in-cluster"
+	ClusterProviderDisabled   = "disabled"
+)
+
 // StaticConfig is the configuration for the server.
 // It allows to configure server specific settings and tools to be enabled or disabled.
 type StaticConfig struct {
@@ -49,6 +57,17 @@ type StaticConfig struct {
 	StsScopes            []string `toml:"sts_scopes,omitempty"`
 	CertificateAuthority string   `toml:"certificate_authority,omitempty"`
 	ServerURL            string   `toml:"server_url,omitempty"`
+	// ClusterProviderStrategy is how the server finds clusters.
+	// If set to "kubeconfig", the clusters will be loaded from those in the kubeconfig.
+	// If set to "in-cluster", the server will use the in cluster config
+	ClusterProviderStrategy string `toml:"cluster_provider_strategy,omitempty"`
+
+	// ClusterProvider-specific configurations
+	// This map holds raw TOML primitives that will be parsed by registered provider parsers
+	ClusterProviderConfigs map[string]toml.Primitive `toml:"cluster_provider_configs,omitempty"`
+
+	// Internal: parsed provider configs (not exposed to TOML package)
+	parsedClusterProviderConfigs map[string]ProviderConfig
 }
 
 func Default() *StaticConfig {
@@ -76,8 +95,46 @@ func Read(configPath string) (*StaticConfig, error) {
 // ReadToml reads the toml data and returns the StaticConfig.
 func ReadToml(configData []byte) (*StaticConfig, error) {
 	config := Default()
-	if err := toml.Unmarshal(configData, config); err != nil {
+	md, err := toml.NewDecoder(bytes.NewReader(configData)).Decode(config)
+	if err != nil {
 		return nil, err
 	}
+
+	if err := config.parseClusterProviderConfigs(md); err != nil {
+		return nil, err
+	}
+
 	return config, nil
 }
+
+func (c *StaticConfig) GetProviderConfig(strategy string) (ProviderConfig, bool) {
+	config, ok := c.parsedClusterProviderConfigs[strategy]
+
+	return config, ok
+}
+
+func (c *StaticConfig) parseClusterProviderConfigs(md toml.MetaData) error {
+	if c.parsedClusterProviderConfigs == nil {
+		c.parsedClusterProviderConfigs = make(map[string]ProviderConfig, len(c.ClusterProviderConfigs))
+	}
+
+	for strategy, primitive := range c.ClusterProviderConfigs {
+		parser, ok := getProviderConfigParser(strategy)
+		if !ok {
+			continue
+		}
+
+		providerConfig, err := parser(primitive, md)
+		if err != nil {
+			return fmt.Errorf("failed to parse config for ClusterProvider '%s': %w", strategy, err)
+		}
+
+		if err := providerConfig.Validate(); err != nil {
+			return fmt.Errorf("invalid config file for ClusterProvider '%s': %w", strategy, err)
+		}
+
+		c.parsedClusterProviderConfigs[strategy] = providerConfig
+	}
+
+	return nil
+}
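End to end, ReadToml now returns both the decoded StaticConfig and any provider sections that a registered parser claimed; unregistered sections stay as opaque toml.Primitive values. A minimal same-package sketch:

package config

import "fmt"

// Hypothetical sketch: a config carrying a provider-specific section. The
// section is only surfaced by GetProviderConfig if a parser was registered
// for the strategy via RegisterProviderConfig beforehand.
func ExampleReadTomlProviderConfig() {
	data := []byte(`
cluster_provider_strategy = "kubeconfig"

[cluster_provider_configs.kubeconfig]
# keys here remain opaque toml.Primitive values until a parser claims them
`)
	cfg, err := ReadToml(data)
	if err != nil {
		panic(err)
	}
	_, ok := cfg.GetProviderConfig("kubeconfig") // false unless a parser is registered
	fmt.Println(cfg.ClusterProviderStrategy, ok)
}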
@@ -11,10 +11,25 @@ import (
 	"github.com/stretchr/testify/suite"
 )
 
-type ConfigSuite struct {
+type BaseConfigSuite struct {
 	suite.Suite
 }
 
+func (s *BaseConfigSuite) writeConfig(content string) string {
+	s.T().Helper()
+	tempDir := s.T().TempDir()
+	path := filepath.Join(tempDir, "config.toml")
+	err := os.WriteFile(path, []byte(content), 0644)
+	if err != nil {
+		s.T().Fatalf("Failed to write config file %s: %v", path, err)
+	}
+	return path
+}
+
+type ConfigSuite struct {
+	BaseConfigSuite
+}
+
 func (s *ConfigSuite) TestReadConfigMissingFile() {
 	config, err := Read("non-existent-config.toml")
 	s.Run("returns error for missing file", func() {
@@ -159,17 +174,6 @@ func (s *ConfigSuite) TestReadConfigValidPreservesDefaultsForMissingFields() {
 	})
 }
 
-func (s *ConfigSuite) writeConfig(content string) string {
-	s.T().Helper()
-	tempDir := s.T().TempDir()
-	path := filepath.Join(tempDir, "config.toml")
-	err := os.WriteFile(path, []byte(content), 0644)
-	if err != nil {
-		s.T().Fatalf("Failed to write config file %s: %v", path, err)
-	}
-	return path
-}
-
 func TestConfig(t *testing.T) {
 	suite.Run(t, new(ConfigSuite))
 }
33
pkg/config/provider_config.go
Normal file
@@ -0,0 +1,33 @@
package config

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// ProviderConfig is the interface that all provider-specific configurations must implement.
// Each provider registers a factory function to parse its config from TOML primitives
type ProviderConfig interface {
	Validate() error
}

type ProviderConfigParser func(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error)

var (
	providerConfigParsers = make(map[string]ProviderConfigParser)
)

func RegisterProviderConfig(strategy string, parser ProviderConfigParser) {
	if _, exists := providerConfigParsers[strategy]; exists {
		panic(fmt.Sprintf("provider config parser already registered for strategy '%s'", strategy))
	}

	providerConfigParsers[strategy] = parser
}

func getProviderConfigParser(strategy string) (ProviderConfigParser, bool) {
	provider, ok := providerConfigParsers[strategy]

	return provider, ok
}
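Since RegisterProviderConfig panics on duplicate registration, each strategy should register exactly once, typically from the provider package's init. A minimal same-package sketch with a hypothetical "example" strategy (the test file below uses the same parser pattern):

package config

import "github.com/BurntSushi/toml"

// Hypothetical provider config and its parser, registered at init time.
type exampleProviderConfig struct {
	Endpoint string `toml:"endpoint"`
}

func (c *exampleProviderConfig) Validate() error { return nil }

func init() {
	RegisterProviderConfig("example", func(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
		// PrimitiveDecode resolves the raw TOML section into the typed struct.
		var cfg exampleProviderConfig
		if err := md.PrimitiveDecode(primitive, &cfg); err != nil {
			return nil, err
		}
		return &cfg, nil
	})
}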
157
pkg/config/provider_config_test.go
Normal file
@@ -0,0 +1,157 @@
package config

import (
	"errors"
	"testing"

	"github.com/BurntSushi/toml"
	"github.com/stretchr/testify/suite"
)

type ProviderConfigSuite struct {
	BaseConfigSuite
	originalProviderConfigParsers map[string]ProviderConfigParser
}

func (s *ProviderConfigSuite) SetupTest() {
	s.originalProviderConfigParsers = make(map[string]ProviderConfigParser)
	for k, v := range providerConfigParsers {
		s.originalProviderConfigParsers[k] = v
	}
}

func (s *ProviderConfigSuite) TearDownTest() {
	providerConfigParsers = make(map[string]ProviderConfigParser)
	for k, v := range s.originalProviderConfigParsers {
		providerConfigParsers[k] = v
	}
}

type ProviderConfigForTest struct {
	BoolProp bool   `toml:"bool_prop"`
	StrProp  string `toml:"str_prop"`
	IntProp  int    `toml:"int_prop"`
}

var _ ProviderConfig = (*ProviderConfigForTest)(nil)

func (p *ProviderConfigForTest) Validate() error {
	if p.StrProp == "force-error" {
		return errors.New("validation error forced by test")
	}
	return nil
}

func providerConfigForTestParser(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
	var providerConfigForTest ProviderConfigForTest
	if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil {
		return nil, err
	}
	return &providerConfigForTest, nil
}

func (s *ProviderConfigSuite) TestRegisterProviderConfig() {
	s.Run("panics when registering duplicate provider config parser", func() {
		s.Panics(func() {
			RegisterProviderConfig("test", providerConfigForTestParser)
			RegisterProviderConfig("test", providerConfigForTestParser)
		}, "Expected panic when registering duplicate provider config parser")
	})
}

func (s *ProviderConfigSuite) TestReadConfigValid() {
	RegisterProviderConfig("test", providerConfigForTestParser)
	validConfigPath := s.writeConfig(`
cluster_provider_strategy = "test"
[cluster_provider_configs.test]
bool_prop = true
str_prop = "a string"
int_prop = 42
`)

	config, err := Read(validConfigPath)
	s.Run("returns no error for valid file with registered provider config", func() {
		s.Require().NoError(err, "Expected no error for valid file, got %v", err)
	})
	s.Run("returns config for valid file with registered provider config", func() {
		s.Require().NotNil(config, "Expected non-nil config for valid file")
	})
	s.Run("parses provider config correctly", func() {
		providerConfig, ok := config.GetProviderConfig("test")
		s.Require().True(ok, "Expected to find provider config for strategy 'test'")
		s.Require().NotNil(providerConfig, "Expected non-nil provider config for strategy 'test'")
		testProviderConfig, ok := providerConfig.(*ProviderConfigForTest)
		s.Require().True(ok, "Expected provider config to be of type *ProviderConfigForTest")
		s.Equal(true, testProviderConfig.BoolProp, "Expected BoolProp to be true")
		s.Equal("a string", testProviderConfig.StrProp, "Expected StrProp to be 'a string'")
		s.Equal(42, testProviderConfig.IntProp, "Expected IntProp to be 42")
	})
}

func (s *ProviderConfigSuite) TestReadConfigInvalidProviderConfig() {
	RegisterProviderConfig("test", providerConfigForTestParser)
	invalidConfigPath := s.writeConfig(`
cluster_provider_strategy = "test"
[cluster_provider_configs.test]
bool_prop = true
str_prop = "force-error"
int_prop = 42
`)

	config, err := Read(invalidConfigPath)
	s.Run("returns error for invalid provider config", func() {
		s.Require().NotNil(err, "Expected error for invalid provider config, got nil")
		s.ErrorContains(err, "validation error forced by test", "Expected validation error from provider config")
	})
	s.Run("returns nil config for invalid provider config", func() {
		s.Nil(config, "Expected nil config for invalid provider config")
	})
}

func (s *ProviderConfigSuite) TestReadConfigUnregisteredProviderConfig() {
	invalidConfigPath := s.writeConfig(`
cluster_provider_strategy = "unregistered"
[cluster_provider_configs.unregistered]
bool_prop = true
str_prop = "a string"
int_prop = 42
`)

	config, err := Read(invalidConfigPath)
	s.Run("returns no error for unregistered provider config", func() {
		s.Require().NoError(err, "Expected no error for unregistered provider config, got %v", err)
	})
	s.Run("returns config for unregistered provider config", func() {
		s.Require().NotNil(config, "Expected non-nil config for unregistered provider config")
	})
	s.Run("does not parse unregistered provider config", func() {
		_, ok := config.GetProviderConfig("unregistered")
		s.Require().False(ok, "Expected no provider config for unregistered strategy")
	})
}

func (s *ProviderConfigSuite) TestReadConfigParserError() {
	RegisterProviderConfig("test", func(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
		return nil, errors.New("parser error forced by test")
	})
	invalidConfigPath := s.writeConfig(`
cluster_provider_strategy = "test"
[cluster_provider_configs.test]
bool_prop = true
str_prop = "a string"
int_prop = 42
`)

	config, err := Read(invalidConfigPath)
	s.Run("returns error for provider config parser error", func() {
		s.Require().NotNil(err, "Expected error for provider config parser error, got nil")
		s.ErrorContains(err, "parser error forced by test", "Expected parser error from provider config")
	})
	s.Run("returns nil config for provider config parser error", func() {
		s.Nil(config, "Expected nil config for provider config parser error")
	})
}

func TestProviderConfig(t *testing.T) {
	suite.Run(t, new(ProviderConfigSuite))
}
@@ -1,8 +1,11 @@
 package http
 
 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
+	"io"
 	"net/http"
 	"strings"
 
@@ -20,7 +23,50 @@ import (
 
 type KubernetesApiTokenVerifier interface {
 	// KubernetesApiVerifyToken TODO: clarify proper implementation
-	KubernetesApiVerifyToken(ctx context.Context, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
+	KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
+	// GetTargetParameterName returns the parameter name used for target identification in MCP requests
+	GetTargetParameterName() string
 }
 
+// extractTargetFromRequest extracts cluster parameter from MCP request body
+func extractTargetFromRequest(r *http.Request, targetName string) (string, error) {
+	if r.Body == nil {
+		return "", nil
+	}
+
+	// Read the body
+	body, err := io.ReadAll(r.Body)
+	if err != nil {
+		return "", err
+	}
+
+	// Restore the body for downstream handlers
+	r.Body = io.NopCloser(bytes.NewBuffer(body))
+
+	// Parse the MCP request
+	var mcpRequest struct {
+		Params struct {
+			Arguments map[string]interface{} `json:"arguments"`
+		} `json:"params"`
+	}
+
+	if err := json.Unmarshal(body, &mcpRequest); err != nil {
+		// If we can't parse the request, just return empty cluster (will use default)
+		return "", nil
+	}
+
+	// Extract target parameter
+	if cluster, ok := mcpRequest.Params.Arguments[targetName].(string); ok {
+		return cluster, nil
+	}
+
+	return "", nil
+}
+
+// write401 sends a 401/Unauthorized response with WWW-Authenticate header.
+func write401(w http.ResponseWriter, wwwAuthenticateHeader, errorType, message string) {
+	w.Header().Set("WWW-Authenticate", wwwAuthenticateHeader+fmt.Sprintf(`, error="%s"`, errorType))
+	http.Error(w, message, http.StatusUnauthorized)
+}
+
 // AuthorizationMiddleware validates the OAuth flow for protected resources.
@@ -62,7 +108,7 @@ type KubernetesApiTokenVerifier interface {
 // - If ValidateToken is set, the exchanged token is then used against the Kubernetes API Server for TokenReview.
 //
 // see TestAuthorizationOidcTokenExchange
-func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier) func(http.Handler) http.Handler {
+func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier, httpClient *http.Client) func(http.Handler) http.Handler {
 	return func(next http.Handler) http.Handler {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			if r.URL.Path == healthEndpoint || slices.Contains(WellKnownEndpoints, r.URL.EscapedPath()) {
@@ -82,9 +128,7 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi
 			authHeader := r.Header.Get("Authorization")
 			if authHeader == "" || !strings.HasPrefix(authHeader, "Bearer ") {
 				klog.V(1).Infof("Authentication failed - missing or invalid bearer token: %s %s from %s", r.Method, r.URL.Path, r.RemoteAddr)
-
-				w.Header().Set("WWW-Authenticate", wwwAuthenticateHeader+", error=\"missing_token\"")
-				http.Error(w, "Unauthorized: Bearer token required", http.StatusUnauthorized)
+				write401(w, wwwAuthenticateHeader, "missing_token", "Unauthorized: Bearer token required")
 				return
 			}
 
@@ -115,7 +159,11 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi
 			if err == nil && sts.IsEnabled() {
 				var exchangedToken *oauth2.Token
 				// If the token is valid, we can exchange it for a new token with the specified audience and scopes.
-				exchangedToken, err = sts.ExternalAccountTokenExchange(r.Context(), &oauth2.Token{
+				ctx := r.Context()
+				if httpClient != nil {
+					ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
+				}
+				exchangedToken, err = sts.ExternalAccountTokenExchange(ctx, &oauth2.Token{
 					AccessToken: claims.Token,
 					TokenType:   "Bearer",
 				})
@@ -128,13 +176,16 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi
 			}
 			// Kubernetes API Server TokenReview validation
 			if err == nil && staticConfig.ValidateToken {
-				err = claims.ValidateWithKubernetesApi(r.Context(), staticConfig.OAuthAudience, verifier)
+				targetParameterName := verifier.GetTargetParameterName()
+				cluster, clusterErr := extractTargetFromRequest(r, targetParameterName)
+				if clusterErr != nil {
+					klog.V(2).Infof("Failed to extract cluster from request, using default: %v", clusterErr)
+				}
+				err = claims.ValidateWithKubernetesApi(r.Context(), staticConfig.OAuthAudience, cluster, verifier)
 			}
 			if err != nil {
 				klog.V(1).Infof("Authentication failed - JWT validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err)
-
-				w.Header().Set("WWW-Authenticate", wwwAuthenticateHeader+", error=\"invalid_token\"")
-				http.Error(w, "Unauthorized: Invalid token", http.StatusUnauthorized)
+				write401(w, wwwAuthenticateHeader, "invalid_token", "Unauthorized: Invalid token")
 				return
 			}
 
@@ -198,9 +249,9 @@ func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, p
 	return nil
 }
 
-func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience string, verifier KubernetesApiTokenVerifier) error {
+func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
 	if verifier != nil {
-		_, _, err := verifier.KubernetesApiVerifyToken(ctx, c.Token, audience)
+		_, _, err := verifier.KubernetesApiVerifyToken(ctx, cluster, c.Token, audience)
 		if err != nil {
 			return fmt.Errorf("kubernetes API token validation error: %v", err)
 		}
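The body-peeking trick in extractTargetFromRequest (read, then restore via io.NopCloser) is what lets the middleware route the TokenReview to the right cluster without consuming the request. A hypothetical package-internal test sketch of the expected behavior:

package http

import (
	"net/http/httptest"
	"strings"
	"testing"
)

// Hypothetical test: an MCP tools/call body carrying a "cluster" argument
// should be extracted, and the body must remain readable downstream.
func TestExtractTargetFromRequestSketch(t *testing.T) {
	body := `{"method":"tools/call","params":{"name":"pods_list","arguments":{"cluster":"staging"}}}`
	req := httptest.NewRequest("POST", "/mcp", strings.NewReader(body))

	cluster, err := extractTargetFromRequest(req, "cluster")
	if err != nil || cluster != "staging" {
		t.Fatalf("expected cluster=staging, got %q (err=%v)", cluster, err)
	}
}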
@@ -24,11 +24,11 @@ const (
 	sseMessageEndpoint = "/message"
 )
 
-func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider) error {
+func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, httpClient *http.Client) error {
 	mux := http.NewServeMux()
 
 	wrappedMux := RequestMiddleware(
-		AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer)(mux),
+		AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer, httpClient)(mux),
 	)
 
 	httpServer := &http.Server{
@@ -44,7 +44,7 @@ func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.Stat
 	mux.HandleFunc(healthEndpoint, func(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusOK)
 	})
-	mux.Handle("/.well-known/", WellKnownHandler(staticConfig))
+	mux.Handle("/.well-known/", WellKnownHandler(staticConfig, httpClient))
 
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
@@ -89,7 +89,7 @@ func (c *httpContext) beforeEach(t *testing.T) {
|
||||
timeoutCtx, c.timeoutCancel = context.WithTimeout(t.Context(), 10*time.Second)
|
||||
group, gc := errgroup.WithContext(timeoutCtx)
|
||||
cancelCtx, c.StopServer = context.WithCancel(gc)
|
||||
group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider) })
|
||||
group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider, nil) })
|
||||
c.WaitForShutdown = group.Wait
|
||||
// Wait for HTTP server to start (using net)
|
||||
for i := 0; i < 10; i++ {
|
||||
@@ -292,7 +292,7 @@ func TestHealthCheck(t *testing.T) {
|
||||
})
|
||||
})
|
||||
// Health exposed even when require Authorization
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get health check endpoint with OAuth: %v", err)
|
||||
@@ -313,7 +313,7 @@ func TestWellKnownReverseProxy(t *testing.T) {
|
||||
".well-known/openid-configuration",
|
||||
}
|
||||
// With No Authorization URL configured
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
for _, path := range cases {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
|
||||
t.Cleanup(func() { _ = resp.Body.Close() })
|
||||
@@ -333,7 +333,12 @@ func TestWellKnownReverseProxy(t *testing.T) {
|
||||
_, _ = w.Write([]byte(`NOT A JSON PAYLOAD`))
|
||||
}))
|
||||
t.Cleanup(invalidPayloadServer.Close)
|
||||
invalidPayloadConfig := &config.StaticConfig{AuthorizationURL: invalidPayloadServer.URL, RequireOAuth: true, ValidateToken: true}
|
||||
invalidPayloadConfig := &config.StaticConfig{
|
||||
AuthorizationURL: invalidPayloadServer.URL,
|
||||
RequireOAuth: true,
|
||||
ValidateToken: true,
|
||||
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
|
||||
}
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: invalidPayloadConfig}, func(ctx *httpContext) {
|
||||
for _, path := range cases {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
|
||||
@@ -358,7 +363,12 @@ func TestWellKnownReverseProxy(t *testing.T) {
|
||||
_, _ = w.Write([]byte(`{"issuer": "https://example.com","scopes_supported":["mcp-server"]}`))
|
||||
}))
|
||||
t.Cleanup(testServer.Close)
|
||||
staticConfig := &config.StaticConfig{AuthorizationURL: testServer.URL, RequireOAuth: true, ValidateToken: true}
|
||||
staticConfig := &config.StaticConfig{
|
||||
AuthorizationURL: testServer.URL,
|
||||
RequireOAuth: true,
|
||||
ValidateToken: true,
|
||||
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
|
||||
}
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: staticConfig}, func(ctx *httpContext) {
|
||||
for _, path := range cases {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path))
|
||||
@@ -401,7 +411,12 @@ func TestWellKnownOverrides(t *testing.T) {
|
||||
}`))
|
||||
}))
|
||||
t.Cleanup(testServer.Close)
|
||||
baseConfig := config.StaticConfig{AuthorizationURL: testServer.URL, RequireOAuth: true, ValidateToken: true}
|
||||
baseConfig := config.StaticConfig{
|
||||
AuthorizationURL: testServer.URL,
|
||||
RequireOAuth: true,
|
||||
ValidateToken: true,
|
||||
ClusterProviderStrategy: config.ClusterProviderKubeConfig,
|
||||
}
|
||||
// With Dynamic Client Registration disabled
|
||||
disableDynamicRegistrationConfig := baseConfig
|
||||
disableDynamicRegistrationConfig.DisableDynamicClientRegistration = true
|
||||
@@ -488,7 +503,7 @@ func TestMiddlewareLogging(t *testing.T) {
|
||||
|
||||
func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
// Missing Authorization header
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get protected endpoint: %v", err)
|
||||
@@ -513,7 +528,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
})
|
||||
})
|
||||
// Authorization header without Bearer prefix
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create request: %v", err)
|
||||
@@ -538,7 +553,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
})
|
||||
})
|
||||
// Invalid Authorization header
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create request: %v", err)
|
||||
@@ -569,7 +584,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
})
|
||||
})
|
||||
// Expired Authorization Bearer token
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create request: %v", err)
|
||||
@@ -600,7 +615,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
})
|
||||
})
|
||||
// Invalid audience claim Bearer token
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true}}, func(ctx *httpContext) {
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create request: %v", err)
|
||||
@@ -633,7 +648,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
|
||||
// Failed OIDC validation
|
||||
oidcTestServer := NewOidcTestServer(t)
|
||||
t.Cleanup(oidcTestServer.Close)
|
||||
testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
|
||||
	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
		if err != nil {
			t.Fatalf("Failed to create request: %v", err)
@@ -670,7 +685,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
		"aud": "mcp-server"
	}`
	validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil)
		if err != nil {
			t.Fatalf("Failed to create request: %v", err)
@@ -703,7 +718,7 @@ func TestAuthorizationUnauthorized(t *testing.T) {
}

func TestAuthorizationRequireOAuthFalse(t *testing.T) {
	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false}}, func(ctx *httpContext) {
	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
		resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress))
		if err != nil {
			t.Fatalf("Failed to get protected endpoint: %v", err)
@@ -728,7 +743,7 @@ func TestAuthorizationRawToken(t *testing.T) {
		{"mcp-server", true}, // Audience set, validation enabled
	}
	for _, c := range cases {
		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken}}, func(ctx *httpContext) {
		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) {
			tokenReviewed := false
			ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
@@ -777,7 +792,7 @@ func TestAuthorizationOidcToken(t *testing.T) {
	validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims)
	cases := []bool{false, true}
	for _, validateToken := range cases {
		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
			tokenReviewed := false
			ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
@@ -833,13 +848,14 @@ func TestAuthorizationOidcTokenExchange(t *testing.T) {
	cases := []bool{false, true}
	for _, validateToken := range cases {
		staticConfig := &config.StaticConfig{
			RequireOAuth:    true,
			OAuthAudience:   "mcp-server",
			ValidateToken:   validateToken,
			StsClientId:     "test-sts-client-id",
			StsClientSecret: "test-sts-client-secret",
			StsAudience:     "backend-audience",
			StsScopes:       []string{"backend-scope"},
			RequireOAuth:            true,
			OAuthAudience:           "mcp-server",
			ValidateToken:           validateToken,
			StsClientId:             "test-sts-client-id",
			StsClientSecret:         "test-sts-client-secret",
			StsAudience:             "backend-audience",
			StsScopes:               []string{"backend-scope"},
			ClusterProviderStrategy: config.ClusterProviderKubeConfig,
		}
		testCaseWithContext(t, &httpContext{StaticConfig: staticConfig, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) {
			tokenReviewed := false

@@ -25,19 +25,24 @@ type WellKnown struct {
	authorizationUrl                 string
	scopesSupported                  []string
	disableDynamicClientRegistration bool
	httpClient                       *http.Client
}

var _ http.Handler = &WellKnown{}

func WellKnownHandler(staticConfig *config.StaticConfig) http.Handler {
func WellKnownHandler(staticConfig *config.StaticConfig, httpClient *http.Client) http.Handler {
	authorizationUrl := staticConfig.AuthorizationURL
	if authorizationUrl != "" && strings.HasSuffix(authorizationUrl, "/") {
		authorizationUrl = strings.TrimSuffix(authorizationUrl, "/")
	}
	if httpClient == nil {
		httpClient = http.DefaultClient
	}
	return &WellKnown{
		authorizationUrl:                 authorizationUrl,
		disableDynamicClientRegistration: staticConfig.DisableDynamicClientRegistration,
		scopesSupported:                  staticConfig.OAuthScopes,
		httpClient:                       httpClient,
	}
}

@@ -51,7 +56,7 @@ func (w WellKnown) ServeHTTP(writer http.ResponseWriter, request *http.Request)
		http.Error(writer, "Failed to create request: "+err.Error(), http.StatusInternalServerError)
		return
	}
	resp, err := http.DefaultClient.Do(req.WithContext(request.Context()))
	resp, err := w.httpClient.Do(req.WithContext(request.Context()))
	if err != nil {
		http.Error(writer, "Failed to perform request: "+err.Error(), http.StatusInternalServerError)
		return

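Since WellKnownHandler now accepts an injectable *http.Client (falling back to http.DefaultClient when nil), callers that need a custom CA bundle can build the client before constructing the handler. A minimal sketch, assuming a PEM-encoded CA at a hypothetical path; in the server this path comes from the certificate-authority setting:

package oauthca

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// buildClient returns an *http.Client that trusts the CA bundle at caPath.
func buildClient(caPath string) (*http.Client, error) {
	caCert, err := os.ReadFile(caPath)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caCert) {
		return nil, fmt.Errorf("no certificates parsed from %s", caPath)
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}, nil
}

The handler would then be created with WellKnownHandler(staticConfig, client); passing nil keeps the previous http.DefaultClient behavior.
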
@@ -47,9 +47,34 @@ kubernetes-mcp-server --port 8080

# start a SSE server on port 8443 with a public HTTPS host of example.com
kubernetes-mcp-server --port 8443 --sse-base-url https://example.com:8443

# start a SSE server on port 8080 with multi-cluster tools disabled
kubernetes-mcp-server --port 8080 --disable-multi-cluster
`))
)

const (
	flagVersion              = "version"
	flagLogLevel             = "log-level"
	flagConfig               = "config"
	flagSSEPort              = "sse-port"
	flagHttpPort             = "http-port"
	flagPort                 = "port"
	flagSSEBaseUrl           = "sse-base-url"
	flagKubeconfig           = "kubeconfig"
	flagToolsets             = "toolsets"
	flagListOutput           = "list-output"
	flagReadOnly             = "read-only"
	flagDisableDestructive   = "disable-destructive"
	flagRequireOAuth         = "require-oauth"
	flagOAuthAudience        = "oauth-audience"
	flagValidateToken        = "validate-token"
	flagAuthorizationURL     = "authorization-url"
	flagServerUrl            = "server-url"
	flagCertificateAuthority = "certificate-authority"
	flagDisableMultiCluster  = "disable-multi-cluster"
)

type MCPServerOptions struct {
	Version  bool
	LogLevel int
@@ -68,6 +93,7 @@ type MCPServerOptions struct {
	AuthorizationURL     string
	CertificateAuthority string
	ServerURL            string
	DisableMultiCluster  bool

	ConfigPath   string
	StaticConfig *config.StaticConfig
@@ -104,32 +130,33 @@ func NewMCPServer(streams genericiooptions.IOStreams) *cobra.Command {
		},
	}

	cmd.Flags().BoolVar(&o.Version, "version", o.Version, "Print version information and quit")
	cmd.Flags().IntVar(&o.LogLevel, "log-level", o.LogLevel, "Set the log level (from 0 to 9)")
	cmd.Flags().StringVar(&o.ConfigPath, "config", o.ConfigPath, "Path of the config file.")
	cmd.Flags().IntVar(&o.SSEPort, "sse-port", o.SSEPort, "Start a SSE server on the specified port")
	cmd.Flag("sse-port").Deprecated = "Use --port instead"
	cmd.Flags().IntVar(&o.HttpPort, "http-port", o.HttpPort, "Start a streamable HTTP server on the specified port")
	cmd.Flag("http-port").Deprecated = "Use --port instead"
	cmd.Flags().StringVar(&o.Port, "port", o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)")
	cmd.Flags().StringVar(&o.SSEBaseUrl, "sse-base-url", o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)")
	cmd.Flags().StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to the kubeconfig file to use for authentication")
	cmd.Flags().StringSliceVar(&o.Toolsets, "toolsets", o.Toolsets, "Comma-separated list of MCP toolsets to use (available toolsets: "+strings.Join(toolsets.ToolsetNames(), ", ")+"). Defaults to "+strings.Join(o.StaticConfig.Toolsets, ", ")+".")
	cmd.Flags().StringVar(&o.ListOutput, "list-output", o.ListOutput, "Output format for resource list operations (one of: "+strings.Join(output.Names, ", ")+"). Defaults to "+o.StaticConfig.ListOutput+".")
	cmd.Flags().BoolVar(&o.ReadOnly, "read-only", o.ReadOnly, "If true, only tools annotated with readOnlyHint=true are exposed")
	cmd.Flags().BoolVar(&o.DisableDestructive, "disable-destructive", o.DisableDestructive, "If true, tools annotated with destructiveHint=true are disabled")
	cmd.Flags().BoolVar(&o.RequireOAuth, "require-oauth", o.RequireOAuth, "If true, requires OAuth authorization as defined in the Model Context Protocol (MCP) specification. This flag is ignored if transport type is stdio")
	_ = cmd.Flags().MarkHidden("require-oauth")
	cmd.Flags().StringVar(&o.OAuthAudience, "oauth-audience", o.OAuthAudience, "OAuth audience for token claims validation. Optional. If not set, the audience is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden("oauth-audience")
	cmd.Flags().BoolVar(&o.ValidateToken, "validate-token", o.ValidateToken, "If true, validates the token against the Kubernetes API Server using TokenReview. Optional. If not set, the token is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden("validate-token")
	cmd.Flags().StringVar(&o.AuthorizationURL, "authorization-url", o.AuthorizationURL, "OAuth authorization server URL for protected resource endpoint. If not provided, the Kubernetes API server host will be used. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden("authorization-url")
	cmd.Flags().StringVar(&o.ServerURL, "server-url", o.ServerURL, "Server URL of this application. Optional. If set, this url will be served in protected resource metadata endpoint and tokens will be validated with this audience. If not set, expected audience is kubernetes-mcp-server. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden("server-url")
	cmd.Flags().StringVar(&o.CertificateAuthority, "certificate-authority", o.CertificateAuthority, "Certificate authority path to verify certificates. Optional. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden("certificate-authority")
	cmd.Flags().BoolVar(&o.Version, flagVersion, o.Version, "Print version information and quit")
	cmd.Flags().IntVar(&o.LogLevel, flagLogLevel, o.LogLevel, "Set the log level (from 0 to 9)")
	cmd.Flags().StringVar(&o.ConfigPath, flagConfig, o.ConfigPath, "Path of the config file.")
	cmd.Flags().IntVar(&o.SSEPort, flagSSEPort, o.SSEPort, "Start a SSE server on the specified port")
	cmd.Flag(flagSSEPort).Deprecated = "Use --port instead"
	cmd.Flags().IntVar(&o.HttpPort, flagHttpPort, o.HttpPort, "Start a streamable HTTP server on the specified port")
	cmd.Flag(flagHttpPort).Deprecated = "Use --port instead"
	cmd.Flags().StringVar(&o.Port, flagPort, o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)")
	cmd.Flags().StringVar(&o.SSEBaseUrl, flagSSEBaseUrl, o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)")
	cmd.Flags().StringVar(&o.Kubeconfig, flagKubeconfig, o.Kubeconfig, "Path to the kubeconfig file to use for authentication")
	cmd.Flags().StringSliceVar(&o.Toolsets, flagToolsets, o.Toolsets, "Comma-separated list of MCP toolsets to use (available toolsets: "+strings.Join(toolsets.ToolsetNames(), ", ")+"). Defaults to "+strings.Join(o.StaticConfig.Toolsets, ", ")+".")
	cmd.Flags().StringVar(&o.ListOutput, flagListOutput, o.ListOutput, "Output format for resource list operations (one of: "+strings.Join(output.Names, ", ")+"). Defaults to "+o.StaticConfig.ListOutput+".")
	cmd.Flags().BoolVar(&o.ReadOnly, flagReadOnly, o.ReadOnly, "If true, only tools annotated with readOnlyHint=true are exposed")
	cmd.Flags().BoolVar(&o.DisableDestructive, flagDisableDestructive, o.DisableDestructive, "If true, tools annotated with destructiveHint=true are disabled")
	cmd.Flags().BoolVar(&o.RequireOAuth, flagRequireOAuth, o.RequireOAuth, "If true, requires OAuth authorization as defined in the Model Context Protocol (MCP) specification. This flag is ignored if transport type is stdio")
	_ = cmd.Flags().MarkHidden(flagRequireOAuth)
	cmd.Flags().StringVar(&o.OAuthAudience, flagOAuthAudience, o.OAuthAudience, "OAuth audience for token claims validation. Optional. If not set, the audience is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagOAuthAudience)
	cmd.Flags().BoolVar(&o.ValidateToken, flagValidateToken, o.ValidateToken, "If true, validates the token against the Kubernetes API Server using TokenReview. Optional. If not set, the token is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagValidateToken)
	cmd.Flags().StringVar(&o.AuthorizationURL, flagAuthorizationURL, o.AuthorizationURL, "OAuth authorization server URL for protected resource endpoint. If not provided, the Kubernetes API server host will be used. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagAuthorizationURL)
	cmd.Flags().StringVar(&o.ServerURL, flagServerUrl, o.ServerURL, "Server URL of this application. Optional. If set, this url will be served in protected resource metadata endpoint and tokens will be validated with this audience. If not set, expected audience is kubernetes-mcp-server. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagServerUrl)
	cmd.Flags().StringVar(&o.CertificateAuthority, flagCertificateAuthority, o.CertificateAuthority, "Certificate authority path to verify certificates. Optional. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagCertificateAuthority)
	cmd.Flags().BoolVar(&o.DisableMultiCluster, flagDisableMultiCluster, o.DisableMultiCluster, "Disable multi cluster tools. Optional. If true, all tools will be run against the default cluster/context.")

	return cmd
}
@@ -156,52 +183,55 @@ func (m *MCPServerOptions) Complete(cmd *cobra.Command) error {
}

func (m *MCPServerOptions) loadFlags(cmd *cobra.Command) {
	if cmd.Flag("log-level").Changed {
	if cmd.Flag(flagLogLevel).Changed {
		m.StaticConfig.LogLevel = m.LogLevel
	}
	if cmd.Flag("port").Changed {
	if cmd.Flag(flagPort).Changed {
		m.StaticConfig.Port = m.Port
	} else if cmd.Flag("sse-port").Changed {
	} else if cmd.Flag(flagSSEPort).Changed {
		m.StaticConfig.Port = strconv.Itoa(m.SSEPort)
	} else if cmd.Flag("http-port").Changed {
	} else if cmd.Flag(flagHttpPort).Changed {
		m.StaticConfig.Port = strconv.Itoa(m.HttpPort)
	}
	if cmd.Flag("sse-base-url").Changed {
	if cmd.Flag(flagSSEBaseUrl).Changed {
		m.StaticConfig.SSEBaseURL = m.SSEBaseUrl
	}
	if cmd.Flag("kubeconfig").Changed {
	if cmd.Flag(flagKubeconfig).Changed {
		m.StaticConfig.KubeConfig = m.Kubeconfig
	}
	if cmd.Flag("list-output").Changed {
	if cmd.Flag(flagListOutput).Changed {
		m.StaticConfig.ListOutput = m.ListOutput
	}
	if cmd.Flag("read-only").Changed {
	if cmd.Flag(flagReadOnly).Changed {
		m.StaticConfig.ReadOnly = m.ReadOnly
	}
	if cmd.Flag("disable-destructive").Changed {
	if cmd.Flag(flagDisableDestructive).Changed {
		m.StaticConfig.DisableDestructive = m.DisableDestructive
	}
	if cmd.Flag("toolsets").Changed {
	if cmd.Flag(flagToolsets).Changed {
		m.StaticConfig.Toolsets = m.Toolsets
	}
	if cmd.Flag("require-oauth").Changed {
	if cmd.Flag(flagRequireOAuth).Changed {
		m.StaticConfig.RequireOAuth = m.RequireOAuth
	}
	if cmd.Flag("oauth-audience").Changed {
	if cmd.Flag(flagOAuthAudience).Changed {
		m.StaticConfig.OAuthAudience = m.OAuthAudience
	}
	if cmd.Flag("validate-token").Changed {
	if cmd.Flag(flagValidateToken).Changed {
		m.StaticConfig.ValidateToken = m.ValidateToken
	}
	if cmd.Flag("authorization-url").Changed {
	if cmd.Flag(flagAuthorizationURL).Changed {
		m.StaticConfig.AuthorizationURL = m.AuthorizationURL
	}
	if cmd.Flag("server-url").Changed {
	if cmd.Flag(flagServerUrl).Changed {
		m.StaticConfig.ServerURL = m.ServerURL
	}
	if cmd.Flag("certificate-authority").Changed {
	if cmd.Flag(flagCertificateAuthority).Changed {
		m.StaticConfig.CertificateAuthority = m.CertificateAuthority
	}
	if cmd.Flag(flagDisableMultiCluster).Changed && m.DisableMultiCluster {
		m.StaticConfig.ClusterProviderStrategy = config.ClusterProviderDisabled
	}
}

func (m *MCPServerOptions) initializeLogging() {
@@ -258,16 +288,24 @@ func (m *MCPServerOptions) Run() error {
	klog.V(1).Infof(" - Read-only mode: %t", m.StaticConfig.ReadOnly)
	klog.V(1).Infof(" - Disable destructive tools: %t", m.StaticConfig.DisableDestructive)

	strategy := m.StaticConfig.ClusterProviderStrategy
	if strategy == "" {
		strategy = "auto-detect (it is recommended to set this explicitly in your Config)"
	}

	klog.V(1).Infof(" - ClusterProviderStrategy: %s", strategy)

	if m.Version {
		_, _ = fmt.Fprintf(m.Out, "%s\n", version.Version)
		return nil
	}

	var oidcProvider *oidc.Provider
	var httpClient *http.Client
	if m.StaticConfig.AuthorizationURL != "" {
		ctx := context.Background()
		if m.StaticConfig.CertificateAuthority != "" {
			httpClient := &http.Client{}
			httpClient = &http.Client{}
			caCert, err := os.ReadFile(m.StaticConfig.CertificateAuthority)
			if err != nil {
				return fmt.Errorf("failed to read CA certificate from %s: %w", m.StaticConfig.CertificateAuthority, err)
@@ -304,7 +342,7 @@ func (m *MCPServerOptions) Run() error {

	if m.StaticConfig.Port != "" {
		ctx := context.Background()
		return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider)
		return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider, httpClient)
	}

	if err := mcpServer.ServeStdio(); err != nil && !errors.Is(err, context.Canceled) {

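The Run hunk above fixes a Go variable shadowing bug: the old `httpClient := &http.Client{}` inside the nested if block declared a new local that went out of scope before Serve was called, leaving the outer httpClient nil. A minimal sketch of the pattern (names illustrative, not from the server):

package main

import "fmt"

func main() {
	var client *int
	if true {
		// ":=" here would declare a new, inner variable and leave the outer one nil:
		// client := new(int)
		// "=" assigns to the outer variable, which is what the fix does:
		client = new(int)
		*client = 42
	}
	fmt.Println(client != nil) // true only with the "=" assignment
}
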
@@ -276,3 +276,24 @@ func TestStdioLogging(t *testing.T) {
		assert.Containsf(t, out.String(), "Starting kubernetes-mcp-server", "Expected klog output, got %s", out.String())
	})
}

func TestDisableMultiCluster(t *testing.T) {
	t.Run("defaults to false", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"})
		if err := rootCmd.Execute(); !strings.Contains(out.String(), " - ClusterProviderStrategy: auto-detect (it is recommended to set this explicitly in your Config)") {
			t.Fatalf("Expected ClusterProviderStrategy auto-detect, got %s %v", out, err)
		}
	})
	t.Run("set with --disable-multi-cluster", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--disable-multi-cluster"})
		_ = rootCmd.Execute()
		expected := `(?m)\" - ClusterProviderStrategy\: disabled\"`
		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
			t.Fatalf("Expected ClusterProviderStrategy %s, got %s %v", expected, out.String(), err)
		}
	})
}

@@ -1,13 +1,15 @@
package kubernetes

import (
	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/clientcmd/api/latest"
)

const inClusterKubeConfigDefaultContext = "in-cluster"

// InClusterConfig is a variable that holds the function to get the in-cluster config
// Exposed for testing
var InClusterConfig = func() (*rest.Config, error) {
@@ -20,85 +22,56 @@ var InClusterConfig = func() (*rest.Config, error) {
	return inClusterConfig, err
}

// resolveKubernetesConfigurations resolves the required kubernetes configurations and sets them in the Kubernetes struct
func resolveKubernetesConfigurations(kubernetes *Manager) error {
	// Always set clientCmdConfig
	pathOptions := clientcmd.NewDefaultPathOptions()
	if kubernetes.staticConfig.KubeConfig != "" {
		pathOptions.LoadingRules.ExplicitPath = kubernetes.staticConfig.KubeConfig
	}
	kubernetes.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		pathOptions.LoadingRules,
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: ""}})
	var err error
	if kubernetes.IsInCluster() {
		kubernetes.cfg, err = InClusterConfig()
		if err == nil && kubernetes.cfg != nil {
			return nil
		}
	}
	// Out of cluster
	kubernetes.cfg, err = kubernetes.clientCmdConfig.ClientConfig()
	if kubernetes.cfg != nil && kubernetes.cfg.UserAgent == "" {
		kubernetes.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	return err
}

func (m *Manager) IsInCluster() bool {
	if m.staticConfig.KubeConfig != "" {
func IsInCluster(cfg *config.StaticConfig) bool {
	// Even if running in-cluster, if a kubeconfig is provided, we consider it as out-of-cluster
	if cfg != nil && cfg.KubeConfig != "" {
		return false
	}
	cfg, err := InClusterConfig()
	return err == nil && cfg != nil
}

func (m *Manager) configuredNamespace() string {
	if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
		return ns
	}
	return ""
}

func (m *Manager) NamespaceOrDefault(namespace string) string {
	if namespace == "" {
		return m.configuredNamespace()
	}
	return namespace
	restConfig, err := InClusterConfig()
	return err == nil && restConfig != nil
}

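With IsInCluster promoted from a Manager method to a package-level function over *config.StaticConfig, callers can decide which manager to build before any manager exists. A minimal sketch, assuming the NewInClusterManager and NewKubeconfigManager constructors introduced in pkg/kubernetes/manager.go further below; the actual provider wiring may differ:

// chooseManager is illustrative only.
func chooseManager(cfg *config.StaticConfig) (*Manager, error) {
	if IsInCluster(cfg) {
		return NewInClusterManager(cfg)
	}
	// An empty context string keeps the kubeconfig's current-context.
	return NewKubeconfigManager(cfg, "")
}
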
func (k *Kubernetes) NamespaceOrDefault(namespace string) string {
	return k.manager.NamespaceOrDefault(namespace)
}

// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
	return m.cfg, nil
// ConfigurationContextsDefault returns the current context name
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsDefault() (string, error) {
	cfg, err := k.manager.clientCmdConfig.RawConfig()
	if err != nil {
		return "", err
	}
	return cfg.CurrentContext, nil
}

// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
	return m.clientCmdConfig
// ConfigurationContextsList returns the list of available context names
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) {
	cfg, err := k.manager.clientCmdConfig.RawConfig()
	if err != nil {
		return nil, err
	}
	contexts := make(map[string]string, len(cfg.Contexts))
	for name, context := range cfg.Contexts {
		cluster, ok := cfg.Clusters[context.Cluster]
		if !ok || cluster.Server == "" {
			contexts[name] = "unknown"
		} else {
			contexts[name] = cluster.Server
		}
	}
	return contexts, nil
}

// ConfigurationView returns the current kubeconfig content as a kubeconfig YAML
// If minify is true, keeps only the current-context and the relevant pieces of the configuration for that context.
// If minify is false, all contexts, clusters, auth-infos, and users are returned in the configuration.
// TODO: Should be moved to the Provider level ?
func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) {
	var cfg clientcmdapi.Config
	var err error
	if k.manager.IsInCluster() {
		cfg = *clientcmdapi.NewConfig()
		cfg.Clusters["cluster"] = &clientcmdapi.Cluster{
			Server:                k.manager.cfg.Host,
			InsecureSkipTLSVerify: k.manager.cfg.Insecure,
		}
		cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{
			Token: k.manager.cfg.BearerToken,
		}
		cfg.Contexts["context"] = &clientcmdapi.Context{
			Cluster:  "cluster",
			AuthInfo: "user",
		}
		cfg.CurrentContext = "context"
	} else if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
	if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil {
		return nil, err
	}
	if minify {

@@ -1,155 +0,0 @@
package kubernetes

import (
	"errors"
	"os"
	"path"
	"runtime"
	"strings"
	"testing"

	"k8s.io/client-go/rest"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

func TestKubernetes_IsInCluster(t *testing.T) {
	t.Run("with explicit kubeconfig", func(t *testing.T) {
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "kubeconfig",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
	t.Run("with empty kubeconfig and in cluster", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return &rest.Config{}, nil
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if !m.IsInCluster() {
			t.Errorf("expected in cluster, got not in cluster")
		}
	})
	t.Run("with empty kubeconfig and not in cluster (empty)", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return nil, nil
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
	t.Run("with empty kubeconfig and not in cluster (error)", func(t *testing.T) {
		originalFunction := InClusterConfig
		InClusterConfig = func() (*rest.Config, error) {
			return nil, errors.New("error")
		}
		defer func() {
			InClusterConfig = originalFunction
		}()
		m := Manager{
			staticConfig: &config.StaticConfig{
				KubeConfig: "",
			},
		}
		if m.IsInCluster() {
			t.Errorf("expected not in cluster, got in cluster")
		}
	})
}

func TestKubernetes_ResolveKubernetesConfigurations_Explicit(t *testing.T) {
	t.Run("with missing file", func(t *testing.T) {
		if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
			t.Skip("Skipping test on non-linux platforms")
		}
		tempDir := t.TempDir()
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: path.Join(tempDir, "config"),
		}}
		err := resolveKubernetesConfigurations(&m)
		if err == nil {
			t.Errorf("expected error, got nil")
		}
		if !errors.Is(err, os.ErrNotExist) {
			t.Errorf("expected file not found error, got %v", err)
		}
		if !strings.HasSuffix(err.Error(), ": no such file or directory") {
			t.Errorf("expected file not found error, got %v", err)
		}
	})
	t.Run("with empty file", func(t *testing.T) {
		tempDir := t.TempDir()
		kubeconfigPath := path.Join(tempDir, "config")
		if err := os.WriteFile(kubeconfigPath, []byte(""), 0644); err != nil {
			t.Fatalf("failed to create kubeconfig file: %v", err)
		}
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: kubeconfigPath,
		}}
		err := resolveKubernetesConfigurations(&m)
		if err == nil {
			t.Errorf("expected error, got nil")
		}
		if !strings.Contains(err.Error(), "no configuration has been provided") {
			t.Errorf("expected no kubeconfig error, got %v", err)
		}
	})
	t.Run("with valid file", func(t *testing.T) {
		tempDir := t.TempDir()
		kubeconfigPath := path.Join(tempDir, "config")
		kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://example.com
  name: example-cluster
contexts:
- context:
    cluster: example-cluster
    user: example-user
  name: example-context
current-context: example-context
users:
- name: example-user
  user:
    token: example-token
`
		if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
			t.Fatalf("failed to create kubeconfig file: %v", err)
		}
		m := Manager{staticConfig: &config.StaticConfig{
			KubeConfig: kubeconfigPath,
		}}
		err := resolveKubernetesConfigurations(&m)
		if err != nil {
			t.Fatalf("expected no error, got %v", err)
		}
		if m.cfg == nil {
			t.Errorf("expected non-nil config, got nil")
		}
		if m.cfg.Host != "https://example.com" {
			t.Errorf("expected host https://example.com, got %s", m.cfg.Host)
		}
	})
}
@@ -1,27 +1,10 @@
package kubernetes

import (
	"context"
	"errors"
	"strings"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/fsnotify/fsnotify"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/klog/v2"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/containers/kubernetes-mcp-server/pkg/helm"
	"k8s.io/client-go/kubernetes/scheme"

	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
@@ -41,174 +24,15 @@ type Kubernetes struct {
	manager *Manager
}

type Manager struct {
	cfg                     *rest.Config
	clientCmdConfig         clientcmd.ClientConfig
	discoveryClient         discovery.CachedDiscoveryInterface
	accessControlClientSet  *AccessControlClientset
	accessControlRESTMapper *AccessControlRESTMapper
	dynamicClient           *dynamic.DynamicClient

	staticConfig         *config.StaticConfig
	CloseWatchKubeConfig CloseWatchKubeConfig
// AccessControlClientset returns the access-controlled clientset
// This ensures that any denied resources configured in the system are properly enforced
func (k *Kubernetes) AccessControlClientset() *AccessControlClientset {
	return k.manager.accessControlClientSet
}

var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)

var Scheme = scheme.Scheme
var ParameterCodec = runtime.NewParameterCodec(Scheme)

func NewManager(config *config.StaticConfig) (*Manager, error) {
	k8s := &Manager{
		staticConfig: config,
	}
	if err := resolveKubernetesConfigurations(k8s); err != nil {
		return nil, err
	}
	// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
	//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
	//	return &impersonateRoundTripper{original}
	//})
	var err error
	k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
	if err != nil {
		return nil, err
	}
	k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
	k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
		restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
		k8s.staticConfig,
	)
	k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
	if err != nil {
		return nil, err
	}
	return k8s, nil
}

func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
	if m.clientCmdConfig == nil {
		return
	}
	kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
	if len(kubeConfigFiles) == 0 {
		return
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return
	}
	for _, file := range kubeConfigFiles {
		_ = watcher.Add(file)
	}
	go func() {
		for {
			select {
			case _, ok := <-watcher.Events:
				if !ok {
					return
				}
				_ = onKubeConfigChange()
			case _, ok := <-watcher.Errors:
				if !ok {
					return
				}
			}
		}
	}()
	if m.CloseWatchKubeConfig != nil {
		_ = m.CloseWatchKubeConfig()
	}
	m.CloseWatchKubeConfig = watcher.Close
}

func (m *Manager) Close() {
	if m.CloseWatchKubeConfig != nil {
		_ = m.CloseWatchKubeConfig()
	}
}

func (m *Manager) GetAPIServerHost() string {
	if m.cfg == nil {
		return ""
	}
	return m.cfg.Host
}

func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
	return m.discoveryClient, nil
}

func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
	return m.accessControlRESTMapper, nil
}

func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
	authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
	if !ok || !strings.HasPrefix(authorization, "Bearer ") {
		if m.staticConfig.RequireOAuth {
			return nil, errors.New("oauth token required")
		}
		return &Kubernetes{manager: m}, nil
	}
	klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
	derivedCfg := &rest.Config{
		Host:    m.cfg.Host,
		APIPath: m.cfg.APIPath,
		// Copy only server verification TLS settings (CA bundle and server name)
		TLSClientConfig: rest.TLSClientConfig{
			Insecure:   m.cfg.Insecure,
			ServerName: m.cfg.ServerName,
			CAFile:     m.cfg.CAFile,
			CAData:     m.cfg.CAData,
		},
		BearerToken: strings.TrimPrefix(authorization, "Bearer "),
		// pass custom UserAgent to identify the client
		UserAgent:   CustomUserAgent,
		QPS:         m.cfg.QPS,
		Burst:       m.cfg.Burst,
		Timeout:     m.cfg.Timeout,
		Impersonate: rest.ImpersonationConfig{},
	}
	clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
	if err != nil {
		if m.staticConfig.RequireOAuth {
			klog.Errorf("failed to get kubeconfig: %v", err)
			return nil, errors.New("failed to get kubeconfig")
		}
		return &Kubernetes{manager: m}, nil
	}
	clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
	derived := &Kubernetes{manager: &Manager{
		clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
		cfg:             derivedCfg,
		staticConfig:    m.staticConfig,
	}}
	derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
	if err != nil {
		if m.staticConfig.RequireOAuth {
			klog.Errorf("failed to get kubeconfig: %v", err)
			return nil, errors.New("failed to get kubeconfig")
		}
		return &Kubernetes{manager: m}, nil
	}
	derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
	derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
		restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
		derived.manager.staticConfig,
	)
	derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
	if err != nil {
		if m.staticConfig.RequireOAuth {
			klog.Errorf("failed to initialize dynamic client: %v", err)
			return nil, errors.New("failed to initialize dynamic client")
		}
		return &Kubernetes{manager: m}, nil
	}
	return derived, nil
}

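As the code above shows, Derived reads the bearer token from the request context: callers attach the Authorization header value under the context key before asking for a per-request client. A minimal sketch of that call pattern, based on the context key used by this version of Derived (the helper name is illustrative):

// derivedFor builds a bearer-token-scoped client for a single request.
func derivedFor(manager *Manager, token string) (*Kubernetes, error) {
	// Attach the Authorization header value under the key Derived expects.
	ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+token)
	// With RequireOAuth=true a missing or malformed token yields "oauth token required".
	return manager.Derived(ctx)
}
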
func (k *Kubernetes) NewHelm() *helm.Helm {
	// This is a derived Kubernetes, so it already has the Helm initialized
	return helm.NewHelm(k.manager)

185 pkg/kubernetes/kubernetes_derived_test.go Normal file
@@ -0,0 +1,185 @@
package kubernetes

import (
	"context"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/containers/kubernetes-mcp-server/internal/test"
	"github.com/containers/kubernetes-mcp-server/pkg/config"
	"github.com/stretchr/testify/suite"
)

type DerivedTestSuite struct {
	suite.Suite
}

func (s *DerivedTestSuite) TestKubeConfig() {
	// Create a temporary kubeconfig file for testing
	tempDir := s.T().TempDir()
	kubeconfigPath := filepath.Join(tempDir, "config")
	kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://test-cluster.example.com
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
  name: test-context
current-context: test-context
users:
- name: test-user
  user:
    username: test-username
    password: test-password
`
	err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644)
	s.Require().NoError(err, "failed to create kubeconfig file")

	s.Run("with no RequireOAuth (default) config", func() {
		testStaticConfig := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
`)))
		s.Run("without authorization header returns original manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			derived, err := testManager.Derived(s.T().Context())
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.Equal(derived.manager, testManager, "expected original manager, got different manager")
		})

		s.Run("with invalid authorization header returns original manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.Equal(derived.manager, testManager, "expected original manager, got different manager")
		})

		s.Run("with valid bearer token creates derived manager with correct configuration", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
			s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")

			s.Run("RestConfig is correctly copied and sensitive fields are omitted", func() {
				derivedCfg := derived.manager.cfg
				s.Require().NotNil(derivedCfg, "derived config is nil")

				originalCfg := testManager.cfg
				s.Equalf(originalCfg.Host, derivedCfg.Host, "expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
				s.Equalf(originalCfg.APIPath, derivedCfg.APIPath, "expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
				s.Equalf(originalCfg.QPS, derivedCfg.QPS, "expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
				s.Equalf(originalCfg.Burst, derivedCfg.Burst, "expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
				s.Equalf(originalCfg.Timeout, derivedCfg.Timeout, "expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)

				s.Equalf(originalCfg.Insecure, derivedCfg.Insecure, "expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
				s.Equalf(originalCfg.ServerName, derivedCfg.ServerName, "expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
				s.Equalf(originalCfg.CAFile, derivedCfg.CAFile, "expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
				s.Equalf(string(originalCfg.CAData), string(derivedCfg.CAData), "expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))

				s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
				s.Equalf("kubernetes-mcp-server/bearer-token-auth", derivedCfg.UserAgent, "expected UserAgent \"kubernetes-mcp-server/bearer-token-auth\", got %s", derivedCfg.UserAgent)

				// Verify that sensitive fields are NOT copied to prevent credential leakage
				// The derived config should only use the bearer token from the Authorization header
				// and not inherit any authentication credentials from the original kubeconfig
				s.Emptyf(derivedCfg.CertFile, "expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
				s.Emptyf(derivedCfg.KeyFile, "expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
				s.Emptyf(len(derivedCfg.CertData), "expected TLS CertData to be empty, got %v", derivedCfg.CertData)
				s.Emptyf(len(derivedCfg.KeyData), "expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)

				s.Emptyf(derivedCfg.Username, "expected Username to be empty, got %s", derivedCfg.Username)
				s.Emptyf(derivedCfg.Password, "expected Password to be empty, got %s", derivedCfg.Password)
				s.Nilf(derivedCfg.AuthProvider, "expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
				s.Nilf(derivedCfg.ExecProvider, "expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
				s.Emptyf(derivedCfg.BearerTokenFile, "expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
				s.Emptyf(derivedCfg.Impersonate.UserName, "expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)

				// Verify that the original manager still has the sensitive data
				s.Falsef(originalCfg.Username == "" && originalCfg.Password == "", "original kubeconfig shouldn't be modified")

			})
			s.Run("derived manager has initialized clients", func() {
				// Verify that the derived manager has proper clients initialized
				s.NotNilf(derived.manager.accessControlClientSet, "expected accessControlClientSet to be initialized")
				s.Equalf(testStaticConfig, derived.manager.accessControlClientSet.staticConfig, "staticConfig not properly wired to derived manager")
				s.NotNilf(derived.manager.discoveryClient, "expected discoveryClient to be initialized")
				s.NotNilf(derived.manager.accessControlRESTMapper, "expected accessControlRESTMapper to be initialized")
				s.Equalf(testStaticConfig, derived.manager.accessControlRESTMapper.staticConfig, "staticConfig not properly wired to derived manager")
				s.NotNilf(derived.manager.dynamicClient, "expected dynamicClient to be initialized")
			})
		})
	})

	s.Run("with RequireOAuth=true", func() {
		testStaticConfig := test.Must(config.ReadToml([]byte(`
kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
require_oauth = true
`)))

		s.Run("with no authorization header returns oauth token required error", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			derived, err := testManager.Derived(s.T().Context())
			s.Require().Error(err, "expected error for missing oauth token, got nil")
			s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
			s.Nil(derived, "expected nil derived manager when oauth token required")
		})

		s.Run("with invalid authorization header returns oauth token required error", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
			derived, err := testManager.Derived(ctx)
			s.Require().Error(err, "expected error for invalid oauth token, got nil")
			s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
			s.Nil(derived, "expected nil derived manager when oauth token required")
		})

		s.Run("with valid bearer token creates derived manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
			s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")

			derivedCfg := derived.manager.cfg
			s.Require().NotNil(derivedCfg, "derived config is nil")

			s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
		})
	})
}

func TestDerived(t *testing.T) {
	suite.Run(t, new(DerivedTestSuite))
}
@@ -1,316 +0,0 @@
package kubernetes

import (
	"context"
	"os"
	"path"
	"testing"

	"github.com/containers/kubernetes-mcp-server/pkg/config"
)

func TestManager_Derived(t *testing.T) {
	// Create a temporary kubeconfig file for testing
	tempDir := t.TempDir()
	kubeconfigPath := path.Join(tempDir, "config")
	kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://test-cluster.example.com
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
  name: test-context
current-context: test-context
users:
- name: test-user
  user:
    username: test-username
    password: test-password
`
	if err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644); err != nil {
		t.Fatalf("failed to create kubeconfig file: %v", err)
	}

	t.Run("without authorization header returns original manager", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		ctx := context.Background()
		derived, err := testManager.Derived(ctx)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}

		if derived.manager != testManager {
			t.Errorf("expected original manager, got different manager")
		}
	})

	t.Run("with invalid authorization header returns original manager", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
		derived, err := testManager.Derived(ctx)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}

		if derived.manager != testManager {
			t.Errorf("expected original manager, got different manager")
		}
	})

	t.Run("with valid bearer token creates derived manager with correct configuration", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		testBearerToken := "test-bearer-token-123"
		ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
		derived, err := testManager.Derived(ctx)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}

		if derived.manager == testManager {
			t.Errorf("expected new derived manager, got original manager")
		}

		if derived.manager.staticConfig != testStaticConfig {
			t.Errorf("staticConfig not properly wired to derived manager")
		}

		derivedCfg := derived.manager.cfg
		if derivedCfg == nil {
			t.Fatalf("derived config is nil")
		}

		originalCfg := testManager.cfg
		if derivedCfg.Host != originalCfg.Host {
			t.Errorf("expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
		}
		if derivedCfg.APIPath != originalCfg.APIPath {
			t.Errorf("expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
		}
		if derivedCfg.QPS != originalCfg.QPS {
			t.Errorf("expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
		}
		if derivedCfg.Burst != originalCfg.Burst {
			t.Errorf("expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
		}
		if derivedCfg.Timeout != originalCfg.Timeout {
			t.Errorf("expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)
		}

		if derivedCfg.Insecure != originalCfg.Insecure {
			t.Errorf("expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
		}
		if derivedCfg.ServerName != originalCfg.ServerName {
			t.Errorf("expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
		}
		if derivedCfg.CAFile != originalCfg.CAFile {
			t.Errorf("expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
		}
		if string(derivedCfg.CAData) != string(originalCfg.CAData) {
			t.Errorf("expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))
		}

		if derivedCfg.BearerToken != testBearerToken {
			t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
		}
		if derivedCfg.UserAgent != CustomUserAgent {
			t.Errorf("expected UserAgent %s, got %s", CustomUserAgent, derivedCfg.UserAgent)
		}

		// Verify that sensitive fields are NOT copied to prevent credential leakage
		// The derived config should only use the bearer token from the Authorization header
		// and not inherit any authentication credentials from the original kubeconfig
		if derivedCfg.CertFile != "" {
			t.Errorf("expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
		}
		if derivedCfg.KeyFile != "" {
			t.Errorf("expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
		}
		if len(derivedCfg.CertData) != 0 {
			t.Errorf("expected TLS CertData to be empty, got %v", derivedCfg.CertData)
		}
		if len(derivedCfg.KeyData) != 0 {
			t.Errorf("expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)
		}

		if derivedCfg.Username != "" {
			t.Errorf("expected Username to be empty, got %s", derivedCfg.Username)
		}
		if derivedCfg.Password != "" {
			t.Errorf("expected Password to be empty, got %s", derivedCfg.Password)
		}
		if derivedCfg.AuthProvider != nil {
			t.Errorf("expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
		}
		if derivedCfg.ExecProvider != nil {
			t.Errorf("expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
		}
		if derivedCfg.BearerTokenFile != "" {
			t.Errorf("expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
		}
		if derivedCfg.Impersonate.UserName != "" {
			t.Errorf("expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)
		}

		// Verify that the original manager still has the sensitive data
		if originalCfg.Username == "" && originalCfg.Password == "" {
			t.Logf("original kubeconfig shouldn't be modified")
		}

		// Verify that the derived manager has proper clients initialized
		if derived.manager.accessControlClientSet == nil {
			t.Error("expected accessControlClientSet to be initialized")
		}
		if derived.manager.accessControlClientSet.staticConfig != testStaticConfig {
			t.Errorf("staticConfig not properly wired to derived manager")
		}
		if derived.manager.discoveryClient == nil {
			t.Error("expected discoveryClient to be initialized")
		}
		if derived.manager.accessControlRESTMapper == nil {
			t.Error("expected accessControlRESTMapper to be initialized")
		}
		if derived.manager.accessControlRESTMapper.staticConfig != testStaticConfig {
			t.Errorf("staticConfig not properly wired to derived manager")
		}
		if derived.manager.dynamicClient == nil {
			t.Error("expected dynamicClient to be initialized")
		}
	})

	t.Run("with RequireOAuth=true and no authorization header returns oauth token required error", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			RequireOAuth:  true,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		ctx := context.Background()
		derived, err := testManager.Derived(ctx)
		if err == nil {
			t.Fatal("expected error for missing oauth token, got nil")
		}
		if err.Error() != "oauth token required" {
			t.Fatalf("expected error 'oauth token required', got %s", err.Error())
		}
		if derived != nil {
			t.Error("expected nil derived manager when oauth token required")
		}
	})

	t.Run("with RequireOAuth=true and invalid authorization header returns oauth token required error", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			RequireOAuth:  true,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "invalid-token")
		derived, err := testManager.Derived(ctx)
		if err == nil {
			t.Fatal("expected error for invalid oauth token, got nil")
		}
		if err.Error() != "oauth token required" {
			t.Fatalf("expected error 'oauth token required', got %s", err.Error())
		}
		if derived != nil {
			t.Error("expected nil derived manager when oauth token required")
		}
	})

	t.Run("with RequireOAuth=true and valid bearer token creates derived manager", func(t *testing.T) {
		testStaticConfig := &config.StaticConfig{
			KubeConfig:    kubeconfigPath,
			RequireOAuth:  true,
			DisabledTools: []string{"configuration_view"},
			DeniedResources: []config.GroupVersionKind{
				{Group: "apps", Version: "v1", Kind: "Deployment"},
			},
		}

		testManager, err := NewManager(testStaticConfig)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}
		defer testManager.Close()
		testBearerToken := "test-bearer-token-123"
		ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+testBearerToken)
		derived, err := testManager.Derived(ctx)
		if err != nil {
			t.Fatalf("failed to create manager: %v", err)
		}

		if derived.manager == testManager {
			t.Error("expected new derived manager, got original manager")
		}

		if derived.manager.staticConfig != testStaticConfig {
			t.Error("staticConfig not properly wired to derived manager")
		}

		derivedCfg := derived.manager.cfg
		if derivedCfg == nil {
			t.Fatal("derived config is nil")
		}

		if derivedCfg.BearerToken != testBearerToken {
			t.Errorf("expected BearerToken %s, got %s", testBearerToken, derivedCfg.BearerToken)
		}
	})
}
301
pkg/kubernetes/manager.go
Normal file
@@ -0,0 +1,301 @@
package kubernetes

import (
    "context"
    "errors"
    "fmt"
    "strings"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
    "github.com/containers/kubernetes-mcp-server/pkg/helm"
    "github.com/fsnotify/fsnotify"
    authenticationv1api "k8s.io/api/authentication/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/discovery/cached/memory"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/restmapper"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/klog/v2"
)

type Manager struct {
    cfg                     *rest.Config
    clientCmdConfig         clientcmd.ClientConfig
    discoveryClient         discovery.CachedDiscoveryInterface
    accessControlClientSet  *AccessControlClientset
    accessControlRESTMapper *AccessControlRESTMapper
    dynamicClient           *dynamic.DynamicClient

    staticConfig         *config.StaticConfig
    CloseWatchKubeConfig CloseWatchKubeConfig
}

var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)

var (
    ErrorKubeconfigInClusterNotAllowed = errors.New("kubeconfig manager cannot be used in in-cluster deployments")
    ErrorInClusterNotInCluster         = errors.New("in-cluster manager cannot be used outside of a cluster")
)

func NewKubeconfigManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) {
    if IsInCluster(config) {
        return nil, ErrorKubeconfigInClusterNotAllowed
    }

    pathOptions := clientcmd.NewDefaultPathOptions()
    if config.KubeConfig != "" {
        pathOptions.LoadingRules.ExplicitPath = config.KubeConfig
    }
    clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        pathOptions.LoadingRules,
        &clientcmd.ConfigOverrides{
            ClusterInfo:    clientcmdapi.Cluster{Server: ""},
            CurrentContext: kubeconfigContext,
        })

    restConfig, err := clientCmdConfig.ClientConfig()
    if err != nil {
        return nil, fmt.Errorf("failed to create kubernetes rest config from kubeconfig: %v", err)
    }

    return newManager(config, restConfig, clientCmdConfig)
}

func NewInClusterManager(config *config.StaticConfig) (*Manager, error) {
    if config.KubeConfig != "" {
        return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster deployments: %v", config.KubeConfig, ErrorKubeconfigInClusterNotAllowed)
    }

    if !IsInCluster(config) {
        return nil, ErrorInClusterNotInCluster
    }

    restConfig, err := InClusterConfig()
    if err != nil {
        return nil, fmt.Errorf("failed to create in-cluster kubernetes rest config: %v", err)
    }

    // Create a dummy kubeconfig clientcmdapi.Config for in-cluster config to be used in places where clientcmd.ClientConfig is required
    clientCmdConfig := clientcmdapi.NewConfig()
    clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{
        Server:                restConfig.Host,
        InsecureSkipTLSVerify: restConfig.Insecure,
    }
    clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{
        Token: restConfig.BearerToken,
    }
    clientCmdConfig.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{
        Cluster:  "cluster",
        AuthInfo: "user",
    }
    clientCmdConfig.CurrentContext = inClusterKubeConfigDefaultContext

    return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil))
}

func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) {
    k8s := &Manager{
        staticConfig:    config,
        cfg:             restConfig,
        clientCmdConfig: clientCmdConfig,
    }
    if k8s.cfg.UserAgent == "" {
        k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
    }
    var err error
    // TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
    //k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
    //	return &impersonateRoundTripper{original}
    //})
    k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
    if err != nil {
        return nil, err
    }
    k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
    k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
        restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
        k8s.staticConfig,
    )
    k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
    if err != nil {
        return nil, err
    }
    return k8s, nil
}

func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
    if m.clientCmdConfig == nil {
        return
    }
    kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
    if len(kubeConfigFiles) == 0 {
        return
    }
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        return
    }
    for _, file := range kubeConfigFiles {
        _ = watcher.Add(file)
    }
    go func() {
        for {
            select {
            case _, ok := <-watcher.Events:
                if !ok {
                    return
                }
                _ = onKubeConfigChange()
            case _, ok := <-watcher.Errors:
                if !ok {
                    return
                }
            }
        }
    }()
    if m.CloseWatchKubeConfig != nil {
        _ = m.CloseWatchKubeConfig()
    }
    m.CloseWatchKubeConfig = watcher.Close
}

func (m *Manager) Close() {
    if m.CloseWatchKubeConfig != nil {
        _ = m.CloseWatchKubeConfig()
    }
}

func (m *Manager) configuredNamespace() string {
    if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil {
        return ns
    }
    return ""
}

func (m *Manager) NamespaceOrDefault(namespace string) string {
    if namespace == "" {
        return m.configuredNamespace()
    }
    return namespace
}

func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
    return m.discoveryClient, nil
}

func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
    return m.accessControlRESTMapper, nil
}

// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
    return m.cfg, nil
}

// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
    return m.clientCmdConfig
}

func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
    tokenReviewClient, err := m.accessControlClientSet.TokenReview()
    if err != nil {
        return nil, nil, err
    }
    tokenReview := &authenticationv1api.TokenReview{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "authentication.k8s.io/v1",
            Kind:       "TokenReview",
        },
        Spec: authenticationv1api.TokenReviewSpec{
            Token:     token,
            Audiences: []string{audience},
        },
    }

    result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
    if err != nil {
        return nil, nil, fmt.Errorf("failed to create token review: %v", err)
    }

    if !result.Status.Authenticated {
        if result.Status.Error != "" {
            return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
        }
        return nil, nil, fmt.Errorf("token authentication failed")
    }

    return &result.Status.User, result.Status.Audiences, nil
}

func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
    authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string)
    if !ok || !strings.HasPrefix(authorization, "Bearer ") {
        if m.staticConfig.RequireOAuth {
            return nil, errors.New("oauth token required")
        }
        return &Kubernetes{manager: m}, nil
    }
    klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
    derivedCfg := &rest.Config{
        Host:    m.cfg.Host,
        APIPath: m.cfg.APIPath,
        // Copy only server verification TLS settings (CA bundle and server name)
        TLSClientConfig: rest.TLSClientConfig{
            Insecure:   m.cfg.Insecure,
            ServerName: m.cfg.ServerName,
            CAFile:     m.cfg.CAFile,
            CAData:     m.cfg.CAData,
        },
        BearerToken: strings.TrimPrefix(authorization, "Bearer "),
        // pass custom UserAgent to identify the client
        UserAgent:   CustomUserAgent,
        QPS:         m.cfg.QPS,
        Burst:       m.cfg.Burst,
        Timeout:     m.cfg.Timeout,
        Impersonate: rest.ImpersonationConfig{},
    }
    clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
    if err != nil {
        if m.staticConfig.RequireOAuth {
            klog.Errorf("failed to get kubeconfig: %v", err)
            return nil, errors.New("failed to get kubeconfig")
        }
        return &Kubernetes{manager: m}, nil
    }
    clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo)
    derived := &Kubernetes{
        manager: &Manager{
            clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
            cfg:             derivedCfg,
            staticConfig:    m.staticConfig,
        },
    }
    derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
    if err != nil {
        if m.staticConfig.RequireOAuth {
            klog.Errorf("failed to initialize access-control client set: %v", err)
            return nil, errors.New("failed to initialize access-control client set")
        }
        return &Kubernetes{manager: m}, nil
    }
    derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
    derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
        restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
        derived.manager.staticConfig,
    )
    derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
    if err != nil {
        if m.staticConfig.RequireOAuth {
            klog.Errorf("failed to initialize dynamic client: %v", err)
            return nil, errors.New("failed to initialize dynamic client")
        }
        return &Kubernetes{manager: m}, nil
    }
    return derived, nil
}
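For orientation, a minimal sketch of how a caller could exercise Derived with a per-request bearer token; derivedForToken and the token value are hypothetical, while the context key and types come from the file above:

// Sketch only (not part of this diff): derive a per-request client from a bearer token.
func derivedForToken(m *Manager, token string) (*Kubernetes, error) {
    ctx := context.WithValue(context.Background(), OAuthAuthorizationHeader, "Bearer "+token)
    return m.Derived(ctx) // the returned client authenticates with token, not the kubeconfig credentials
}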
202
pkg/kubernetes/manager_test.go
Normal file
@@ -0,0 +1,202 @@
package kubernetes

import (
    "os"
    "path/filepath"
    "runtime"
    "testing"

    "github.com/containers/kubernetes-mcp-server/internal/test"
    "github.com/containers/kubernetes-mcp-server/pkg/config"
    "github.com/stretchr/testify/suite"
    "k8s.io/client-go/rest"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

type ManagerTestSuite struct {
    suite.Suite
    originalEnv             []string
    originalInClusterConfig func() (*rest.Config, error)
    mockServer              *test.MockServer
}

func (s *ManagerTestSuite) SetupTest() {
    s.originalEnv = os.Environ()
    s.originalInClusterConfig = InClusterConfig
    s.mockServer = test.NewMockServer()
}

func (s *ManagerTestSuite) TearDownTest() {
    test.RestoreEnv(s.originalEnv)
    InClusterConfig = s.originalInClusterConfig
    if s.mockServer != nil {
        s.mockServer.Close()
    }
}

func (s *ManagerTestSuite) TestNewInClusterManager() {
    s.Run("In cluster", func() {
        InClusterConfig = func() (*rest.Config, error) {
            return &rest.Config{}, nil
        }
        s.Run("with default StaticConfig (empty kubeconfig)", func() {
            manager, err := NewInClusterManager(&config.StaticConfig{})
            s.Require().NoError(err)
            s.Require().NotNil(manager)
            s.Run("behaves as in cluster", func() {
                rawConfig, err := manager.clientCmdConfig.RawConfig()
                s.Require().NoError(err)
                s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'")
            })
            s.Run("sets default user-agent", func() {
                s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
            })
        })
        s.Run("with explicit kubeconfig", func() {
            manager, err := NewInClusterManager(&config.StaticConfig{
                KubeConfig: s.mockServer.KubeconfigFile(s.T()),
            })
            s.Run("returns error", func() {
                s.Error(err)
                s.Nil(manager)
                s.Regexp("kubeconfig file .+ cannot be used with the in-cluster deployments", err.Error())
            })
        })
    })
    s.Run("Out of cluster", func() {
        InClusterConfig = func() (*rest.Config, error) {
            return nil, rest.ErrNotInCluster
        }
        manager, err := NewInClusterManager(&config.StaticConfig{})
        s.Run("returns error", func() {
            s.Error(err)
            s.Nil(manager)
            s.ErrorIs(err, ErrorInClusterNotInCluster)
            s.ErrorContains(err, "in-cluster manager cannot be used outside of a cluster")
        })
    })
}

func (s *ManagerTestSuite) TestNewKubeconfigManager() {
    s.Run("Out of cluster", func() {
        InClusterConfig = func() (*rest.Config, error) {
            return nil, rest.ErrNotInCluster
        }
        s.Run("with valid kubeconfig in env", func() {
            kubeconfig := s.mockServer.KubeconfigFile(s.T())
            s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig))
            manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
            s.Require().NoError(err)
            s.Require().NotNil(manager)
            s.Run("behaves as NOT in cluster", func() {
                rawConfig, err := manager.clientCmdConfig.RawConfig()
                s.Require().NoError(err)
                s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
                s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
            })
            s.Run("loads correct config", func() {
                s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
            })
            s.Run("sets default user-agent", func() {
                s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
            })
            s.Run("rest config host points to mock server", func() {
                s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
            })
        })
        s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
            kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
            s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
            kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T())
            manager, err := NewKubeconfigManager(&config.StaticConfig{
                KubeConfig: kubeconfigExplicit,
            }, "")
            s.Require().NoError(err)
            s.Require().NotNil(manager)
            s.Run("behaves as NOT in cluster", func() {
                rawConfig, err := manager.clientCmdConfig.RawConfig()
                s.Require().NoError(err)
                s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
                s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig")
            })
            s.Run("loads correct config (explicit)", func() {
                s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env")
                s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
            })
            s.Run("rest config host points to mock server", func() {
                s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
            })
        })
        s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
            kubeconfig := s.mockServer.Kubeconfig()
            kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext()
            kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server"
            kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster()
            kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this
            kubeconfig.CurrentContext = "not-the-mock-server"
            kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig)
            s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile))
            manager, err := NewKubeconfigManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves
            s.Require().NoError(err)
            s.Require().NotNil(manager)
            s.Run("behaves as NOT in cluster", func() {
                rawConfig, err := manager.clientCmdConfig.RawConfig()
                s.Require().NoError(err)
                s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'")
                s.Equal("not-the-mock-server", rawConfig.CurrentContext, "expected current context to be 'not-the-mock-server' as in explicit context")
            })
            s.Run("loads correct config", func() {
                s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
            })
            s.Run("rest config host points to mock server", func() {
                s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
            })
        })
        s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
            kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T())
            s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv))
            manager, err := NewKubeconfigManager(&config.StaticConfig{}, "i-do-not-exist")
            s.Run("returns error", func() {
                s.Error(err)
                s.Nil(manager)
                s.ErrorContains(err, `failed to create kubernetes rest config from kubeconfig: context "i-do-not-exist" does not exist`)
            })
        })
        s.Run("with invalid path kubeconfig in env", func() {
            s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist"))
            manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
            s.Run("returns error", func() {
                s.Error(err)
                s.Nil(manager)
                s.ErrorContains(err, "failed to create kubernetes rest config")
            })
        })
        s.Run("with empty kubeconfig in env", func() {
            kubeconfigPath := filepath.Join(s.T().TempDir(), "config")
            s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644))
            s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath))
            manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
            s.Run("returns error", func() {
                s.Error(err)
                s.Nil(manager)
                s.ErrorContains(err, "no configuration has been provided")
            })
        })
    })
    s.Run("In cluster", func() {
        InClusterConfig = func() (*rest.Config, error) {
            return &rest.Config{}, nil
        }
        manager, err := NewKubeconfigManager(&config.StaticConfig{}, "")
        s.Run("returns error", func() {
            s.Error(err)
            s.Nil(manager)
            s.ErrorIs(err, ErrorKubeconfigInClusterNotAllowed)
            s.ErrorContains(err, "kubeconfig manager cannot be used in in-cluster deployments")
        })
    })
}

func TestManager(t *testing.T) {
    suite.Run(t, new(ManagerTestSuite))
}
50
pkg/kubernetes/provider.go
Normal file
@@ -0,0 +1,50 @@
package kubernetes

import (
    "context"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
)

type Provider interface {
    // Openshift is embedded to provide OpenShift-specific functionality to toolset providers.
    // TODO: with the configurable toolset implementation and especially the multi-cluster approach,
    // extending this interface might not be a good idea anymore.
    // For the kubecontext case, a user might be targeting both an OpenShift flavored cluster and a vanilla Kubernetes cluster.
    // See: https://github.com/containers/kubernetes-mcp-server/pull/372#discussion_r2421592315
    Openshift
    TokenVerifier
    GetTargets(ctx context.Context) ([]string, error)
    GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error)
    GetDefaultTarget() string
    GetTargetParameterName() string
    WatchTargets(func() error)
    Close()
}

func NewProvider(cfg *config.StaticConfig) (Provider, error) {
    strategy := resolveStrategy(cfg)

    factory, err := getProviderFactory(strategy)
    if err != nil {
        return nil, err
    }

    return factory(cfg)
}

func resolveStrategy(cfg *config.StaticConfig) string {
    if cfg.ClusterProviderStrategy != "" {
        return cfg.ClusterProviderStrategy
    }

    if cfg.KubeConfig != "" {
        return config.ClusterProviderKubeConfig
    }

    if _, inClusterConfigErr := InClusterConfig(); inClusterConfigErr == nil {
        return config.ClusterProviderInCluster
    }

    return config.ClusterProviderKubeConfig
}
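As a rough usage sketch of the interface above (listClusters is a hypothetical helper, not part of this change):

// Sketch: resolve a provider from static config and obtain a client per target.
func listClusters(ctx context.Context, cfg *config.StaticConfig) error {
    provider, err := NewProvider(cfg) // strategy comes from cfg, an explicit kubeconfig, or in-cluster detection
    if err != nil {
        return err
    }
    defer provider.Close()
    targets, err := provider.GetTargets(ctx)
    if err != nil {
        return err
    }
    for _, target := range targets {
        if _, err := provider.GetDerivedKubernetes(ctx, target); err != nil {
            return err
        }
    }
    return nil
}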
131
pkg/kubernetes/provider_kubeconfig.go
Normal file
@@ -0,0 +1,131 @@
package kubernetes

import (
    "context"
    "errors"
    "fmt"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
    authenticationv1api "k8s.io/api/authentication/v1"
)

// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"

// kubeConfigClusterProvider implements Provider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
    defaultContext string
    managers       map[string]*Manager
}

var _ Provider = &kubeConfigClusterProvider{}

func init() {
    RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}

// newKubeConfigClusterProvider creates a provider that manages multiple clusters
// via kubeconfig contexts.
// Internally, it leverages a KubeconfigManager for each context, initializing them
// lazily when requested.
func newKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) {
    m, err := NewKubeconfigManager(cfg, "")
    if err != nil {
        if errors.Is(err, ErrorKubeconfigInClusterNotAllowed) {
            return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments: %v", err)
        }
        return nil, err
    }

    rawConfig, err := m.clientCmdConfig.RawConfig()
    if err != nil {
        return nil, err
    }

    allClusterManagers := map[string]*Manager{
        rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it
    }

    for name := range rawConfig.Contexts {
        if name == rawConfig.CurrentContext {
            continue // already initialized this, don't want to set it to nil
        }

        allClusterManagers[name] = nil
    }

    return &kubeConfigClusterProvider{
        defaultContext: rawConfig.CurrentContext,
        managers:       allClusterManagers,
    }, nil
}

func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, error) {
    m, ok := p.managers[context]
    if ok && m != nil {
        return m, nil
    }

    baseManager := p.managers[p.defaultContext]

    m, err := NewKubeconfigManager(baseManager.staticConfig, context)
    if err != nil {
        return nil, err
    }

    p.managers[context] = m

    return m, nil
}

func (p *kubeConfigClusterProvider) IsOpenShift(ctx context.Context) bool {
    return p.managers[p.defaultContext].IsOpenShift(ctx)
}

func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
    m, err := p.managerForContext(context)
    if err != nil {
        return nil, nil, err
    }
    return m.VerifyToken(ctx, token, audience)
}

func (p *kubeConfigClusterProvider) GetTargets(_ context.Context) ([]string, error) {
    contextNames := make([]string, 0, len(p.managers))
    for contextName := range p.managers {
        contextNames = append(contextNames, contextName)
    }

    return contextNames, nil
}

func (p *kubeConfigClusterProvider) GetTargetParameterName() string {
    return KubeConfigTargetParameterName
}

func (p *kubeConfigClusterProvider) GetDerivedKubernetes(ctx context.Context, context string) (*Kubernetes, error) {
    m, err := p.managerForContext(context)
    if err != nil {
        return nil, err
    }
    return m.Derived(ctx)
}

func (p *kubeConfigClusterProvider) GetDefaultTarget() string {
    return p.defaultContext
}

func (p *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) {
    m := p.managers[p.defaultContext]

    m.WatchKubeConfig(onKubeConfigChanged)
}

func (p *kubeConfigClusterProvider) Close() {
    m := p.managers[p.defaultContext]

    m.Close()
}
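The lazy initialization described in the doc comment means repeated lookups of the same context are cheap; a sketch (the "staging" context name is illustrative):

// Sketch: the first call constructs and caches a Manager; later calls reuse it.
k8s, _ := provider.GetDerivedKubernetes(ctx, "staging") // builds and caches a Manager for "staging"
k8s, _ = provider.GetDerivedKubernetes(ctx, "staging")  // reuses the cached Manager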
151
pkg/kubernetes/provider_kubeconfig_test.go
Normal file
@@ -0,0 +1,151 @@
package kubernetes

import (
    "fmt"
    "net/http"
    "testing"

    "github.com/containers/kubernetes-mcp-server/internal/test"
    "github.com/containers/kubernetes-mcp-server/pkg/config"
    "github.com/stretchr/testify/suite"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

type ProviderKubeconfigTestSuite struct {
    BaseProviderSuite
    mockServer *test.MockServer
    provider   Provider
}

func (s *ProviderKubeconfigTestSuite) SetupTest() {
    // Kubeconfig provider is used when the multi-cluster feature is enabled with the kubeconfig strategy.
    // For this test suite we simulate a kubeconfig with multiple contexts.
    s.mockServer = test.NewMockServer()
    kubeconfig := s.mockServer.Kubeconfig()
    for i := 0; i < 10; i++ {
        // Add multiple fake contexts to force multi-cluster behavior
        kubeconfig.Contexts[fmt.Sprintf("context-%d", i)] = clientcmdapi.NewContext()
    }
    provider, err := NewProvider(&config.StaticConfig{KubeConfig: test.KubeconfigFile(s.T(), kubeconfig)})
    s.Require().NoError(err, "Expected no error creating provider with kubeconfig")
    s.provider = provider
}

func (s *ProviderKubeconfigTestSuite) TearDownTest() {
    if s.mockServer != nil {
        s.mockServer.Close()
    }
}

func (s *ProviderKubeconfigTestSuite) TestType() {
    s.IsType(&kubeConfigClusterProvider{}, s.provider)
}

func (s *ProviderKubeconfigTestSuite) TestWithNonOpenShiftCluster() {
    s.Run("IsOpenShift returns false", func() {
        inOpenShift := s.provider.IsOpenShift(s.T().Context())
        s.False(inOpenShift, "Expected IsOpenShift to return false")
    })
}

func (s *ProviderKubeconfigTestSuite) TestWithOpenShiftCluster() {
    s.mockServer.Handle(&test.InOpenShiftHandler{})
    s.Run("IsOpenShift returns true", func() {
        inOpenShift := s.provider.IsOpenShift(s.T().Context())
        s.True(inOpenShift, "Expected IsOpenShift to return true")
    })
}

func (s *ProviderKubeconfigTestSuite) TestVerifyToken() {
    s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write([]byte(`
                {
                    "kind": "TokenReview",
                    "apiVersion": "authentication.k8s.io/v1",
                    "spec": {"token": "the-token"},
                    "status": {
                        "authenticated": true,
                        "user": {
                            "username": "test-user",
                            "groups": ["system:authenticated"]
                        },
                        "audiences": ["the-audience"]
                    }
                }`))
        }
    }))
    s.Run("VerifyToken returns UserInfo for non-empty context", func() {
        userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "fake-context", "some-token", "the-audience")
        s.Require().NoError(err, "Expected no error from VerifyToken with non-empty context")
        s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with non-empty context")
        s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username)
        s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
        s.Require().NotNil(audiences, "Expected audiences from VerifyToken with non-empty context")
        s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with non-empty context")
        s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
    })
    s.Run("VerifyToken returns UserInfo for empty context (default context)", func() {
        userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience")
        s.Require().NoError(err, "Expected no error from VerifyToken with empty target")
        s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target")
        s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username)
        s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
        s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target")
        s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with empty target")
        s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
    })
    s.Run("VerifyToken returns error for invalid context", func() {
        userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "invalid-context", "some-token", "the-audience")
        s.Require().Error(err, "Expected error from VerifyToken with invalid target")
        s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
        s.Nil(userInfo, "Expected no UserInfo from VerifyToken with invalid target")
        s.Nil(audiences, "Expected no audiences from VerifyToken with invalid target")
    })
}

func (s *ProviderKubeconfigTestSuite) TestGetTargets() {
    s.Run("GetTargets returns all contexts defined in kubeconfig", func() {
        targets, err := s.provider.GetTargets(s.T().Context())
        s.Require().NoError(err, "Expected no error from GetTargets")
        s.Len(targets, 11, "Expected 11 targets from GetTargets")
        s.Contains(targets, "fake-context", "Expected fake-context in targets from GetTargets")
        for i := 0; i < 10; i++ {
            s.Containsf(targets, fmt.Sprintf("context-%d", i), "Expected context-%d in targets from GetTargets", i)
        }
    })
}

func (s *ProviderKubeconfigTestSuite) TestGetDerivedKubernetes() {
    s.Run("GetDerivedKubernetes returns Kubernetes for valid context", func() {
        k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "fake-context")
        s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with valid context")
        s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with valid context")
    })
    s.Run("GetDerivedKubernetes returns Kubernetes for empty context (default)", func() {
        k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "")
        s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty context")
        s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty context")
    })
    s.Run("GetDerivedKubernetes returns error for invalid context", func() {
        k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "invalid-context")
        s.Require().Error(err, "Expected error from GetDerivedKubernetes with invalid context")
        s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
        s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with invalid context")
    })
}

func (s *ProviderKubeconfigTestSuite) TestGetDefaultTarget() {
    s.Run("GetDefaultTarget returns current-context defined in kubeconfig", func() {
        s.Equal("fake-context", s.provider.GetDefaultTarget(), "Expected fake-context as default target")
    })
}

func (s *ProviderKubeconfigTestSuite) TestGetTargetParameterName() {
    s.Equal("context", s.provider.GetTargetParameterName(), "Expected context as target parameter name")
}

func TestProviderKubeconfig(t *testing.T) {
    suite.Run(t, new(ProviderKubeconfigTestSuite))
}
47
pkg/kubernetes/provider_registry.go
Normal file
@@ -0,0 +1,47 @@
package kubernetes

import (
    "fmt"
    "sort"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
)

// ProviderFactory creates a new Provider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
type ProviderFactory func(cfg *config.StaticConfig) (Provider, error)

var providerFactories = make(map[string]ProviderFactory)

// RegisterProvider registers a provider factory for a given strategy name.
// This should be called from init() functions in provider implementation files.
// Panics if a provider is already registered for the given strategy.
func RegisterProvider(strategy string, factory ProviderFactory) {
    if _, exists := providerFactories[strategy]; exists {
        panic(fmt.Sprintf("provider already registered for strategy '%s'", strategy))
    }
    providerFactories[strategy] = factory
}

// getProviderFactory retrieves a registered provider factory by strategy name.
// Returns an error if no provider is registered for the given strategy.
func getProviderFactory(strategy string) (ProviderFactory, error) {
    factory, ok := providerFactories[strategy]
    if !ok {
        available := GetRegisteredStrategies()
        return nil, fmt.Errorf("no provider registered for strategy '%s', available strategies: %v", strategy, available)
    }
    return factory, nil
}

// GetRegisteredStrategies returns a sorted list of all registered strategy names.
// This is useful for error messages and debugging.
func GetRegisteredStrategies() []string {
    strategies := make([]string, 0, len(providerFactories))
    for strategy := range providerFactories {
        strategies = append(strategies, strategy)
    }
    sort.Strings(strategies)
    return strategies
}
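The registry is the extension point the provider files below rely on; a sketch of how a hypothetical out-of-tree strategy would hook in ("my-strategy" and newMyProvider are invented names):

// Sketch: a hypothetical strategy registering itself at package initialization.
func init() {
    // Panics if "my-strategy" is already taken, mirroring the built-in registrations.
    RegisterProvider("my-strategy", func(cfg *config.StaticConfig) (Provider, error) {
        return newMyProvider(cfg) // hypothetical constructor satisfying the Provider interface
    })
}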
56
pkg/kubernetes/provider_registry_test.go
Normal file
@@ -0,0 +1,56 @@
package kubernetes

import (
    "testing"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
    "github.com/stretchr/testify/suite"
)

type ProviderRegistryTestSuite struct {
    BaseProviderSuite
}

func (s *ProviderRegistryTestSuite) TestRegisterProvider() {
    s.Run("With no pre-existing provider, registers the provider", func() {
        RegisterProvider("test-strategy", func(cfg *config.StaticConfig) (Provider, error) {
            return nil, nil
        })
        _, exists := providerFactories["test-strategy"]
        s.True(exists, "Provider should be registered")
    })
    s.Run("With pre-existing provider, panics", func() {
        RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
            return nil, nil
        })
        s.Panics(func() {
            RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) {
                return nil, nil
            })
        }, "Registering a provider with an existing strategy should panic")
    })
}

func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() {
    s.Run("With no registered providers, returns empty list", func() {
        providerFactories = make(map[string]ProviderFactory)
        strategies := GetRegisteredStrategies()
        s.Empty(strategies, "No strategies should be registered")
    })
    s.Run("With multiple registered providers, returns sorted list", func() {
        providerFactories = make(map[string]ProviderFactory)
        RegisterProvider("foo-strategy", func(cfg *config.StaticConfig) (Provider, error) {
            return nil, nil
        })
        RegisterProvider("bar-strategy", func(cfg *config.StaticConfig) (Provider, error) {
            return nil, nil
        })
        strategies := GetRegisteredStrategies()
        expected := []string{"bar-strategy", "foo-strategy"}
        s.Equal(expected, strategies, "Strategies should be sorted alphabetically")
    })
}

func TestProviderRegistry(t *testing.T) {
    suite.Run(t, new(ProviderRegistryTestSuite))
}
94
pkg/kubernetes/provider_single.go
Normal file
@@ -0,0 +1,94 @@
package kubernetes

import (
    "context"
    "errors"
    "fmt"

    "github.com/containers/kubernetes-mcp-server/pkg/config"
    authenticationv1api "k8s.io/api/authentication/v1"
)

// singleClusterProvider implements Provider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
    strategy string
    manager  *Manager
}

var _ Provider = &singleClusterProvider{}

func init() {
    RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
    RegisterProvider(config.ClusterProviderDisabled, newSingleClusterProvider(config.ClusterProviderDisabled))
}

// newSingleClusterProvider creates a provider that manages a single cluster.
// When used within a cluster or with an 'in-cluster' strategy, it uses an InClusterManager.
// Otherwise, it uses a KubeconfigManager.
func newSingleClusterProvider(strategy string) ProviderFactory {
    return func(cfg *config.StaticConfig) (Provider, error) {
        if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster {
            return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig)
        }

        var m *Manager
        var err error
        if strategy == config.ClusterProviderInCluster || IsInCluster(cfg) {
            m, err = NewInClusterManager(cfg)
        } else {
            m, err = NewKubeconfigManager(cfg, "")
        }
        if err != nil {
            if errors.Is(err, ErrorInClusterNotInCluster) {
                return nil, fmt.Errorf("server must be deployed in cluster for the %s ClusterProviderStrategy: %v", strategy, err)
            }
            return nil, err
        }

        return &singleClusterProvider{
            manager:  m,
            strategy: strategy,
        }, nil
    }
}

func (p *singleClusterProvider) IsOpenShift(ctx context.Context) bool {
    return p.manager.IsOpenShift(ctx)
}

func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
    if target != "" {
        return nil, nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
    }
    return p.manager.VerifyToken(ctx, token, audience)
}

func (p *singleClusterProvider) GetTargets(_ context.Context) ([]string, error) {
    return []string{""}, nil
}

func (p *singleClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) {
    if target != "" {
        return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy)
    }

    return p.manager.Derived(ctx)
}

func (p *singleClusterProvider) GetDefaultTarget() string {
    return ""
}

func (p *singleClusterProvider) GetTargetParameterName() string {
    return ""
}

func (p *singleClusterProvider) WatchTargets(watch func() error) {
    p.manager.WatchKubeConfig(watch)
}

func (p *singleClusterProvider) Close() {
    p.manager.Close()
}
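Because newSingleClusterProvider returns a ProviderFactory closure that captures the strategy string, the two init() registrations above share one implementation while still reporting the correct strategy in error messages; a sketch of the equivalent direct call (the empty config literal is illustrative):

// Sketch: building the provider directly instead of through the registry.
factory := newSingleClusterProvider(config.ClusterProviderDisabled)
provider, err := factory(&config.StaticConfig{}) // falls back to a KubeconfigManager when not running in a cluster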
133
pkg/kubernetes/provider_single_test.go
Normal file
@@ -0,0 +1,133 @@
package kubernetes

import (
    "net/http"
    "testing"

    "github.com/containers/kubernetes-mcp-server/internal/test"
    "github.com/containers/kubernetes-mcp-server/pkg/config"
    "github.com/stretchr/testify/suite"
    "k8s.io/client-go/rest"
)

type ProviderSingleTestSuite struct {
    BaseProviderSuite
    mockServer                *test.MockServer
    originalIsInClusterConfig func() (*rest.Config, error)
    provider                  Provider
}

func (s *ProviderSingleTestSuite) SetupTest() {
    // Single cluster provider is used when in-cluster or when the multi-cluster feature is disabled.
    // For this test suite we simulate an in-cluster deployment.
    s.originalIsInClusterConfig = InClusterConfig
    s.mockServer = test.NewMockServer()
    InClusterConfig = func() (*rest.Config, error) {
        return s.mockServer.Config(), nil
    }
    provider, err := NewProvider(&config.StaticConfig{})
    s.Require().NoError(err, "Expected no error creating provider for in-cluster deployment")
    s.provider = provider
}

func (s *ProviderSingleTestSuite) TearDownTest() {
    InClusterConfig = s.originalIsInClusterConfig
    if s.mockServer != nil {
        s.mockServer.Close()
    }
}

func (s *ProviderSingleTestSuite) TestType() {
    s.IsType(&singleClusterProvider{}, s.provider)
}

func (s *ProviderSingleTestSuite) TestWithNonOpenShiftCluster() {
    s.Run("IsOpenShift returns false", func() {
        inOpenShift := s.provider.IsOpenShift(s.T().Context())
        s.False(inOpenShift, "Expected IsOpenShift to return false")
    })
}

func (s *ProviderSingleTestSuite) TestWithOpenShiftCluster() {
    s.mockServer.Handle(&test.InOpenShiftHandler{})
    s.Run("IsOpenShift returns true", func() {
        inOpenShift := s.provider.IsOpenShift(s.T().Context())
        s.True(inOpenShift, "Expected IsOpenShift to return true")
    })
}

func (s *ProviderSingleTestSuite) TestVerifyToken() {
    s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
            w.Header().Set("Content-Type", "application/json")
            _, _ = w.Write([]byte(`
                {
                    "kind": "TokenReview",
                    "apiVersion": "authentication.k8s.io/v1",
                    "spec": {"token": "the-token"},
                    "status": {
                        "authenticated": true,
                        "user": {
                            "username": "test-user",
                            "groups": ["system:authenticated"]
                        },
                        "audiences": ["the-audience"]
                    }
                }`))
        }
    }))
    s.Run("VerifyToken returns UserInfo for empty target (default target)", func() {
        userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience")
        s.Require().NoError(err, "Expected no error from VerifyToken with empty target")
        s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target")
        s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username)
        s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
        s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target")
        s.Len(audiences, 1, "Expected exactly one audience from VerifyToken with empty target")
        s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
    })
    s.Run("VerifyToken returns error for non-empty target", func() {
        userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "non-empty", "the-token", "the-audience")
        s.Require().Error(err, "Expected error from VerifyToken with non-empty target")
        s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster")
        s.Nil(userInfo, "Expected no UserInfo from VerifyToken with non-empty target")
        s.Nil(audiences, "Expected no audiences from VerifyToken with non-empty target")
    })
}

func (s *ProviderSingleTestSuite) TestGetTargets() {
    s.Run("GetTargets returns single empty target", func() {
        targets, err := s.provider.GetTargets(s.T().Context())
        s.Require().NoError(err, "Expected no error from GetTargets")
        s.Len(targets, 1, "Expected 1 target from GetTargets")
        s.Contains(targets, "", "Expected empty target from GetTargets")
    })
}

func (s *ProviderSingleTestSuite) TestGetDerivedKubernetes() {
    s.Run("GetDerivedKubernetes returns Kubernetes for empty target", func() {
        k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "")
        s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty target")
        s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty target")
    })
    s.Run("GetDerivedKubernetes returns error for non-empty target", func() {
        k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "non-empty-target")
        s.Require().Error(err, "Expected error from GetDerivedKubernetes with non-empty target")
        s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster")
        s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with non-empty target")
    })
}

func (s *ProviderSingleTestSuite) TestGetDefaultTarget() {
    s.Run("GetDefaultTarget returns empty string", func() {
        s.Empty(s.provider.GetDefaultTarget(), "Expected empty string as default target")
    })
}

func (s *ProviderSingleTestSuite) TestGetTargetParameterName() {
    s.Empty(s.provider.GetTargetParameterName(), "Expected empty string as target parameter name")
}

func TestProviderSingle(t *testing.T) {
    suite.Run(t, new(ProviderSingleTestSuite))
}
170
pkg/kubernetes/provider_test.go
Normal file
@@ -0,0 +1,170 @@
package kubernetes
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/kubernetes-mcp-server/internal/test"
|
||||
"github.com/containers/kubernetes-mcp-server/pkg/config"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type BaseProviderSuite struct {
|
||||
suite.Suite
|
||||
originalProviderFactories map[string]ProviderFactory
|
||||
}
|
||||
|
||||
func (s *BaseProviderSuite) SetupTest() {
|
||||
s.originalProviderFactories = make(map[string]ProviderFactory)
|
||||
for k, v := range providerFactories {
|
||||
s.originalProviderFactories[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BaseProviderSuite) TearDownTest() {
|
||||
providerFactories = make(map[string]ProviderFactory)
|
||||
for k, v := range s.originalProviderFactories {
|
||||
providerFactories[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
type ProviderTestSuite struct {
|
||||
BaseProviderSuite
|
||||
originalEnv []string
|
||||
originalInClusterConfig func() (*rest.Config, error)
|
||||
mockServer *test.MockServer
|
||||
kubeconfigPath string
|
||||
}
|
||||
|
||||
func (s *ProviderTestSuite) SetupTest() {
|
||||
s.BaseProviderSuite.SetupTest()
|
||||
s.originalEnv = os.Environ()
|
||||
s.originalInClusterConfig = InClusterConfig
|
||||
s.mockServer = test.NewMockServer()
|
||||
s.kubeconfigPath = strings.ReplaceAll(s.mockServer.KubeconfigFile(s.T()), `\`, `\\`)
|
||||
}
|
||||
|
||||
func (s *ProviderTestSuite) TearDownTest() {
|
||||
s.BaseProviderSuite.TearDownTest()
|
||||
test.RestoreEnv(s.originalEnv)
|
||||
InClusterConfig = s.originalInClusterConfig
|
||||
if s.mockServer != nil {
|
||||
s.mockServer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ProviderTestSuite) TestNewProviderInCluster() {
|
||||
InClusterConfig = func() (*rest.Config, error) {
|
||||
return &rest.Config{}, nil
|
||||
}
|
||||
s.Run("With no cluster_provider_strategy, returns single-cluster provider", func() {
|
||||
cfg := test.Must(config.ReadToml([]byte{}))
|
||||
provider, err := NewProvider(cfg)
|
||||
s.Require().NoError(err, "Expected no error for in-cluster provider")
|
||||
s.NotNil(provider, "Expected provider instance")
|
||||
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
|
||||
})
|
||||
s.Run("With cluster_provider_strategy=in-cluster, returns single-cluster provider", func() {
|
||||
cfg := test.Must(config.ReadToml([]byte(`
|
||||
cluster_provider_strategy = "in-cluster"
|
||||
`)))
|
||||
provider, err := NewProvider(cfg)
|
||||
s.Require().NoError(err, "Expected no error for single-cluster strategy")
|
||||
s.NotNil(provider, "Expected provider instance")
|
||||
s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
|
||||
})
|
||||
s.Run("With cluster_provider_strategy=kubeconfig, returns error", func() {
|
||||
cfg := test.Must(config.ReadToml([]byte(`
|
||||
cluster_provider_strategy = "kubeconfig"
|
||||
`)))
|
||||
provider, err := NewProvider(cfg)
|
||||
s.Require().Error(err, "Expected error for kubeconfig strategy")
|
||||
        s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
        s.Nilf(provider, "Expected no provider instance, got %v", provider)
    })
    s.Run("With cluster_provider_strategy=kubeconfig and kubeconfig set to valid path, returns kubeconfig provider", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "kubeconfig"
            kubeconfig = "` + s.kubeconfigPath + `"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().NoError(err, "Expected no error for kubeconfig strategy")
        s.NotNil(provider, "Expected provider instance")
        s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
    })
    s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "i-do-not-exist"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().Error(err, "Expected error for non-existent strategy")
        s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
        s.Nilf(provider, "Expected no provider instance, got %v", provider)
    })
}

func (s *ProviderTestSuite) TestNewProviderLocal() {
    InClusterConfig = func() (*rest.Config, error) {
        return nil, rest.ErrNotInCluster
    }
    s.Require().NoError(os.Setenv("KUBECONFIG", s.kubeconfigPath))
    s.Run("With no cluster_provider_strategy, returns kubeconfig provider", func() {
        cfg := test.Must(config.ReadToml([]byte{}))
        provider, err := NewProvider(cfg)
        s.Require().NoError(err, "Expected no error for kubeconfig provider")
        s.NotNil(provider, "Expected provider instance")
        s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
    })
    s.Run("With cluster_provider_strategy=kubeconfig, returns kubeconfig provider", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "kubeconfig"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().NoError(err, "Expected no error for kubeconfig provider")
        s.NotNil(provider, "Expected provider instance")
        s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
    })
    s.Run("With cluster_provider_strategy=disabled, returns single-cluster provider", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "disabled"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().NoError(err, "Expected no error for disabled strategy")
        s.NotNil(provider, "Expected provider instance")
        s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
    })
    s.Run("With cluster_provider_strategy=in-cluster, returns error", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "in-cluster"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().Error(err, "Expected error for in-cluster strategy")
        s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
        s.Nilf(provider, "Expected no provider instance, got %v", provider)
    })
    s.Run("With cluster_provider_strategy=in-cluster and kubeconfig set to valid path, returns error", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            kubeconfig = "` + s.kubeconfigPath + `"
            cluster_provider_strategy = "in-cluster"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().Error(err, "Expected error for in-cluster strategy")
        s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error())
        s.Nilf(provider, "Expected no provider instance, got %v", provider)
    })
    s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
        cfg := test.Must(config.ReadToml([]byte(`
            cluster_provider_strategy = "i-do-not-exist"
        `)))
        provider, err := NewProvider(cfg)
        s.Require().Error(err, "Expected error for non-existent strategy")
        s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
        s.Nilf(provider, "Expected no provider instance, got %v", provider)
    })
}

func TestProvider(t *testing.T) {
    suite.Run(t, new(ProviderTestSuite))
}
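A minimal sketch of how a caller might drive the strategy selection these tests exercise. The import paths are assumptions based on this repository's layout (only config.ReadToml, NewProvider, Close, and GetTargetParameterName are taken from the diff itself):

package main

import (
    "fmt"

    "github.com/containers/kubernetes-mcp-server/pkg/config"     // assumed path
    "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" // assumed path
)

func main() {
    // "kubeconfig" resolves targets from kubeconfig contexts, "in-cluster"
    // requires an in-cluster deployment, and "disabled" yields the
    // single-cluster provider, mirroring the test expectations above.
    cfg, err := config.ReadToml([]byte(`cluster_provider_strategy = "kubeconfig"`))
    if err != nil {
        panic(err)
    }
    provider, err := kubernetes.NewProvider(cfg)
    if err != nil {
        panic(err)
    }
    defer provider.Close()
    fmt.Println("target parameter:", provider.GetTargetParameterName())
}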
@@ -2,39 +2,10 @@ package kubernetes

import (
    "context"
    "fmt"

    authenticationv1api "k8s.io/api/authentication/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
    tokenReviewClient, err := m.accessControlClientSet.TokenReview()
    if err != nil {
        return nil, nil, err
    }
    tokenReview := &authenticationv1api.TokenReview{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "authentication.k8s.io/v1",
            Kind:       "TokenReview",
        },
        Spec: authenticationv1api.TokenReviewSpec{
            Token:     token,
            Audiences: []string{audience},
        },
    }

    result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{})
    if err != nil {
        return nil, nil, fmt.Errorf("failed to create token review: %v", err)
    }

    if !result.Status.Authenticated {
        if result.Status.Error != "" {
            return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error)
        }
        return nil, nil, fmt.Errorf("token authentication failed")
    }

    return &result.Status.User, result.Status.Audiences, nil
type TokenVerifier interface {
    VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error)
}
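The removed Manager method above shows the TokenReview flow; the new TokenVerifier interface adds a cluster argument. A hedged sketch of how a multi-cluster implementation might delegate per target (the managers map and its population are assumptions, not shown in this diff, and it assumes a per-cluster Manager still exposes the removed TokenReview-based verification):

package kubernetes // sketch only: same package as the hunk above

import (
    "context"
    "fmt"

    authenticationv1api "k8s.io/api/authentication/v1"
)

// clusterTokenVerifier is a hypothetical TokenVerifier implementation that
// routes verification to a per-target Manager.
type clusterTokenVerifier struct {
    managers map[string]*Manager // assumption: one Manager per target cluster
}

func (v *clusterTokenVerifier) VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
    m, ok := v.managers[cluster]
    if !ok {
        return nil, nil, fmt.Errorf("no manager registered for cluster %q", cluster)
    }
    return m.VerifyToken(ctx, token, audience) // per-cluster TokenReview call
}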
@@ -219,7 +219,7 @@ func (c *mcpContext) withKubeConfig(rc *rest.Config) *clientcmdapi.Config {
    _ = clientcmd.WriteToFile(*fakeConfig, kubeConfig)
    _ = os.Setenv("KUBECONFIG", kubeConfig)
    if c.mcpServer != nil {
        if err := c.mcpServer.reloadKubernetesClient(); err != nil {
        if err := c.mcpServer.reloadKubernetesClusterProvider(); err != nil {
            panic(err)
        }
    }
@@ -436,7 +436,7 @@ func (s *BaseMcpSuite) SetupTest() {

func (s *BaseMcpSuite) TearDownTest() {
    if s.McpClient != nil {
        s.McpClient.Close()
        s.Close()
    }
    if s.mcpServer != nil {
        s.mcpServer.Close()
@@ -1,11 +1,13 @@
package mcp

import (
    "fmt"
    "testing"

    "github.com/mark3labs/mcp-go/mcp"
    "github.com/stretchr/testify/suite"
    "k8s.io/client-go/rest"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    v1 "k8s.io/client-go/tools/clientcmd/api/v1"
    "sigs.k8s.io/yaml"
@@ -22,7 +24,37 @@ func (s *ConfigurationSuite) SetupTest() {
    // Use mock server for predictable kubeconfig content
    mockServer := test.NewMockServer()
    s.T().Cleanup(mockServer.Close)
    s.Cfg.KubeConfig = mockServer.KubeconfigFile(s.T())
    kubeconfig := mockServer.Kubeconfig()
    for i := 0; i < 10; i++ {
        // Add multiple fake contexts to force configuration_contexts_list tool to appear
        // and test minification in configuration_view tool
        name := fmt.Sprintf("cluster-%d", i)
        kubeconfig.Contexts[name] = clientcmdapi.NewContext()
        kubeconfig.Clusters[name+"-cluster"] = clientcmdapi.NewCluster()
        kubeconfig.AuthInfos[name+"-auth"] = clientcmdapi.NewAuthInfo()
        kubeconfig.Contexts[name].Cluster = name + "-cluster"
        kubeconfig.Contexts[name].AuthInfo = name + "-auth"
    }
    s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
}

func (s *ConfigurationSuite) TestContextsList() {
    s.InitMcpClient()
    s.Run("configuration_contexts_list", func() {
        toolResult, err := s.CallTool("configuration_contexts_list", map[string]interface{}{})
        s.Run("returns contexts", func() {
            s.Nilf(err, "call tool failed %v", err)
        })
        s.Require().NotNil(toolResult, "Expected tool result from call")
        s.Lenf(toolResult.Content, 1, "invalid tool result content length %v", len(toolResult.Content))
        s.Run("contains context count", func() {
            s.Regexpf(`^Available Kubernetes contexts \(11 total`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool count result content %v", toolResult.Content[0].(mcp.TextContent).Text)
        })
        s.Run("contains default context name", func() {
            s.Regexpf(`^Available Kubernetes contexts \(\d+ total, default: fake-context\)`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
            s.Regexpf(`(?m)^\*fake-context -> http:\/\/127\.0\.0\.1:\d*$`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
        })
    })
}
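For context, the clientcmdapi types used in that loop compose as shown below; this is a self-contained sketch with illustrative names and paths (the test helpers ultimately write the config the same way):

package main

import (
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
    cfg := clientcmdapi.NewConfig()
    cfg.Clusters["demo-cluster"] = clientcmdapi.NewCluster()
    cfg.Clusters["demo-cluster"].Server = "https://127.0.0.1:6443" // illustrative endpoint
    cfg.AuthInfos["demo-auth"] = clientcmdapi.NewAuthInfo()
    ctx := clientcmdapi.NewContext()
    ctx.Cluster = "demo-cluster" // a context wires a cluster to an auth-info
    ctx.AuthInfo = "demo-auth"
    cfg.Contexts["demo"] = ctx
    cfg.CurrentContext = "demo"
    if err := clientcmd.WriteToFile(*cfg, "/tmp/demo-kubeconfig"); err != nil {
        panic(err)
    }
}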
func (s *ConfigurationSuite) TestConfigurationView() {
@@ -70,19 +102,23 @@ func (s *ConfigurationSuite) TestConfigurationView() {
            s.Nilf(err, "invalid tool result content %v", err)
        })
        s.Run("returns additional context info", func() {
            s.Lenf(decoded.Contexts, 2, "invalid context count, expected 2, got %v", len(decoded.Contexts))
            s.Equalf("additional-context", decoded.Contexts[0].Name, "additional-context not found: %v", decoded.Contexts)
            s.Equalf("additional-cluster", decoded.Contexts[0].Context.Cluster, "additional-cluster not found: %v", decoded.Contexts)
            s.Equalf("additional-auth", decoded.Contexts[0].Context.AuthInfo, "additional-auth not found: %v", decoded.Contexts)
            s.Equalf("fake-context", decoded.Contexts[1].Name, "fake-context not found: %v", decoded.Contexts)
            s.Lenf(decoded.Contexts, 11, "invalid context count, expected 11, got %v", len(decoded.Contexts))
            s.Equalf("cluster-0", decoded.Contexts[0].Name, "cluster-0 not found: %v", decoded.Contexts)
            s.Equalf("cluster-0-cluster", decoded.Contexts[0].Context.Cluster, "cluster-0-cluster not found: %v", decoded.Contexts)
            s.Equalf("cluster-0-auth", decoded.Contexts[0].Context.AuthInfo, "cluster-0-auth not found: %v", decoded.Contexts)
            s.Equalf("fake", decoded.Contexts[10].Context.Cluster, "fake not found: %v", decoded.Contexts)
            s.Equalf("fake", decoded.Contexts[10].Context.AuthInfo, "fake not found: %v", decoded.Contexts)
            s.Equalf("fake-context", decoded.Contexts[10].Name, "fake-context not found: %v", decoded.Contexts)
        })
        s.Run("returns cluster info", func() {
            s.Lenf(decoded.Clusters, 2, "invalid cluster count, expected 2, got %v", len(decoded.Clusters))
            s.Equalf("additional-cluster", decoded.Clusters[0].Name, "additional-cluster not found: %v", decoded.Clusters)
            s.Lenf(decoded.Clusters, 11, "invalid cluster count, expected 11, got %v", len(decoded.Clusters))
            s.Equalf("cluster-0-cluster", decoded.Clusters[0].Name, "cluster-0-cluster not found: %v", decoded.Clusters)
            s.Equalf("fake", decoded.Clusters[10].Name, "fake not found: %v", decoded.Clusters)
        })
        s.Run("configuration_view with minified=false returns auth info", func() {
            s.Lenf(decoded.AuthInfos, 2, "invalid auth info count, expected 2, got %v", len(decoded.AuthInfos))
            s.Equalf("additional-auth", decoded.AuthInfos[0].Name, "additional-auth not found: %v", decoded.AuthInfos)
            s.Lenf(decoded.AuthInfos, 11, "invalid auth info count, expected 11, got %v", len(decoded.AuthInfos))
            s.Equalf("cluster-0-auth", decoded.AuthInfos[0].Name, "cluster-0-auth not found: %v", decoded.AuthInfos)
            s.Equalf("fake", decoded.AuthInfos[10].Name, "fake not found: %v", decoded.AuthInfos)
        })
    })
}
@@ -109,11 +145,11 @@ func (s *ConfigurationSuite) TestConfigurationViewInCluster() {
        s.Nilf(err, "invalid tool result content %v", err)
    })
    s.Run("returns current-context", func() {
        s.Equalf("context", decoded.CurrentContext, "context not found: %v", decoded.CurrentContext)
        s.Equalf("in-cluster", decoded.CurrentContext, "context not found: %v", decoded.CurrentContext)
    })
    s.Run("returns context info", func() {
        s.Lenf(decoded.Contexts, 1, "invalid context count, expected 1, got %v", len(decoded.Contexts))
        s.Equalf("context", decoded.Contexts[0].Name, "context not found: %v", decoded.Contexts)
        s.Equalf("in-cluster", decoded.Contexts[0].Name, "context not found: %v", decoded.Contexts)
        s.Equalf("cluster", decoded.Contexts[0].Context.Cluster, "cluster not found: %v", decoded.Contexts)
        s.Equalf("user", decoded.Contexts[0].Context.AuthInfo, "user not found: %v", decoded.Contexts)
    })
@@ -1,6 +1,7 @@
package mcp

import (
    "strings"
    "testing"

    "github.com/BurntSushi/toml"
@@ -9,6 +10,7 @@ import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "sigs.k8s.io/yaml"
)

type EventsSuite struct {
@@ -24,7 +26,7 @@ func (s *EventsSuite) TestEventsList() {
            s.Falsef(toolResult.IsError, "call tool failed")
        })
        s.Run("returns no events message", func() {
            s.Equal("No events found", toolResult.Content[0].(mcp.TextContent).Text)
            s.Equal("# No events found", toolResult.Content[0].(mcp.TextContent).Text)
        })
    })
    s.Run("events_list (with events)", func() {
@@ -50,8 +52,16 @@ func (s *EventsSuite) TestEventsList() {
            s.Nilf(err, "call tool failed %v", err)
            s.Falsef(toolResult.IsError, "call tool failed")
        })
        s.Run("has yaml comment indicating output format", func() {
            s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "# The following events (YAML format) were found:\n"), "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
        })
        var decoded []v1.Event
        err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
        s.Run("has yaml content", func() {
            s.Nilf(err, "unmarshal failed %v", err)
        })
        s.Run("returns all events", func() {
            s.Equalf("The following events (YAML format) were found:\n"+
            s.YAMLEqf(""+
                "- InvolvedObject:\n"+
                " Kind: Pod\n"+
                " Name: a-pod\n"+
@@ -83,8 +93,16 @@ func (s *EventsSuite) TestEventsList() {
            s.Nilf(err, "call tool failed %v", err)
            s.Falsef(toolResult.IsError, "call tool failed")
        })
        s.Run("has yaml comment indicating output format", func() {
            s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "# The following events (YAML format) were found:\n"), "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
        })
        var decoded []v1.Event
        err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
        s.Run("has yaml content", func() {
            s.Nilf(err, "unmarshal failed %v", err)
        })
        s.Run("returns events from namespace", func() {
            s.Equalf("The following events (YAML format) were found:\n"+
            s.YAMLEqf(""+
                "- InvolvedObject:\n"+
                " Kind: Pod\n"+
                " Name: a-pod\n"+
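These tests unmarshal the tool output even though it now starts with a "# ..." line; that works because YAML treats lines beginning with '#' as comments, which sigs.k8s.io/yaml skips. A tiny self-contained check of that behavior:

package main

import (
    "fmt"

    "sigs.k8s.io/yaml"
)

func main() {
    text := "# The following events (YAML format) were found:\n- Reason: Created\n"
    var decoded []map[string]interface{}
    if err := yaml.Unmarshal([]byte(text), &decoded); err != nil {
        panic(err)
    }
    fmt.Println(decoded[0]["Reason"]) // prints "Created"; the comment line is ignored
}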
@@ -39,10 +39,13 @@ func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.S
        m3labTool.RawInputSchema = schema
    }
    m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) {
        k, err := s.k.Derived(ctx)
        // get the correct derived Kubernetes client for the target specified in the request
        cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget())
        k, err := s.p.GetDerivedKubernetes(ctx, cluster)
        if err != nil {
            return nil, err
        }

        result, err := tool.Handler(api.ToolHandlerParams{
            Context:    ctx,
            Kubernetes: k,
@@ -48,16 +48,16 @@ func (c *Configuration) ListOutput() output.Output {
}

func (c *Configuration) isToolApplicable(tool api.ServerTool) bool {
    if c.StaticConfig.ReadOnly && !ptr.Deref(tool.Tool.Annotations.ReadOnlyHint, false) {
    if c.ReadOnly && !ptr.Deref(tool.Tool.Annotations.ReadOnlyHint, false) {
        return false
    }
    if c.StaticConfig.DisableDestructive && ptr.Deref(tool.Tool.Annotations.DestructiveHint, false) {
    if c.DisableDestructive && ptr.Deref(tool.Tool.Annotations.DestructiveHint, false) {
        return false
    }
    if c.StaticConfig.EnabledTools != nil && !slices.Contains(c.StaticConfig.EnabledTools, tool.Tool.Name) {
    if c.EnabledTools != nil && !slices.Contains(c.EnabledTools, tool.Tool.Name) {
        return false
    }
    if c.StaticConfig.DisabledTools != nil && slices.Contains(c.StaticConfig.DisabledTools, tool.Tool.Name) {
    if c.DisabledTools != nil && slices.Contains(c.DisabledTools, tool.Tool.Name) {
        return false
    }
    return true
@@ -67,7 +67,7 @@ type Server struct {
    configuration *Configuration
    server        *server.MCPServer
    enabledTools  []string
    k *internalk8s.Manager
    p internalk8s.Provider
}

func NewServer(configuration Configuration) (*Server, error) {
@@ -79,7 +79,7 @@ func NewServer(configuration Configuration) (*Server, error) {
        server.WithLogging(),
        server.WithToolHandlerMiddleware(toolCallLoggingMiddleware),
    )
    if configuration.StaticConfig.RequireOAuth && false { // TODO: Disabled scope auth validation for now
    if configuration.RequireOAuth && false { // TODO: Disabled scope auth validation for now
        serverOptions = append(serverOptions, server.WithToolHandlerMiddleware(toolScopedAuthorizationMiddleware))
    }

@@ -91,26 +91,52 @@ func NewServer(configuration Configuration) (*Server, error) {
            serverOptions...,
        ),
    }
    if err := s.reloadKubernetesClient(); err != nil {
    if err := s.reloadKubernetesClusterProvider(); err != nil {
        return nil, err
    }
    s.k.WatchKubeConfig(s.reloadKubernetesClient)
    s.p.WatchTargets(s.reloadKubernetesClusterProvider)

    return s, nil
}

func (s *Server) reloadKubernetesClient() error {
    k, err := internalk8s.NewManager(s.configuration.StaticConfig)
func (s *Server) reloadKubernetesClusterProvider() error {
    ctx := context.Background()
    p, err := internalk8s.NewProvider(s.configuration.StaticConfig)
    if err != nil {
        return err
    }
    s.k = k

    // close the old provider
    if s.p != nil {
        s.p.Close()
    }

    s.p = p

    targets, err := p.GetTargets(ctx)
    if err != nil {
        return err
    }

    filter := CompositeFilter(
        s.configuration.isToolApplicable,
        ShouldIncludeTargetListTool(p.GetTargetParameterName(), targets),
    )

    mutator := WithTargetParameter(
        p.GetDefaultTarget(),
        p.GetTargetParameterName(),
        targets,
    )

    applicableTools := make([]api.ServerTool, 0)
    for _, toolset := range s.configuration.Toolsets() {
        for _, tool := range toolset.GetTools(s.k) {
            if !s.configuration.isToolApplicable(tool) {
        for _, tool := range toolset.GetTools(p) {
            tool := mutator(tool)
            if !filter(tool) {
                continue
            }

            applicableTools = append(applicableTools, tool)
            s.enabledTools = append(s.enabledTools, tool.Tool.Name)
        }
@@ -119,7 +145,11 @@ func (s *Server) reloadKubernetesClient() error {
    if err != nil {
        return fmt.Errorf("failed to convert tools: %v", err)
    }

    s.server.SetTools(m3labsServerTools...)

    // start new watch
    s.p.WatchTargets(s.reloadKubernetesClusterProvider)
    return nil
}
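The reload path above composes a mutator and a filter over every tool (mutate first, then filter). A self-contained sketch of that composition pattern, using simplified stand-in types rather than the repo's actual api package:

package main

import "fmt"

type Tool struct{ Name string }

type Filter func(Tool) bool
type Mutator func(Tool) Tool

// CompositeFilter accepts a tool only if every child filter does, which is
// how the read-only, destructive, and enabled/disabled checks stack.
func CompositeFilter(filters ...Filter) Filter {
    return func(t Tool) bool {
        for _, f := range filters {
            if !f(t) {
                return false
            }
        }
        return true
    }
}

func main() {
    // Mirror the reload loop: mutate first, then filter the mutated tool.
    addPrefix := Mutator(func(t Tool) Tool { t.Name = "k8s_" + t.Name; return t })
    keep := CompositeFilter(
        func(t Tool) bool { return t.Name != "k8s_pods_delete" }, // e.g. read-only mode
    )
    for _, t := range []Tool{{Name: "pods_list"}, {Name: "pods_delete"}} {
        t = addPrefix(t)
        fmt.Printf("%s included=%v\n", t.Name, keep(t))
    }
}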
@@ -146,20 +176,20 @@ func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer
}

// KubernetesApiVerifyToken verifies the given token with the audience by
// sending a TokenReview request to the API Server.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, token string, audience string) (*authenticationapiv1.UserInfo, []string, error) {
    if s.k == nil {
        return nil, nil, fmt.Errorf("kubernetes manager is not initialized")
// sending a TokenReview request to the API Server for the specified cluster.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
    if s.p == nil {
        return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
    }
    return s.k.VerifyToken(ctx, token, audience)
    return s.p.VerifyToken(ctx, cluster, token, audience)
}

// GetKubernetesAPIServerHost returns the Kubernetes API server host from the configuration.
func (s *Server) GetKubernetesAPIServerHost() string {
    if s.k == nil {
        return ""
// GetTargetParameterName returns the parameter name used for target identification in MCP requests
func (s *Server) GetTargetParameterName() string {
    if s.p == nil {
        return "" // fallback for uninitialized provider
    }
    return s.k.GetAPIServerHost()
    return s.p.GetTargetParameterName()
}

func (s *Server) GetEnabledTools() []string {
@@ -167,8 +197,8 @@ func (s *Server) GetEnabledTools() []string {
}

func (s *Server) Close() {
    if s.k != nil {
        s.k.Close()
    if s.p != nil {
        s.p.Close()
    }
}
680 pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json vendored Normal file
@@ -0,0 +1,680 @@
[
  {
    "annotations": {
      "title": "Configuration: Contexts List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": true,
      "openWorldHint": false
    },
    "description": "List all available context names and associated server urls from the kubeconfig file",
    "inputSchema": {
      "type": "object"
    },
    "name": "configuration_contexts_list"
  },
  {
    "annotations": {
      "title": "Configuration: View",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
    "inputSchema": {
      "type": "object",
      "properties": {
        "minified": {
          "description": "Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)",
          "type": "boolean"
        }
      }
    },
    "name": "configuration_view"
  },
  {
    "annotations": {
      "title": "Events: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes events in the current cluster from all namespaces",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces",
          "type": "string"
        }
      }
    },
    "name": "events_list"
  },
  {
    "annotations": {
      "title": "Helm: Install",
      "readOnlyHint": false,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Install a Helm chart in the current or provided namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "chart": {
          "description": "Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Helm release (Optional, random name if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to install the Helm chart in (Optional, current namespace if not provided)",
          "type": "string"
        },
        "values": {
          "description": "Values to pass to the Helm chart (Optional)",
          "type": "object"
        }
      },
      "required": [
        "chart"
      ]
    },
    "name": "helm_install"
  },
  {
    "annotations": {
      "title": "Helm: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "all_namespaces": {
          "description": "If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional)",
          "type": "boolean"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to list Helm releases from (Optional, all namespaces if not provided)",
          "type": "string"
        }
      }
    },
    "name": "helm_list"
  },
  {
    "annotations": {
      "title": "Helm: Uninstall",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Uninstall a Helm release in the current or provided namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Helm release to uninstall",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to uninstall the Helm release from (Optional, current namespace if not provided)",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "helm_uninstall"
  },
  {
    "annotations": {
      "title": "Namespaces: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes namespaces in the current cluster",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        }
      }
    },
    "name": "namespaces_list"
  },
  {
    "annotations": {
      "title": "Pods: Delete",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Delete a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to delete",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to delete the Pod from",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_delete"
  },
  {
    "annotations": {
      "title": "Pods: Exec",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
    "inputSchema": {
      "type": "object",
      "properties": {
        "command": {
          "description": "Command to execute in the Pod container. The first item is the command to be run, and the rest are the arguments to that command. Example: [\"ls\", \"-l\", \"/tmp\"]",
          "items": {
            "type": "string"
          },
          "type": "array"
        },
        "container": {
          "description": "Name of the Pod container where the command will be executed (Optional)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod where the command will be executed",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace of the Pod where the command will be executed",
          "type": "string"
        }
      },
      "required": [
        "name",
        "command"
      ]
    },
    "name": "pods_exec"
  },
  {
    "annotations": {
      "title": "Pods: Get",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pod from",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_get"
  },
  {
    "annotations": {
      "title": "Pods: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes pods in the current cluster from all namespaces",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "labelSelector": {
          "description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        }
      }
    },
    "name": "pods_list"
  },
  {
    "annotations": {
      "title": "Pods: List in Namespace",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes pods in the specified namespace in the current cluster",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "labelSelector": {
          "description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to list pods from",
          "type": "string"
        }
      },
      "required": [
        "namespace"
      ]
    },
    "name": "pods_list_in_namespace"
  },
  {
    "annotations": {
      "title": "Pods: Log",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "container": {
          "description": "Name of the Pod container to get the logs from (Optional)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to get the logs from",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pod logs from",
          "type": "string"
        },
        "previous": {
          "description": "Return previous terminated container logs (Optional)",
          "type": "boolean"
        },
        "tail": {
          "default": 100,
          "description": "Number of lines to retrieve from the end of the logs (Optional, default: 100)",
          "minimum": 0,
          "type": "integer"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_log"
  },
  {
    "annotations": {
      "title": "Pods: Run",
      "readOnlyHint": false,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "image": {
          "description": "Container Image to run in the Pod",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod (Optional, random name if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to run the Pod in",
          "type": "string"
        },
        "port": {
          "description": "TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided)",
          "type": "number"
        }
      },
      "required": [
        "image"
      ]
    },
    "name": "pods_run"
  },
  {
    "annotations": {
      "title": "Pods: Top",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "all_namespaces": {
          "default": true,
          "description": "If true, list the resource consumption for all Pods in all namespaces. If false, list the resource consumption for Pods in the provided namespace or the current namespace",
          "type": "boolean"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "label_selector": {
          "description": "Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided)",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false)",
          "type": "string"
        }
      }
    },
    "name": "pods_top"
  },
  {
    "annotations": {
      "title": "Resources: Create or Update",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "resource": {
          "description": "A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec",
          "type": "string"
        }
      },
      "required": [
        "resource"
      ]
    },
    "name": "resources_create_or_update"
  },
  {
    "annotations": {
      "title": "Resources: Delete",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "apiVersion": {
          "description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "kind": {
          "description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
          "type": "string"
        },
        "name": {
          "description": "Name of the resource",
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will delete resource from configured namespace",
          "type": "string"
        }
      },
      "required": [
        "apiVersion",
        "kind",
        "name"
      ]
    },
    "name": "resources_delete"
  },
  {
    "annotations": {
      "title": "Resources: Get",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "apiVersion": {
          "description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "kind": {
          "description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
          "type": "string"
        },
        "name": {
          "description": "Name of the resource",
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will get resource from configured namespace",
          "type": "string"
        }
      },
      "required": [
        "apiVersion",
        "kind",
        "name"
      ]
    },
    "name": "resources_get"
  },
  {
    "annotations": {
      "title": "Resources: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "apiVersion": {
          "description": "apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "enum": [
            "extra-cluster",
            "fake-context"
          ],
          "type": "string"
        },
        "kind": {
          "description": "kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress)",
          "type": "string"
        },
        "labelSelector": {
          "description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces",
          "type": "string"
        }
      },
      "required": [
        "apiVersion",
        "kind"
      ]
    },
    "name": "resources_list"
  }
]
612 pkg/mcp/testdata/toolsets-full-tools-multicluster.json vendored Normal file
@@ -0,0 +1,612 @@
[
  {
    "annotations": {
      "title": "Configuration: Contexts List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": true,
      "openWorldHint": false
    },
    "description": "List all available context names and associated server urls from the kubeconfig file",
    "inputSchema": {
      "type": "object"
    },
    "name": "configuration_contexts_list"
  },
  {
    "annotations": {
      "title": "Configuration: View",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get the current Kubernetes configuration content as a kubeconfig YAML",
    "inputSchema": {
      "type": "object",
      "properties": {
        "minified": {
          "description": "Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true)",
          "type": "boolean"
        }
      }
    },
    "name": "configuration_view"
  },
  {
    "annotations": {
      "title": "Events: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes events in the current cluster from all namespaces",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces",
          "type": "string"
        }
      }
    },
    "name": "events_list"
  },
  {
    "annotations": {
      "title": "Helm: Install",
      "readOnlyHint": false,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Install a Helm chart in the current or provided namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "chart": {
          "description": "Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Helm release (Optional, random name if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to install the Helm chart in (Optional, current namespace if not provided)",
          "type": "string"
        },
        "values": {
          "description": "Values to pass to the Helm chart (Optional)",
          "type": "object"
        }
      },
      "required": [
        "chart"
      ]
    },
    "name": "helm_install"
  },
  {
    "annotations": {
      "title": "Helm: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Helm releases in the current or provided namespace (or in all namespaces if specified)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "all_namespaces": {
          "description": "If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional)",
          "type": "boolean"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to list Helm releases from (Optional, all namespaces if not provided)",
          "type": "string"
        }
      }
    },
    "name": "helm_list"
  },
  {
    "annotations": {
      "title": "Helm: Uninstall",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Uninstall a Helm release in the current or provided namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Helm release to uninstall",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to uninstall the Helm release from (Optional, current namespace if not provided)",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "helm_uninstall"
  },
  {
    "annotations": {
      "title": "Namespaces: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes namespaces in the current cluster",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        }
      }
    },
    "name": "namespaces_list"
  },
  {
    "annotations": {
      "title": "Pods: Delete",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Delete a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to delete",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to delete the Pod from",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_delete"
  },
  {
    "annotations": {
      "title": "Pods: Exec",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command",
    "inputSchema": {
      "type": "object",
      "properties": {
        "command": {
          "description": "Command to execute in the Pod container. The first item is the command to be run, and the rest are the arguments to that command. Example: [\"ls\", \"-l\", \"/tmp\"]",
          "items": {
            "type": "string"
          },
          "type": "array"
        },
        "container": {
          "description": "Name of the Pod container where the command will be executed (Optional)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod where the command will be executed",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace of the Pod where the command will be executed",
          "type": "string"
        }
      },
      "required": [
        "name",
        "command"
      ]
    },
    "name": "pods_exec"
  },
  {
    "annotations": {
      "title": "Pods: Get",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pod from",
          "type": "string"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_get"
  },
  {
    "annotations": {
      "title": "Pods: List",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes pods in the current cluster from all namespaces",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "labelSelector": {
          "description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        }
      }
    },
    "name": "pods_list"
  },
  {
    "annotations": {
      "title": "Pods: List in Namespace",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "List all the Kubernetes pods in the specified namespace in the current cluster",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "labelSelector": {
          "description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to list pods from",
          "type": "string"
        }
      },
      "required": [
        "namespace"
      ]
    },
    "name": "pods_list_in_namespace"
  },
  {
    "annotations": {
      "title": "Pods: Log",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "container": {
          "description": "Name of the Pod container to get the logs from (Optional)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to get the logs from",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pod logs from",
          "type": "string"
        },
        "previous": {
          "description": "Return previous terminated container logs (Optional)",
          "type": "boolean"
        },
        "tail": {
          "default": 100,
          "description": "Number of lines to retrieve from the end of the logs (Optional, default: 100)",
          "minimum": 0,
          "type": "integer"
        }
      },
      "required": [
        "name"
      ]
    },
    "name": "pods_log"
  },
  {
    "annotations": {
      "title": "Pods: Run",
      "readOnlyHint": false,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "image": {
          "description": "Container Image to run in the Pod",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod (Optional, random name if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to run the Pod in",
          "type": "string"
        },
        "port": {
          "description": "TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided)",
          "type": "number"
        }
      },
      "required": [
        "image"
      ]
    },
    "name": "pods_run"
  },
  {
    "annotations": {
      "title": "Pods: Top",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace",
    "inputSchema": {
      "type": "object",
      "properties": {
        "all_namespaces": {
          "default": true,
          "description": "If true, list the resource consumption for all Pods in all namespaces. If false, list the resource consumption for Pods in the provided namespace or the current namespace",
          "type": "boolean"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "label_selector": {
          "description": "Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided)",
          "pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
          "type": "string"
        },
        "name": {
          "description": "Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided)",
          "type": "string"
        },
        "namespace": {
          "description": "Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false)",
          "type": "string"
        }
      }
    },
    "name": "pods_top"
  },
  {
    "annotations": {
      "title": "Resources: Create or Update",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "resource": {
          "description": "A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec",
          "type": "string"
        }
      },
      "required": [
        "resource"
      ]
    },
    "name": "resources_create_or_update"
  },
  {
    "annotations": {
      "title": "Resources: Delete",
      "readOnlyHint": false,
      "destructiveHint": true,
      "idempotentHint": true,
      "openWorldHint": true
    },
    "description": "Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "apiVersion": {
          "description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "kind": {
          "description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
          "type": "string"
        },
        "name": {
          "description": "Name of the resource",
          "type": "string"
        },
        "namespace": {
          "description": "Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will delete resource from configured namespace",
          "type": "string"
        }
      },
      "required": [
        "apiVersion",
        "kind",
        "name"
      ]
    },
    "name": "resources_delete"
  },
  {
    "annotations": {
      "title": "Resources: Get",
      "readOnlyHint": true,
      "destructiveHint": false,
      "idempotentHint": false,
      "openWorldHint": true
    },
    "description": "Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
    "inputSchema": {
      "type": "object",
      "properties": {
        "apiVersion": {
          "description": "apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
          "type": "string"
        },
        "context": {
          "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
          "type": "string"
        },
        "kind": {
          "description": "kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress)",
          "type": "string"
        },
        "name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will get resource from configured namespace",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind",
|
||||
"name"
|
||||
]
|
||||
},
|
||||
"name": "resources_get"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"title": "Resources: List",
|
||||
"readOnlyHint": true,
|
||||
"destructiveHint": false,
|
||||
"idempotentHint": false,
|
||||
"openWorldHint": true
|
||||
},
|
||||
"description": "List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector\n(common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress)",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1)",
|
||||
"type": "string"
|
||||
},
|
||||
"context": {
|
||||
"description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress)",
|
||||
"type": "string"
|
||||
},
|
||||
"labelSelector": {
|
||||
"description": "Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label",
|
||||
"pattern": "([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]",
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"description": "Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"apiVersion",
|
||||
"kind"
|
||||
]
|
||||
},
|
||||
"name": "resources_list"
|
||||
}
|
||||
]
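The array above is the tail of the expected ListTools metadata fixture that the toolset tests compare against with s.JSONEq. A minimal, self-contained sketch of how such a fixture can be sanity-checked on its own (the file name and the field subset are illustrative assumptions; only the Go standard library is used):

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// toolMeta captures only the fixture fields this check cares about; the
// remaining keys (annotations, description) are simply ignored by encoding/json.
type toolMeta struct {
    Name        string          `json:"name"`
    InputSchema json.RawMessage `json:"inputSchema"`
}

func main() {
    // Hypothetical fixture path; the tests read similar files from testdata/.
    data, err := os.ReadFile("testdata/toolsets-full-tools.json")
    if err != nil {
        panic(err)
    }
    var tools []toolMeta
    if err := json.Unmarshal(data, &tools); err != nil {
        panic(err)
    }
    for _, t := range tools {
        if t.Name == "" || len(t.InputSchema) == 0 {
            fmt.Println("incomplete tool entry:", t.Name)
        }
    }
    fmt.Printf("checked %d tool entries\n", len(tools))
}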
41
pkg/mcp/tool_filter.go
Normal file
@@ -0,0 +1,41 @@
package mcp

import (
    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

// ToolFilter is a function that takes a ServerTool and returns a boolean indicating whether to include the tool
type ToolFilter func(tool api.ServerTool) bool

func CompositeFilter(filters ...ToolFilter) ToolFilter {
    return func(tool api.ServerTool) bool {
        for _, f := range filters {
            if !f(tool) {
                return false
            }
        }

        return true
    }
}

func ShouldIncludeTargetListTool(targetName string, targets []string) ToolFilter {
    return func(tool api.ServerTool) bool {
        if !tool.IsTargetListProvider() {
            return true
        }
        if len(targets) <= 1 {
            // there is no need to provide a tool to list the single available target
            return false
        }

        // TODO: this check should be removed or made more generic when we have other target list providers
        if tool.Tool.Name == "configuration_contexts_list" && targetName != kubernetes.KubeConfigTargetParameterName {
            // let's not include configuration_contexts_list if we aren't targeting contexts in our Provider
            return false
        }

        return true
    }
}
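A minimal usage sketch for the filter API above (filterTools is a hypothetical helper, not part of the change; CompositeFilter and ShouldIncludeTargetListTool are the functions defined in this file, so the snippet belongs in the same package and needs no imports beyond pkg/api):

// filterTools keeps only the tools accepted by every supplied filter, e.g.
//
//	kept := filterTools(allTools, ShouldIncludeTargetListTool(targetName, targets))
func filterTools(tools []api.ServerTool, filters ...ToolFilter) []api.ServerTool {
    filter := CompositeFilter(filters...)
    kept := make([]api.ServerTool, 0, len(tools))
    for _, t := range tools {
        if filter(t) {
            kept = append(kept, t)
        }
    }
    return kept
}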
84
pkg/mcp/tool_filter_test.go
Normal file
@@ -0,0 +1,84 @@
package mcp

import (
    "testing"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/stretchr/testify/suite"
    "k8s.io/utils/ptr"
)

type ToolFilterSuite struct {
    suite.Suite
}

func (s *ToolFilterSuite) TestToolFilterType() {
    s.Run("ToolFilter type can be used as function", func() {
        var filter ToolFilter = func(tool api.ServerTool) bool {
            return tool.Tool.Name == "included"
        }
        s.Run("returns true for included tool", func() {
            tool := api.ServerTool{Tool: api.Tool{Name: "included"}}
            s.True(filter(tool))
        })
        s.Run("returns false for excluded tool", func() {
            tool := api.ServerTool{Tool: api.Tool{Name: "excluded"}}
            s.False(filter(tool))
        })
    })
}

func (s *ToolFilterSuite) TestCompositeFilter() {
    s.Run("returns true if all filters return true", func() {
        filter := CompositeFilter(
            func(tool api.ServerTool) bool { return true },
            func(tool api.ServerTool) bool { return true },
        )
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}}
        s.True(filter(tool))
    })
    s.Run("returns false if any filter returns false", func() {
        filter := CompositeFilter(
            func(tool api.ServerTool) bool { return true },
            func(tool api.ServerTool) bool { return false },
        )
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}}
        s.False(filter(tool))
    })
}

func (s *ToolFilterSuite) TestShouldIncludeTargetListTool() {
    s.Run("non-target-list-provider tools: returns true", func() {
        filter := ShouldIncludeTargetListTool("any", []string{"a", "b", "c", "d", "e", "f"})
        tool := api.ServerTool{Tool: api.Tool{Name: "test"}, TargetListProvider: ptr.To(false)}
        s.True(filter(tool))
    })
    s.Run("target-list-provider tools", func() {
        s.Run("with targets == 1: returns false", func() {
            filter := ShouldIncludeTargetListTool("any", []string{"1"})
            tool := api.ServerTool{Tool: api.Tool{Name: "test"}, TargetListProvider: ptr.To(true)}
            s.False(filter(tool))
        })
        s.Run("with targets == 1", func() {
            s.Run("and tool is configuration_contexts_list and targetName is not context: returns false", func() {
                filter := ShouldIncludeTargetListTool("not_context", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "configuration_contexts_list"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
            s.Run("and tool is configuration_contexts_list and targetName is context: returns false", func() {
                filter := ShouldIncludeTargetListTool("context", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "configuration_contexts_list"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
            s.Run("and tool is not configuration_contexts_list: returns false", func() {
                filter := ShouldIncludeTargetListTool("any", []string{"1"})
                tool := api.ServerTool{Tool: api.Tool{Name: "other_tool"}, TargetListProvider: ptr.To(true)}
                s.False(filter(tool))
            })
        })
    })
}

func TestToolFilter(t *testing.T) {
    suite.Run(t, new(ToolFilterSuite))
}
64
pkg/mcp/tool_mutator.go
Normal file
@@ -0,0 +1,64 @@
package mcp

import (
    "fmt"
    "sort"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/google/jsonschema-go/jsonschema"
)

type ToolMutator func(tool api.ServerTool) api.ServerTool

const maxTargetsInEnum = 5 // TODO: test and validate that this is a reasonable cutoff

// WithTargetParameter adds a target selection parameter to the tool's input schema if the tool is cluster-aware
func WithTargetParameter(defaultCluster, targetParameterName string, targets []string) ToolMutator {
    return func(tool api.ServerTool) api.ServerTool {
        if !tool.IsClusterAware() {
            return tool
        }

        if tool.Tool.InputSchema == nil {
            tool.Tool.InputSchema = &jsonschema.Schema{Type: "object"}
        }

        if tool.Tool.InputSchema.Properties == nil {
            tool.Tool.InputSchema.Properties = make(map[string]*jsonschema.Schema)
        }

        if len(targets) > 1 {
            tool.Tool.InputSchema.Properties[targetParameterName] = createTargetProperty(
                defaultCluster,
                targetParameterName,
                targets,
            )
        }

        return tool
    }
}

func createTargetProperty(defaultCluster, targetName string, targets []string) *jsonschema.Schema {
    baseSchema := &jsonschema.Schema{
        Type: "string",
        Description: fmt.Sprintf(
            "Optional parameter selecting which %s to run the tool in. Defaults to %s if not set",
            targetName,
            defaultCluster,
        ),
    }

    if len(targets) <= maxTargetsInEnum {
        // Sort targets to ensure consistent enum ordering
        sort.Strings(targets)

        enumValues := make([]any, 0, len(targets))
        for _, c := range targets {
            enumValues = append(enumValues, c)
        }
        baseSchema.Enum = enumValues
    }

    return baseSchema
}
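A matching usage sketch for the mutator above (applyMutators and the target names are hypothetical; WithTargetParameter is the function defined in this file):

// applyMutators runs every mutator over every tool; for example, to add a
// "cluster" parameter (with an enum, since only two hypothetical targets are
// passed) to all cluster-aware tools:
//
//	tools = applyMutators(tools, WithTargetParameter("minikube", "cluster", []string{"minikube", "prod"}))
func applyMutators(tools []api.ServerTool, mutators ...ToolMutator) []api.ServerTool {
    out := make([]api.ServerTool, 0, len(tools))
    for _, t := range tools {
        for _, m := range mutators {
            t = m(t)
        }
        out = append(out, t)
    }
    return out
}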
347
pkg/mcp/tool_mutator_test.go
Normal file
@@ -0,0 +1,347 @@
package mcp

import (
    "testing"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/google/jsonschema-go/jsonschema"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
    "k8s.io/utils/ptr"
)

// createTestTool creates a basic ServerTool for testing
func createTestTool(name string) api.ServerTool {
    return api.ServerTool{
        Tool: api.Tool{
            Name:        name,
            Description: "A test tool",
            InputSchema: &jsonschema.Schema{
                Type:       "object",
                Properties: make(map[string]*jsonschema.Schema),
            },
        },
    }
}

// createTestToolWithNilSchema creates a ServerTool with nil InputSchema for testing
func createTestToolWithNilSchema(name string) api.ServerTool {
    return api.ServerTool{
        Tool: api.Tool{
            Name:        name,
            Description: "A test tool",
            InputSchema: nil,
        },
    }
}

// createTestToolWithNilProperties creates a ServerTool with nil Properties for testing
func createTestToolWithNilProperties(name string) api.ServerTool {
    return api.ServerTool{
        Tool: api.Tool{
            Name:        name,
            Description: "A test tool",
            InputSchema: &jsonschema.Schema{
                Type:       "object",
                Properties: nil,
            },
        },
    }
}

// createTestToolWithExistingProperties creates a ServerTool with existing properties for testing
func createTestToolWithExistingProperties(name string) api.ServerTool {
    return api.ServerTool{
        Tool: api.Tool{
            Name:        name,
            Description: "A test tool",
            InputSchema: &jsonschema.Schema{
                Type: "object",
                Properties: map[string]*jsonschema.Schema{
                    "existing-prop": {Type: "string"},
                },
            },
        },
    }
}

func TestWithClusterParameter(t *testing.T) {
    tests := []struct {
        name                string
        defaultCluster      string
        targetParameterName string
        clusters            []string
        toolName            string
        toolFactory         func(string) api.ServerTool
        expectCluster       bool
        expectEnum          bool
        enumCount           int
    }{
        {
            name:           "adds cluster parameter when multiple clusters provided",
            defaultCluster: "default-cluster",
            clusters:       []string{"cluster1", "cluster2", "cluster3"},
            toolName:       "test-tool",
            toolFactory:    createTestTool,
            expectCluster:  true,
            expectEnum:     true,
            enumCount:      3,
        },
        {
            name:           "does not add cluster parameter when single cluster provided",
            defaultCluster: "default-cluster",
            clusters:       []string{"single-cluster"},
            toolName:       "test-tool",
            toolFactory:    createTestTool,
            expectCluster:  false,
            expectEnum:     false,
            enumCount:      0,
        },
        {
            name:           "creates InputSchema when nil",
            defaultCluster: "default-cluster",
            clusters:       []string{"cluster1", "cluster2"},
            toolName:       "test-tool",
            toolFactory:    createTestToolWithNilSchema,
            expectCluster:  true,
            expectEnum:     true,
            enumCount:      2,
        },
        {
            name:           "creates Properties map when nil",
            defaultCluster: "default-cluster",
            clusters:       []string{"cluster1", "cluster2"},
            toolName:       "test-tool",
            toolFactory:    createTestToolWithNilProperties,
            expectCluster:  true,
            expectEnum:     true,
            enumCount:      2,
        },
        {
            name:           "preserves existing properties",
            defaultCluster: "default-cluster",
            clusters:       []string{"cluster1", "cluster2"},
            toolName:       "test-tool",
            toolFactory:    createTestToolWithExistingProperties,
            expectCluster:  true,
            expectEnum:     true,
            enumCount:      2,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if tt.targetParameterName == "" {
                tt.targetParameterName = "cluster"
            }
            mutator := WithTargetParameter(tt.defaultCluster, tt.targetParameterName, tt.clusters)
            tool := tt.toolFactory(tt.toolName)
            originalTool := tool // Keep reference to check if tool was unchanged

            result := mutator(tool)

            if !tt.expectCluster {
                if tt.toolName == "skip-this-tool" {
                    // For skipped tools, the entire tool should be unchanged
                    assert.Equal(t, originalTool, result)
                } else {
                    // For single cluster, schema should exist but no cluster property
                    require.NotNil(t, result.Tool.InputSchema)
                    require.NotNil(t, result.Tool.InputSchema.Properties)
                    _, exists := result.Tool.InputSchema.Properties["cluster"]
                    assert.False(t, exists, "cluster property should not exist")
                }
                return
            }

            // Common assertions for cases where cluster parameter should be added
            require.NotNil(t, result.Tool.InputSchema)
            assert.Equal(t, "object", result.Tool.InputSchema.Type)
            require.NotNil(t, result.Tool.InputSchema.Properties)

            clusterProperty, exists := result.Tool.InputSchema.Properties["cluster"]
            assert.True(t, exists, "cluster property should exist")
            assert.NotNil(t, clusterProperty)
            assert.Equal(t, "string", clusterProperty.Type)
            assert.Contains(t, clusterProperty.Description, tt.defaultCluster)

            if tt.expectEnum {
                assert.NotNil(t, clusterProperty.Enum)
                assert.Equal(t, tt.enumCount, len(clusterProperty.Enum))
                for _, cluster := range tt.clusters {
                    assert.Contains(t, clusterProperty.Enum, cluster)
                }
            }
        })
    }
}

func TestCreateClusterProperty(t *testing.T) {
    tests := []struct {
        name           string
        defaultCluster string
        targetName     string
        clusters       []string
        expectEnum     bool
        expectedCount  int
    }{
        {
            name:           "creates property with enum when clusters <= maxClustersInEnum",
            defaultCluster: "default",
            targetName:     "cluster",
            clusters:       []string{"cluster1", "cluster2", "cluster3"},
            expectEnum:     true,
            expectedCount:  3,
        },
        {
            name:           "creates property without enum when clusters > maxClustersInEnum",
            defaultCluster: "default",
            targetName:     "cluster",
            clusters:       make([]string, maxTargetsInEnum+5), // 10 clusters
            expectEnum:     false,
            expectedCount:  0,
        },
        {
            name:           "creates property with exact maxClustersInEnum clusters",
            defaultCluster: "default",
            targetName:     "cluster",
            clusters:       make([]string, maxTargetsInEnum),
            expectEnum:     true,
            expectedCount:  maxTargetsInEnum,
        },
        {
            name:           "handles single cluster",
            defaultCluster: "default",
            targetName:     "cluster",
            clusters:       []string{"single-cluster"},
            expectEnum:     true,
            expectedCount:  1,
        },
        {
            name:           "handles empty clusters list",
            defaultCluster: "default",
            targetName:     "cluster",
            clusters:       []string{},
            expectEnum:     true,
            expectedCount:  0,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Initialize clusters with names if they were created with make()
            if len(tt.clusters) > 3 && tt.clusters[0] == "" {
                for i := range tt.clusters {
                    tt.clusters[i] = "cluster" + string(rune('A'+i))
                }
            }

            property := createTargetProperty(tt.defaultCluster, tt.targetName, tt.clusters)

            assert.Equal(t, "string", property.Type)
            assert.Contains(t, property.Description, tt.defaultCluster)
            assert.Contains(t, property.Description, "Defaults to "+tt.defaultCluster+" if not set")

            if tt.expectEnum {
                assert.NotNil(t, property.Enum, "enum should be created")
                assert.Equal(t, tt.expectedCount, len(property.Enum))
                if tt.expectedCount > 0 && tt.expectedCount <= 3 {
                    // Only check specific values for small, predefined lists
                    for _, cluster := range tt.clusters {
                        assert.Contains(t, property.Enum, cluster)
                    }
                }
            } else {
                assert.Nil(t, property.Enum, "enum should not be created for too many clusters")
            }
        })
    }
}

func TestToolMutatorType(t *testing.T) {
    t.Run("ToolMutator type can be used as function", func(t *testing.T) {
        var mutator ToolMutator = func(tool api.ServerTool) api.ServerTool {
            tool.Tool.Name = "modified-" + tool.Tool.Name
            return tool
        }

        originalTool := createTestTool("original")
        result := mutator(originalTool)
        assert.Equal(t, "modified-original", result.Tool.Name)
    })
}

func TestMaxClustersInEnumConstant(t *testing.T) {
    t.Run("maxClustersInEnum has expected value", func(t *testing.T) {
        assert.Equal(t, 5, maxTargetsInEnum, "maxClustersInEnum should be 5")
    })
}

type TargetParameterToolMutatorSuite struct {
    suite.Suite
}

func (s *TargetParameterToolMutatorSuite) TestClusterAwareTool() {
    tm := WithTargetParameter("default-cluster", "cluster", []string{"cluster-1", "cluster-2", "cluster-3"})
    tool := createTestTool("cluster-aware-tool")
    // Tools are cluster-aware by default
    tm(tool)
    s.Require().NotNil(tool.Tool.InputSchema.Properties)
    s.Run("adds cluster parameter", func() {
        s.NotNil(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to be added")
    })
    s.Run("adds correct description", func() {
        desc := tool.Tool.InputSchema.Properties["cluster"].Description
        s.Contains(desc, "Optional parameter selecting which cluster to run the tool in", "Expected description to mention cluster selection")
        s.Contains(desc, "Defaults to default-cluster if not set", "Expected description to mention default cluster")
    })
    s.Run("adds enum with clusters", func() {
        s.Require().NotNil(tool.Tool.InputSchema.Properties["cluster"])
        enum := tool.Tool.InputSchema.Properties["cluster"].Enum
        s.NotNilf(enum, "Expected enum to be set")
        s.Equal(3, len(enum), "Expected enum to have 3 entries")
        s.Contains(enum, "cluster-1", "Expected enum to contain cluster-1")
        s.Contains(enum, "cluster-2", "Expected enum to contain cluster-2")
        s.Contains(enum, "cluster-3", "Expected enum to contain cluster-3")
    })
}

func (s *TargetParameterToolMutatorSuite) TestClusterAwareToolSingleCluster() {
    tm := WithTargetParameter("default", "cluster", []string{"only-cluster"})
    tool := createTestTool("cluster-aware-tool-single-cluster")
    // Tools are cluster-aware by default
    tm(tool)
    s.Run("does not add cluster parameter for single cluster", func() {
        s.Nilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to not be added for single cluster")
    })
}

func (s *TargetParameterToolMutatorSuite) TestClusterAwareToolMultipleClusters() {
    tm := WithTargetParameter("default", "cluster", []string{"cluster-1", "cluster-2", "cluster-3", "cluster-4", "cluster-5", "cluster-6"})
    tool := createTestTool("cluster-aware-tool-multiple-clusters")
    // Tools are cluster-aware by default
    tm(tool)
    s.Run("adds cluster parameter", func() {
        s.NotNilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to be added")
    })
    s.Run("does not add enum when list of clusters is > 5", func() {
        s.Require().NotNil(tool.Tool.InputSchema.Properties["cluster"])
        enum := tool.Tool.InputSchema.Properties["cluster"].Enum
        s.Nilf(enum, "Expected enum to not be set for too many clusters")
    })
}

func (s *TargetParameterToolMutatorSuite) TestNonClusterAwareTool() {
    tm := WithTargetParameter("default", "cluster", []string{"cluster-1", "cluster-2"})
    tool := createTestTool("non-cluster-aware-tool")
    tool.ClusterAware = ptr.To(false)
    tm(tool)
    s.Run("does not add cluster parameter", func() {
        s.Nilf(tool.Tool.InputSchema.Properties["cluster"], "Expected cluster property to not be added")
    })
}

func TestTargetParameterToolMutator(t *testing.T) {
    suite.Run(t, new(TargetParameterToolMutatorSuite))
}
@@ -2,11 +2,9 @@ package mcp

import (
    "encoding/json"
    "strconv"
    "testing"

    "github.com/mark3labs/mcp-go/mcp"
    "github.com/stretchr/testify/suite"

    "github.com/containers/kubernetes-mcp-server/internal/test"
    "github.com/containers/kubernetes-mcp-server/pkg/api"
    configuration "github.com/containers/kubernetes-mcp-server/pkg/config"
@@ -14,6 +12,9 @@ import (
    "github.com/containers/kubernetes-mcp-server/pkg/toolsets/config"
    "github.com/containers/kubernetes-mcp-server/pkg/toolsets/core"
    "github.com/containers/kubernetes-mcp-server/pkg/toolsets/helm"
    "github.com/mark3labs/mcp-go/mcp"
    "github.com/stretchr/testify/suite"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

type ToolsetsSuite struct {
@@ -29,7 +30,7 @@ func (s *ToolsetsSuite) SetupTest() {
    s.originalToolsets = toolsets.Toolsets()
    s.MockServer = test.NewMockServer()
    s.Cfg = configuration.Default()
    s.Cfg.KubeConfig = s.MockServer.KubeconfigFile(s.T())
    s.Cfg.KubeConfig = s.KubeconfigFile(s.T())
}

func (s *ToolsetsSuite) TearDownTest() {
@@ -98,6 +99,50 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() {
    })
}

func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() {
    s.Run("Default configuration toolsets in multi-cluster (with 11 clusters)", func() {
        kubeconfig := s.Kubeconfig()
        for i := 0; i < 10; i++ {
            // Add multiple fake contexts to force multi-cluster behavior
            kubeconfig.Contexts[strconv.Itoa(i)] = clientcmdapi.NewContext()
        }
        s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
        s.InitMcpClient()
        tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
        s.Run("ListTools returns tools", func() {
            s.NotNil(tools, "Expected tools from ListTools")
            s.NoError(err, "Expected no error from ListTools")
        })
        s.Run("ListTools returns correct Tool metadata", func() {
            expectedMetadata := test.ReadFile("testdata", "toolsets-full-tools-multicluster.json")
            metadata, err := json.MarshalIndent(tools.Tools, "", " ")
            s.Require().NoErrorf(err, "failed to marshal tools metadata: %v", err)
            s.JSONEq(expectedMetadata, string(metadata), "tools metadata does not match expected")
        })
    })
}

func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiClusterEnum() {
    s.Run("Default configuration toolsets in multi-cluster (with 2 clusters)", func() {
        kubeconfig := s.Kubeconfig()
        // Add additional cluster to force multi-cluster behavior with enum parameter
        kubeconfig.Contexts["extra-cluster"] = clientcmdapi.NewContext()
        s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig)
        s.InitMcpClient()
        tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{})
        s.Run("ListTools returns tools", func() {
            s.NotNil(tools, "Expected tools from ListTools")
            s.NoError(err, "Expected no error from ListTools")
        })
        s.Run("ListTools returns correct Tool metadata", func() {
            expectedMetadata := test.ReadFile("testdata", "toolsets-full-tools-multicluster-enum.json")
            metadata, err := json.MarshalIndent(tools.Tools, "", " ")
            s.Require().NoErrorf(err, "failed to marshal tools metadata: %v", err)
            s.JSONEq(expectedMetadata, string(metadata), "tools metadata does not match expected")
        })
    })
}

func (s *ToolsetsSuite) TestGranularToolsetsTools() {
    testCases := []api.Toolset{
        &core.Toolset{},
@@ -12,33 +12,91 @@ import (

func initConfiguration() []api.ServerTool {
    tools := []api.ServerTool{
        {Tool: api.Tool{
            Name:        "configuration_view",
            Description: "Get the current Kubernetes configuration content as a kubeconfig YAML",
            InputSchema: &jsonschema.Schema{
                Type: "object",
                Properties: map[string]*jsonschema.Schema{
                    "minified": {
                        Type: "boolean",
                        Description: "Return a minified version of the configuration. " +
                            "If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. " +
                            "If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. " +
                            "(Optional, default true)",
                    },
        {
            Tool: api.Tool{
                Name:        "configuration_contexts_list",
                Description: "List all available context names and associated server urls from the kubeconfig file",
                InputSchema: &jsonschema.Schema{
                    Type: "object",
                },
                Annotations: api.ToolAnnotations{
                    Title:           "Configuration: Contexts List",
                    ReadOnlyHint:    ptr.To(true),
                    DestructiveHint: ptr.To(false),
                    IdempotentHint:  ptr.To(true),
                    OpenWorldHint:   ptr.To(false),
                },
            },
            Annotations: api.ToolAnnotations{
                Title:           "Configuration: View",
                ReadOnlyHint:    ptr.To(true),
                DestructiveHint: ptr.To(false),
                IdempotentHint:  ptr.To(false),
                OpenWorldHint:   ptr.To(true),
            ClusterAware:       ptr.To(false),
            TargetListProvider: ptr.To(true),
            Handler:            contextsList,
        },
        {
            Tool: api.Tool{
                Name:        "configuration_view",
                Description: "Get the current Kubernetes configuration content as a kubeconfig YAML",
                InputSchema: &jsonschema.Schema{
                    Type: "object",
                    Properties: map[string]*jsonschema.Schema{
                        "minified": {
                            Type: "boolean",
                            Description: "Return a minified version of the configuration. " +
                                "If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. " +
                                "If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. " +
                                "(Optional, default true)",
                        },
                    },
                },
                Annotations: api.ToolAnnotations{
                    Title:           "Configuration: View",
                    ReadOnlyHint:    ptr.To(true),
                    DestructiveHint: ptr.To(false),
                    IdempotentHint:  ptr.To(false),
                    OpenWorldHint:   ptr.To(true),
                },
            },
        }, Handler: configurationView},
            ClusterAware: ptr.To(false),
            Handler:      configurationView,
        },
    }
    return tools
}

func contextsList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
    contexts, err := params.ConfigurationContextsList()
    if err != nil {
        return api.NewToolCallResult("", fmt.Errorf("failed to list contexts: %v", err)), nil
    }

    if len(contexts) == 0 {
        return api.NewToolCallResult("No contexts found in kubeconfig", nil), nil
    }

    defaultContext, err := params.ConfigurationContextsDefault()
    if err != nil {
        return api.NewToolCallResult("", fmt.Errorf("failed to get default context: %v", err)), nil
    }

    result := fmt.Sprintf("Available Kubernetes contexts (%d total, default: %s):\n\n", len(contexts), defaultContext)
    result += "Format: [*] CONTEXT_NAME -> SERVER_URL\n"
    result += " (* indicates the default context used in tools if context is not set)\n\n"
    result += "Contexts:\n---------\n"
    for context, server := range contexts {
        marker := " "
        if context == defaultContext {
            marker = "*"
        }

        result += fmt.Sprintf("%s%s -> %s\n", marker, context, server)
    }
    result += "---------\n\n"

    result += "To use a specific context with any tool, set the 'context' parameter in the tool call arguments"

    // TODO: Review output format; the current format is not parseable and might not be ideal for LLM consumption
    return api.NewToolCallResult(result, nil), nil
}
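// Illustrative output of contextsList above (not taken from the change; the
// two contexts and server URLs are hypothetical, and map iteration order may
// vary):
//
//	Available Kubernetes contexts (2 total, default: dev):
//
//	Format: [*] CONTEXT_NAME -> SERVER_URL
//	 (* indicates the default context used in tools if context is not set)
//
//	Contexts:
//	---------
//	*dev -> https://dev.example.com:6443
//	 prod -> https://prod.example.com:6443
//	---------
//
//	To use a specific context with any tool, set the 'context' parameter in the tool call arguments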

func configurationView(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
    minify := true
    minified := params.GetArguments()["minified"]

@@ -45,11 +45,11 @@ func eventsList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
        return api.NewToolCallResult("", fmt.Errorf("failed to list events in all namespaces: %v", err)), nil
    }
    if len(eventMap) == 0 {
        return api.NewToolCallResult("No events found", nil), nil
        return api.NewToolCallResult("# No events found", nil), nil
    }
    yamlEvents, err := output.MarshalYaml(eventMap)
    if err != nil {
        err = fmt.Errorf("failed to list events in all namespaces: %v", err)
    }
    return api.NewToolCallResult(fmt.Sprintf("The following events (YAML format) were found:\n%s", yamlEvents), err), nil
    return api.NewToolCallResult(fmt.Sprintf("# The following events (YAML format) were found:\n%s", yamlEvents), err), nil
}

@@ -8,7 +8,6 @@ import (
    "k8s.io/utils/ptr"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
    internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
)

@@ -52,7 +51,7 @@ func initNamespaces(o internalk8s.Openshift) []api.ServerTool {
}

func namespacesList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
    ret, err := params.NamespacesList(params, kubernetes.ResourceListOptions{AsTable: params.ListOutput.AsTable()})
    ret, err := params.NamespacesList(params, internalk8s.ResourceListOptions{AsTable: params.ListOutput.AsTable()})
    if err != nil {
        return api.NewToolCallResult("", fmt.Errorf("failed to list namespaces: %v", err)), nil
    }
@@ -60,7 +59,7 @@ func namespacesList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
}

func projectsList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
    ret, err := params.ProjectsList(params, kubernetes.ResourceListOptions{AsTable: params.ListOutput.AsTable()})
    ret, err := params.ProjectsList(params, internalk8s.ResourceListOptions{AsTable: params.ListOutput.AsTable()})
    if err != nil {
        return api.NewToolCallResult("", fmt.Errorf("failed to list projects: %v", err)), nil
    }

@@ -10,7 +10,6 @@ import (
    "k8s.io/utils/ptr"

    "github.com/containers/kubernetes-mcp-server/pkg/api"
    "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
    internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
    "github.com/containers/kubernetes-mcp-server/pkg/output"
)
@@ -152,7 +151,7 @@ func resourcesList(params api.ToolHandlerParams) (*api.ToolCallResult, error) {
        namespace = ""
    }
    labelSelector := params.GetArguments()["labelSelector"]
    resourceListOptions := kubernetes.ResourceListOptions{
    resourceListOptions := internalk8s.ResourceListOptions{
        AsTable: params.ListOutput.AsTable(),
    }