mirror of
				https://github.com/containers/kubernetes-mcp-server.git
				synced 2025-10-23 01:22:57 +03:00 
			
		
		
		
	Compare commits
	
		
			362 Commits
		
	
	
		
	
	| Author | SHA1 | Date | |
|---|---|---|---|
|   | 7fe604e61d | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 0c78a1e89d | ||
|   | c3bc991237 | ||
|   | ffc7b6c08d | ||
|   | 49afbad502 | ||
|   | 7f4edfd075 | ||
|   | 9da29f4505 | ||
|   | b66719ed8e | ||
|   | 86628bb1bf | ||
|   | 25032699db | ||
|   | dfddf23823 | ||
|   | f3a446676f | ||
|   | 1e154d7587 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 68619b57ad | ||
|   | 086afefc75 | ||
|   | 672b8a5d13 | ||
|   | 65cc304c3c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 3d3eb64582 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 3d5fba8813 | ||
|   | 31e90fbece | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 99e954304c | ||
|   | a056981f53 | ||
|   | 61eaecc38f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 028c6b08c2 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | e8ba1fa0bf | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | e86d314ae2 | ||
|   | a2d16e9f41 | ||
|   | c447bf819f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 07b1ebc05e | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | adc1044615 | ||
|   | b55f28b36e | ||
|   | d3723804ed | ||
|   | 792d2f5b80 | ||
|   | c69e90c70d | ||
|   | 053fb2e31c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 8e666d4c67 | ||
|   | 4c5bce1b65 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 2b72f5f9ab | ||
|   | 5b33e1a065 | ||
|   | 8af889bc8f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 76e22321f5 | ||
|   | 6e29a2ada5 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 22de31d04d | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 97236a7bcd | ||
|   | 94baad6570 | ||
|   | e16114dfc5 | ||
|   | 2bf6c549fe | ||
|   | d6936f42d3 | ||
|   | f496c643e7 | ||
|   | d9d35b9834 | ||
|   | 48cf204a89 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 3fc4fa49bb | ||
|   | 209e8434d5 | ||
|   | 2b6c886d95 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 4361a9e7d8 | ||
|   | ea641e6796 | ||
|   | 6c573f31c8 | ||
|   | 10c82f7bff | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 467e7e6757 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | a8b6041dbf | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 193ac1f239 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 25033531bd | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 36bb0b9e51 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | a6904999d0 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 6bfa127841 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 18005a33b4 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f2a6b90d24 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | a8b3f546fe | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 406279cac6 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 2dbaf2be70 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 25f82ef62c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 7fb1740a87 | ||
|   | 1bd0b32976 | ||
|   | 0ec2599bd8 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f63ac7efb5 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 55c74c5b93 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 7a3d6683ce | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 8c1df25c5a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 21cf1c245b | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | d16b6c3251 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | ae02c77ddd | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 11c01269a3 | ||
|   | 19a92418e4 | ||
|   | 90d4bb03f3 | ||
|   | 58c47dc95c | ||
|   | fde4b1dc0f | ||
|   | dfcecd5089 | ||
|   | 7b11c1667a | ||
|   | b0da9fb459 | ||
|   | cfc42b3bd3 | ||
|   | 43744f2978 | ||
|   | 9ec5c829db | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 40326c1c6a | ||
|   | aba5f548d8 | ||
|   | 94b85990e3 | ||
|   | 4dcede178b | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 4302a438ab | ||
|   | c1af9c0335 | ||
|   | 29b65fd565 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 73af678b96 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 19422923dc | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 7c25e61fb8 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | b8394ae7db | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 970c3ce2a9 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 13739f5424 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | d8fa986170 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 2ce26e6973 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 3e0ecda680 | ||
|   | 9cc7192d4d | ||
|   | be80db1a01 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | d4f3bd4a99 | ||
|   | 4a7e05151a | ||
|   | 49dcff3f21 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 94f7055c0c | ||
|   | 5889fdb252 | ||
|   | 1f670ebec6 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | aa14e31eba | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 512896d082 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | eb48b9c594 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 57d16cc4cf | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | ae49d65b37 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 88a8aa20c6 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 2225c2ca2a | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | fafc824568 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | d0f48f789b | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | 47caa9d593 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | da73cad280 | ||
|   | cad863ff22 | ||
|   | 9856802fe9 | ||
| ![red-hat-konflux[bot]](/assets/img/avatar_default.png)  | c6349f46de | ||
|   | 9e3811a737 | ||
|   | 0ad8726d01 | ||
|   | ca0aa4648d | ||
|   | 3fbfd8d7cb | ||
|   | a3e8818ffe | ||
|   | 775fa21bd1 | ||
|   | 73e9e845c4 | ||
|   | cb9f296566 | ||
|   | f6e9702009 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 4d994d3790 | ||
|   | e4a8f604a1 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 796333891a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 4cae032e84 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 255750a767 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 6d3ac81fdd | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 92cad86e9e | ||
|   | bfa699049e | ||
|   | 77671617df | ||
|   | 5c753275ab | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 83c37ce02f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 6a95f35285 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 288b330b5a | ||
|   | 275b91a00d | ||
|   | 114726fb7c | ||
|   | c5b2223249 | ||
|   | 42e8e3496f | ||
|   | 00e4f1816f | ||
|   | 9ffb818ab2 | ||
|   | 524e4f5d2a | ||
|   | ebe0ba9816 | ||
|   | e6b19034aa | ||
|   | 186f445ca2 | ||
|   | af2a8cd19d | ||
|   | 2a1a3e4fbd | ||
|   | b777972c14 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | cd1cb1a630 | ||
|   | 1968652aca | ||
|   | f3915cd13e | ||
|   | bca2cda21a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | a568ac1d88 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 6f7eb53fd8 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 21e8aa38a2 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 69d1e2895b | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | c21f97057a | ||
|   | f668658217 | ||
|   | 754da19d81 | ||
|   | 25608daf4a | ||
|   | 2957faa771 | ||
|   | f138b06ba8 | ||
|   | 4a3ff2f2ce | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 85876a4621 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 813bba5285 | ||
|   | 2c18ca0822 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | b07cd04d60 | ||
|   | 1a4605dc2d | ||
|   | 84782048a6 | ||
|   | 2a9dddfb0a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 0eaf2888cc | ||
|   | 187efaaedc | ||
|   | 54d3726620 | ||
|   | 7e10e82a3a | ||
|   | 155fe6847f | ||
|   | d070de86eb | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | b8212b6f6c | ||
|   | 6da90015a1 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | a9a81614ba | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 6f58ed591a | ||
|   | 9b517585fb | ||
|   | f80d8df3c4 | ||
|   | 9830e2249d | ||
|   | 714d2ba56f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | dce5f4fac3 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | d785764510 | ||
|   | c8e8a30909 | ||
|   | 24174efe6b | ||
|   | e6f20fc777 | ||
|   | 5f279a81d8 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 219f1b470c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | d3265243bd | ||
|   | 35d3a17122 | ||
|   | fe7f25948b | ||
|   | dfb96adebc | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 69c521b892 | ||
|   | 3753f98ecc | ||
|   | 2994699504 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | ba2b072942 | ||
|   | 3b41e2846f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | ac4b87fc76 | ||
|   | e0fe25af3c | ||
|   | 6c51c9d9e6 | ||
|   | d26f896a8e | ||
|   | 20cb33130a | ||
|   | 1f00601f43 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f23c153eb1 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | bdc84bd574 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f1b398c58f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 9e4cb8ed89 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 865c9dd03c | ||
|   | 1f22f5b23f | ||
|   | 6afb60f73a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f94de90b27 | ||
|   | 22669e72be | ||
|   | 0284cdce29 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 8a4c392c5f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 971011650a | ||
|   | 8f1cfc3e1c | ||
|   | b4928f8230 | ||
|   | 34eabdef13 | ||
|   | 91dec084f3 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 935afe29e5 | ||
|   | 628da8e004 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f8bb00a31b | ||
|   | bcd7712d30 | ||
|   | 9ba998f523 | ||
|   | 4c94fcbe29 | ||
|   | 37d7175cd6 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 22a7125f4a | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 825e6e1f50 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 9a1ef34d6d | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 8cbe06d8c9 | ||
|   | 6bbe5e0a9a | ||
|   | fa5bb81fe5 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 5d3c7f39cf | ||
|   | 7cb6e9b7b4 | ||
|   | 3beac11981 | ||
|   | 79b0f2805c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | aff0fccb47 | ||
|   | a9f17db295 | ||
|   | bdad2677d8 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | baf1595201 | ||
|   | ec82de1712 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 705a8febfa | ||
|   | e8722663b5 | ||
|   | 7b9b59a7bc | ||
|   | 8c80cfebab | ||
|   | b3f38444ec | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 2d7f6088b9 | ||
|   | 25200da8f8 | ||
|   | 470d1c6327 | ||
|   | dac20e4ee3 | ||
|   | a276dc20a9 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | c146bd67aa | ||
|   | 0b659559eb | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 3970689d9c | ||
|   | bea026a13e | ||
|   | fafdf9af5d | ||
|   | 3a49d872be | ||
|   | 2a11784688 | ||
|   | cbf0299e97 | ||
|   | 8dc7160ff0 | ||
|   | 61289cf1df | ||
|   | b08fe66d56 | ||
|   | 83bad6993e | ||
|   | d5cacb9527 | ||
|   | 72ede2ea10 | ||
|   | 3b60f5dfc1 | ||
|   | d74398f85b | ||
|   | 868e5fc636 | ||
|   | f57f203830 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 9b9a32f890 | ||
|   | bf552fe14b | ||
|   | 50277ce954 | ||
|   | fe62e31626 | ||
|   | 5c6c7a031c | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | d8ad7c3e8f | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 0c8eef7bb1 | ||
|   | a98e69102c | ||
|   | c9def7dd46 | ||
|   | 32b388aab3 | ||
|   | 094da788e7 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | b63139319c | ||
|   | 9248c5d734 | ||
|   | 8b3ddab9dd | ||
|   | efa09cbfb3 | ||
|   | cb8e402113 | ||
|   | b0cd15e7ae | ||
|   | 3cc4f32ca0 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 8961435642 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 1ea50f9ebd | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f8564ad786 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 7db0f14964 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 9c2d7bdc37 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | e28a385298 | ||
|   | eacdb95fec | ||
|   | 39d21d77b0 | ||
|   | d7075f2c78 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | f712653853 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 1ecf1b6db8 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | bfb1147259 | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | 24991ecc6e | ||
| ![dependabot[bot]](/assets/img/avatar_default.png)  | e466ea84dd | ||
|   | 30c407f63c | ||
|   | 7b129281f4 | ||
|   | fa4c4728f4 | ||
|   | 900e1c7ca7 | ||
|   | f4d692ed86 | ||
|   | 99ec862739 | ||
|   | c81ce4f039 | ||
|   | 5baebfc8f7 | ||
|   | f80a3636e9 | ||
|   | 3522e4fb44 | ||
|   | 90c2802429 | ||
|   | d3754585ec | ||
|   | 40ff50e04d | ||
|   | 838e5863fa | ||
|   | 78729106a4 | ||
|   | 9e680707fe | ||
|   | 5904fc4c9b | ||
|   | e6ab757915 | ||
|   | 4c5aa9ab38 | ||
|   | 67b9d8a2c0 | ||
|   | dde6c8ca76 | ||
|   | 6cb56266f3 | ||
|   | 7317fc08d5 | ||
|   | 5be9852fb7 | ||
|   | a8bb7c01a7 | ||
|   | 3ea23f3d61 | ||
|   | b91f948cb4 | ||
|   | 6ae9247bae | ||
|   | 3bf7a0fd63 | ||
|   | f591e2b06b | ||
|   | 0f12797365 | ||
|   | 30951a3d44 | ||
|   | 183f72522c | ||
|   | 5af6c3b6c4 | ||
|   | 9ad87d362d | ||
|   | e1432e7222 | ||
|   | 07762e9a7a | ||
|   | ab7d26f8ef | ||
|   | b8e9b845e9 | ||
|   | b14d8f46bc | ||
|   | 227b9a72e2 | 
							
								
								
									
										7
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										7
									
								
								.github/dependabot.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,7 @@ | ||||
| version: 2 | ||||
| updates: | ||||
|   - package-ecosystem: "gomod" | ||||
|     directory: "/" | ||||
|     schedule: | ||||
|       interval: "daily" | ||||
|     open-pull-requests-limit: 10 | ||||
							
								
								
									
										65
									
								
								.github/workflows/release-image.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										65
									
								
								.github/workflows/release-image.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,65 @@ | ||||
| name: Release as container image | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - main | ||||
|     tags: | ||||
|       - '*' | ||||
|  | ||||
| env: | ||||
|   IMAGE_NAME: quay.io/manusa/kubernetes_mcp_server | ||||
|   TAG: ${{ github.ref_name == 'main' && 'latest' || github.ref_type == 'tag' && github.ref_name && startsWith(github.ref_name, 'v') && github.ref_name || 'unknown' }} | ||||
|  | ||||
| jobs: | ||||
|   publish-platform-images: | ||||
|     name: 'Publish: linux-${{ matrix.platform.tag }}' | ||||
|     strategy: | ||||
|       fail-fast: true | ||||
|       matrix: | ||||
|         platform: | ||||
|           - runner: ubuntu-latest | ||||
|             tag: amd64 | ||||
|           - runner: ubuntu-24.04-arm | ||||
|             tag: arm64 | ||||
|     runs-on: ${{ matrix.platform.runner }} | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|       - name: Install Podman # Not available in arm64 image | ||||
|         run: | | ||||
|           sudo apt-get update | ||||
|           sudo apt-get install -y podman | ||||
|       - name: Quay Login | ||||
|         run: | | ||||
|           echo ${{ secrets.QUAY_PASSWORD }} | podman login quay.io -u ${{ secrets.QUAY_USERNAME }} --password-stdin | ||||
|       - name: Build Image | ||||
|         run: | | ||||
|           podman build \ | ||||
|             --platform "linux/${{ matrix.platform.tag }}" \ | ||||
|             -f Dockerfile \ | ||||
|             -t "${{ env.IMAGE_NAME }}:${{ env.TAG }}-linux-${{ matrix.platform.tag }}" \ | ||||
|             . | ||||
|       - name: Push Image | ||||
|         run: | | ||||
|           podman push \ | ||||
|             "${{ env.IMAGE_NAME }}:${{ env.TAG }}-linux-${{ matrix.platform.tag }}" | ||||
|  | ||||
|   publish-manifest: | ||||
|     name: Publish Manifest | ||||
|     runs-on: ubuntu-latest | ||||
|     needs: publish-platform-images | ||||
|     steps: | ||||
|       - name: Quay Login | ||||
|         run: | | ||||
|           echo ${{ secrets.QUAY_PASSWORD }} | podman login quay.io -u ${{ secrets.QUAY_USERNAME }} --password-stdin | ||||
|       - name: Create Manifest | ||||
|         run: | | ||||
|           podman manifest create \ | ||||
|             "${{ env.IMAGE_NAME }}:${{ env.TAG }}" \ | ||||
|             "${{ env.IMAGE_NAME }}:${{ env.TAG }}-linux-amd64" \ | ||||
|             "${{ env.IMAGE_NAME }}:${{ env.TAG }}-linux-arm64" | ||||
|       - name: Push Manifest | ||||
|         run: | | ||||
|           podman manifest push \ | ||||
|             "${{ env.IMAGE_NAME }}:${{ env.TAG }}" | ||||
							
								
								
									
										21
									
								
								.github/workflows/release.yaml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										21
									
								
								.github/workflows/release.yaml
									
									
									
									
										vendored
									
									
								
							| @@ -12,10 +12,11 @@ concurrency: | ||||
|  | ||||
| env: | ||||
|   GO_VERSION: 1.23 | ||||
|   NPM_TOKEN: ${{ secrets.NPM_TOKEN }} | ||||
|   UV_PUBLISH_TOKEN: ${{ secrets.UV_PUBLISH_TOKEN }} | ||||
|  | ||||
| permissions: | ||||
|   contents: write | ||||
|   id-token: write  # Required for npmjs OIDC | ||||
|   discussions: write | ||||
|  | ||||
| jobs: | ||||
| @@ -38,6 +39,24 @@ jobs: | ||||
|           files: | | ||||
|             LICENSE | ||||
|             kubernetes-mcp-server-* | ||||
|       # Ensure npm 11.5.1 or later is installed (required for https://docs.npmjs.com/trusted-publishers) | ||||
|       - name: Setup node | ||||
|         uses: actions/setup-node@v6 | ||||
|         with: | ||||
|           node-version: 24 | ||||
|           registry-url: 'https://registry.npmjs.org' | ||||
|       - name: Publish npm | ||||
|         run: | ||||
|           make npm-publish | ||||
|   python: | ||||
|     name: Release Python | ||||
|     # Python logic requires the tag/release version to be available from GitHub | ||||
|     needs: release | ||||
|     runs-on: ubuntu-latest | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|       - uses: astral-sh/setup-uv@v5 | ||||
|       - name: Publish Python | ||||
|         run: | ||||
|           make python-publish | ||||
|   | ||||
							
								
								
									
										15
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										15
									
								
								.gitignore
									
									
									
									
										vendored
									
									
								
							| @@ -1,7 +1,16 @@ | ||||
| _output/ | ||||
| .idea/ | ||||
| .vscode/ | ||||
| .docusaurus/ | ||||
| node_modules/ | ||||
|  | ||||
| .npmrc | ||||
| kubernetes-mcp-server | ||||
| !cmd/kubernetes-mcp-server | ||||
| !pkg/kubernetes-mcp-server | ||||
| npm/kubernetes-mcp-server/README.md | ||||
| npm/kubernetes-mcp-server/LICENSE | ||||
| !npm/kubernetes-mcp-server | ||||
| kubernetes-mcp-server-darwin-amd64 | ||||
| !npm/kubernetes-mcp-server-darwin-amd64/ | ||||
| kubernetes-mcp-server-darwin-arm64 | ||||
| @@ -12,3 +21,9 @@ kubernetes-mcp-server-linux-arm64 | ||||
| !npm/kubernetes-mcp-server-linux-arm64 | ||||
| kubernetes-mcp-server-windows-amd64.exe | ||||
| kubernetes-mcp-server-windows-arm64.exe | ||||
|  | ||||
| python/.venv/ | ||||
| python/build/ | ||||
| python/dist/ | ||||
| python/kubernetes_mcp_server.egg-info/ | ||||
| !python/kubernetes-mcp-server | ||||
|   | ||||
							
								
								
									
										128
									
								
								AGENTS.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										128
									
								
								AGENTS.md
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,128 @@ | ||||
| # Project Agents.md for Kubernetes MCP Server | ||||
|  | ||||
| This Agents.md file provides comprehensive guidance for AI assistants and coding agents (like Claude, Gemini, Cursor, and others) to work with this codebase. | ||||
|  | ||||
| This repository contains the kubernetes-mcp-server project, | ||||
| a powerful Go-based Model Context Protocol (MCP) server that provides native Kubernetes and OpenShift cluster management capabilities without external dependencies. | ||||
| This MCP server enables AI assistants (like Claude, Gemini, Cursor, and others) to interact with Kubernetes clusters using the Model Context Protocol (MCP). | ||||
|  | ||||
| ## Project Structure and Repository layout | ||||
|  | ||||
| - Go package layout follows the standard Go conventions: | ||||
|   - `cmd/kubernetes-mcp-server/` – main application entry point using Cobra CLI framework. | ||||
|   - `pkg/` – libraries grouped by domain. | ||||
|     - `config/` – configuration management. | ||||
|     - `helm/` - Helm chart operations integration. | ||||
|     - `http/` - HTTP server and authorization middleware. | ||||
|     - `kubernetes/` - Kubernetes client management, authentication, and access control. | ||||
|     - `mcp/` - Model Context Protocol (MCP) server implementation with tool registration and STDIO/HTTP support. | ||||
|     - `output/` - output formatting and rendering. | ||||
| - `.github/` – GitHub-related configuration (Actions workflows, issue templates...). | ||||
| - `docs/` – documentation files. | ||||
| - `npm/` – Node packages that wraps the compiled binaries for distribution through npmjs.com. | ||||
| - `python/` – Python package providing a script that downloads the correct platform binary from the GitHub releases page and runs it for distribution through pypi.org. | ||||
| - `Dockerfile` - container image description file to distribute the server as a container image. | ||||
| - `Makefile` – tasks for building, formatting, linting and testing. | ||||
|  | ||||
| ## Feature development | ||||
|  | ||||
| Implement new functionality in the Go sources under `cmd/` and `pkg/`. | ||||
| The JavaScript (`npm/`) and Python (`python/`) directories only wrap the compiled binary for distribution (npm and PyPI). | ||||
| Most changes will not require touching them unless the version or packaging needs to be updated. | ||||
|  | ||||
| ## Building | ||||
|  | ||||
| Use the provided Makefile targets: | ||||
|  | ||||
| ```bash | ||||
| # Format source and build the binary | ||||
| make build | ||||
|  | ||||
| # Build for all supported platforms | ||||
| make build-all-platforms | ||||
| ``` | ||||
|  | ||||
| `make build` will run `go fmt` and `go mod tidy` before compiling. | ||||
| The resulting executable is `kubernetes-mcp-server`. | ||||
|  | ||||
| ## Running | ||||
|  | ||||
| The README demonstrates running the server via | ||||
| [`mcp-inspector`](https://modelcontextprotocol.io/docs/tools/inspector): | ||||
|  | ||||
| ```bash | ||||
| make build | ||||
| npx @modelcontextprotocol/inspector@latest $(pwd)/kubernetes-mcp-server | ||||
| ``` | ||||
|  | ||||
| To run the server locally, you can use `npx`, `uvx` or execute the binary directly: | ||||
|  | ||||
| ```bash | ||||
| # Using npx (Node.js package runner) | ||||
| npx -y kubernetes-mcp-server@latest | ||||
|  | ||||
| # Using uvx (Python package runner) | ||||
| uvx kubernetes-mcp-server@latest | ||||
|  | ||||
| # Binary execution | ||||
| ./kubernetes-mcp-server | ||||
| ``` | ||||
|  | ||||
| This MCP server is designed to run both locally and remotely. | ||||
|  | ||||
| ### Local Execution | ||||
|  | ||||
| When running locally, the server connects to a Kubernetes or OpenShift cluster using the kubeconfig file. | ||||
| It reads the kubeconfig from the `--kubeconfig` flag, the `KUBECONFIG` environment variable, or defaults to `~/.kube/config`. | ||||
|  | ||||
| This means that `npx -y kubernetes-mcp-server@latest` on a workstation will talk to whatever cluster your current kubeconfig points to (e.g. a local Kind cluster). | ||||
|  | ||||
| ### Remote Execution | ||||
|  | ||||
| When running remotely, the server can be deployed as a container image in a Kubernetes or OpenShift cluster. | ||||
| The server can be run as a Deployment, StatefulSet, or any other Kubernetes resource that suits your needs. | ||||
| The server will automatically use the in-cluster configuration to connect to the Kubernetes API server. | ||||
|  | ||||
| ## Tests | ||||
|  | ||||
| Run all Go tests with: | ||||
|  | ||||
| ```bash | ||||
| make test | ||||
| ``` | ||||
|  | ||||
| The test suite relies on the `setup-envtest` tooling from `sigs.k8s.io/controller-runtime`. | ||||
| The first run downloads a Kubernetes `envtest` environment from the internet, so network access is required. | ||||
| Without it some tests will fail during setup. | ||||
|  | ||||
| ## Linting | ||||
|  | ||||
| Static analysis is performed with `golangci-lint`: | ||||
|  | ||||
| ```bash | ||||
| make lint | ||||
| ``` | ||||
|  | ||||
| The `lint` target downloads the specified `golangci-lint` version if it is not already present under `_output/tools/bin/`. | ||||
|  | ||||
| ## Dependencies | ||||
|  | ||||
| When introducing new modules run `make tidy` so that `go.mod` and `go.sum` remain tidy. | ||||
|  | ||||
| ## Coding style | ||||
|  | ||||
| - Go modules target Go **1.24** (see `go.mod`). | ||||
| - Tests are written with the standard library `testing` package. | ||||
| - Build, test and lint steps are defined in the Makefile—keep them working. | ||||
|  | ||||
| ## Distribution Methods | ||||
|  | ||||
| The server is distributed as a binary executable, a Docker image, an npm package, and a Python package. | ||||
|  | ||||
| - **Native binaries** for Linux, macOS, and Windows are available in the GitHub releases. | ||||
| - A **container image** (Docker) is built and pushed to the `quay.io/manusa/kubernetes_mcp_server` repository. | ||||
| - An **npm** package is available at [npmjs.com](https://www.npmjs.com/package/kubernetes-mcp-server). | ||||
|   It wraps the platform-specific binary and provides a convenient way to run the server using `npx`. | ||||
| - A **Python** package is available at [pypi.org](https://pypi.org/project/kubernetes-mcp-server/). | ||||
|   It provides a script that downloads the correct platform binary from the GitHub releases page and runs it. | ||||
|   It provides a convenient way to run the server using `uvx` or `python -m kubernetes_mcp_server`. | ||||
							
								
								
									
										14
									
								
								Dockerfile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										14
									
								
								Dockerfile
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,14 @@ | ||||
| FROM golang:latest AS builder | ||||
|  | ||||
| WORKDIR /app | ||||
|  | ||||
| COPY ./ ./ | ||||
| RUN make build | ||||
|  | ||||
| FROM registry.access.redhat.com/ubi9/ubi-minimal:latest | ||||
| WORKDIR /app | ||||
| COPY --from=builder /app/kubernetes-mcp-server /app/kubernetes-mcp-server | ||||
| USER 65532:65532 | ||||
| ENTRYPOINT ["/app/kubernetes-mcp-server", "--port", "8080"] | ||||
|  | ||||
| EXPOSE 8080 | ||||
							
								
								
									
										95
									
								
								Makefile
									
									
									
									
									
								
							
							
						
						
									
										95
									
								
								Makefile
									
									
									
									
									
								
							| @@ -1,10 +1,6 @@ | ||||
| # If you update this file, please follow | ||||
| # https://suva.sh/posts/well-documented-makefiles | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## General | ||||
| ## -------------------------------------- | ||||
|  | ||||
| .DEFAULT_GOAL := help | ||||
|  | ||||
| PACKAGE = $(shell go list -m) | ||||
| @@ -19,7 +15,11 @@ LD_FLAGS = -s -w \ | ||||
| 	-X '$(PACKAGE)/pkg/version.BinaryName=$(BINARY_NAME)' | ||||
| COMMON_BUILD_ARGS = -ldflags "$(LD_FLAGS)" | ||||
|  | ||||
| NPM_VERSION ?= $(shell echo $(GIT_VERSION) | sed 's/^v//') | ||||
| GOLANGCI_LINT = $(shell pwd)/_output/tools/bin/golangci-lint | ||||
| GOLANGCI_LINT_VERSION ?= v2.5.0 | ||||
|  | ||||
| # NPM version should not append the -dirty flag | ||||
| NPM_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//') | ||||
| OSES = darwin linux windows | ||||
| ARCHS = amd64 arm64 | ||||
|  | ||||
| @@ -27,7 +27,7 @@ CLEAN_TARGETS := | ||||
| CLEAN_TARGETS += '$(BINARY_NAME)' | ||||
| CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),$(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,))) | ||||
| CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),./npm/$(BINARY_NAME)-$(os)-$(arch)/bin/)) | ||||
| CLEAN_TARGETS += ./npm/.npmrc | ||||
| CLEAN_TARGETS += ./npm/kubernetes-mcp-server/.npmrc ./npm/kubernetes-mcp-server/LICENSE ./npm/kubernetes-mcp-server/README.md | ||||
| CLEAN_TARGETS += $(foreach os,$(OSES),$(foreach arch,$(ARCHS),./npm/$(BINARY_NAME)-$(os)-$(arch)/.npmrc)) | ||||
|  | ||||
| # The help will print out all targets with their descriptions organized bellow their categories. The categories are represented by `##@` and the target descriptions by `##`. | ||||
| @@ -47,18 +47,18 @@ clean: ## Clean up all build artifacts | ||||
| 	rm -rf $(CLEAN_TARGETS) | ||||
|  | ||||
| .PHONY: build | ||||
| build: clean tidy format ## Build the project | ||||
| build: clean tidy format lint ## Build the project | ||||
| 	go build $(COMMON_BUILD_ARGS) -o $(BINARY_NAME) ./cmd/kubernetes-mcp-server | ||||
|  | ||||
|  | ||||
| .PHONY: build-all-platforms | ||||
| build-all-platforms: clean tidy format ## Build the project for all platforms | ||||
| build-all-platforms: clean tidy format lint ## Build the project for all platforms | ||||
| 	$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ | ||||
| 		GOOS=$(os) GOARCH=$(arch) go build $(COMMON_BUILD_ARGS) -o $(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,) ./cmd/kubernetes-mcp-server; \ | ||||
| 	)) | ||||
|  | ||||
| .PHONY: npm | ||||
| npm: build-all-platforms ## Create the npm packages | ||||
| .PHONY: npm-copy-binaries | ||||
| npm-copy-binaries: build-all-platforms ## Copy the binaries to each npm package | ||||
| 	$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ | ||||
| 		EXECUTABLE=./$(BINARY_NAME)-$(os)-$(arch)$(if $(findstring windows,$(os)),.exe,); \ | ||||
| 		DIRNAME=$(BINARY_NAME)-$(os)-$(arch); \ | ||||
| @@ -67,19 +67,25 @@ npm: build-all-platforms ## Create the npm packages | ||||
| 	)) | ||||
|  | ||||
| .PHONY: npm-publish | ||||
| npm-publish: npm ## Publish the npm packages | ||||
| npm-publish: npm-copy-binaries ## Publish the npm packages | ||||
| 	$(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ | ||||
| 		DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \ | ||||
| 		cd npm/$$DIRNAME; \ | ||||
| 		echo '//registry.npmjs.org/:_authToken=\$(NPM_TOKEN)' >> .npmrc; \ | ||||
| 		jq '.version = "$(NPM_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \ | ||||
| 		npm publish; \ | ||||
| 		npm publish --tag latest; \ | ||||
| 		cd ../..; \ | ||||
| 	)) | ||||
| 	echo '//registry.npmjs.org/:_authToken=\$(NPM_TOKEN)' >> ./npm/.npmrc | ||||
| 	jq '.version = "$(NPM_VERSION)"' ./npm/package.json > tmp.json && mv tmp.json ./npm/package.json; \ | ||||
| 	jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/package.json > tmp.json && mv tmp.json ./npm/package.json; \ | ||||
| 	cd npm && npm publish | ||||
| 	cp README.md LICENSE ./npm/kubernetes-mcp-server/ | ||||
| 	jq '.version = "$(NPM_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ | ||||
| 	jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ | ||||
| 	cd npm/kubernetes-mcp-server && npm publish --tag latest | ||||
|  | ||||
| .PHONY: python-publish | ||||
| python-publish: ## Publish the python packages | ||||
| 	cd ./python && \ | ||||
| 	sed -i "s/version = \".*\"/version = \"$(NPM_VERSION)\"/" pyproject.toml && \ | ||||
| 	uv build && \ | ||||
| 	uv publish | ||||
|  | ||||
| .PHONY: test | ||||
| test: ## Run the tests | ||||
| @@ -92,3 +98,58 @@ format: ## Format the code | ||||
| .PHONY: tidy | ||||
| tidy: ## Tidy up the go modules | ||||
| 	go mod tidy | ||||
|  | ||||
| .PHONY: golangci-lint | ||||
| golangci-lint: ## Download and install golangci-lint if not already installed | ||||
| 		@[ -f $(GOLANGCI_LINT) ] || { \ | ||||
|     	set -e ;\ | ||||
|     	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell dirname $(GOLANGCI_LINT)) $(GOLANGCI_LINT_VERSION) ;\ | ||||
|     	} | ||||
|  | ||||
| .PHONY: lint | ||||
| lint: golangci-lint ## Lint the code | ||||
| 	$(GOLANGCI_LINT) run --verbose --print-resources-usage | ||||
|  | ||||
| .PHONY: update-readme-tools | ||||
| update-readme-tools: ## Update the README.md file with the latest toolsets | ||||
| 	go run ./internal/tools/update-readme/main.go README.md | ||||
|  | ||||
| ##@ Tools | ||||
|  | ||||
| .PHONY: tools | ||||
| tools: ## Install all required tools (kind) to ./_output/bin/ | ||||
| 	@echo "Checking and installing required tools to ./_output/bin/ ..." | ||||
| 	@if [ -f _output/bin/kind ]; then echo "[OK] kind already installed"; else echo "Installing kind..."; $(MAKE) -s kind; fi | ||||
| 	@echo "All tools ready!" | ||||
|  | ||||
| ##@ Local Development | ||||
|  | ||||
| .PHONY: local-env-setup | ||||
| local-env-setup: ## Setup complete local development environment with Kind cluster | ||||
| 	@echo "=========================================" | ||||
| 	@echo "Kubernetes MCP Server - Local Setup" | ||||
| 	@echo "=========================================" | ||||
| 	$(MAKE) tools | ||||
| 	$(MAKE) kind-create-cluster | ||||
| 	$(MAKE) keycloak-install | ||||
| 	$(MAKE) build | ||||
| 	@echo "" | ||||
| 	@echo "=========================================" | ||||
| 	@echo "Local environment ready!" | ||||
| 	@echo "=========================================" | ||||
| 	@echo "" | ||||
| 	@echo "Configuration file generated:" | ||||
| 	@echo "  _output/config.toml" | ||||
| 	@echo "" | ||||
| 	@echo "Run the MCP server with:" | ||||
| 	@echo "  ./$(BINARY_NAME) --port 8080 --config _output/config.toml" | ||||
| 	@echo "" | ||||
| 	@echo "Or run with MCP inspector:" | ||||
| 	@echo "  npx @modelcontextprotocol/inspector@latest \$$(pwd)/$(BINARY_NAME) --config _output/config.toml" | ||||
|  | ||||
| .PHONY: local-env-teardown | ||||
| local-env-teardown: ## Tear down the local Kind cluster | ||||
| 	$(MAKE) kind-delete-cluster | ||||
|  | ||||
| # Include build configuration files | ||||
| -include build/*.mk | ||||
|   | ||||
							
								
								
									
										351
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										351
									
								
								README.md
									
									
									
									
									
								
							| @@ -1,28 +1,339 @@ | ||||
| # Kubernetes MCP Server | ||||
|  | ||||
| <p align="center"> | ||||
|   <a href="https://github.com/manusa/kubernetes-mcp-server/blob/main/LICENSE"> | ||||
|     <img alt="GitHub License" src="https://img.shields.io/github/license/manusa/kubernetes-mcp-server" /></a> | ||||
|   <a href="https://github.com/manusa/kubernetes-mcp-server/actions/workflows/build.yaml"> | ||||
|     <img src="https://github.com/manusa/kubernetes-mcp-server/actions/workflows/build.yaml/badge.svg" alt="Build status badge" /></a> | ||||
| </p> | ||||
| [](https://github.com/containers/kubernetes-mcp-server/blob/main/LICENSE) | ||||
| [](https://www.npmjs.com/package/kubernetes-mcp-server) | ||||
| [](https://pypi.org/project/kubernetes-mcp-server/) | ||||
| [](https://github.com/containers/kubernetes-mcp-server/releases/latest) | ||||
| [](https://github.com/containers/kubernetes-mcp-server/actions/workflows/build.yaml) | ||||
|  | ||||
| [✨ Features](#features) | [🚀 Getting Started](#getting-started) | [🎥 Demos](#demos) | [⚙️ Configuration](#configuration) | [🛠️ Tools](#tools-and-functionalities) | [🧑💻 Development](#development) | ||||
|  | ||||
| https://github.com/user-attachments/assets/be2b67b3-fc1c-4d11-ae46-93deba8ed98e | ||||
|  | ||||
| ## ✨ Features <a id="features"></a> | ||||
|  | ||||
| A powerful and flexible Kubernetes [Model Context Protocol (MCP)](https://blog.marcnuri.com/model-context-protocol-mcp-introduction) server implementation with support for **Kubernetes** and **OpenShift**. | ||||
|  | ||||
| - **✅ Configuration**: | ||||
|   - Automatically detect changes in the Kubernetes configuration and update the MCP server. | ||||
|   - **View** and manage the current [Kubernetes `.kube/config`](https://blog.marcnuri.com/where-is-my-default-kubeconfig-file) or in-cluster configuration. | ||||
| - **✅ Generic Kubernetes Resources**: Perform operations on **any** Kubernetes or OpenShift resource. | ||||
|   - Any CRUD operation (Create or Update, Get, List, Delete). | ||||
| - **✅ Pods**: Perform Pod-specific operations. | ||||
|   - **List** pods in all namespaces or in a specific namespace. | ||||
|   - **Get** a pod by name from the specified namespace. | ||||
|   - **Delete** a pod by name from the specified namespace. | ||||
|   - **Show logs** for a pod by name from the specified namespace. | ||||
|   - **Top** gets resource usage metrics for all pods or a specific pod in the specified namespace. | ||||
|   - **Exec** into a pod and run a command. | ||||
|   - **Run** a container image in a pod and optionally expose it. | ||||
| - **✅ Namespaces**: List Kubernetes Namespaces. | ||||
| - **✅ Events**: View Kubernetes events in all namespaces or in a specific namespace. | ||||
| - **✅ Projects**: List OpenShift Projects. | ||||
| - **☸️ Helm**: | ||||
|   - **Install** a Helm chart in the current or provided namespace. | ||||
|   - **List** Helm releases in all namespaces or in a specific namespace. | ||||
|   - **Uninstall** a Helm release in the current or provided namespace. | ||||
|  | ||||
| Unlike other Kubernetes MCP server implementations, this **IS NOT** just a wrapper around `kubectl` or `helm` command-line tools. | ||||
| It is a **Go-based native implementation** that interacts directly with the Kubernetes API server. | ||||
|  | ||||
| There is **NO NEED** for external dependencies or tools to be installed on the system. | ||||
| If you're using the native binaries you don't need to have Node or Python installed on your system. | ||||
|  | ||||
| - **✅ Lightweight**: The server is distributed as a single native binary for Linux, macOS, and Windows. | ||||
| - **✅ High-Performance / Low-Latency**: Directly interacts with the Kubernetes API server without the overhead of calling and waiting for external commands. | ||||
| - **✅ Multi-Cluster**: Can interact with multiple Kubernetes clusters simultaneously (as defined in your kubeconfig files). | ||||
| - **✅ Cross-Platform**: Available as a native binary for Linux, macOS, and Windows, as well as an npm package, a Python package, and container/Docker image. | ||||
| - **✅ Configurable**: Supports [command-line arguments](#configuration)  to configure the server behavior. | ||||
| - **✅ Well tested**: The server has an extensive test suite to ensure its reliability and correctness across different Kubernetes environments. | ||||
|  | ||||
| ## 🚀 Getting Started <a id="getting-started"></a> | ||||
|  | ||||
| ### Requirements | ||||
|  | ||||
| - Access to a Kubernetes cluster. | ||||
|  | ||||
| ### Claude Desktop | ||||
|  | ||||
| #### Using npx | ||||
|  | ||||
| If you have npm installed, this is the fastest way to get started with `kubernetes-mcp-server` on Claude Desktop. | ||||
|  | ||||
| Open your `claude_desktop_config.json` and add the mcp server to the list of `mcpServers`: | ||||
| ``` json | ||||
| { | ||||
|   "mcpServers": { | ||||
|     "kubernetes": { | ||||
|       "command": "npx", | ||||
|       "args": [ | ||||
|         "-y", | ||||
|         "kubernetes-mcp-server@latest" | ||||
|       ] | ||||
|     } | ||||
|   } | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### VS Code / VS Code Insiders | ||||
|  | ||||
| Install the Kubernetes MCP server extension in VS Code Insiders by pressing the following link: | ||||
|  | ||||
| [<img src="https://img.shields.io/badge/VS_Code-VS_Code?style=flat-square&label=Install%20Server&color=0098FF" alt="Install in VS Code">](https://insiders.vscode.dev/redirect?url=vscode%3Amcp%2Finstall%3F%257B%2522name%2522%253A%2522kubernetes%2522%252C%2522command%2522%253A%2522npx%2522%252C%2522args%2522%253A%255B%2522-y%2522%252C%2522kubernetes-mcp-server%2540latest%2522%255D%257D) | ||||
| [<img alt="Install in VS Code Insiders" src="https://img.shields.io/badge/VS_Code_Insiders-VS_Code_Insiders?style=flat-square&label=Install%20Server&color=24bfa5">](https://insiders.vscode.dev/redirect?url=vscode-insiders%3Amcp%2Finstall%3F%257B%2522name%2522%253A%2522kubernetes%2522%252C%2522command%2522%253A%2522npx%2522%252C%2522args%2522%253A%255B%2522-y%2522%252C%2522kubernetes-mcp-server%2540latest%2522%255D%257D) | ||||
|  | ||||
| Alternatively, you can install the extension manually by running the following command: | ||||
|  | ||||
| ```shell | ||||
| # For VS Code | ||||
| code --add-mcp '{"name":"kubernetes","command":"npx","args":["kubernetes-mcp-server@latest"]}' | ||||
| # For VS Code Insiders | ||||
| code-insiders --add-mcp '{"name":"kubernetes","command":"npx","args":["kubernetes-mcp-server@latest"]}' | ||||
| ``` | ||||
|  | ||||
| ### Cursor | ||||
|  | ||||
| Install the Kubernetes MCP server extension in Cursor by pressing the following link: | ||||
|  | ||||
| [](https://cursor.com/en/install-mcp?name=kubernetes-mcp-server&config=eyJjb21tYW5kIjoibnB4IC15IGt1YmVybmV0ZXMtbWNwLXNlcnZlckBsYXRlc3QifQ%3D%3D) | ||||
|  | ||||
| Alternatively, you can install the extension manually by editing the `mcp.json` file: | ||||
|  | ||||
| ```json | ||||
| { | ||||
|   "mcpServers": { | ||||
|     "kubernetes-mcp-server": { | ||||
|       "command": "npx", | ||||
|       "args": ["-y", "kubernetes-mcp-server@latest"] | ||||
|     } | ||||
|   } | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Goose CLI | ||||
|  | ||||
| [Goose CLI](https://blog.marcnuri.com/goose-on-machine-ai-agent-cli-introduction) is the easiest (and cheapest) way to get rolling with artificial intelligence (AI) agents. | ||||
|  | ||||
| #### Using npm | ||||
|  | ||||
| If you have npm installed, this is the fastest way to get started with `kubernetes-mcp-server`. | ||||
|  | ||||
| Open your goose `config.yaml` and add the mcp server to the list of `mcpServers`: | ||||
| ```yaml | ||||
| extensions: | ||||
|   kubernetes: | ||||
|     command: npx | ||||
|     args: | ||||
|       - -y | ||||
|       - kubernetes-mcp-server@latest | ||||
|  | ||||
| ``` | ||||
|  | ||||
| ## 🎥 Demos <a id="demos"></a> | ||||
|  | ||||
| ### Diagnosing and automatically fixing an OpenShift Deployment | ||||
|  | ||||
| Demo showcasing how Kubernetes MCP server is leveraged by Claude Desktop to automatically diagnose and fix a deployment in OpenShift without any user assistance. | ||||
|  | ||||
| https://github.com/user-attachments/assets/a576176d-a142-4c19-b9aa-a83dc4b8d941 | ||||
|  | ||||
| ### _Vibe Coding_ a simple game and deploying it to OpenShift | ||||
|  | ||||
| In this demo, I walk you through the process of _Vibe Coding_ a simple game using VS Code and how to leverage [Podman MCP server](https://github.com/manusa/podman-mcp-server) and Kubernetes MCP server to deploy it to OpenShift. | ||||
|  | ||||
| <a href="https://www.youtube.com/watch?v=l05jQDSrzVI" target="_blank"> | ||||
|  <img src="docs/images/vibe-coding.jpg" alt="Vibe Coding: Build & Deploy a Game on Kubernetes" width="240"  /> | ||||
| </a> | ||||
|  | ||||
| ### Supercharge GitHub Copilot with Kubernetes MCP Server in VS Code - One-Click Setup! | ||||
|  | ||||
| In this demo, I'll show you how to set up Kubernetes MCP server in VS code just by clicking a link. | ||||
|  | ||||
| <a href="https://youtu.be/AI4ljYMkgtA" target="_blank"> | ||||
|  <img src="docs/images/kubernetes-mcp-server-github-copilot.jpg" alt="Supercharge GitHub Copilot with Kubernetes MCP Server in VS Code - One-Click Setup!" width="240"  /> | ||||
| </a> | ||||
|  | ||||
| ## ⚙️ Configuration <a id="configuration"></a> | ||||
|  | ||||
| The Kubernetes MCP server can be configured using command line (CLI) arguments. | ||||
|  | ||||
| You can run the CLI executable either by using `npx`, `uvx`, or by downloading the [latest release binary](https://github.com/containers/kubernetes-mcp-server/releases/latest). | ||||
|  | ||||
| ```shell | ||||
| # Run the Kubernetes MCP server using npx (in case you have npm and node installed) | ||||
| npx kubernetes-mcp-server@latest --help | ||||
| ``` | ||||
|  | ||||
| ```shell | ||||
| # Run the Kubernetes MCP server using uvx (in case you have uv and python installed) | ||||
| uvx kubernetes-mcp-server@latest --help | ||||
| ``` | ||||
|  | ||||
| ```shell | ||||
| # Run the Kubernetes MCP server using the latest release binary | ||||
| ./kubernetes-mcp-server --help | ||||
| ``` | ||||
|  | ||||
| ### Configuration Options | ||||
|  | ||||
| | Option                    | Description                                                                                                                                                                                                                                                                                   | | ||||
| |---------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ||||
| | `--port`                  | Starts the MCP server in Streamable HTTP mode (path /mcp) and Server-Sent Event (SSE) (path /sse) mode and listens on the specified port .                                                                                                                                                    | | ||||
| | `--log-level`             | Sets the logging level (values [from 0-9](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)). Similar to [kubectl logging levels](https://kubernetes.io/docs/reference/kubectl/quick-reference/#kubectl-output-verbosity-and-debugging). | | ||||
| | `--kubeconfig`            | Path to the Kubernetes configuration file. If not provided, it will try to resolve the configuration (in-cluster, default location, etc.).                                                                                                                                                    | | ||||
| | `--list-output`           | Output format for resource list operations (one of: yaml, table) (default "table")                                                                                                                                                                                                            | | ||||
| | `--read-only`             | If set, the MCP server will run in read-only mode, meaning it will not allow any write operations (create, update, delete) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without making changes.                                                          | | ||||
| | `--disable-destructive`   | If set, the MCP server will disable all destructive operations (delete, update, etc.) on the Kubernetes cluster. This is useful for debugging or inspecting the cluster without accidentally making changes. This option has no effect when `--read-only` is used.                            | | ||||
| | `--toolsets`              | Comma-separated list of toolsets to enable. Check the [🛠️ Tools and Functionalities](#tools-and-functionalities) section for more information.                                                                                                                                               | | ||||
| | `--disable-multi-cluster` | If set, the MCP server will disable multi-cluster support and will only use the current context from the kubeconfig file. This is useful if you want to restrict the MCP server to a single cluster.                                                                                          | | ||||
|  | ||||
| ## 🛠️ Tools and Functionalities <a id="tools-and-functionalities"></a> | ||||
|  | ||||
| The Kubernetes MCP server supports enabling or disabling specific groups of tools and functionalities (tools, resources, prompts, and so on) via the `--toolsets` command-line flag or `toolsets` configuration option. | ||||
| This allows you to control which Kubernetes functionalities are available to your AI tools. | ||||
| Enabling only the toolsets you need can help reduce the context size and improve the LLM's tool selection accuracy. | ||||
|  | ||||
| ### Available Toolsets | ||||
|  | ||||
| The following sets of tools are available (all on by default): | ||||
|  | ||||
| <!-- AVAILABLE-TOOLSETS-START --> | ||||
|  | ||||
| | Toolset | Description                                                                         | | ||||
| |---------|-------------------------------------------------------------------------------------| | ||||
| | config  | View and manage the current local Kubernetes configuration (kubeconfig)             | | ||||
| | core    | Most common tools for Kubernetes management (Pods, Generic Resources, Events, etc.) | | ||||
| | helm    | Tools for managing Helm charts and releases                                         | | ||||
|  | ||||
| <!-- AVAILABLE-TOOLSETS-END --> | ||||
|  | ||||
| ### Tools | ||||
|  | ||||
| In case multi-cluster support is enabled (default) and you have access to multiple clusters, all applicable tools will include an additional `context` argument to specify the Kubernetes context (cluster) to use for that operation. | ||||
|  | ||||
| <!-- AVAILABLE-TOOLSETS-TOOLS-START --> | ||||
|  | ||||
| <details> | ||||
|  | ||||
| <summary>config</summary> | ||||
|  | ||||
| - **configuration_contexts_list** - List all available context names and associated server urls from the kubeconfig file | ||||
|  | ||||
| - **configuration_view** - Get the current Kubernetes configuration content as a kubeconfig YAML | ||||
|   - `minified` (`boolean`) - Return a minified version of the configuration. If set to true, keeps only the current-context and the relevant pieces of the configuration for that context. If set to false, all contexts, clusters, auth-infos, and users are returned in the configuration. (Optional, default true) | ||||
|  | ||||
| </details> | ||||
|  | ||||
| <details> | ||||
|  | ||||
| <summary>core</summary> | ||||
|  | ||||
| - **events_list** - List all the Kubernetes events in the current cluster from all namespaces | ||||
|   - `namespace` (`string`) - Optional Namespace to retrieve the events from. If not provided, will list events from all namespaces | ||||
|  | ||||
| - **namespaces_list** - List all the Kubernetes namespaces in the current cluster | ||||
|  | ||||
| - **projects_list** - List all the OpenShift projects in the current cluster | ||||
|  | ||||
| - **pods_list** - List all the Kubernetes pods in the current cluster from all namespaces | ||||
|   - `labelSelector` (`string`) - Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label | ||||
|  | ||||
| - **pods_list_in_namespace** - List all the Kubernetes pods in the specified namespace in the current cluster | ||||
|   - `labelSelector` (`string`) - Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label | ||||
|   - `namespace` (`string`) **(required)** - Namespace to list pods from | ||||
|  | ||||
| - **pods_get** - Get a Kubernetes Pod in the current or provided namespace with the provided name | ||||
|   - `name` (`string`) **(required)** - Name of the Pod | ||||
|   - `namespace` (`string`) - Namespace to get the Pod from | ||||
|  | ||||
| - **pods_delete** - Delete a Kubernetes Pod in the current or provided namespace with the provided name | ||||
|   - `name` (`string`) **(required)** - Name of the Pod to delete | ||||
|   - `namespace` (`string`) - Namespace to delete the Pod from | ||||
|  | ||||
| - **pods_top** - List the resource consumption (CPU and memory) as recorded by the Kubernetes Metrics Server for the specified Kubernetes Pods in the all namespaces, the provided namespace, or the current namespace | ||||
|   - `all_namespaces` (`boolean`) - If true, list the resource consumption for all Pods in all namespaces. If false, list the resource consumption for Pods in the provided namespace or the current namespace | ||||
|   - `label_selector` (`string`) - Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label (Optional, only applicable when name is not provided) | ||||
|   - `name` (`string`) - Name of the Pod to get the resource consumption from (Optional, all Pods in the namespace if not provided) | ||||
|   - `namespace` (`string`) - Namespace to get the Pods resource consumption from (Optional, current namespace if not provided and all_namespaces is false) | ||||
|  | ||||
| - **pods_exec** - Execute a command in a Kubernetes Pod in the current or provided namespace with the provided name and command | ||||
|   - `command` (`array`) **(required)** - Command to execute in the Pod container. The first item is the command to be run, and the rest are the arguments to that command. Example: ["ls", "-l", "/tmp"] | ||||
|   - `container` (`string`) - Name of the Pod container where the command will be executed (Optional) | ||||
|   - `name` (`string`) **(required)** - Name of the Pod where the command will be executed | ||||
|   - `namespace` (`string`) - Namespace of the Pod where the command will be executed | ||||
|  | ||||
| - **pods_log** - Get the logs of a Kubernetes Pod in the current or provided namespace with the provided name | ||||
|   - `container` (`string`) - Name of the Pod container to get the logs from (Optional) | ||||
|   - `name` (`string`) **(required)** - Name of the Pod to get the logs from | ||||
|   - `namespace` (`string`) - Namespace to get the Pod logs from | ||||
|   - `previous` (`boolean`) - Return previous terminated container logs (Optional) | ||||
|   - `tail` (`integer`) - Number of lines to retrieve from the end of the logs (Optional, default: 100) | ||||
|  | ||||
| - **pods_run** - Run a Kubernetes Pod in the current or provided namespace with the provided container image and optional name | ||||
|   - `image` (`string`) **(required)** - Container Image to run in the Pod | ||||
|   - `name` (`string`) - Name of the Pod (Optional, random name if not provided) | ||||
|   - `namespace` (`string`) - Namespace to run the Pod in | ||||
|   - `port` (`number`) - TCP/IP port to expose from the Pod container (Optional, no port exposed if not provided) | ||||
|  | ||||
| - **resources_list** - List Kubernetes resources and objects in the current cluster by providing their apiVersion and kind and optionally the namespace and label selector | ||||
| (common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route) | ||||
|   - `apiVersion` (`string`) **(required)** - apiVersion of the resources (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1) | ||||
|   - `kind` (`string`) **(required)** - kind of the resources (examples of valid kind are: Pod, Service, Deployment, Ingress) | ||||
|   - `labelSelector` (`string`) - Optional Kubernetes label selector (e.g. 'app=myapp,env=prod' or 'app in (myapp,yourapp)'), use this option when you want to filter the pods by label | ||||
|   - `namespace` (`string`) - Optional Namespace to retrieve the namespaced resources from (ignored in case of cluster scoped resources). If not provided, will list resources from all namespaces | ||||
|  | ||||
| - **resources_get** - Get a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name | ||||
| (common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route) | ||||
|   - `apiVersion` (`string`) **(required)** - apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1) | ||||
|   - `kind` (`string`) **(required)** - kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress) | ||||
|   - `name` (`string`) **(required)** - Name of the resource | ||||
|   - `namespace` (`string`) - Optional Namespace to retrieve the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will get resource from configured namespace | ||||
|  | ||||
| - **resources_create_or_update** - Create or update a Kubernetes resource in the current cluster by providing a YAML or JSON representation of the resource | ||||
| (common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route) | ||||
|   - `resource` (`string`) **(required)** - A JSON or YAML containing a representation of the Kubernetes resource. Should include top-level fields such as apiVersion,kind,metadata, and spec | ||||
|  | ||||
| - **resources_delete** - Delete a Kubernetes resource in the current cluster by providing its apiVersion, kind, optionally the namespace, and its name | ||||
| (common apiVersion and kind include: v1 Pod, v1 Service, v1 Node, apps/v1 Deployment, networking.k8s.io/v1 Ingress, route.openshift.io/v1 Route) | ||||
|   - `apiVersion` (`string`) **(required)** - apiVersion of the resource (examples of valid apiVersion are: v1, apps/v1, networking.k8s.io/v1) | ||||
|   - `kind` (`string`) **(required)** - kind of the resource (examples of valid kind are: Pod, Service, Deployment, Ingress) | ||||
|   - `name` (`string`) **(required)** - Name of the resource | ||||
|   - `namespace` (`string`) - Optional Namespace to delete the namespaced resource from (ignored in case of cluster scoped resources). If not provided, will delete resource from configured namespace | ||||
|  | ||||
| </details> | ||||
|  | ||||
| <details> | ||||
|  | ||||
| <summary>helm</summary> | ||||
|  | ||||
| - **helm_install** - Install a Helm chart in the current or provided namespace | ||||
|   - `chart` (`string`) **(required)** - Chart reference to install (for example: stable/grafana, oci://ghcr.io/nginxinc/charts/nginx-ingress) | ||||
|   - `name` (`string`) - Name of the Helm release (Optional, random name if not provided) | ||||
|   - `namespace` (`string`) - Namespace to install the Helm chart in (Optional, current namespace if not provided) | ||||
|   - `values` (`object`) - Values to pass to the Helm chart (Optional) | ||||
|  | ||||
| - **helm_list** - List all the Helm releases in the current or provided namespace (or in all namespaces if specified) | ||||
|   - `all_namespaces` (`boolean`) - If true, lists all Helm releases in all namespaces ignoring the namespace argument (Optional) | ||||
|   - `namespace` (`string`) - Namespace to list Helm releases from (Optional, all namespaces if not provided) | ||||
|  | ||||
| - **helm_uninstall** - Uninstall a Helm release in the current or provided namespace | ||||
|   - `name` (`string`) **(required)** - Name of the Helm release to uninstall | ||||
|   - `namespace` (`string`) - Namespace to uninstall the Helm release from (Optional, current namespace if not provided) | ||||
|  | ||||
| </details> | ||||
|  | ||||
|  | ||||
| <p align="center"> | ||||
|   <a href="#features">Features</a> | ||||
| </p> | ||||
| <!-- AVAILABLE-TOOLSETS-TOOLS-END --> | ||||
|  | ||||
| ## ✨ Features <a id="features" /> | ||||
| ## 🧑💻 Development <a id="development"></a> | ||||
|  | ||||
| ## Notes | ||||
| ### Running with mcp-inspector | ||||
|  | ||||
| Available MCP server implementation libraries: | ||||
| Compile the project and run the Kubernetes MCP server with [mcp-inspector](https://modelcontextprotocol.io/docs/tools/inspector) to inspect the MCP server. | ||||
|  | ||||
| - https://github.com/mark3labs/mcp-go | ||||
| - https://github.com/metoro-io/mcp-golang | ||||
| - https://github.com/llmcontext/gomcp | ||||
|  | ||||
|  | ||||
| ## MCP Resources | ||||
|  | ||||
| - https://github.com/MCP-Mirror/MCP-Mirror | ||||
| ```shell | ||||
| # Compile the project | ||||
| make build | ||||
| # Run the Kubernetes MCP server with mcp-inspector | ||||
| npx @modelcontextprotocol/inspector@latest $(pwd)/kubernetes-mcp-server | ||||
| ``` | ||||
|   | ||||
							
								
								
									
										448
									
								
								build/keycloak.mk
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										448
									
								
								build/keycloak.mk
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,448 @@ | ||||
# Keycloak IdP for development and testing
#
# Targets to install, configure, and inspect a Keycloak instance used as an
# OIDC identity provider for local development.
# NOTE: the admin/admin credentials below are for local development only.

# Use ?= (conditional assignment) so these can be overridden from the
# environment or the make command line, matching the convention already used
# by build/kind.mk (KIND_CLUSTER_NAME ?= ..., CONTAINER_ENGINE ?= ...).
KEYCLOAK_NAMESPACE ?= keycloak
KEYCLOAK_ADMIN_USER ?= admin
KEYCLOAK_ADMIN_PASSWORD ?= admin
|  | ||||
.PHONY: keycloak-install
# Install Keycloak (dev mode) into the cluster, make the Kubernetes API
# server trust its TLS certificate, wait for readiness, then configure the
# realm. Steps: apply deployment + ingress, extract the cert-manager CA,
# restart kube-apiserver so it picks up that CA, poll until Keycloak serves
# HTTP 200, then delegate realm setup to keycloak-setup-realm.
keycloak-install:
	@echo "Installing Keycloak (dev mode using official image)..."
	@kubectl apply -f dev/config/keycloak/deployment.yaml
	@echo "Applying Keycloak ingress (cert-manager will create TLS certificate)..."
	@kubectl apply -f dev/config/keycloak/ingress.yaml
	@echo "Extracting cert-manager CA certificate..."
	@mkdir -p _output/cert-manager-ca
# The extracted CA is bind-mounted into the control-plane container
# (see the kind cluster config), so writing it here makes it visible to the
# API server.
	@kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | base64 -d > _output/cert-manager-ca/ca.crt
	@echo "✅ cert-manager CA certificate extracted to _output/cert-manager-ca/ca.crt (bind-mounted to API server)"
	@echo "Restarting Kubernetes API server to pick up new CA..."
# Try docker first, fall back to podman; killing kube-apiserver makes the
# kubelet restart it with the updated mounted CA file.
	@docker exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver || \
		podman exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver
	@echo "Waiting for API server to restart..."
	@sleep 5
	@echo "Waiting for API server to be ready..."
# Poll /healthz for up to ~60s (30 x 2s).
# NOTE(review): the loop does not fail the target when the API server never
# becomes ready — it falls through silently; confirm this is intentional.
	@for i in $$(seq 1 30); do \
		if kubectl get --raw /healthz >/dev/null 2>&1; then \
			echo "✅ Kubernetes API server updated with cert-manager CA"; \
			break; \
		fi; \
		sleep 2; \
	done
	@echo "Waiting for Keycloak to be ready..."
# Best-effort pod readiness wait (|| true): readiness alone is not trusted,
# the HTTP poll below is the real gate.
	@kubectl wait --for=condition=ready pod -l app=keycloak -n $(KEYCLOAK_NAMESPACE) --timeout=120s || true
	@echo "Waiting for Keycloak HTTP endpoint to be available..."
# Poll the master realm endpoint until it returns HTTP 200 (up to ~90s).
	@for i in $$(seq 1 30); do \
		STATUS=$$(curl -sk -o /dev/null -w "%{http_code}" https://keycloak.127-0-0-1.sslip.io:8443/realms/master 2>/dev/null || echo "000"); \
		if [ "$$STATUS" = "200" ]; then \
			echo "✅ Keycloak HTTP endpoint ready"; \
			break; \
		fi; \
		echo "  Attempt $$i/30: Waiting for Keycloak (status: $$STATUS)..."; \
		sleep 3; \
	done
	@echo ""
	@echo "Setting up OpenShift realm..."
	@$(MAKE) -s keycloak-setup-realm
	@echo ""
	@echo "✅ Keycloak installed and configured!"
	@echo "Access at: https://keycloak.127-0-0-1.sslip.io:8443"
|  | ||||
.PHONY: keycloak-uninstall
# Remove Keycloak from the cluster. Deletes BOTH manifests that
# keycloak-install applied (the original only deleted the deployment and
# left the ingress behind). Errors are ignored so the target is idempotent
# and safe to run when Keycloak is not installed.
keycloak-uninstall:
	@echo "Uninstalling Keycloak..."
	@kubectl delete -f dev/config/keycloak/ingress.yaml 2>/dev/null || true
	@kubectl delete -f dev/config/keycloak/deployment.yaml 2>/dev/null || true
	@echo "✅ Keycloak uninstalled"
|  | ||||
.PHONY: keycloak-status
# Read-only report: prints admin-console credentials and the OIDC endpoints
# of the "openshift" realm when the keycloak Service exists, otherwise a
# hint to run keycloak-install. Performs no cluster mutations.
keycloak-status: ## Show Keycloak status and connection info
	@if kubectl get svc -n $(KEYCLOAK_NAMESPACE) keycloak >/dev/null 2>&1; then \
		echo "========================================"; \
		echo "Keycloak Status"; \
		echo "========================================"; \
		echo ""; \
		echo "Status: Installed"; \
		echo ""; \
		echo "Admin Console:"; \
		echo "  URL: https://keycloak.127-0-0-1.sslip.io:8443"; \
		echo "  Username: $(KEYCLOAK_ADMIN_USER)"; \
		echo "  Password: $(KEYCLOAK_ADMIN_PASSWORD)"; \
		echo ""; \
		echo "OIDC Endpoints (openshift realm):"; \
		echo "  Discovery: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/.well-known/openid-configuration"; \
		echo "  Token:     https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token"; \
		echo "  Authorize: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/auth"; \
		echo "  UserInfo:  https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/userinfo"; \
		echo "  JWKS:      https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/certs"; \
		echo ""; \
		echo "========================================"; \
	else \
		echo "Keycloak is not installed. Run: make keycloak-install"; \
	fi
|  | ||||
.PHONY: keycloak-logs
# Stream (follow) the most recent 100 log lines from the Keycloak pod(s),
# selected by the app=keycloak label.
keycloak-logs: ## Tail Keycloak logs
	@kubectl logs -f --tail=100 -n $(KEYCLOAK_NAMESPACE) -l app=keycloak
|  | ||||
.PHONY: keycloak-setup-realm
# Configure the "openshift" realm in Keycloak through its Admin REST API
# (curl + jq). Everything after the first few echoes runs as ONE
# backslash-continued shell command so that the admin TOKEN obtained at the
# start stays in scope for every subsequent request.
#
# Phases:
#   1. Obtain an admin access token (password grant via the admin-cli client).
#   2. Create the "openshift" realm; enable user and admin event logging.
#   3. Create client scopes:
#        mcp:openshift — audience mapper for "openshift"
#        groups        — group-membership mapper (claim "groups")
#        mcp-server    — audience mapper for "mcp-server"
#   4. Create clients:
#        openshift  — confidential service client
#        mcp-client — public client for browser-based auth
#        mcp-server — confidential; updated afterwards to enable standard
#                     token exchange; its client secret is retrieved for the
#                     generated configuration
#      each client gets a preferred_username protocol mapper.
#   5. Create the mcp/mcp test user and apply its RBAC manifest.
#   6. Print a configuration summary and write _output/config.toml.
#
# Idempotency: HTTP 201 (created) and 409 (already exists) are both treated
# as success; any other status aborts with exit 1 (the realm-update and
# token-exchange PUTs expect 204 and only warn on failure).
keycloak-setup-realm:
	@echo "========================================="
	@echo "Setting up OpenShift Realm for Token Exchange"
	@echo "========================================="
	@echo "Using Keycloak at https://keycloak.127-0-0-1.sslip.io:8443"
	@echo ""
	@echo "Getting admin access token..."
# Single continued command from here to the end of the recipe — do not split.
	@RESPONSE=$$(curl -sk -X POST "https://keycloak.127-0-0-1.sslip.io:8443/realms/master/protocol/openid-connect/token" \
		-H "Content-Type: application/x-www-form-urlencoded" \
		-d "username=$(KEYCLOAK_ADMIN_USER)" \
		-d "password=$(KEYCLOAK_ADMIN_PASSWORD)" \
		-d "grant_type=password" \
		-d "client_id=admin-cli"); \
	TOKEN=$$(echo "$$RESPONSE" | jq -r '.access_token // empty' 2>/dev/null); \
	if [ -z "$$TOKEN" ] || [ "$$TOKEN" = "null" ]; then \
		echo "❌ Failed to get access token"; \
		echo "Response was: $$RESPONSE" | head -c 200; \
		echo ""; \
		echo "Check if:"; \
		echo "  - Keycloak is running (make keycloak-install)"; \
		echo "  - Keycloak is accessible at https://keycloak.127-0-0-1.sslip.io:8443"; \
		echo "  - Admin credentials are correct: $(KEYCLOAK_ADMIN_USER)/$(KEYCLOAK_ADMIN_PASSWORD)"; \
		exit 1; \
	fi; \
	echo "✅ Successfully obtained access token"; \
	echo ""; \
	echo "Creating OpenShift realm..."; \
	REALM_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"realm":"openshift","enabled":true}'); \
	REALM_CODE=$$(echo "$$REALM_RESPONSE" | tail -c 4); \
	if [ "$$REALM_CODE" = "201" ] || [ "$$REALM_CODE" = "409" ]; then \
		if [ "$$REALM_CODE" = "201" ]; then echo "✅ OpenShift realm created"; \
		else echo "✅ OpenShift realm already exists"; fi; \
	else \
		echo "❌ Failed to create OpenShift realm (HTTP $$REALM_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Configuring realm events..."; \
	EVENT_CONFIG_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"realm":"openshift","enabled":true,"eventsEnabled":true,"eventsListeners":["jboss-logging"],"adminEventsEnabled":true,"adminEventsDetailsEnabled":true}'); \
	EVENT_CONFIG_CODE=$$(echo "$$EVENT_CONFIG_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$EVENT_CONFIG_CODE" = "204" ]; then \
		echo "✅ User and admin event logging enabled"; \
	else \
		echo "⚠️  Could not configure event logging (HTTP $$EVENT_CONFIG_CODE)"; \
	fi; \
	echo ""; \
	echo "Creating mcp:openshift client scope..."; \
	SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"mcp:openshift","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	SCOPE_CODE=$$(echo "$$SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$SCOPE_CODE" = "201" ] || [ "$$SCOPE_CODE" = "409" ]; then \
		if [ "$$SCOPE_CODE" = "201" ]; then echo "✅ mcp:openshift client scope created"; \
		else echo "✅ mcp:openshift client scope already exists"; fi; \
	else \
		echo "❌ Failed to create mcp:openshift scope (HTTP $$SCOPE_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Adding audience mapper to mcp:openshift scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp:openshift") | .id // empty' 2>/dev/null); \
	if [ -z "$$SCOPE_ID" ]; then \
		echo "❌ Failed to find mcp:openshift scope"; \
		exit 1; \
	fi; \
	MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$SCOPE_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"openshift-audience","protocol":"openid-connect","protocolMapper":"oidc-audience-mapper","config":{"included.client.audience":"openshift","id.token.claim":"true","access.token.claim":"true"}}'); \
	MAPPER_CODE=$$(echo "$$MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MAPPER_CODE" = "201" ] || [ "$$MAPPER_CODE" = "409" ]; then \
		if [ "$$MAPPER_CODE" = "201" ]; then echo "✅ Audience mapper added"; \
		else echo "✅ Audience mapper already exists"; fi; \
	else \
		echo "❌ Failed to create audience mapper (HTTP $$MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating groups client scope..."; \
	GROUPS_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"groups","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	GROUPS_SCOPE_CODE=$$(echo "$$GROUPS_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$GROUPS_SCOPE_CODE" = "201" ] || [ "$$GROUPS_SCOPE_CODE" = "409" ]; then \
		if [ "$$GROUPS_SCOPE_CODE" = "201" ]; then echo "✅ groups client scope created"; \
		else echo "✅ groups client scope already exists"; fi; \
	else \
		echo "❌ Failed to create groups scope (HTTP $$GROUPS_SCOPE_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Adding group membership mapper to groups scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	GROUPS_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "groups") | .id // empty' 2>/dev/null); \
	if [ -z "$$GROUPS_SCOPE_ID" ]; then \
		echo "❌ Failed to find groups scope"; \
		exit 1; \
	fi; \
	GROUPS_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$GROUPS_SCOPE_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"groups","protocol":"openid-connect","protocolMapper":"oidc-group-membership-mapper","config":{"claim.name":"groups","full.path":"false","id.token.claim":"true","access.token.claim":"true","userinfo.token.claim":"true"}}'); \
	GROUPS_MAPPER_CODE=$$(echo "$$GROUPS_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$GROUPS_MAPPER_CODE" = "201" ] || [ "$$GROUPS_MAPPER_CODE" = "409" ]; then \
		if [ "$$GROUPS_MAPPER_CODE" = "201" ]; then echo "✅ Group membership mapper added"; \
		else echo "✅ Group membership mapper already exists"; fi; \
	else \
		echo "❌ Failed to create group mapper (HTTP $$GROUPS_MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-server client scope..."; \
	MCP_SERVER_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"mcp-server","protocol":"openid-connect","attributes":{"display.on.consent.screen":"false","include.in.token.scope":"true"}}'); \
	MCP_SERVER_SCOPE_CODE=$$(echo "$$MCP_SERVER_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ] || [ "$$MCP_SERVER_SCOPE_CODE" = "409" ]; then \
		if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ]; then echo "✅ mcp-server client scope created"; \
		else echo "✅ mcp-server client scope already exists"; fi; \
	else \
		echo "❌ Failed to create mcp-server scope (HTTP $$MCP_SERVER_SCOPE_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Adding audience mapper to mcp-server scope..."; \
	SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	MCP_SERVER_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp-server") | .id // empty' 2>/dev/null); \
	if [ -z "$$MCP_SERVER_SCOPE_ID" ]; then \
		echo "❌ Failed to find mcp-server scope"; \
		exit 1; \
	fi; \
	MCP_SERVER_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$MCP_SERVER_SCOPE_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"mcp-server-audience","protocol":"openid-connect","protocolMapper":"oidc-audience-mapper","config":{"included.client.audience":"mcp-server","id.token.claim":"true","access.token.claim":"true"}}'); \
	MCP_SERVER_MAPPER_CODE=$$(echo "$$MCP_SERVER_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ] || [ "$$MCP_SERVER_MAPPER_CODE" = "409" ]; then \
		if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ]; then echo "✅ mcp-server audience mapper added"; \
		else echo "✅ mcp-server audience mapper already exists"; fi; \
	else \
		echo "❌ Failed to create mcp-server audience mapper (HTTP $$MCP_SERVER_MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating openshift service client..."; \
	OPENSHIFT_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"clientId":"openshift","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups"],"optionalClientScopes":[]}'); \
	OPENSHIFT_CLIENT_CODE=$$(echo "$$OPENSHIFT_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ] || [ "$$OPENSHIFT_CLIENT_CODE" = "409" ]; then \
		if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ]; then echo "✅ openshift client created"; \
		else echo "✅ openshift client already exists"; fi; \
	else \
		echo "❌ Failed to create openshift client (HTTP $$OPENSHIFT_CLIENT_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Adding username mapper to openshift client..."; \
	OPENSHIFT_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	OPENSHIFT_CLIENT_ID=$$(echo "$$OPENSHIFT_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "openshift") | .id // empty' 2>/dev/null); \
	OPENSHIFT_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$OPENSHIFT_CLIENT_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{	"name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	OPENSHIFT_USERNAME_MAPPER_CODE=$$(echo "$$OPENSHIFT_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ] || [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "409" ]; then \
		if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to openshift client"; \
		else echo "✅ Username mapper already exists on openshift client"; fi; \
	else \
		echo "❌ Failed to create username mapper (HTTP $$OPENSHIFT_USERNAME_MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-client public client..."; \
	MCP_PUBLIC_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"clientId":"mcp-client","enabled":true,"publicClient":true,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":false,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email"],"optionalClientScopes":["mcp-server"]}'); \
	MCP_PUBLIC_CLIENT_CODE=$$(echo "$$MCP_PUBLIC_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ] || [ "$$MCP_PUBLIC_CLIENT_CODE" = "409" ]; then \
		if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ]; then echo "✅ mcp-client public client created"; \
		else echo "✅ mcp-client public client already exists"; fi; \
	else \
		echo "❌ Failed to create mcp-client public client (HTTP $$MCP_PUBLIC_CLIENT_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Adding username mapper to mcp-client..."; \
	MCP_PUBLIC_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	MCP_PUBLIC_CLIENT_ID=$$(echo "$$MCP_PUBLIC_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-client") | .id // empty' 2>/dev/null); \
	MCP_PUBLIC_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_PUBLIC_CLIENT_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	MCP_PUBLIC_USERNAME_MAPPER_CODE=$$(echo "$$MCP_PUBLIC_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "409" ]; then \
		if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-client"; \
		else echo "✅ Username mapper already exists on mcp-client"; fi; \
	else \
		echo "❌ Failed to create username mapper (HTTP $$MCP_PUBLIC_USERNAME_MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating mcp-server client with token exchange..."; \
	MCP_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"clientId":"mcp-server","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups","mcp-server"],"optionalClientScopes":["mcp:openshift"],"attributes":{"oauth2.device.authorization.grant.enabled":"false","oidc.ciba.grant.enabled":"false","backchannel.logout.session.required":"true","backchannel.logout.revoke.offline.tokens":"false"}}'); \
	MCP_CLIENT_CODE=$$(echo "$$MCP_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_CLIENT_CODE" = "201" ] || [ "$$MCP_CLIENT_CODE" = "409" ]; then \
		if [ "$$MCP_CLIENT_CODE" = "201" ]; then echo "✅ mcp-server client created"; \
		else echo "✅ mcp-server client already exists"; fi; \
	else \
		echo "❌ Failed to create mcp-server client (HTTP $$MCP_CLIENT_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Enabling standard token exchange for mcp-server..."; \
	CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	MCP_CLIENT_ID=$$(echo "$$CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-server") | .id // empty' 2>/dev/null); \
	if [ -z "$$MCP_CLIENT_ID" ]; then \
		echo "❌ Failed to find mcp-server client"; \
		exit 1; \
	fi; \
	UPDATE_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"clientId":"mcp-server","enabled":true,"publicClient":false,"standardFlowEnabled":true,"directAccessGrantsEnabled":true,"serviceAccountsEnabled":true,"authorizationServicesEnabled":false,"redirectUris":["*"],"defaultClientScopes":["profile","email","groups","mcp-server"],"optionalClientScopes":["mcp:openshift"],"attributes":{"oauth2.device.authorization.grant.enabled":"false","oidc.ciba.grant.enabled":"false","backchannel.logout.session.required":"true","backchannel.logout.revoke.offline.tokens":"false","standard.token.exchange.enabled":"true"}}'); \
	UPDATE_CLIENT_CODE=$$(echo "$$UPDATE_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$UPDATE_CLIENT_CODE" = "204" ]; then \
		echo "✅ Standard token exchange enabled for mcp-server client"; \
	else \
		echo "⚠️  Could not enable token exchange (HTTP $$UPDATE_CLIENT_CODE)"; \
	fi; \
	echo ""; \
	echo "Getting mcp-server client secret..."; \
	SECRET_RESPONSE=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/client-secret" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Accept: application/json"); \
	CLIENT_SECRET=$$(echo "$$SECRET_RESPONSE" | jq -r '.value // empty' 2>/dev/null); \
	if [ -z "$$CLIENT_SECRET" ]; then \
		echo "❌ Failed to get client secret"; \
	else \
		echo "✅ Client secret retrieved"; \
	fi; \
	echo ""; \
	echo "Adding username mapper to mcp-server client..."; \
	MCP_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/protocol-mappers/models" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"name":"username","protocol":"openid-connect","protocolMapper":"oidc-usermodel-property-mapper","config":{"userinfo.token.claim":"true","user.attribute":"username","id.token.claim":"true","access.token.claim":"true","claim.name":"preferred_username","jsonType.label":"String"}}'); \
	MCP_USERNAME_MAPPER_CODE=$$(echo "$$MCP_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \
	if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_USERNAME_MAPPER_CODE" = "409" ]; then \
		if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-server client"; \
		else echo "✅ Username mapper already exists on mcp-server client"; fi; \
	else \
		echo "❌ Failed to create username mapper (HTTP $$MCP_USERNAME_MAPPER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Creating test user mcp/mcp..."; \
	USER_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/users" \
		-H "Authorization: Bearer $$TOKEN" \
		-H "Content-Type: application/json" \
		-d '{"username":"mcp","email":"mcp@example.com","firstName":"MCP","lastName":"User","enabled":true,"emailVerified":true,"credentials":[{"type":"password","value":"mcp","temporary":false}]}'); \
	USER_CODE=$$(echo "$$USER_RESPONSE" | tail -c 4); \
	if [ "$$USER_CODE" = "201" ] || [ "$$USER_CODE" = "409" ]; then \
		if [ "$$USER_CODE" = "201" ]; then echo "✅ mcp user created"; \
		else echo "✅ mcp user already exists"; fi; \
	else \
		echo "❌ Failed to create mcp user (HTTP $$USER_CODE)"; \
		exit 1; \
	fi; \
	echo ""; \
	echo "Setting up RBAC for mcp user..."; \
	kubectl apply -f dev/config/keycloak/rbac.yaml; \
	echo "✅ RBAC binding created for mcp user"; \
	echo ""; \
	echo "🎉 OpenShift realm setup complete!"; \
	echo ""; \
	echo "========================================"; \
	echo "Configuration Summary"; \
	echo "========================================"; \
	echo "Realm: openshift"; \
	echo "Authorization URL: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo "Issuer URL (for config.toml): https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo ""; \
	echo "Test User:"; \
	echo "  Username: mcp"; \
	echo "  Password: mcp"; \
	echo "  Email: mcp@example.com"; \
	echo "  RBAC: cluster-admin (full cluster access)"; \
	echo ""; \
	echo "Clients:"; \
	echo "  mcp-client (public, for browser-based auth)"; \
	echo "    Client ID: mcp-client"; \
	echo "    Optional Scopes: mcp-server"; \
	echo "  mcp-server (confidential, token exchange enabled)"; \
	echo "    Client ID: mcp-server"; \
	echo "    Client Secret: $$CLIENT_SECRET"; \
	echo "  openshift (service account)"; \
	echo "    Client ID: openshift"; \
	echo ""; \
	echo "Client Scopes:"; \
	echo "  mcp-server (default) - Audience: mcp-server"; \
	echo "  mcp:openshift (optional) - Audience: openshift"; \
	echo "  groups (default) - Group membership mapper"; \
	echo ""; \
	echo "TOML Configuration (config.toml):"; \
	echo "  require_oauth = true"; \
	echo "  oauth_audience = \"mcp-server\""; \
	echo "  oauth_scopes = [\"openid\", \"mcp-server\"]"; \
	echo "  validate_token = false"; \
	echo "  authorization_url = \"https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift\""; \
	echo "  sts_client_id = \"mcp-server\""; \
	echo "  sts_client_secret = \"$$CLIENT_SECRET\""; \
	echo "  sts_audience = \"openshift\""; \
	echo "  sts_scopes = [\"mcp:openshift\"]"; \
	echo "  certificate_authority = \"_output/cert-manager-ca/ca.crt\""; \
	echo "========================================"; \
	echo ""; \
	echo "Note: The Kubernetes API server is configured with:"; \
	echo "  --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \
	echo ""; \
	echo "Important: The cert-manager CA certificate was extracted to:"; \
	echo "  _output/cert-manager-ca/ca.crt"; \
	echo ""; \
	echo "Writing configuration to _output/config.toml..."; \
	mkdir -p _output; \
	printf '%s\n' \
		'require_oauth = true' \
		'oauth_audience = "mcp-server"' \
		'oauth_scopes = ["openid", "mcp-server"]' \
		'validate_token = false' \
		'authorization_url = "https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"' \
		'sts_client_id = "mcp-server"' \
		"sts_client_secret = \"$$CLIENT_SECRET\"" \
		'sts_audience = "openshift"' \
		'sts_scopes = ["mcp:openshift"]' \
		'certificate_authority = "_output/cert-manager-ca/ca.crt"' \
		> _output/config.toml; \
	echo "✅ Configuration written to _output/config.toml"
							
								
								
									
										61
									
								
								build/kind.mk
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										61
									
								
								build/kind.mk
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,61 @@ | ||||
# Kind cluster management
#
# Targets for creating and managing the local kind (Kubernetes-in-Docker)
# development cluster.

# Name of the kind cluster; override with KIND_CLUSTER_NAME=<name>.
KIND_CLUSTER_NAME ?= kubernetes-mcp-server

# Detect container engine (docker or podman)
# Prefers docker when both are installed; override by setting
# CONTAINER_ENGINE explicitly.
CONTAINER_ENGINE ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null)
|  | ||||
.PHONY: kind-create-certs
# Ensure a CA certificate file exists for the kind bind mount; generate a
# placeholder via the hack script when none is present yet.
kind-create-certs:
	@if [ -f _output/cert-manager-ca/ca.crt ]; then \
		echo "✅ Placeholder CA already exists"; \
	else \
		echo "Creating placeholder CA certificate for bind mount..."; \
		./hack/generate-placeholder-ca.sh; \
	fi
|  | ||||
| .PHONY: kind-create-cluster | ||||
| kind-create-cluster: kind kind-create-certs | ||||
| 	@# Set KIND provider for podman on Linux | ||||
| 	@if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \ | ||||
| 		export KIND_EXPERIMENTAL_PROVIDER=podman; \ | ||||
| 	fi; \ | ||||
| 	if $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \ | ||||
| 		echo "Kind cluster '$(KIND_CLUSTER_NAME)' already exists, skipping creation"; \ | ||||
| 	else \ | ||||
| 		echo "Creating Kind cluster '$(KIND_CLUSTER_NAME)'..."; \ | ||||
| 		$(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config dev/config/kind/cluster.yaml; \ | ||||
| 		echo "Adding ingress-ready label to control-plane node..."; \ | ||||
| 		kubectl label node $(KIND_CLUSTER_NAME)-control-plane ingress-ready=true --overwrite; \ | ||||
| 		echo "Installing nginx ingress controller..."; \ | ||||
| 		kubectl apply -f dev/config/ingress/nginx-ingress.yaml; \ | ||||
| 		echo "Waiting for ingress controller to be ready..."; \ | ||||
| 		kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s; \ | ||||
| 		echo "✅ Ingress controller ready"; \ | ||||
| 		echo "Installing cert-manager..."; \ | ||||
| 		kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml; \ | ||||
| 		echo "Waiting for cert-manager to be ready..."; \ | ||||
| 		kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager --timeout=120s; \ | ||||
| 		kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-cainjector --timeout=120s; \ | ||||
| 		kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-webhook --timeout=120s; \ | ||||
| 		echo "✅ cert-manager ready"; \ | ||||
| 		echo "Creating cert-manager ClusterIssuer..."; \ | ||||
| 		sleep 5; \ | ||||
| 		kubectl apply -f dev/config/cert-manager/selfsigned-issuer.yaml; \ | ||||
| 		echo "✅ ClusterIssuer created"; \ | ||||
| 		echo "Adding /etc/hosts entry for Keycloak in control plane..."; \ | ||||
| 		if command -v docker >/dev/null 2>&1 && docker ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \ | ||||
| 			docker exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \ | ||||
| 		elif command -v podman >/dev/null 2>&1 && podman ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \ | ||||
| 			podman exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \ | ||||
| 		fi; \ | ||||
| 		echo "✅ /etc/hosts entry added"; \ | ||||
| 	fi | ||||
|  | ||||
| .PHONY: kind-delete-cluster | ||||
| kind-delete-cluster: kind | ||||
| 	@# Set KIND provider for podman on Linux | ||||
| 	@if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \ | ||||
| 		export KIND_EXPERIMENTAL_PROVIDER=podman; \ | ||||
| 	fi; \ | ||||
| 	$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) | ||||
							
								
								
									
										20
									
								
								build/tools.mk
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								build/tools.mk
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| # Tools | ||||
|  | ||||
| # Platform detection | ||||
| OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') | ||||
| ARCH := $(shell uname -m | tr '[:upper:]' '[:lower:]') | ||||
| ifeq ($(ARCH),x86_64) | ||||
|     ARCH = amd64 | ||||
| endif | ||||
| ifeq ($(ARCH),aarch64) | ||||
|     ARCH = arm64 | ||||
| endif | ||||
|  | ||||
| KIND = _output/bin/kind | ||||
| KIND_VERSION = v0.30.0 | ||||
| $(KIND): | ||||
| 	@mkdir -p _output/bin | ||||
| 	GOBIN=$(PWD)/_output/bin go install sigs.k8s.io/kind@$(KIND_VERSION) | ||||
|  | ||||
| .PHONY: kind | ||||
| kind: $(KIND) ## Download kind locally if necessary | ||||
| @@ -1,7 +1,20 @@ | ||||
| package main | ||||
|  | ||||
| import "github.com/manusa/kubernetes-mcp-server/pkg/kubernetes-mcp-server/cmd" | ||||
| import ( | ||||
| 	"os" | ||||
|  | ||||
| 	"github.com/spf13/pflag" | ||||
| 	"k8s.io/cli-runtime/pkg/genericiooptions" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes-mcp-server/cmd" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	cmd.Execute() | ||||
| 	flags := pflag.NewFlagSet("kubernetes-mcp-server", pflag.ExitOnError) | ||||
| 	pflag.CommandLine = flags | ||||
|  | ||||
| 	root := cmd.NewMCPServer(genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr}) | ||||
| 	if err := root.Execute(); err != nil { | ||||
| 		os.Exit(1) | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -4,8 +4,7 @@ import ( | ||||
| 	"os" | ||||
| ) | ||||
|  | ||||
| //goland:noinspection GoTestName | ||||
| func ExampleVersion() { | ||||
| func Example_version() { | ||||
| 	oldArgs := os.Args | ||||
| 	defer func() { os.Args = oldArgs }() | ||||
| 	os.Args = []string{"kubernetes-mcp-server", "--version"} | ||||
|   | ||||
							
								
								
									
										31
									
								
								dev/config/cert-manager/selfsigned-issuer.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								dev/config/cert-manager/selfsigned-issuer.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| apiVersion: cert-manager.io/v1 | ||||
| kind: ClusterIssuer | ||||
| metadata: | ||||
|   name: selfsigned-issuer | ||||
| spec: | ||||
|   selfSigned: {} | ||||
| --- | ||||
| apiVersion: cert-manager.io/v1 | ||||
| kind: Certificate | ||||
| metadata: | ||||
|   name: selfsigned-ca | ||||
|   namespace: cert-manager | ||||
| spec: | ||||
|   isCA: true | ||||
|   commonName: selfsigned-ca | ||||
|   secretName: selfsigned-ca-secret | ||||
|   privateKey: | ||||
|     algorithm: ECDSA | ||||
|     size: 256 | ||||
|   issuerRef: | ||||
|     name: selfsigned-issuer | ||||
|     kind: ClusterIssuer | ||||
|     group: cert-manager.io | ||||
| --- | ||||
| apiVersion: cert-manager.io/v1 | ||||
| kind: ClusterIssuer | ||||
| metadata: | ||||
|   name: selfsigned-ca-issuer | ||||
| spec: | ||||
|   ca: | ||||
|     secretName: selfsigned-ca-secret | ||||
							
								
								
									
										386
									
								
								dev/config/ingress/nginx-ingress.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										386
									
								
								dev/config/ingress/nginx-ingress.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,386 @@ | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Namespace | ||||
| metadata: | ||||
|   name: ingress-nginx | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: ServiceAccount | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: ConfigMap | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| data: | ||||
|   allow-snippet-annotations: "true" | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRole | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|   name: ingress-nginx | ||||
| rules: | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - configmaps | ||||
|       - endpoints | ||||
|       - nodes | ||||
|       - pods | ||||
|       - secrets | ||||
|       - namespaces | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - nodes | ||||
|     verbs: | ||||
|       - get | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - services | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - events | ||||
|     verbs: | ||||
|       - create | ||||
|       - patch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses/status | ||||
|     verbs: | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingressclasses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - discovery.k8s.io | ||||
|     resources: | ||||
|       - endpointslices | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|       - get | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|   name: ingress-nginx | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: ingress-nginx | ||||
| subjects: | ||||
|   - kind: ServiceAccount | ||||
|     name: ingress-nginx | ||||
|     namespace: ingress-nginx | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: Role | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| rules: | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - namespaces | ||||
|     verbs: | ||||
|       - get | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - configmaps | ||||
|       - pods | ||||
|       - secrets | ||||
|       - endpoints | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - services | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingresses/status | ||||
|     verbs: | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - networking.k8s.io | ||||
|     resources: | ||||
|       - ingressclasses | ||||
|     verbs: | ||||
|       - get | ||||
|       - list | ||||
|       - watch | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     resourceNames: | ||||
|       - ingress-nginx-leader | ||||
|     verbs: | ||||
|       - get | ||||
|       - update | ||||
|   - apiGroups: | ||||
|       - coordination.k8s.io | ||||
|     resources: | ||||
|       - leases | ||||
|     verbs: | ||||
|       - create | ||||
|   - apiGroups: | ||||
|       - "" | ||||
|     resources: | ||||
|       - events | ||||
|     verbs: | ||||
|       - create | ||||
|       - patch | ||||
|   - apiGroups: | ||||
|       - discovery.k8s.io | ||||
|     resources: | ||||
|       - endpointslices | ||||
|     verbs: | ||||
|       - list | ||||
|       - watch | ||||
|       - get | ||||
| --- | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: RoleBinding | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx | ||||
|   namespace: ingress-nginx | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: Role | ||||
|   name: ingress-nginx | ||||
| subjects: | ||||
|   - kind: ServiceAccount | ||||
|     name: ingress-nginx | ||||
|     namespace: ingress-nginx | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| spec: | ||||
|   type: NodePort | ||||
|   ports: | ||||
|     - name: http | ||||
|       port: 80 | ||||
|       protocol: TCP | ||||
|       targetPort: http | ||||
|       appProtocol: http | ||||
|     - name: https | ||||
|       port: 443 | ||||
|       protocol: TCP | ||||
|       targetPort: https | ||||
|       appProtocol: https | ||||
|   selector: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: ingress-nginx-controller | ||||
|   namespace: ingress-nginx | ||||
| spec: | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app.kubernetes.io/name: ingress-nginx | ||||
|       app.kubernetes.io/instance: ingress-nginx | ||||
|       app.kubernetes.io/component: controller | ||||
|   replicas: 1 | ||||
|   revisionHistoryLimit: 10 | ||||
|   minReadySeconds: 0 | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         app.kubernetes.io/name: ingress-nginx | ||||
|         app.kubernetes.io/instance: ingress-nginx | ||||
|         app.kubernetes.io/component: controller | ||||
|     spec: | ||||
|       dnsPolicy: ClusterFirst | ||||
|       containers: | ||||
|         - name: controller | ||||
|           image: registry.k8s.io/ingress-nginx/controller:v1.11.1 | ||||
|           imagePullPolicy: IfNotPresent | ||||
|           lifecycle: | ||||
|             preStop: | ||||
|               exec: | ||||
|                 command: | ||||
|                   - /wait-shutdown | ||||
|           args: | ||||
|             - /nginx-ingress-controller | ||||
|             - --election-id=ingress-nginx-leader | ||||
|             - --controller-class=k8s.io/ingress-nginx | ||||
|             - --ingress-class=nginx | ||||
|             - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller | ||||
|             - --watch-ingress-without-class=true | ||||
|           securityContext: | ||||
|             runAsNonRoot: true | ||||
|             runAsUser: 101 | ||||
|             allowPrivilegeEscalation: false | ||||
|             seccompProfile: | ||||
|               type: RuntimeDefault | ||||
|             capabilities: | ||||
|               drop: | ||||
|                 - ALL | ||||
|               add: | ||||
|                 - NET_BIND_SERVICE | ||||
|           env: | ||||
|             - name: POD_NAME | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.name | ||||
|             - name: POD_NAMESPACE | ||||
|               valueFrom: | ||||
|                 fieldRef: | ||||
|                   fieldPath: metadata.namespace | ||||
|             - name: LD_PRELOAD | ||||
|               value: /usr/local/lib/libmimalloc.so | ||||
|           livenessProbe: | ||||
|             failureThreshold: 5 | ||||
|             httpGet: | ||||
|               path: /healthz | ||||
|               port: 10254 | ||||
|               scheme: HTTP | ||||
|             initialDelaySeconds: 10 | ||||
|             periodSeconds: 10 | ||||
|             successThreshold: 1 | ||||
|             timeoutSeconds: 1 | ||||
|           readinessProbe: | ||||
|             failureThreshold: 3 | ||||
|             httpGet: | ||||
|               path: /healthz | ||||
|               port: 10254 | ||||
|               scheme: HTTP | ||||
|             initialDelaySeconds: 10 | ||||
|             periodSeconds: 10 | ||||
|             successThreshold: 1 | ||||
|             timeoutSeconds: 1 | ||||
|           ports: | ||||
|             - name: http | ||||
|               containerPort: 80 | ||||
|               protocol: TCP | ||||
|               hostPort: 80 | ||||
|             - name: https | ||||
|               containerPort: 443 | ||||
|               protocol: TCP | ||||
|               hostPort: 443 | ||||
|             - name: https-alt | ||||
|               containerPort: 443 | ||||
|               protocol: TCP | ||||
|               hostPort: 8443 | ||||
|             - name: webhook | ||||
|               containerPort: 8443 | ||||
|               protocol: TCP | ||||
|           resources: | ||||
|             requests: | ||||
|               cpu: 100m | ||||
|               memory: 90Mi | ||||
|       nodeSelector: | ||||
|         ingress-ready: "true" | ||||
|         kubernetes.io/os: linux | ||||
|       serviceAccountName: ingress-nginx | ||||
|       terminationGracePeriodSeconds: 0 | ||||
|       tolerations: | ||||
|         - effect: NoSchedule | ||||
|           key: node-role.kubernetes.io/master | ||||
|           operator: Equal | ||||
|         - effect: NoSchedule | ||||
|           key: node-role.kubernetes.io/control-plane | ||||
|           operator: Equal | ||||
| --- | ||||
| apiVersion: networking.k8s.io/v1 | ||||
| kind: IngressClass | ||||
| metadata: | ||||
|   labels: | ||||
|     app.kubernetes.io/name: ingress-nginx | ||||
|     app.kubernetes.io/instance: ingress-nginx | ||||
|     app.kubernetes.io/component: controller | ||||
|   name: nginx | ||||
| spec: | ||||
|   controller: k8s.io/ingress-nginx | ||||
							
								
								
									
										71
									
								
								dev/config/keycloak/deployment.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										71
									
								
								dev/config/keycloak/deployment.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,71 @@ | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Namespace | ||||
| metadata: | ||||
|   name: keycloak | ||||
| --- | ||||
| apiVersion: apps/v1 | ||||
| kind: Deployment | ||||
| metadata: | ||||
|   name: keycloak | ||||
|   namespace: keycloak | ||||
|   labels: | ||||
|     app: keycloak | ||||
| spec: | ||||
|   replicas: 1 | ||||
|   selector: | ||||
|     matchLabels: | ||||
|       app: keycloak | ||||
|   template: | ||||
|     metadata: | ||||
|       labels: | ||||
|         app: keycloak | ||||
|     spec: | ||||
|       containers: | ||||
|       - name: keycloak | ||||
|         image: quay.io/keycloak/keycloak:26.4 | ||||
|         args: ["start-dev"] | ||||
|         env: | ||||
|         - name: KC_BOOTSTRAP_ADMIN_USERNAME | ||||
|           value: "admin" | ||||
|         - name: KC_BOOTSTRAP_ADMIN_PASSWORD | ||||
|           value: "admin" | ||||
|         - name: KC_HOSTNAME | ||||
|           value: "https://keycloak.127-0-0-1.sslip.io:8443" | ||||
|         - name: KC_HTTP_ENABLED | ||||
|           value: "true" | ||||
|         - name: KC_HEALTH_ENABLED | ||||
|           value: "true" | ||||
|         - name: KC_PROXY_HEADERS | ||||
|           value: "xforwarded" | ||||
|         ports: | ||||
|         - name: http | ||||
|           containerPort: 8080 | ||||
|         readinessProbe: | ||||
|           httpGet: | ||||
|             path: /health/ready | ||||
|             port: 9000 | ||||
|           initialDelaySeconds: 30 | ||||
|           periodSeconds: 10 | ||||
|         livenessProbe: | ||||
|           httpGet: | ||||
|             path: /health/live | ||||
|             port: 9000 | ||||
|           initialDelaySeconds: 60 | ||||
|           periodSeconds: 30 | ||||
| --- | ||||
| apiVersion: v1 | ||||
| kind: Service | ||||
| metadata: | ||||
|   name: keycloak | ||||
|   namespace: keycloak | ||||
|   labels: | ||||
|     app: keycloak | ||||
| spec: | ||||
|   ports: | ||||
|   - name: http | ||||
|     port: 80 | ||||
|     targetPort: 8080 | ||||
|   selector: | ||||
|     app: keycloak | ||||
|   type: ClusterIP | ||||
							
								
								
									
										34
									
								
								dev/config/keycloak/ingress.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								dev/config/keycloak/ingress.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | ||||
| --- | ||||
| apiVersion: networking.k8s.io/v1 | ||||
| kind: Ingress | ||||
| metadata: | ||||
|   name: keycloak | ||||
|   namespace: keycloak | ||||
|   labels: | ||||
|     app: keycloak | ||||
|   annotations: | ||||
|     cert-manager.io/cluster-issuer: "selfsigned-ca-issuer" | ||||
|     nginx.ingress.kubernetes.io/ssl-redirect: "true" | ||||
|     nginx.ingress.kubernetes.io/backend-protocol: "HTTP" | ||||
|     # Required for Keycloak 26.2.0+ to include port in issuer URLs | ||||
|     nginx.ingress.kubernetes.io/configuration-snippet: | | ||||
|       proxy_set_header X-Forwarded-Proto https; | ||||
|       proxy_set_header X-Forwarded-Port 8443; | ||||
|       proxy_set_header X-Forwarded-Host $host:8443; | ||||
| spec: | ||||
|   ingressClassName: nginx | ||||
|   tls: | ||||
|   - hosts: | ||||
|     - keycloak.127-0-0-1.sslip.io | ||||
|     secretName: keycloak-tls-cert | ||||
|   rules: | ||||
|   - host: keycloak.127-0-0-1.sslip.io | ||||
|     http: | ||||
|       paths: | ||||
|       - path: / | ||||
|         pathType: Prefix | ||||
|         backend: | ||||
|           service: | ||||
|             name: keycloak | ||||
|             port: | ||||
|               number: 80 | ||||
							
								
								
									
										20
									
								
								dev/config/keycloak/rbac.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								dev/config/keycloak/rbac.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| # RBAC ClusterRoleBinding for mcp user with OIDC authentication | ||||
| # | ||||
| # IMPORTANT: This requires Kubernetes API server to be configured with OIDC: | ||||
| #   --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift | ||||
| #   --oidc-username-claim=preferred_username | ||||
| # | ||||
| # Without OIDC configuration, this binding will not work. | ||||
| # | ||||
| apiVersion: rbac.authorization.k8s.io/v1 | ||||
| kind: ClusterRoleBinding | ||||
| metadata: | ||||
|   name: oidc-mcp-cluster-admin | ||||
| roleRef: | ||||
|   apiGroup: rbac.authorization.k8s.io | ||||
|   kind: ClusterRole | ||||
|   name: cluster-admin | ||||
| subjects: | ||||
| - apiGroup: rbac.authorization.k8s.io | ||||
|   kind: User | ||||
|   name: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift#mcp | ||||
							
								
								
									
										30
									
								
								dev/config/kind/cluster.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								dev/config/kind/cluster.yaml
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,30 @@ | ||||
| kind: Cluster | ||||
| apiVersion: kind.x-k8s.io/v1alpha4 | ||||
| nodes: | ||||
| - role: control-plane | ||||
|   extraMounts: | ||||
|   - hostPath: ./_output/cert-manager-ca/ca.crt | ||||
|     containerPath: /etc/kubernetes/pki/keycloak-ca.crt | ||||
|     readOnly: true | ||||
|   kubeadmConfigPatches: | ||||
|   - | | ||||
|     kind: InitConfiguration | ||||
|     nodeRegistration: | ||||
|       kubeletExtraArgs: | ||||
|         node-labels: "ingress-ready=true" | ||||
|  | ||||
|     kind: ClusterConfiguration | ||||
|     apiServer: | ||||
|       extraArgs: | ||||
|         oidc-issuer-url: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift | ||||
|         oidc-client-id: openshift | ||||
|         oidc-username-claim: preferred_username | ||||
|         oidc-groups-claim: groups | ||||
|         oidc-ca-file: /etc/kubernetes/pki/keycloak-ca.crt | ||||
|   extraPortMappings: | ||||
|   - containerPort: 80 | ||||
|     hostPort: 8080 | ||||
|     protocol: TCP | ||||
|   - containerPort: 443 | ||||
|     hostPort: 8443 | ||||
|     protocol: TCP | ||||
							
								
								
									
										
											BIN
										
									
								
								docs/images/kubernetes-mcp-server-github-copilot.jpg
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								docs/images/kubernetes-mcp-server-github-copilot.jpg
									
									
									
									
									
										Executable file
									
								
							
										
											Binary file not shown.
										
									
								
							| After Width: | Height: | Size: 375 KiB | 
							
								
								
									
										
											BIN
										
									
								
								docs/images/vibe-coding.jpg
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										
											BIN
										
									
								
								docs/images/vibe-coding.jpg
									
									
									
									
									
										Normal file
									
								
							
										
											Binary file not shown.
										
									
								
							| After Width: | Height: | Size: 190 KiB | 
							
								
								
									
										178
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										178
									
								
								go.mod
									
									
									
									
									
								
							| @@ -1,101 +1,145 @@ | ||||
| module github.com/manusa/kubernetes-mcp-server | ||||
| module github.com/containers/kubernetes-mcp-server | ||||
|  | ||||
| go 1.23.5 | ||||
| go 1.24.1 | ||||
|  | ||||
| require ( | ||||
| 	github.com/mark3labs/mcp-go v0.8.3 | ||||
| 	github.com/spf13/afero v1.12.0 | ||||
| 	github.com/spf13/cobra v1.8.1 | ||||
| 	github.com/spf13/viper v1.19.0 | ||||
| 	golang.org/x/net v0.33.0 | ||||
| 	k8s.io/api v0.32.1 | ||||
| 	k8s.io/apimachinery v0.32.1 | ||||
| 	k8s.io/cli-runtime v0.32.1 | ||||
| 	k8s.io/client-go v0.32.1 | ||||
| 	k8s.io/component-base v0.32.1 | ||||
| 	k8s.io/kubectl v0.32.1 | ||||
| 	sigs.k8s.io/controller-runtime v0.20.1 | ||||
| 	github.com/BurntSushi/toml v1.5.0 | ||||
| 	github.com/coreos/go-oidc/v3 v3.16.0 | ||||
| 	github.com/fsnotify/fsnotify v1.9.0 | ||||
| 	github.com/go-jose/go-jose/v4 v4.1.3 | ||||
| 	github.com/google/jsonschema-go v0.3.0 | ||||
| 	github.com/mark3labs/mcp-go v0.42.0 | ||||
| 	github.com/pkg/errors v0.9.1 | ||||
| 	github.com/spf13/afero v1.15.0 | ||||
| 	github.com/spf13/cobra v1.10.1 | ||||
| 	github.com/spf13/pflag v1.0.10 | ||||
| 	github.com/stretchr/testify v1.11.1 | ||||
| 	golang.org/x/net v0.46.0 | ||||
| 	golang.org/x/oauth2 v0.32.0 | ||||
| 	golang.org/x/sync v0.17.0 | ||||
| 	helm.sh/helm/v3 v3.19.0 | ||||
| 	k8s.io/api v0.34.1 | ||||
| 	k8s.io/apiextensions-apiserver v0.34.1 | ||||
| 	k8s.io/apimachinery v0.34.1 | ||||
| 	k8s.io/cli-runtime v0.34.1 | ||||
| 	k8s.io/client-go v0.34.1 | ||||
| 	k8s.io/klog/v2 v2.130.1 | ||||
| 	k8s.io/kubectl v0.34.1 | ||||
| 	k8s.io/metrics v0.34.1 | ||||
| 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 | ||||
| 	sigs.k8s.io/controller-runtime v0.22.3 | ||||
| 	sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 | ||||
| 	sigs.k8s.io/yaml v1.6.0 | ||||
| ) | ||||
|  | ||||
| // TODO: Remove once https://github.com/mark3labs/mcp-go/pull/18 is merged | ||||
| replace github.com/mark3labs/mcp-go => github.com/marcnuri-forks/mcp-go v0.0.0-20250213043348-ce583581e6be | ||||
|  | ||||
| require ( | ||||
| 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect | ||||
| 	dario.cat/mergo v1.0.2 // indirect | ||||
| 	github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect | ||||
| 	github.com/MakeNowJust/heredoc v1.0.0 // indirect | ||||
| 	github.com/Masterminds/goutils v1.1.1 // indirect | ||||
| 	github.com/Masterminds/semver/v3 v3.4.0 // indirect | ||||
| 	github.com/Masterminds/sprig/v3 v3.3.0 // indirect | ||||
| 	github.com/Masterminds/squirrel v1.5.4 // indirect | ||||
| 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect | ||||
| 	github.com/bahlo/generic-list-go v0.2.0 // indirect | ||||
| 	github.com/beorn7/perks v1.0.1 // indirect | ||||
| 	github.com/blang/semver/v4 v4.0.0 // indirect | ||||
| 	github.com/chai2010/gettext-go v1.0.2 // indirect | ||||
| 	github.com/buger/jsonparser v1.1.1 // indirect | ||||
| 	github.com/cespare/xxhash/v2 v2.3.0 // indirect | ||||
| 	github.com/chai2010/gettext-go v1.0.3 // indirect | ||||
| 	github.com/containerd/containerd v1.7.28 // indirect | ||||
| 	github.com/containerd/errdefs v0.3.0 // indirect | ||||
| 	github.com/containerd/log v0.1.0 // indirect | ||||
| 	github.com/containerd/platforms v0.2.1 // indirect | ||||
| 	github.com/cyphar/filepath-securejoin v0.4.1 // indirect | ||||
| 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect | ||||
| 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect | ||||
| 	github.com/evanphx/json-patch/v5 v5.9.0 // indirect | ||||
| 	github.com/emicklei/go-restful/v3 v3.12.2 // indirect | ||||
| 	github.com/evanphx/json-patch v5.9.11+incompatible // indirect | ||||
| 	github.com/evanphx/json-patch/v5 v5.9.11 // indirect | ||||
| 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect | ||||
| 	github.com/fatih/camelcase v1.0.0 // indirect | ||||
| 	github.com/fsnotify/fsnotify v1.7.0 // indirect | ||||
| 	github.com/fxamacker/cbor/v2 v2.7.0 // indirect | ||||
| 	github.com/fatih/color v1.18.0 // indirect | ||||
| 	github.com/fxamacker/cbor/v2 v2.9.0 // indirect | ||||
| 	github.com/go-errors/errors v1.4.2 // indirect | ||||
| 	github.com/go-logr/logr v1.4.2 // indirect | ||||
| 	github.com/go-openapi/jsonpointer v0.21.0 // indirect | ||||
| 	github.com/go-openapi/jsonreference v0.20.2 // indirect | ||||
| 	github.com/go-openapi/swag v0.23.0 // indirect | ||||
| 	github.com/go-gorp/gorp/v3 v3.1.0 // indirect | ||||
| 	github.com/go-logr/logr v1.4.3 // indirect | ||||
| 	github.com/go-openapi/jsonpointer v0.21.1 // indirect | ||||
| 	github.com/go-openapi/jsonreference v0.21.0 // indirect | ||||
| 	github.com/go-openapi/swag v0.23.1 // indirect | ||||
| 	github.com/gobwas/glob v0.2.3 // indirect | ||||
| 	github.com/gogo/protobuf v1.3.2 // indirect | ||||
| 	github.com/golang/protobuf v1.5.4 // indirect | ||||
| 	github.com/google/btree v1.1.3 // indirect | ||||
| 	github.com/google/gnostic-models v0.6.8 // indirect | ||||
| 	github.com/google/go-cmp v0.6.0 // indirect | ||||
| 	github.com/google/gofuzz v1.2.0 // indirect | ||||
| 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect | ||||
| 	github.com/google/gnostic-models v0.7.0 // indirect | ||||
| 	github.com/google/go-cmp v0.7.0 // indirect | ||||
| 	github.com/google/uuid v1.6.0 // indirect | ||||
| 	github.com/gorilla/websocket v1.5.0 // indirect | ||||
| 	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect | ||||
| 	github.com/gosuri/uitable v0.0.4 // indirect | ||||
| 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect | ||||
| 	github.com/hashicorp/hcl v1.0.0 // indirect | ||||
| 	github.com/hashicorp/errwrap v1.1.0 // indirect | ||||
| 	github.com/hashicorp/go-multierror v1.1.1 // indirect | ||||
| 	github.com/huandu/xstrings v1.5.0 // indirect | ||||
| 	github.com/inconshreveable/mousetrap v1.1.0 // indirect | ||||
| 	github.com/invopop/jsonschema v0.13.0 // indirect | ||||
| 	github.com/jmoiron/sqlx v1.4.0 // indirect | ||||
| 	github.com/josharian/intern v1.0.0 // indirect | ||||
| 	github.com/json-iterator/go v1.1.12 // indirect | ||||
| 	github.com/klauspost/compress v1.18.0 // indirect | ||||
| 	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect | ||||
| 	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect | ||||
| 	github.com/lib/pq v1.10.9 // indirect | ||||
| 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect | ||||
| 	github.com/magiconair/properties v1.8.7 // indirect | ||||
| 	github.com/mailru/easyjson v0.7.7 // indirect | ||||
| 	github.com/mailru/easyjson v0.9.0 // indirect | ||||
| 	github.com/mattn/go-colorable v0.1.14 // indirect | ||||
| 	github.com/mattn/go-isatty v0.0.20 // indirect | ||||
| 	github.com/mattn/go-runewidth v0.0.16 // indirect | ||||
| 	github.com/mitchellh/copystructure v1.2.0 // indirect | ||||
| 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect | ||||
| 	github.com/mitchellh/mapstructure v1.5.0 // indirect | ||||
| 	github.com/mitchellh/reflectwalk v1.0.2 // indirect | ||||
| 	github.com/moby/spdystream v0.5.0 // indirect | ||||
| 	github.com/moby/term v0.5.0 // indirect | ||||
| 	github.com/moby/term v0.5.2 // indirect | ||||
| 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect | ||||
| 	github.com/modern-go/reflect2 v1.0.2 // indirect | ||||
| 	github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect | ||||
| 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect | ||||
| 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect | ||||
| 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect | ||||
| 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect | ||||
| 	github.com/opencontainers/go-digest v1.0.0 // indirect | ||||
| 	github.com/opencontainers/image-spec v1.1.1 // indirect | ||||
| 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect | ||||
| 	github.com/pkg/errors v0.9.1 // indirect | ||||
| 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect | ||||
| 	github.com/prometheus/client_golang v1.22.0 // indirect | ||||
| 	github.com/prometheus/client_model v0.6.1 // indirect | ||||
| 	github.com/prometheus/common v0.62.0 // indirect | ||||
| 	github.com/prometheus/procfs v0.15.1 // indirect | ||||
| 	github.com/rivo/uniseg v0.2.0 // indirect | ||||
| 	github.com/rubenv/sql-migrate v1.8.0 // indirect | ||||
| 	github.com/russross/blackfriday/v2 v2.1.0 // indirect | ||||
| 	github.com/sagikazarmark/locafero v0.4.0 // indirect | ||||
| 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect | ||||
| 	github.com/sourcegraph/conc v0.3.0 // indirect | ||||
| 	github.com/spf13/cast v1.6.0 // indirect | ||||
| 	github.com/spf13/pflag v1.0.6 // indirect | ||||
| 	github.com/subosito/gotenv v1.6.0 // indirect | ||||
| 	github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect | ||||
| 	github.com/shopspring/decimal v1.4.0 // indirect | ||||
| 	github.com/sirupsen/logrus v1.9.3 // indirect | ||||
| 	github.com/spf13/cast v1.7.1 // indirect | ||||
| 	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect | ||||
| 	github.com/x448/float16 v0.8.4 // indirect | ||||
| 	github.com/xlab/treeprint v1.2.0 // indirect | ||||
| 	go.uber.org/multierr v1.11.0 // indirect | ||||
| 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect | ||||
| 	golang.org/x/oauth2 v0.25.0 // indirect | ||||
| 	golang.org/x/sync v0.10.0 // indirect | ||||
| 	golang.org/x/sys v0.29.0 // indirect | ||||
| 	golang.org/x/term v0.27.0 // indirect | ||||
| 	golang.org/x/text v0.21.0 // indirect | ||||
| 	golang.org/x/time v0.8.0 // indirect | ||||
| 	google.golang.org/protobuf v1.36.1 // indirect | ||||
| 	github.com/yosida95/uritemplate/v3 v3.0.2 // indirect | ||||
| 	go.yaml.in/yaml/v2 v2.4.2 // indirect | ||||
| 	go.yaml.in/yaml/v3 v3.0.4 // indirect | ||||
| 	golang.org/x/crypto v0.43.0 // indirect | ||||
| 	golang.org/x/sys v0.37.0 // indirect | ||||
| 	golang.org/x/term v0.36.0 // indirect | ||||
| 	golang.org/x/text v0.30.0 // indirect | ||||
| 	golang.org/x/time v0.12.0 // indirect | ||||
| 	google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect | ||||
| 	google.golang.org/grpc v1.72.1 // indirect | ||||
| 	google.golang.org/protobuf v1.36.6 // indirect | ||||
| 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect | ||||
| 	gopkg.in/inf.v0 v0.9.1 // indirect | ||||
| 	gopkg.in/ini.v1 v1.67.0 // indirect | ||||
| 	gopkg.in/yaml.v3 v3.0.1 // indirect | ||||
| 	k8s.io/apiextensions-apiserver v0.32.0 // indirect | ||||
| 	k8s.io/klog/v2 v2.130.1 // indirect | ||||
| 	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect | ||||
| 	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect | ||||
| 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect | ||||
| 	sigs.k8s.io/kustomize/api v0.18.0 // indirect | ||||
| 	sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect | ||||
| 	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect | ||||
| 	sigs.k8s.io/yaml v1.4.0 // indirect | ||||
| 	k8s.io/apiserver v0.34.1 // indirect | ||||
| 	k8s.io/component-base v0.34.1 // indirect | ||||
| 	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect | ||||
| 	oras.land/oras-go/v2 v2.6.0 // indirect | ||||
| 	sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect | ||||
| 	sigs.k8s.io/kustomize/api v0.20.1 // indirect | ||||
| 	sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect | ||||
| 	sigs.k8s.io/randfill v1.0.0 // indirect | ||||
| 	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect | ||||
| ) | ||||
|   | ||||
							
								
								
									
										467
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										467
									
								
								go.sum
									
									
									
									
									
								
							| @@ -1,119 +1,220 @@ | ||||
| github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= | ||||
| github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= | ||||
| dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= | ||||
| dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= | ||||
| filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= | ||||
| filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= | ||||
| github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= | ||||
| github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= | ||||
| github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= | ||||
| github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= | ||||
| github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= | ||||
| github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= | ||||
| github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= | ||||
| github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= | ||||
| github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= | ||||
| github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= | ||||
| github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= | ||||
| github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= | ||||
| github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= | ||||
| github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= | ||||
| github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= | ||||
| github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= | ||||
| github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= | ||||
| github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= | ||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= | ||||
| github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= | ||||
| github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= | ||||
| github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= | ||||
| github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= | ||||
| github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= | ||||
| github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= | ||||
| github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= | ||||
| github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= | ||||
| github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= | ||||
| github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= | ||||
| github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= | ||||
| github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= | ||||
| github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= | ||||
| github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= | ||||
| github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= | ||||
| github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= | ||||
| github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= | ||||
| github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= | ||||
| github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= | ||||
| github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= | ||||
| github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= | ||||
| github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= | ||||
| github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= | ||||
| github.com/containerd/containerd v1.7.28 h1:Nsgm1AtcmEh4AHAJ4gGlNSaKgXiNccU270Dnf81FQ3c= | ||||
| github.com/containerd/containerd v1.7.28/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= | ||||
| github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= | ||||
| github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= | ||||
| github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= | ||||
| github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= | ||||
| github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= | ||||
| github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= | ||||
| github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow= | ||||
| github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8= | ||||
| github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= | ||||
| github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= | ||||
| github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= | ||||
| github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= | ||||
| github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= | ||||
| github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= | ||||
| github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= | ||||
| github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= | ||||
| github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= | ||||
| github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= | ||||
| github.com/distribution/distribution/v3 v3.0.0 h1:q4R8wemdRQDClzoNNStftB2ZAfqOiN6UX90KJc4HjyM= | ||||
| github.com/distribution/distribution/v3 v3.0.0/go.mod h1:tRNuFoZsUdyRVegq8xGNeds4KLjwLCRin/tTo6i1DhU= | ||||
| github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= | ||||
| github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= | ||||
| github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= | ||||
| github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= | ||||
| github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= | ||||
| github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= | ||||
| github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= | ||||
| github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= | ||||
| github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= | ||||
| github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= | ||||
| github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= | ||||
| github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= | ||||
| github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8= | ||||
| github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= | ||||
| github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= | ||||
| github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= | ||||
| github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= | ||||
| github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= | ||||
| github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= | ||||
| github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= | ||||
| github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= | ||||
| github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= | ||||
| github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= | ||||
| github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= | ||||
| github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= | ||||
| github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= | ||||
| github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= | ||||
| github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= | ||||
| github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= | ||||
| github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= | ||||
| github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= | ||||
| github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= | ||||
| github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= | ||||
| github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= | ||||
| github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= | ||||
| github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= | ||||
| github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= | ||||
| github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= | ||||
| github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= | ||||
| github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= | ||||
| github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= | ||||
| github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= | ||||
| github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= | ||||
| github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= | ||||
| github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= | ||||
| github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= | ||||
| github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= | ||||
| github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= | ||||
| github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= | ||||
| github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= | ||||
| github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= | ||||
| github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= | ||||
| github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= | ||||
| github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= | ||||
| github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= | ||||
| github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= | ||||
| github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= | ||||
| github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= | ||||
| github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= | ||||
| github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= | ||||
| github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= | ||||
| github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= | ||||
| github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= | ||||
| github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= | ||||
| github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= | ||||
| github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= | ||||
| github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= | ||||
| github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= | ||||
| github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | ||||
| github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | ||||
| github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= | ||||
| github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= | ||||
| github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= | ||||
| github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= | ||||
| github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= | ||||
| github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= | ||||
| github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||
| github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= | ||||
| github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||
| github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= | ||||
| github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= | ||||
| github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= | ||||
| github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= | ||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= | ||||
| github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q= | ||||
| github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE= | ||||
| github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= | ||||
| github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= | ||||
| github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= | ||||
| github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= | ||||
| github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= | ||||
| github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= | ||||
| github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= | ||||
| github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= | ||||
| github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= | ||||
| github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= | ||||
| github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= | ||||
| github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= | ||||
| github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= | ||||
| github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= | ||||
| github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= | ||||
| github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= | ||||
| github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= | ||||
| github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= | ||||
| github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= | ||||
| github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= | ||||
| github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= | ||||
| github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= | ||||
| github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= | ||||
| github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= | ||||
| github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= | ||||
| github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= | ||||
| github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= | ||||
| github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= | ||||
| github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= | ||||
| github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= | ||||
| github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= | ||||
| github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= | ||||
| github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= | ||||
| github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= | ||||
| github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= | ||||
| github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= | ||||
| github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= | ||||
| github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= | ||||
| github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= | ||||
| github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= | ||||
| github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= | ||||
| github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= | ||||
| github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= | ||||
| github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= | ||||
| github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= | ||||
| github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= | ||||
| github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= | ||||
| github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= | ||||
| github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= | ||||
| github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= | ||||
| github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= | ||||
| github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= | ||||
| github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= | ||||
| github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= | ||||
| github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= | ||||
| github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= | ||||
| github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= | ||||
| github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= | ||||
| github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= | ||||
| github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= | ||||
| github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= | ||||
| github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= | ||||
| github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= | ||||
| github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= | ||||
| github.com/marcnuri-forks/mcp-go v0.0.0-20250213043348-ce583581e6be h1:CZb4WEZAFCob2RAELPW0oUG8fLksdwqoUuU2k4ST/GU= | ||||
| github.com/marcnuri-forks/mcp-go v0.0.0-20250213043348-ce583581e6be/go.mod h1:cjMlBU0cv/cj9kjlgmRhoJ5JREdS7YX83xeIG9Ko/jE= | ||||
| github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= | ||||
| github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= | ||||
| github.com/mark3labs/mcp-go v0.42.0 h1:gk/8nYJh8t3yroCAOBhNbYsM9TCKvkM13I5t5Hfu6Ls= | ||||
| github.com/mark3labs/mcp-go v0.42.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw= | ||||
| github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= | ||||
| github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= | ||||
| github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= | ||||
| github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= | ||||
| github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= | ||||
| github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= | ||||
| github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= | ||||
| github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= | ||||
| github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= | ||||
| github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= | ||||
| github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= | ||||
| github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= | ||||
| github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= | ||||
| github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= | ||||
| github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= | ||||
| github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= | ||||
| github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= | ||||
| github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= | ||||
| github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= | ||||
| github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= | ||||
| github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= | ||||
| github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= | ||||
| github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= | ||||
| github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= | ||||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | ||||
| github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= | ||||
| github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | ||||
| github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= | ||||
| github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= | ||||
| github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= | ||||
| github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= | ||||
| github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= | ||||
| github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= | ||||
| github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= | ||||
| @@ -124,122 +225,194 @@ github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU | ||||
| github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= | ||||
| github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= | ||||
| github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= | ||||
| github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= | ||||
| github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= | ||||
| github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= | ||||
| github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= | ||||
| github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= | ||||
| github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= | ||||
| github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= | ||||
| github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= | ||||
| github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= | ||||
| github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= | ||||
| github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= | ||||
| github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= | ||||
| github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= | ||||
| github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= | ||||
| github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= | ||||
| github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= | ||||
| github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= | ||||
| github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= | ||||
| github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= | ||||
| github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= | ||||
| github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= | ||||
| github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= | ||||
| github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= | ||||
| github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= | ||||
| github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= | ||||
| github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= | ||||
| github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= | ||||
| github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= | ||||
| github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho= | ||||
| github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5/go.mod h1:fyalQWdtzDBECAQFBJuQe5bzQ02jGd5Qcbgb97Flm7U= | ||||
| github.com/redis/go-redis/extra/redisotel/v9 v9.0.5 h1:EfpWLLCyXw8PSM2/XNJLjI3Pb27yVE+gIAfeqp8LUCc= | ||||
| github.com/redis/go-redis/extra/redisotel/v9 v9.0.5/go.mod h1:WZjPDy7VNzn77AAfnAfVjZNvfJTYfPetfZk5yoSTLaQ= | ||||
| github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= | ||||
| github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= | ||||
| github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= | ||||
| github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= | ||||
| github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= | ||||
| github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= | ||||
| github.com/rubenv/sql-migrate v1.8.0 h1:dXnYiJk9k3wetp7GfQbKJcPHjVJL6YK19tKj8t2Ns0o= | ||||
| github.com/rubenv/sql-migrate v1.8.0/go.mod h1:F2bGFBwCU+pnmbtNYDeKvSuvL6lBVtXDXUUv5t+u1qw= | ||||
| github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= | ||||
| github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= | ||||
| github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= | ||||
| github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= | ||||
| github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= | ||||
| github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= | ||||
| github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ= | ||||
| github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= | ||||
| github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= | ||||
| github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= | ||||
| github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= | ||||
| github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= | ||||
| github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= | ||||
| github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= | ||||
| github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= | ||||
| github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= | ||||
| github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= | ||||
| github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= | ||||
| github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= | ||||
| github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= | ||||
| github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= | ||||
| github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= | ||||
| github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= | ||||
| github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= | ||||
| github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= | ||||
| github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= | ||||
| github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= | ||||
| github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= | ||||
| github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= | ||||
| github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= | ||||
| github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= | ||||
| github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= | ||||
| github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||
| github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= | ||||
| github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= | ||||
| github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= | ||||
| github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= | ||||
| github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= | ||||
| github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= | ||||
| github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= | ||||
| github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= | ||||
| github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= | ||||
| github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= | ||||
| github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= | ||||
| github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= | ||||
| github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= | ||||
| github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= | ||||
| github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= | ||||
| github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= | ||||
| github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= | ||||
| github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= | ||||
| github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= | ||||
| github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= | ||||
| github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= | ||||
| github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= | ||||
| github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= | ||||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= | ||||
| go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= | ||||
| go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= | ||||
| go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= | ||||
| go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= | ||||
| go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= | ||||
| go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= | ||||
| go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= | ||||
| go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= | ||||
| go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= | ||||
| go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= | ||||
| go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= | ||||
| go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= | ||||
| go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= | ||||
| go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= | ||||
| go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= | ||||
| go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= | ||||
| go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= | ||||
| go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= | ||||
| go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= | ||||
| go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= | ||||
| go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= | ||||
| go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= | ||||
| go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= | ||||
| go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= | ||||
| go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= | ||||
| go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= | ||||
| go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= | ||||
| go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= | ||||
| go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= | ||||
| go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | ||||
| go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | ||||
| go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= | ||||
| go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= | ||||
| go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= | ||||
| go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= | ||||
| go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= | ||||
| go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= | ||||
| golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= | ||||
| golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= | ||||
| golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= | ||||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= | ||||
| golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= | ||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= | ||||
| golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= | ||||
| golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= | ||||
| golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= | ||||
| golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= | ||||
| golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= | ||||
| golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= | ||||
| golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= | ||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= | ||||
| golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= | ||||
| golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= | ||||
| golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= | ||||
| golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= | ||||
| golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= | ||||
| golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= | ||||
| golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | ||||
| golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= | ||||
| golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= | ||||
| golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= | ||||
| golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= | ||||
| golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= | ||||
| golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= | ||||
| golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= | ||||
| golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= | ||||
| golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= | ||||
| golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= | ||||
| golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | ||||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= | ||||
| golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= | ||||
| golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= | ||||
| golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= | ||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= | ||||
| gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= | ||||
| google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= | ||||
| google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= | ||||
| google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= | ||||
| google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= | ||||
| google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM= | ||||
| google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= | ||||
| google.golang.org/grpc v1.72.1 h1:HR03wO6eyZ7lknl75XlxABNVLLFc2PAb6mHlYh756mA= | ||||
| google.golang.org/grpc v1.72.1/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= | ||||
| google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= | ||||
| google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= | ||||
| gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= | ||||
| @@ -247,42 +420,52 @@ gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSP | ||||
| gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= | ||||
| gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= | ||||
| gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= | ||||
| gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= | ||||
| gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= | ||||
| gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= | ||||
| gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | ||||
| gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
| k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= | ||||
| k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= | ||||
| k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= | ||||
| k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= | ||||
| k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= | ||||
| k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= | ||||
| k8s.io/cli-runtime v0.32.1 h1:19nwZPlYGJPUDbhAxDIS2/oydCikvKMHsxroKNGA2mM= | ||||
| k8s.io/cli-runtime v0.32.1/go.mod h1:NJPbeadVFnV2E7B7vF+FvU09mpwYlZCu8PqjzfuOnkY= | ||||
| k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= | ||||
| k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= | ||||
| k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= | ||||
| k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= | ||||
| helm.sh/helm/v3 v3.19.0 h1:krVyCGa8fa/wzTZgqw0DUiXuRT5BPdeqE/sQXujQ22k= | ||||
| helm.sh/helm/v3 v3.19.0/go.mod h1:Lk/SfzN0w3a3C3o+TdAKrLwJ0wcZ//t1/SDXAvfgDdc= | ||||
| k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= | ||||
| k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= | ||||
| k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= | ||||
| k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= | ||||
| k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= | ||||
| k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= | ||||
| k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= | ||||
| k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= | ||||
| k8s.io/cli-runtime v0.34.1 h1:btlgAgTrYd4sk8vJTRG6zVtqBKt9ZMDeQZo2PIzbL7M= | ||||
| k8s.io/cli-runtime v0.34.1/go.mod h1:aVA65c+f0MZiMUPbseU/M9l1Wo2byeaGwUuQEQVVveE= | ||||
| k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= | ||||
| k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= | ||||
| k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= | ||||
| k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= | ||||
| k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= | ||||
| k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= | ||||
| k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= | ||||
| k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= | ||||
| k8s.io/kubectl v0.32.1 h1:/btLtXLQUU1rWx8AEvX9jrb9LaI6yeezt3sFALhB8M8= | ||||
| k8s.io/kubectl v0.32.1/go.mod h1:sezNuyWi1STk4ZNPVRIFfgjqMI6XMf+oCVLjZen/pFQ= | ||||
| k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= | ||||
| k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= | ||||
| sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= | ||||
| sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= | ||||
| k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= | ||||
| k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= | ||||
| k8s.io/kubectl v0.34.1 h1:1qP1oqT5Xc93K+H8J7ecpBjaz511gan89KO9Vbsh/OI= | ||||
| k8s.io/kubectl v0.34.1/go.mod h1:JRYlhJpGPyk3dEmJ+BuBiOB9/dAvnrALJEiY/C5qa6A= | ||||
| k8s.io/metrics v0.34.1 h1:374Rexmp1xxgRt64Bi0TsjAM8cA/Y8skwCoPdjtIslE= | ||||
| k8s.io/metrics v0.34.1/go.mod h1:Drf5kPfk2NJrlpcNdSiAAHn/7Y9KqxpRNagByM7Ei80= | ||||
| k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= | ||||
| k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= | ||||
| oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= | ||||
| oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= | ||||
| sigs.k8s.io/controller-runtime v0.22.3 h1:I7mfqz/a/WdmDCEnXmSPm8/b/yRTy6JsKKENTijTq8Y= | ||||
| sigs.k8s.io/controller-runtime v0.22.3/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= | ||||
| sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664 h1:xC7x7FsPURJYhZnWHsWFd7nkdD/WRtQVWPC28FWt85Y= | ||||
| sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250211091558-894df3a7e664/go.mod h1:Cq9jUhwSYol5tNB0O/1vLYxNV9KqnhpvEa6HvJ1w0wY= | ||||
| sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= | ||||
| sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= | ||||
| sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= | ||||
| sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= | ||||
| sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= | ||||
| sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= | ||||
| sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= | ||||
| sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= | ||||
| sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= | ||||
| sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= | ||||
| sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= | ||||
| sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= | ||||
| sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= | ||||
| sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= | ||||
| sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= | ||||
| sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= | ||||
| sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= | ||||
| sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= | ||||
| sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= | ||||
| sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= | ||||
| sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= | ||||
|   | ||||
							
								
								
									
										22
									
								
								hack/generate-placeholder-ca.sh
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										22
									
								
								hack/generate-placeholder-ca.sh
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,22 @@ | ||||
#!/bin/bash
# Stricter error handling: also fail on unset variables and pipeline errors.
set -euo pipefail

# Generate a placeholder self-signed CA certificate for KIND cluster startup
# This will be replaced with the real cert-manager CA after the cluster is created

CERT_DIR="_output/cert-manager-ca"
CA_CERT="$CERT_DIR/ca.crt"
CA_KEY="$CERT_DIR/ca.key"

# openssl's stderr is suppressed below, so fail early with a clear message
# if the tool is missing instead of dying silently.
if ! command -v openssl >/dev/null 2>&1; then
  echo "error: openssl is required but was not found in PATH" >&2
  exit 1
fi

mkdir -p "$CERT_DIR"

# Generate a self-signed CA certificate (valid placeholder)
openssl req -x509 -newkey rsa:2048 -nodes \
  -keyout "$CA_KEY" \
  -out "$CA_CERT" \
  -days 365 \
  -subj "/CN=placeholder-ca" \
  2>/dev/null

echo "✅ Placeholder CA certificate created at $CA_CERT"
echo "⚠️  This will be replaced with cert-manager CA after cluster creation"
							
								
								
									
										15
									
								
								internal/test/env.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								internal/test/env.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| package test | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// RestoreEnv wipes the current process environment and repopulates it from
// originalEnv, a snapshot of "KEY=VALUE" entries as produced by os.Environ.
// Entries lacking an "=" separator are silently skipped.
func RestoreEnv(originalEnv []string) {
	os.Clearenv()
	for _, entry := range originalEnv {
		key, value, ok := strings.Cut(entry, "=")
		if !ok {
			continue
		}
		// Best-effort restore; Setenv errors are deliberately ignored.
		_ = os.Setenv(key, value)
	}
}
							
								
								
									
										17
									
								
								internal/test/kubernetes.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								internal/test/kubernetes.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,17 @@ | ||||
| package test | ||||
|  | ||||
| import ( | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| ) | ||||
|  | ||||
| func KubeConfigFake() *clientcmdapi.Config { | ||||
| 	fakeConfig := clientcmdapi.NewConfig() | ||||
| 	fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster() | ||||
| 	fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443" | ||||
| 	fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo() | ||||
| 	fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext() | ||||
| 	fakeConfig.Contexts["fake-context"].Cluster = "fake" | ||||
| 	fakeConfig.Contexts["fake-context"].AuthInfo = "fake" | ||||
| 	fakeConfig.CurrentContext = "fake-context" | ||||
| 	return fakeConfig | ||||
| } | ||||
							
								
								
									
										52
									
								
								internal/test/mcp.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										52
									
								
								internal/test/mcp.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,52 @@ | ||||
| package test | ||||
|  | ||||
| import ( | ||||
| 	"net/http/httptest" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/client" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/mark3labs/mcp-go/server" | ||||
| 	"github.com/stretchr/testify/require" | ||||
| 	"golang.org/x/net/context" | ||||
| ) | ||||
|  | ||||
// McpClient bundles an MCP streamable-HTTP client with the httptest server
// it is connected to, so tests can tear both down together via Close.
type McpClient struct {
	ctx        context.Context // test-scoped context used for tool calls
	testServer *httptest.Server
	*client.Client
}
|  | ||||
// NewMcpClient starts an httptest server around mcpHttpServer, connects a
// streamable-HTTP MCP client to its /mcp endpoint, and completes the MCP
// initialize handshake. Any failure aborts the test via require.
func NewMcpClient(t *testing.T, mcpHttpServer *server.StreamableHTTPServer) *McpClient {
	require.NotNil(t, mcpHttpServer, "McpHttpServer must be provided")
	var err error
	ret := &McpClient{ctx: t.Context()}
	ret.testServer = httptest.NewServer(mcpHttpServer)
	// The streamable HTTP transport is exposed under the /mcp path.
	ret.Client, err = client.NewStreamableHttpClient(ret.testServer.URL + "/mcp")
	require.NoError(t, err, "Expected no error creating MCP client")
	err = ret.Start(t.Context())
	require.NoError(t, err, "Expected no error starting MCP client")
	// Perform the protocol-mandated initialize round-trip before first use.
	initRequest := mcp.InitializeRequest{}
	initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION
	initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"}
	_, err = ret.Initialize(t.Context(), initRequest)
	require.NoError(t, err, "Expected no error initializing MCP client")
	return ret
}
|  | ||||
| func (m *McpClient) Close() { | ||||
| 	if m.Client != nil { | ||||
| 		_ = m.Client.Close() | ||||
| 	} | ||||
| 	if m.testServer != nil { | ||||
| 		m.testServer.Close() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // CallTool helper function to call a tool by name with arguments | ||||
| func (m *McpClient) CallTool(name string, args map[string]interface{}) (*mcp.CallToolResult, error) { | ||||
| 	callToolRequest := mcp.CallToolRequest{} | ||||
| 	callToolRequest.Params.Name = name | ||||
| 	callToolRequest.Params.Arguments = args | ||||
| 	return m.Client.CallTool(m.ctx, callToolRequest) | ||||
| } | ||||
							
								
								
									
										218
									
								
								internal/test/mock_server.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										218
									
								
								internal/test/mock_server.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,218 @@ | ||||
| package test | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"errors" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"net/http/httptest" | ||||
| 	"path/filepath" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/require" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	apierrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/serializer" | ||||
| 	"k8s.io/apimachinery/pkg/util/httpstream" | ||||
| 	"k8s.io/apimachinery/pkg/util/httpstream/spdy" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	"k8s.io/client-go/tools/clientcmd/api" | ||||
| ) | ||||
|  | ||||
| type MockServer struct { | ||||
| 	server       *httptest.Server | ||||
| 	config       *rest.Config | ||||
| 	restHandlers []http.HandlerFunc | ||||
| } | ||||
|  | ||||
| func NewMockServer() *MockServer { | ||||
| 	ms := &MockServer{} | ||||
| 	scheme := runtime.NewScheme() | ||||
| 	codecs := serializer.NewCodecFactory(scheme) | ||||
| 	ms.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 		for _, handler := range ms.restHandlers { | ||||
| 			handler(w, req) | ||||
| 		} | ||||
| 	})) | ||||
| 	ms.config = &rest.Config{ | ||||
| 		Host:    ms.server.URL, | ||||
| 		APIPath: "/api", | ||||
| 		ContentConfig: rest.ContentConfig{ | ||||
| 			NegotiatedSerializer: codecs, | ||||
| 			ContentType:          runtime.ContentTypeJSON, | ||||
| 			GroupVersion:         &v1.SchemeGroupVersion, | ||||
| 		}, | ||||
| 	} | ||||
| 	ms.restHandlers = make([]http.HandlerFunc, 0) | ||||
| 	return ms | ||||
| } | ||||
|  | ||||
| func (m *MockServer) Close() { | ||||
| 	if m.server != nil { | ||||
| 		m.server.Close() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *MockServer) Handle(handler http.Handler) { | ||||
| 	m.restHandlers = append(m.restHandlers, handler.ServeHTTP) | ||||
| } | ||||
|  | ||||
| func (m *MockServer) Config() *rest.Config { | ||||
| 	return m.config | ||||
| } | ||||
|  | ||||
| func (m *MockServer) Kubeconfig() *api.Config { | ||||
| 	fakeConfig := KubeConfigFake() | ||||
| 	fakeConfig.Clusters["fake"].Server = m.config.Host | ||||
| 	fakeConfig.Clusters["fake"].CertificateAuthorityData = m.config.CAData | ||||
| 	fakeConfig.AuthInfos["fake"].ClientKeyData = m.config.KeyData | ||||
| 	fakeConfig.AuthInfos["fake"].ClientCertificateData = m.config.CertData | ||||
| 	return fakeConfig | ||||
| } | ||||
|  | ||||
| func (m *MockServer) KubeconfigFile(t *testing.T) string { | ||||
| 	return KubeconfigFile(t, m.Kubeconfig()) | ||||
| } | ||||
|  | ||||
| func KubeconfigFile(t *testing.T, kubeconfig *api.Config) string { | ||||
| 	kubeconfigFile := filepath.Join(t.TempDir(), "config") | ||||
| 	err := clientcmd.WriteToFile(*kubeconfig, kubeconfigFile) | ||||
| 	require.NoError(t, err, "Expected no error writing kubeconfig file") | ||||
| 	return kubeconfigFile | ||||
| } | ||||
|  | ||||
| func WriteObject(w http.ResponseWriter, obj runtime.Object) { | ||||
| 	w.Header().Set("Content-Type", runtime.ContentTypeJSON) | ||||
| 	if err := json.NewEncoder(w).Encode(obj); err != nil { | ||||
| 		http.Error(w, err.Error(), http.StatusInternalServerError) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// streamAndReply pairs an accepted SPDY stream with the channel that is
// closed once the stream-creation reply has been sent to the client.
type streamAndReply struct {
	httpstream.Stream
	replySent <-chan struct{}
}

// StreamContext aggregates the per-connection streams negotiated with a
// client, plus the connection closer and the error-status writer.
type StreamContext struct {
	Closer       io.Closer
	StdinStream  io.ReadCloser
	StdoutStream io.WriteCloser
	StderrStream io.WriteCloser
	writeStatus  func(status *apierrors.StatusError) error
}

// StreamOptions declares which standard streams a caller expects to exchange;
// a nil field means that stream is not negotiated.
type StreamOptions struct {
	Stdin  io.Reader
	Stdout io.Writer
	Stderr io.Writer
}

// v4WriteStatusFunc returns a writer that marshals a StatusError's Status as
// JSON onto the given (error) stream, per the v4.channel.k8s.io framing.
func v4WriteStatusFunc(stream io.Writer) func(status *apierrors.StatusError) error {
	return func(status *apierrors.StatusError) error {
		bs, err := json.Marshal(status.Status())
		if err != nil {
			return err
		}
		_, err = stream.Write(bs)
		return err
	}
}
// CreateHTTPStreams upgrades req to a SPDY connection and blocks until the
// client has opened the error stream plus one stream for each of the
// stdin/stdout/stderr options requested in opts, returning them bundled in a
// StreamContext. The caller owns ctx.Closer.
func CreateHTTPStreams(w http.ResponseWriter, req *http.Request, opts *StreamOptions) (*StreamContext, error) {
	// Negotiate the v4 remote-command subprotocol with the client.
	_, err := httpstream.Handshake(req, w, []string{"v4.channel.k8s.io"})
	if err != nil {
		return nil, err
	}

	upgrader := spdy.NewResponseUpgrader()
	streamCh := make(chan streamAndReply)
	connection := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error {
		// Forward each newly-created client stream to the collection loop below.
		streamCh <- streamAndReply{Stream: stream, replySent: replySent}
		return nil
	})
	ctx := &StreamContext{
		Closer: connection,
	}

	// wait for stream
	// The error stream is always expected; add one per requested std stream.
	replyChan := make(chan struct{}, 4)
	defer close(replyChan)
	receivedStreams := 0
	expectedStreams := 1
	if opts.Stdout != nil {
		expectedStreams++
	}
	if opts.Stdin != nil {
		expectedStreams++
	}
	if opts.Stderr != nil {
		expectedStreams++
	}
WaitForStreams:
	// Collect streams until every expected one has arrived; each accepted
	// stream enqueues a tick on replyChan, which drives the counter below.
	for {
		select {
		case stream := <-streamCh:
			streamType := stream.Headers().Get(v1.StreamType)
			switch streamType {
			case v1.StreamTypeError:
				replyChan <- struct{}{}
				ctx.writeStatus = v4WriteStatusFunc(stream)
			case v1.StreamTypeStdout:
				replyChan <- struct{}{}
				ctx.StdoutStream = stream
			case v1.StreamTypeStdin:
				replyChan <- struct{}{}
				ctx.StdinStream = stream
			case v1.StreamTypeStderr:
				replyChan <- struct{}{}
				ctx.StderrStream = stream
			default:
				// add other stream ...
				return nil, errors.New("unimplemented stream type")
			}
		case <-replyChan:
			receivedStreams++
			if receivedStreams == expectedStreams {
				break WaitForStreams
			}
		}
	}

	return ctx, nil
}
|  | ||||
// InOpenShiftHandler serves the minimal discovery endpoints that convince a
// Kubernetes discovery client it is talking to an OpenShift cluster by
// advertising the project.openshift.io API group.
type InOpenShiftHandler struct {
}

var _ http.Handler = (*InOpenShiftHandler)(nil)

func (h *InOpenShiftHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	switch req.URL.Path {
	// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
	case "/api":
		_, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
	// Request Performed by DiscoveryClient to Kube API (Get API Groups)
	case "/apis":
		_, _ = w.Write([]byte(`{
			"kind":"APIGroupList",
			"groups":[{
				"name":"project.openshift.io",
				"versions":[{"groupVersion":"project.openshift.io/v1","version":"v1"}],
				"preferredVersion":{"groupVersion":"project.openshift.io/v1","version":"v1"}
			}]}`))
	// Resource discovery for the advertised group/version.
	case "/apis/project.openshift.io/v1":
		_, _ = w.Write([]byte(`{
			"kind":"APIResourceList",
			"apiVersion":"v1",
			"groupVersion":"project.openshift.io/v1",
			"resources":[
				{"name":"projects","singularName":"","namespaced":false,"kind":"Project","verbs":["create","delete","get","list","patch","update","watch"],"shortNames":["pr"]}
			]}`))
	}
}
							
								
								
									
										21
									
								
								internal/test/test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								internal/test/test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| package test | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| ) | ||||
|  | ||||
// Must unwraps a (value, error) pair, panicking if the error is non-nil.
// Intended for test fixtures where failure should abort immediately.
func Must[T any](v T, err error) T {
	if err == nil {
		return v
	}
	panic(err)
}
|  | ||||
// ReadFile reads a file located relative to the directory of the *caller's*
// source file (via runtime.Caller(1)) and returns its contents as a string.
// The path segments are joined onto that directory; any read error panics
// through Must. Useful for loading test fixtures next to the test source.
func ReadFile(path ...string) string {
	// Resolve the source file of the function that invoked ReadFile.
	_, file, _, _ := runtime.Caller(1)
	filePath := filepath.Join(append([]string{filepath.Dir(file)}, path...)...)
	fileBytes := Must(os.ReadFile(filePath))
	return string(fileBytes)
}
							
								
								
									
										107
									
								
								internal/tools/update-readme/main.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										107
									
								
								internal/tools/update-readme/main.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,107 @@ | ||||
| package main | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"maps" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"slices" | ||||
| 	"strings" | ||||
|  | ||||
| 	internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/toolsets" | ||||
|  | ||||
| 	_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/config" | ||||
| 	_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/core" | ||||
| 	_ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/helm" | ||||
| ) | ||||
|  | ||||
// OpenShift is a stub that always reports an OpenShift cluster, so the
// generated README also documents OpenShift-only tools.
type OpenShift struct{}

// IsOpenShift always returns true; ctx is unused.
func (o *OpenShift) IsOpenShift(ctx context.Context) bool {
	return true
}

// Compile-time check that the stub satisfies the Openshift interface.
var _ internalk8s.Openshift = (*OpenShift)(nil)
|  | ||||
| func main() { | ||||
| 	// Snyk reports false positive unless we flow the args through filepath.Clean and filepath.Localize in this specific order | ||||
| 	var err error | ||||
| 	localReadmePath := filepath.Clean(os.Args[1]) | ||||
| 	localReadmePath, err = filepath.Localize(localReadmePath) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	readme, err := os.ReadFile(localReadmePath) | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	// Available Toolsets | ||||
| 	toolsetsList := toolsets.Toolsets() | ||||
| 	maxNameLen, maxDescLen := len("Toolset"), len("Description") | ||||
| 	for _, toolset := range toolsetsList { | ||||
| 		nameLen := len(toolset.GetName()) | ||||
| 		descLen := len(toolset.GetDescription()) | ||||
| 		if nameLen > maxNameLen { | ||||
| 			maxNameLen = nameLen | ||||
| 		} | ||||
| 		if descLen > maxDescLen { | ||||
| 			maxDescLen = descLen | ||||
| 		} | ||||
| 	} | ||||
| 	availableToolsets := strings.Builder{} | ||||
| 	availableToolsets.WriteString(fmt.Sprintf("| %-*s | %-*s |\n", maxNameLen, "Toolset", maxDescLen, "Description")) | ||||
| 	availableToolsets.WriteString(fmt.Sprintf("|-%s-|-%s-|\n", strings.Repeat("-", maxNameLen), strings.Repeat("-", maxDescLen))) | ||||
| 	for _, toolset := range toolsetsList { | ||||
| 		availableToolsets.WriteString(fmt.Sprintf("| %-*s | %-*s |\n", maxNameLen, toolset.GetName(), maxDescLen, toolset.GetDescription())) | ||||
| 	} | ||||
| 	updated := replaceBetweenMarkers( | ||||
| 		string(readme), | ||||
| 		"<!-- AVAILABLE-TOOLSETS-START -->", | ||||
| 		"<!-- AVAILABLE-TOOLSETS-END -->", | ||||
| 		availableToolsets.String(), | ||||
| 	) | ||||
|  | ||||
| 	// Available Toolset Tools | ||||
| 	toolsetTools := strings.Builder{} | ||||
| 	for _, toolset := range toolsetsList { | ||||
| 		toolsetTools.WriteString("<details>\n\n<summary>" + toolset.GetName() + "</summary>\n\n") | ||||
| 		tools := toolset.GetTools(&OpenShift{}) | ||||
| 		for _, tool := range tools { | ||||
| 			toolsetTools.WriteString(fmt.Sprintf("- **%s** - %s\n", tool.Tool.Name, tool.Tool.Description)) | ||||
| 			for _, propName := range slices.Sorted(maps.Keys(tool.Tool.InputSchema.Properties)) { | ||||
| 				property := tool.Tool.InputSchema.Properties[propName] | ||||
| 				toolsetTools.WriteString(fmt.Sprintf("  - `%s` (`%s`)", propName, property.Type)) | ||||
| 				if slices.Contains(tool.Tool.InputSchema.Required, propName) { | ||||
| 					toolsetTools.WriteString(" **(required)**") | ||||
| 				} | ||||
| 				toolsetTools.WriteString(fmt.Sprintf(" - %s\n", property.Description)) | ||||
| 			} | ||||
| 			toolsetTools.WriteString("\n") | ||||
| 		} | ||||
| 		toolsetTools.WriteString("</details>\n\n") | ||||
| 	} | ||||
| 	updated = replaceBetweenMarkers( | ||||
| 		updated, | ||||
| 		"<!-- AVAILABLE-TOOLSETS-TOOLS-START -->", | ||||
| 		"<!-- AVAILABLE-TOOLSETS-TOOLS-END -->", | ||||
| 		toolsetTools.String(), | ||||
| 	) | ||||
|  | ||||
| 	if err := os.WriteFile(localReadmePath, []byte(updated), 0o644); err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// replaceBetweenMarkers replaces the text between the first occurrence of
// startMarker and the first occurrence of endMarker *after* it with
// "\n\n" + replacement + "\n", keeping both markers. If either marker is
// missing (or no endMarker follows the startMarker), content is returned
// unchanged. The original searched endMarker from the beginning of content,
// so an endMarker occurrence before startMarker masked a valid later pair.
func replaceBetweenMarkers(content, startMarker, endMarker, replacement string) string {
	startIdx := strings.Index(content, startMarker)
	if startIdx == -1 {
		return content
	}
	contentStart := startIdx + len(startMarker)
	endOffset := strings.Index(content[contentStart:], endMarker)
	if endOffset == -1 {
		return content
	}
	endIdx := contentStart + endOffset
	return content[:contentStart] + "\n\n" + replacement + "\n" + content[endIdx:]
}
| @@ -1,19 +0,0 @@ | ||||
| const childProcess = require("child_process"); | ||||
|  | ||||
| const BINARY_MAP = { | ||||
|   darwin_x86: {name: "kubernetes-mcp-server-darwin-amd64", suffix: ''}, | ||||
|   darwin_arm64: {name: "kubernetes-mcp-server-darwin-arm64", suffix: ''}, | ||||
|   linux_x86: {name: "kubernetes-mcp-server-linux-amd64", suffix: ''}, | ||||
|   linux_arm64: {name: "kubernetes-mcp-server-linux-arm64", suffix: ''}, | ||||
|   win32_x86: {name: "kubernetes-mcp-server-windows-amd64", suffix: '.exe'}, | ||||
|   win32_arm64: {name: "kubernetes-mcp-server-windows-arm64", suffix: '.exe'}, | ||||
| }; | ||||
|  | ||||
| const binary = BINARY_MAP[`${process.platform}_${process.arch}`]; | ||||
|  | ||||
| module.exports.runBinary = function (...args) { | ||||
|   // Resolving will fail if the optionalDependency was not installed | ||||
|   childProcess.execFileSync(require.resolve(`${binary.name}/bin/${binary.name}+${binary.suffix}`), args, { | ||||
|     stdio: "inherit", | ||||
|   }); | ||||
| }; | ||||
| @@ -2,6 +2,10 @@ | ||||
|   "name": "kubernetes-mcp-server-darwin-amd64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "darwin" | ||||
|   ], | ||||
|   | ||||
| @@ -2,6 +2,10 @@ | ||||
|   "name": "kubernetes-mcp-server-darwin-arm64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "darwin" | ||||
|   ], | ||||
|   | ||||
| @@ -2,6 +2,10 @@ | ||||
|   "name": "kubernetes-mcp-server-linux-amd64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "linux" | ||||
|   ], | ||||
|   | ||||
| @@ -2,6 +2,10 @@ | ||||
|   "name": "kubernetes-mcp-server-linux-arm64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "linux" | ||||
|   ], | ||||
|   | ||||
| @@ -2,6 +2,10 @@ | ||||
|   "name": "kubernetes-mcp-server-windows-amd64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "win32" | ||||
|   ], | ||||
|   | ||||
| @@ -2,10 +2,14 @@ | ||||
|   "name": "kubernetes-mcp-server-windows-arm64", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "os": [ | ||||
|     "arm64" | ||||
|     "win32" | ||||
|   ], | ||||
|   "cpu": [ | ||||
|     "x64" | ||||
|     "arm64" | ||||
|   ] | ||||
| } | ||||
|   | ||||
							
								
								
									
										46
									
								
								npm/kubernetes-mcp-server/bin/index.js
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										46
									
								
								npm/kubernetes-mcp-server/bin/index.js
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,46 @@ | ||||
#!/usr/bin/env node

const childProcess = require('child_process');

// Maps `${process.platform}_${process.arch}` to the optionalDependency
// package that ships the platform-specific binary.
const BINARY_MAP = {
  darwin_x64: {name: 'kubernetes-mcp-server-darwin-amd64', suffix: ''},
  darwin_arm64: {name: 'kubernetes-mcp-server-darwin-arm64', suffix: ''},
  linux_x64: {name: 'kubernetes-mcp-server-linux-amd64', suffix: ''},
  linux_arm64: {name: 'kubernetes-mcp-server-linux-arm64', suffix: ''},
  win32_x64: {name: 'kubernetes-mcp-server-windows-amd64', suffix: '.exe'},
  win32_arm64: {name: 'kubernetes-mcp-server-windows-arm64', suffix: '.exe'},
};

// Resolving will fail if the optionalDependency was not installed or the platform/arch is not supported
const resolveBinaryPath = () => {
  try {
    const binary = BINARY_MAP[`${process.platform}_${process.arch}`];
    return require.resolve(`${binary.name}/bin/${binary.name}${binary.suffix}`);
  } catch (e) {
    throw new Error(`Could not resolve binary path for platform/arch: ${process.platform}/${process.arch}`);
  }
};

const child = childProcess.spawn(resolveBinaryPath(), process.argv.slice(2), {
  stdio: 'inherit',
});

// Forward termination signals to the child so it can shut down gracefully.
// (The previous version wrapped this in a zero-argument arrow factory whose
// call argument was silently discarded; process.on already supplies the
// signal name to the listener.)
const handleSignal = (signal) => {
  console.log(`Received ${signal}, terminating child process...`);
  if (child && !child.killed) {
    child.kill(signal);
  }
};

['SIGTERM', 'SIGINT', 'SIGHUP'].forEach((signal) => {
  process.on(signal, handleSignal);
});

// Conventional exit status for a signal-terminated process is 128 + signum.
const SIGNAL_NUMBERS = {SIGHUP: 1, SIGINT: 2, SIGTERM: 15};

child.on('close', (code, signal) => {
  if (signal) {
    console.log(`Child process terminated by signal: ${signal}`);
    process.exit(128 + (SIGNAL_NUMBERS[signal] || 1));
  } else {
    process.exit(code || 0);
  }
});
| @@ -2,26 +2,27 @@ | ||||
|   "name": "kubernetes-mcp-server", | ||||
|   "version": "0.0.0", | ||||
|   "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", | ||||
|   "main": "bin/kubernetes-mcp-server.js", | ||||
|   "main": "./bin/index.js", | ||||
|   "bin": { | ||||
|     "kubernetes-mcp-server": "bin/kubernetes-mcp-server.js" | ||||
|     "kubernetes-mcp-server": "bin/index.js" | ||||
|   }, | ||||
|   "optionalDependencies": { | ||||
|     "kubernetes-mcp-server-darwin-amd64": "0.0.0", | ||||
|     "kubernetes-mcp-server-darwin-arm64": "0.0.0", | ||||
|     "kubernetes-mcp-server-linux-amd64": "0.0.0", | ||||
|     "kubernetes-mcp-server-linux-arm64": "0.0.0", | ||||
|     "kubernetes-mcp-server-win32-amd64": "0.0.0", | ||||
|     "kubernetes-mcp-server-win32-arm64": "0.0.0" | ||||
|     "kubernetes-mcp-server-windows-amd64": "0.0.0", | ||||
|     "kubernetes-mcp-server-windows-arm64": "0.0.0" | ||||
|   }, | ||||
|   "repository": { | ||||
|     "type": "git", | ||||
|     "url": "git+https://github.com/manusa/kubernetes-mcp-server.git" | ||||
|     "url": "git+https://github.com/containers/kubernetes-mcp-server.git" | ||||
|   }, | ||||
|   "keywords": [ | ||||
|     "mcp", | ||||
|     "kubernetes", | ||||
|     "openshift", | ||||
|     "model context protocol", | ||||
|     "model", | ||||
|     "context", | ||||
|     "protocol" | ||||
| @@ -32,7 +33,7 @@ | ||||
|   }, | ||||
|   "license": "Apache-2.0", | ||||
|   "bugs": { | ||||
|     "url": "https://github.com/manusa/kubernetes-mcp-server/issues" | ||||
|     "url": "https://github.com/containers/kubernetes-mcp-server/issues" | ||||
|   }, | ||||
|   "homepage": "https://github.com/manusa/kubernetes-mcp-server#readme" | ||||
|   "homepage": "https://github.com/containers/kubernetes-mcp-server#readme" | ||||
| } | ||||
							
								
								
									
										24
									
								
								npm/package-lock.json
									
									
									
										generated
									
									
									
								
							
							
						
						
									
										24
									
								
								npm/package-lock.json
									
									
									
										generated
									
									
									
								
							| @@ -1,24 +0,0 @@ | ||||
| { | ||||
|   "name": "kubernetes-mcp-server", | ||||
|   "version": "0.0.0", | ||||
|   "lockfileVersion": 3, | ||||
|   "requires": true, | ||||
|   "packages": { | ||||
|     "": { | ||||
|       "name": "kubernetes-mcp-server", | ||||
|       "version": "0.0.0", | ||||
|       "license": "Apache-2.0", | ||||
|       "bin": { | ||||
|         "kubernetes-mcp-server": "bin/kubernetes-mcp-server.js" | ||||
|       }, | ||||
|       "optionalDependencies": { | ||||
|         "kubernetes-mcp-server-darwin-amd64": "0.0.0", | ||||
|         "kubernetes-mcp-server-darwin-arm64": "0.0.0", | ||||
|         "kubernetes-mcp-server-linux-amd64": "0.0.0", | ||||
|         "kubernetes-mcp-server-linux-arm64": "0.0.0", | ||||
|         "kubernetes-mcp-server-win32-amd64": "0.0.0", | ||||
|         "kubernetes-mcp-server-win32-arm64": "0.0.0" | ||||
|       } | ||||
|     } | ||||
|   } | ||||
| } | ||||
							
								
								
									
										120
									
								
								pkg/api/toolsets.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										120
									
								
								pkg/api/toolsets.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,120 @@ | ||||
| package api | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
|  | ||||
| 	internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/output" | ||||
| 	"github.com/google/jsonschema-go/jsonschema" | ||||
| ) | ||||
|  | ||||
| type ServerTool struct { | ||||
| 	Tool               Tool | ||||
| 	Handler            ToolHandlerFunc | ||||
| 	ClusterAware       *bool | ||||
| 	TargetListProvider *bool | ||||
| } | ||||
|  | ||||
| // IsClusterAware indicates whether the tool can accept a "cluster" or "context" parameter | ||||
| // to operate on a specific Kubernetes cluster context. | ||||
| // Defaults to true if not explicitly set | ||||
| func (s *ServerTool) IsClusterAware() bool { | ||||
| 	if s.ClusterAware != nil { | ||||
| 		return *s.ClusterAware | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // IsTargetListProvider indicates whether the tool is used to provide a list of targets (clusters/contexts) | ||||
| // Defaults to false if not explicitly set | ||||
| func (s *ServerTool) IsTargetListProvider() bool { | ||||
| 	if s.TargetListProvider != nil { | ||||
| 		return *s.TargetListProvider | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| type Toolset interface { | ||||
| 	// GetName returns the name of the toolset. | ||||
| 	// Used to identify the toolset in configuration, logs, and command-line arguments. | ||||
| 	// Examples: "core", "metrics", "helm" | ||||
| 	GetName() string | ||||
| 	GetDescription() string | ||||
| 	GetTools(o internalk8s.Openshift) []ServerTool | ||||
| } | ||||
|  | ||||
| type ToolCallRequest interface { | ||||
| 	GetArguments() map[string]any | ||||
| } | ||||
|  | ||||
| type ToolCallResult struct { | ||||
| 	// Raw content returned by the tool. | ||||
| 	Content string | ||||
| 	// Error (non-protocol) to send back to the LLM. | ||||
| 	Error error | ||||
| } | ||||
|  | ||||
| func NewToolCallResult(content string, err error) *ToolCallResult { | ||||
| 	return &ToolCallResult{ | ||||
| 		Content: content, | ||||
| 		Error:   err, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type ToolHandlerParams struct { | ||||
| 	context.Context | ||||
| 	*internalk8s.Kubernetes | ||||
| 	ToolCallRequest | ||||
| 	ListOutput output.Output | ||||
| } | ||||
|  | ||||
| type ToolHandlerFunc func(params ToolHandlerParams) (*ToolCallResult, error) | ||||
|  | ||||
| type Tool struct { | ||||
| 	// The name of the tool. | ||||
| 	// Intended for programmatic or logical use, but used as a display name in past | ||||
| 	// specs or fallback (if title isn't present). | ||||
| 	Name string `json:"name"` | ||||
| 	// A human-readable description of the tool. | ||||
| 	// | ||||
| 	// This can be used by clients to improve the LLM's understanding of available | ||||
| 	// tools. It can be thought of like a "hint" to the model. | ||||
| 	Description string `json:"description,omitempty"` | ||||
| 	// Additional tool information. | ||||
| 	Annotations ToolAnnotations `json:"annotations"` | ||||
| 	// A JSON Schema object defining the expected parameters for the tool. | ||||
| 	InputSchema *jsonschema.Schema | ||||
| } | ||||
|  | ||||
| type ToolAnnotations struct { | ||||
| 	// Human-readable title for the tool | ||||
| 	Title string `json:"title,omitempty"` | ||||
| 	// If true, the tool does not modify its environment. | ||||
| 	ReadOnlyHint *bool `json:"readOnlyHint,omitempty"` | ||||
| 	// If true, the tool may perform destructive updates to its environment. If | ||||
| 	// false, the tool performs only additive updates. | ||||
| 	// | ||||
| 	// (This property is meaningful only when ReadOnlyHint == false.) | ||||
| 	DestructiveHint *bool `json:"destructiveHint,omitempty"` | ||||
| 	// If true, calling the tool repeatedly with the same arguments will have no | ||||
| 	// additional effect on its environment. | ||||
| 	// | ||||
| 	// (This property is meaningful only when ReadOnlyHint == false.) | ||||
| 	IdempotentHint *bool `json:"idempotentHint,omitempty"` | ||||
| 	// If true, this tool may interact with an "open world" of external entities. If | ||||
| 	// false, the tool's domain of interaction is closed. For example, the world of | ||||
| 	// a web search tool is open, whereas that of a memory tool is not. | ||||
| 	OpenWorldHint *bool `json:"openWorldHint,omitempty"` | ||||
| } | ||||
|  | ||||
| func ToRawMessage(v any) json.RawMessage { | ||||
| 	if v == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	b, err := json.Marshal(v) | ||||
| 	if err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return b | ||||
| } | ||||
							
								
								
									
										47
									
								
								pkg/api/toolsets_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								pkg/api/toolsets_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,47 @@ | ||||
| package api | ||||
|  | ||||
| import ( | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"k8s.io/utils/ptr" | ||||
| ) | ||||
|  | ||||
| type ToolsetsSuite struct { | ||||
| 	suite.Suite | ||||
| } | ||||
|  | ||||
| func (s *ToolsetsSuite) TestServerTool() { | ||||
| 	s.Run("IsClusterAware", func() { | ||||
| 		s.Run("defaults to true", func() { | ||||
| 			tool := &ServerTool{} | ||||
| 			s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true by default") | ||||
| 		}) | ||||
| 		s.Run("can be set to false", func() { | ||||
| 			tool := &ServerTool{ClusterAware: ptr.To(false)} | ||||
| 			s.False(tool.IsClusterAware(), "Expected IsClusterAware to be false when set to false") | ||||
| 		}) | ||||
| 		s.Run("can be set to true", func() { | ||||
| 			tool := &ServerTool{ClusterAware: ptr.To(true)} | ||||
| 			s.True(tool.IsClusterAware(), "Expected IsClusterAware to be true when set to true") | ||||
| 		}) | ||||
| 	}) | ||||
| 	s.Run("IsTargetListProvider", func() { | ||||
| 		s.Run("defaults to false", func() { | ||||
| 			tool := &ServerTool{} | ||||
| 			s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false by default") | ||||
| 		}) | ||||
| 		s.Run("can be set to false", func() { | ||||
| 			tool := &ServerTool{TargetListProvider: ptr.To(false)} | ||||
| 			s.False(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be false when set to false") | ||||
| 		}) | ||||
| 		s.Run("can be set to true", func() { | ||||
| 			tool := &ServerTool{TargetListProvider: ptr.To(true)} | ||||
| 			s.True(tool.IsTargetListProvider(), "Expected IsTargetListProvider to be true when set to true") | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestToolsets(t *testing.T) { | ||||
| 	suite.Run(t, new(ToolsetsSuite)) | ||||
| } | ||||
							
								
								
									
										140
									
								
								pkg/config/config.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										140
									
								
								pkg/config/config.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,140 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	ClusterProviderKubeConfig = "kubeconfig" | ||||
| 	ClusterProviderInCluster  = "in-cluster" | ||||
| 	ClusterProviderDisabled   = "disabled" | ||||
| ) | ||||
|  | ||||
| // StaticConfig is the configuration for the server. | ||||
| // It allows to configure server specific settings and tools to be enabled or disabled. | ||||
| type StaticConfig struct { | ||||
| 	DeniedResources []GroupVersionKind `toml:"denied_resources"` | ||||
|  | ||||
| 	LogLevel   int    `toml:"log_level,omitempty"` | ||||
| 	Port       string `toml:"port,omitempty"` | ||||
| 	SSEBaseURL string `toml:"sse_base_url,omitempty"` | ||||
| 	KubeConfig string `toml:"kubeconfig,omitempty"` | ||||
| 	ListOutput string `toml:"list_output,omitempty"` | ||||
| 	// When true, expose only tools annotated with readOnlyHint=true | ||||
| 	ReadOnly bool `toml:"read_only,omitempty"` | ||||
| 	// When true, disable tools annotated with destructiveHint=true | ||||
| 	DisableDestructive bool     `toml:"disable_destructive,omitempty"` | ||||
| 	Toolsets           []string `toml:"toolsets,omitempty"` | ||||
| 	EnabledTools       []string `toml:"enabled_tools,omitempty"` | ||||
| 	DisabledTools      []string `toml:"disabled_tools,omitempty"` | ||||
|  | ||||
| 	// Authorization-related fields | ||||
| 	// RequireOAuth indicates whether the server requires OAuth for authentication. | ||||
| 	RequireOAuth bool `toml:"require_oauth,omitempty"` | ||||
| 	// OAuthAudience is the valid audience for the OAuth tokens, used for offline JWT claim validation. | ||||
| 	OAuthAudience string `toml:"oauth_audience,omitempty"` | ||||
| 	// ValidateToken indicates whether the server should validate the token against the Kubernetes API Server using TokenReview. | ||||
| 	ValidateToken bool `toml:"validate_token,omitempty"` | ||||
| 	// AuthorizationURL is the URL of the OIDC authorization server. | ||||
| 	// It is used for token validation and for STS token exchange. | ||||
| 	AuthorizationURL string `toml:"authorization_url,omitempty"` | ||||
| 	// DisableDynamicClientRegistration indicates whether dynamic client registration is disabled. | ||||
| 	// If true, the .well-known endpoints will not expose the registration endpoint. | ||||
| 	DisableDynamicClientRegistration bool `toml:"disable_dynamic_client_registration,omitempty"` | ||||
| 	// OAuthScopes are the supported **client** scopes requested during the **client/frontend** OAuth flow. | ||||
| 	OAuthScopes []string `toml:"oauth_scopes,omitempty"` | ||||
| 	// StsClientId is the OAuth client ID used for backend token exchange | ||||
| 	StsClientId string `toml:"sts_client_id,omitempty"` | ||||
| 	// StsClientSecret is the OAuth client secret used for backend token exchange | ||||
| 	StsClientSecret string `toml:"sts_client_secret,omitempty"` | ||||
| 	// StsAudience is the audience for the STS token exchange. | ||||
| 	StsAudience string `toml:"sts_audience,omitempty"` | ||||
| 	// StsScopes is the scopes for the STS token exchange. | ||||
| 	StsScopes            []string `toml:"sts_scopes,omitempty"` | ||||
| 	CertificateAuthority string   `toml:"certificate_authority,omitempty"` | ||||
| 	ServerURL            string   `toml:"server_url,omitempty"` | ||||
| 	// ClusterProviderStrategy is how the server finds clusters. | ||||
| 	// If set to "kubeconfig", the clusters will be loaded from those in the kubeconfig. | ||||
| 	// If set to "in-cluster", the server will use the in cluster config | ||||
| 	ClusterProviderStrategy string `toml:"cluster_provider_strategy,omitempty"` | ||||
|  | ||||
| 	// ClusterProvider-specific configurations | ||||
| 	// This map holds raw TOML primitives that will be parsed by registered provider parsers | ||||
| 	ClusterProviderConfigs map[string]toml.Primitive `toml:"cluster_provider_configs,omitempty"` | ||||
|  | ||||
| 	// Internal: parsed provider configs (not exposed to TOML package) | ||||
| 	parsedClusterProviderConfigs map[string]ProviderConfig | ||||
| } | ||||
|  | ||||
| func Default() *StaticConfig { | ||||
| 	return &StaticConfig{ | ||||
| 		ListOutput: "table", | ||||
| 		Toolsets:   []string{"core", "config", "helm"}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type GroupVersionKind struct { | ||||
| 	Group   string `toml:"group"` | ||||
| 	Version string `toml:"version"` | ||||
| 	Kind    string `toml:"kind,omitempty"` | ||||
| } | ||||
|  | ||||
| // Read reads the toml file and returns the StaticConfig. | ||||
| func Read(configPath string) (*StaticConfig, error) { | ||||
| 	configData, err := os.ReadFile(configPath) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return ReadToml(configData) | ||||
| } | ||||
|  | ||||
| // ReadToml reads the toml data and returns the StaticConfig. | ||||
| func ReadToml(configData []byte) (*StaticConfig, error) { | ||||
| 	config := Default() | ||||
| 	md, err := toml.NewDecoder(bytes.NewReader(configData)).Decode(config) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if err := config.parseClusterProviderConfigs(md); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return config, nil | ||||
| } | ||||
|  | ||||
| func (c *StaticConfig) GetProviderConfig(strategy string) (ProviderConfig, bool) { | ||||
| 	config, ok := c.parsedClusterProviderConfigs[strategy] | ||||
|  | ||||
| 	return config, ok | ||||
| } | ||||
|  | ||||
| func (c *StaticConfig) parseClusterProviderConfigs(md toml.MetaData) error { | ||||
| 	if c.parsedClusterProviderConfigs == nil { | ||||
| 		c.parsedClusterProviderConfigs = make(map[string]ProviderConfig, len(c.ClusterProviderConfigs)) | ||||
| 	} | ||||
|  | ||||
| 	for strategy, primitive := range c.ClusterProviderConfigs { | ||||
| 		parser, ok := getProviderConfigParser(strategy) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		providerConfig, err := parser(primitive, md) | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to parse config for ClusterProvider '%s': %w", strategy, err) | ||||
| 		} | ||||
|  | ||||
| 		if err := providerConfig.Validate(); err != nil { | ||||
| 			return fmt.Errorf("invalid config file for ClusterProvider '%s': %w", strategy, err) | ||||
| 		} | ||||
|  | ||||
| 		c.parsedClusterProviderConfigs[strategy] = providerConfig | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										179
									
								
								pkg/config/config_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										179
									
								
								pkg/config/config_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,179 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"io/fs" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| ) | ||||
|  | ||||
| type BaseConfigSuite struct { | ||||
| 	suite.Suite | ||||
| } | ||||
|  | ||||
| func (s *BaseConfigSuite) writeConfig(content string) string { | ||||
| 	s.T().Helper() | ||||
| 	tempDir := s.T().TempDir() | ||||
| 	path := filepath.Join(tempDir, "config.toml") | ||||
| 	err := os.WriteFile(path, []byte(content), 0644) | ||||
| 	if err != nil { | ||||
| 		s.T().Fatalf("Failed to write config file %s: %v", path, err) | ||||
| 	} | ||||
| 	return path | ||||
| } | ||||
|  | ||||
| type ConfigSuite struct { | ||||
| 	BaseConfigSuite | ||||
| } | ||||
|  | ||||
| func (s *ConfigSuite) TestReadConfigMissingFile() { | ||||
| 	config, err := Read("non-existent-config.toml") | ||||
| 	s.Run("returns error for missing file", func() { | ||||
| 		s.Require().NotNil(err, "Expected error for missing file, got nil") | ||||
| 		s.True(errors.Is(err, fs.ErrNotExist), "Expected ErrNotExist, got %v", err) | ||||
| 	}) | ||||
| 	s.Run("returns nil config for missing file", func() { | ||||
| 		s.Nil(config, "Expected nil config for missing file") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ConfigSuite) TestReadConfigInvalid() { | ||||
| 	invalidConfigPath := s.writeConfig(` | ||||
| 		[[denied_resources]] | ||||
| 		group = "apps" | ||||
| 		version = "v1" | ||||
| 		kind = "Deployment" | ||||
| 		[[denied_resources]] | ||||
| 		group = "rbac.authorization.k8s.io" | ||||
| 		version = "v1" | ||||
| 		kind = "Role | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(invalidConfigPath) | ||||
| 	s.Run("returns error for invalid file", func() { | ||||
| 		s.Require().NotNil(err, "Expected error for invalid file, got nil") | ||||
| 	}) | ||||
| 	s.Run("error message contains toml error with line number", func() { | ||||
| 		expectedError := "toml: line 9" | ||||
| 		s.Truef(strings.HasPrefix(err.Error(), expectedError), "Expected error message to contain line number, got %v", err) | ||||
| 	}) | ||||
| 	s.Run("returns nil config for invalid file", func() { | ||||
| 		s.Nil(config, "Expected nil config for missing file") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ConfigSuite) TestReadConfigValid() { | ||||
| 	validConfigPath := s.writeConfig(` | ||||
| 		log_level = 1 | ||||
| 		port = "9999" | ||||
| 		sse_base_url = "https://example.com" | ||||
| 		kubeconfig = "./path/to/config" | ||||
| 		list_output = "yaml" | ||||
| 		read_only = true | ||||
| 		disable_destructive = true | ||||
|  | ||||
| 		toolsets = ["core", "config", "helm", "metrics"] | ||||
| 		 | ||||
| 		enabled_tools = ["configuration_view", "events_list", "namespaces_list", "pods_list", "resources_list", "resources_get", "resources_create_or_update", "resources_delete"] | ||||
| 		disabled_tools = ["pods_delete", "pods_top", "pods_log", "pods_run", "pods_exec"] | ||||
|  | ||||
| 		denied_resources = [ | ||||
| 			{group = "apps", version = "v1", kind = "Deployment"}, | ||||
| 			{group = "rbac.authorization.k8s.io", version = "v1", kind = "Role"} | ||||
| 		] | ||||
| 		 | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(validConfigPath) | ||||
| 	s.Require().NotNil(config) | ||||
| 	s.Run("reads and unmarshalls file", func() { | ||||
| 		s.Nil(err, "Expected nil error for valid file") | ||||
| 		s.Require().NotNil(config, "Expected non-nil config for valid file") | ||||
| 	}) | ||||
| 	s.Run("log_level parsed correctly", func() { | ||||
| 		s.Equalf(1, config.LogLevel, "Expected LogLevel to be 1, got %d", config.LogLevel) | ||||
| 	}) | ||||
| 	s.Run("port parsed correctly", func() { | ||||
| 		s.Equalf("9999", config.Port, "Expected Port to be 9999, got %s", config.Port) | ||||
| 	}) | ||||
| 	s.Run("sse_base_url parsed correctly", func() { | ||||
| 		s.Equalf("https://example.com", config.SSEBaseURL, "Expected SSEBaseURL to be https://example.com, got %s", config.SSEBaseURL) | ||||
| 	}) | ||||
| 	s.Run("kubeconfig parsed correctly", func() { | ||||
| 		s.Equalf("./path/to/config", config.KubeConfig, "Expected KubeConfig to be ./path/to/config, got %s", config.KubeConfig) | ||||
| 	}) | ||||
| 	s.Run("list_output parsed correctly", func() { | ||||
| 		s.Equalf("yaml", config.ListOutput, "Expected ListOutput to be yaml, got %s", config.ListOutput) | ||||
| 	}) | ||||
| 	s.Run("read_only parsed correctly", func() { | ||||
| 		s.Truef(config.ReadOnly, "Expected ReadOnly to be true, got %v", config.ReadOnly) | ||||
| 	}) | ||||
| 	s.Run("disable_destructive parsed correctly", func() { | ||||
| 		s.Truef(config.DisableDestructive, "Expected DisableDestructive to be true, got %v", config.DisableDestructive) | ||||
| 	}) | ||||
| 	s.Run("toolsets", func() { | ||||
| 		s.Require().Lenf(config.Toolsets, 4, "Expected 4 toolsets, got %d", len(config.Toolsets)) | ||||
| 		for _, toolset := range []string{"core", "config", "helm", "metrics"} { | ||||
| 			s.Containsf(config.Toolsets, toolset, "Expected toolsets to contain %s", toolset) | ||||
| 		} | ||||
| 	}) | ||||
| 	s.Run("enabled_tools", func() { | ||||
| 		s.Require().Lenf(config.EnabledTools, 8, "Expected 8 enabled tools, got %d", len(config.EnabledTools)) | ||||
| 		for _, tool := range []string{"configuration_view", "events_list", "namespaces_list", "pods_list", "resources_list", "resources_get", "resources_create_or_update", "resources_delete"} { | ||||
| 			s.Containsf(config.EnabledTools, tool, "Expected enabled tools to contain %s", tool) | ||||
| 		} | ||||
| 	}) | ||||
| 	s.Run("disabled_tools", func() { | ||||
| 		s.Require().Lenf(config.DisabledTools, 5, "Expected 5 disabled tools, got %d", len(config.DisabledTools)) | ||||
| 		for _, tool := range []string{"pods_delete", "pods_top", "pods_log", "pods_run", "pods_exec"} { | ||||
| 			s.Containsf(config.DisabledTools, tool, "Expected disabled tools to contain %s", tool) | ||||
| 		} | ||||
| 	}) | ||||
| 	s.Run("denied_resources", func() { | ||||
| 		s.Require().Lenf(config.DeniedResources, 2, "Expected 2 denied resources, got %d", len(config.DeniedResources)) | ||||
| 		s.Run("contains apps/v1/Deployment", func() { | ||||
| 			s.Contains(config.DeniedResources, GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, | ||||
| 				"Expected denied resources to contain apps/v1/Deployment") | ||||
| 		}) | ||||
| 		s.Run("contains rbac.authorization.k8s.io/v1/Role", func() { | ||||
| 			s.Contains(config.DeniedResources, GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "Role"}, | ||||
| 				"Expected denied resources to contain rbac.authorization.k8s.io/v1/Role") | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ConfigSuite) TestReadConfigValidPreservesDefaultsForMissingFields() { | ||||
| 	validConfigPath := s.writeConfig(` | ||||
| 		port = "1337" | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(validConfigPath) | ||||
| 	s.Require().NotNil(config) | ||||
| 	s.Run("reads and unmarshalls file", func() { | ||||
| 		s.Nil(err, "Expected nil error for valid file") | ||||
| 		s.Require().NotNil(config, "Expected non-nil config for valid file") | ||||
| 	}) | ||||
| 	s.Run("log_level defaulted correctly", func() { | ||||
| 		s.Equalf(0, config.LogLevel, "Expected LogLevel to be 0, got %d", config.LogLevel) | ||||
| 	}) | ||||
| 	s.Run("port parsed correctly", func() { | ||||
| 		s.Equalf("1337", config.Port, "Expected Port to be 9999, got %s", config.Port) | ||||
| 	}) | ||||
| 	s.Run("list_output defaulted correctly", func() { | ||||
| 		s.Equalf("table", config.ListOutput, "Expected ListOutput to be table, got %s", config.ListOutput) | ||||
| 	}) | ||||
| 	s.Run("toolsets defaulted correctly", func() { | ||||
| 		s.Require().Lenf(config.Toolsets, 3, "Expected 3 toolsets, got %d", len(config.Toolsets)) | ||||
| 		for _, toolset := range []string{"core", "config", "helm"} { | ||||
| 			s.Containsf(config.Toolsets, toolset, "Expected toolsets to contain %s", toolset) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestConfig(t *testing.T) { | ||||
| 	suite.Run(t, new(ConfigSuite)) | ||||
| } | ||||
							
								
								
									
										33
									
								
								pkg/config/provider_config.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								pkg/config/provider_config.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| ) | ||||
|  | ||||
| // ProviderConfig is the interface that all provider-specific configurations must implement. | ||||
| // Each provider registers a factory function to parse its config from TOML primitives | ||||
| type ProviderConfig interface { | ||||
| 	Validate() error | ||||
| } | ||||
|  | ||||
| type ProviderConfigParser func(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) | ||||
|  | ||||
| var ( | ||||
| 	providerConfigParsers = make(map[string]ProviderConfigParser) | ||||
| ) | ||||
|  | ||||
| func RegisterProviderConfig(strategy string, parser ProviderConfigParser) { | ||||
| 	if _, exists := providerConfigParsers[strategy]; exists { | ||||
| 		panic(fmt.Sprintf("provider config parser already registered for strategy '%s'", strategy)) | ||||
| 	} | ||||
|  | ||||
| 	providerConfigParsers[strategy] = parser | ||||
| } | ||||
|  | ||||
| func getProviderConfigParser(strategy string) (ProviderConfigParser, bool) { | ||||
| 	provider, ok := providerConfigParsers[strategy] | ||||
|  | ||||
| 	return provider, ok | ||||
| } | ||||
							
								
								
									
										157
									
								
								pkg/config/provider_config_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										157
									
								
								pkg/config/provider_config_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,157 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| ) | ||||
|  | ||||
| type ProviderConfigSuite struct { | ||||
| 	BaseConfigSuite | ||||
| 	originalProviderConfigParsers map[string]ProviderConfigParser | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) SetupTest() { | ||||
| 	s.originalProviderConfigParsers = make(map[string]ProviderConfigParser) | ||||
| 	for k, v := range providerConfigParsers { | ||||
| 		s.originalProviderConfigParsers[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TearDownTest() { | ||||
| 	providerConfigParsers = make(map[string]ProviderConfigParser) | ||||
| 	for k, v := range s.originalProviderConfigParsers { | ||||
| 		providerConfigParsers[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| type ProviderConfigForTest struct { | ||||
| 	BoolProp bool   `toml:"bool_prop"` | ||||
| 	StrProp  string `toml:"str_prop"` | ||||
| 	IntProp  int    `toml:"int_prop"` | ||||
| } | ||||
|  | ||||
| var _ ProviderConfig = (*ProviderConfigForTest)(nil) | ||||
|  | ||||
| func (p *ProviderConfigForTest) Validate() error { | ||||
| 	if p.StrProp == "force-error" { | ||||
| 		return errors.New("validation error forced by test") | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func providerConfigForTestParser(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) { | ||||
| 	var providerConfigForTest ProviderConfigForTest | ||||
| 	if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &providerConfigForTest, nil | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TestRegisterProviderConfig() { | ||||
| 	s.Run("panics when registering duplicate provider config parser", func() { | ||||
| 		s.Panics(func() { | ||||
| 			RegisterProviderConfig("test", providerConfigForTestParser) | ||||
| 			RegisterProviderConfig("test", providerConfigForTestParser) | ||||
| 		}, "Expected panic when registering duplicate provider config parser") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TestReadConfigValid() { | ||||
| 	RegisterProviderConfig("test", providerConfigForTestParser) | ||||
| 	validConfigPath := s.writeConfig(` | ||||
| 		cluster_provider_strategy = "test" | ||||
| 		[cluster_provider_configs.test] | ||||
| 		bool_prop = true | ||||
| 		str_prop = "a string" | ||||
| 		int_prop = 42 | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(validConfigPath) | ||||
| 	s.Run("returns no error for valid file with registered provider config", func() { | ||||
| 		s.Require().NoError(err, "Expected no error for valid file, got %v", err) | ||||
| 	}) | ||||
| 	s.Run("returns config for valid file with registered provider config", func() { | ||||
| 		s.Require().NotNil(config, "Expected non-nil config for valid file") | ||||
| 	}) | ||||
| 	s.Run("parses provider config correctly", func() { | ||||
| 		providerConfig, ok := config.GetProviderConfig("test") | ||||
| 		s.Require().True(ok, "Expected to find provider config for strategy 'test'") | ||||
| 		s.Require().NotNil(providerConfig, "Expected non-nil provider config for strategy 'test'") | ||||
| 		testProviderConfig, ok := providerConfig.(*ProviderConfigForTest) | ||||
| 		s.Require().True(ok, "Expected provider config to be of type *ProviderConfigForTest") | ||||
| 		s.Equal(true, testProviderConfig.BoolProp, "Expected BoolProp to be true") | ||||
| 		s.Equal("a string", testProviderConfig.StrProp, "Expected StrProp to be 'a string'") | ||||
| 		s.Equal(42, testProviderConfig.IntProp, "Expected IntProp to be 42") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TestReadConfigInvalidProviderConfig() { | ||||
| 	RegisterProviderConfig("test", providerConfigForTestParser) | ||||
| 	invalidConfigPath := s.writeConfig(` | ||||
| 		cluster_provider_strategy = "test" | ||||
| 		[cluster_provider_configs.test] | ||||
| 		bool_prop = true | ||||
| 		str_prop = "force-error" | ||||
| 		int_prop = 42 | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(invalidConfigPath) | ||||
| 	s.Run("returns error for invalid provider config", func() { | ||||
| 		s.Require().NotNil(err, "Expected error for invalid provider config, got nil") | ||||
| 		s.ErrorContains(err, "validation error forced by test", "Expected validation error from provider config") | ||||
| 	}) | ||||
| 	s.Run("returns nil config for invalid provider config", func() { | ||||
| 		s.Nil(config, "Expected nil config for invalid provider config") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TestReadConfigUnregisteredProviderConfig() { | ||||
| 	invalidConfigPath := s.writeConfig(` | ||||
| 		cluster_provider_strategy = "unregistered" | ||||
| 		[cluster_provider_configs.unregistered] | ||||
| 		bool_prop = true | ||||
| 		str_prop = "a string" | ||||
| 		int_prop = 42 | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(invalidConfigPath) | ||||
| 	s.Run("returns no error for unregistered provider config", func() { | ||||
| 		s.Require().NoError(err, "Expected no error for unregistered provider config, got %v", err) | ||||
| 	}) | ||||
| 	s.Run("returns config for unregistered provider config", func() { | ||||
| 		s.Require().NotNil(config, "Expected non-nil config for unregistered provider config") | ||||
| 	}) | ||||
| 	s.Run("does not parse unregistered provider config", func() { | ||||
| 		_, ok := config.GetProviderConfig("unregistered") | ||||
| 		s.Require().False(ok, "Expected no provider config for unregistered strategy") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderConfigSuite) TestReadConfigParserError() { | ||||
| 	RegisterProviderConfig("test", func(primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) { | ||||
| 		return nil, errors.New("parser error forced by test") | ||||
| 	}) | ||||
| 	invalidConfigPath := s.writeConfig(` | ||||
| 		cluster_provider_strategy = "test" | ||||
| 		[cluster_provider_configs.test] | ||||
| 		bool_prop = true | ||||
| 		str_prop = "a string" | ||||
| 		int_prop = 42 | ||||
| 	`) | ||||
|  | ||||
| 	config, err := Read(invalidConfigPath) | ||||
| 	s.Run("returns error for provider config parser error", func() { | ||||
| 		s.Require().NotNil(err, "Expected error for provider config parser error, got nil") | ||||
| 		s.ErrorContains(err, "parser error forced by test", "Expected parser error from provider config") | ||||
| 	}) | ||||
| 	s.Run("returns nil config for provider config parser error", func() { | ||||
| 		s.Nil(config, "Expected nil config for provider config parser error") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestProviderConfig runs the ProviderConfigSuite under the standard test runner.
func TestProviderConfig(t *testing.T) {
	suite.Run(t, new(ProviderConfigSuite))
}
							
								
								
									
										142
									
								
								pkg/helm/helm.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										142
									
								
								pkg/helm/helm.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,142 @@ | ||||
| package helm | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"helm.sh/helm/v3/pkg/action" | ||||
| 	"helm.sh/helm/v3/pkg/chart/loader" | ||||
| 	"helm.sh/helm/v3/pkg/cli" | ||||
| 	"helm.sh/helm/v3/pkg/registry" | ||||
| 	"helm.sh/helm/v3/pkg/release" | ||||
| 	"k8s.io/cli-runtime/pkg/genericclioptions" | ||||
| 	"log" | ||||
| 	"sigs.k8s.io/yaml" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// Kubernetes is the minimal client surface Helm needs: a REST client getter
// for building the Helm action configuration, plus namespace defaulting for
// targeting releases.
type Kubernetes interface {
	genericclioptions.RESTClientGetter
	// NamespaceOrDefault returns the given namespace, or the client's
	// current/default namespace when the argument is empty.
	NamespaceOrDefault(namespace string) string
}

// Helm wraps the Helm SDK actions (install, list, uninstall) around a
// Kubernetes client abstraction.
type Helm struct {
	kubernetes Kubernetes
}

// NewHelm creates a new Helm instance bound to the given Kubernetes client.
func NewHelm(kubernetes Kubernetes) *Helm {
	return &Helm{kubernetes: kubernetes}
}
|  | ||||
// Install installs the given chart (any reference resolvable by Helm's chart
// locator) into the target namespace, waiting up to 5 minutes for the
// release to complete. If name is empty, a release name is generated from
// the chart. Returns a simplified YAML summary of the installed release.
func (h *Helm) Install(ctx context.Context, chart string, values map[string]interface{}, name string, namespace string) (string, error) {
	cfg, err := h.newAction(h.kubernetes.NamespaceOrDefault(namespace), false)
	if err != nil {
		return "", err
	}
	install := action.NewInstall(cfg)
	if name == "" {
		// No explicit release name: let Helm derive one from the chart.
		install.GenerateName = true
		install.ReleaseName, _, _ = install.NameAndChart([]string{chart})
	} else {
		install.ReleaseName = name
	}
	install.Namespace = h.kubernetes.NamespaceOrDefault(namespace)
	install.Wait = true
	install.Timeout = 5 * time.Minute
	install.DryRun = false

	// Resolve the chart reference to a local archive, then load it.
	chartRequested, err := install.LocateChart(chart, cli.New())
	if err != nil {
		return "", err
	}
	chartLoaded, err := loader.Load(chartRequested)
	if err != nil {
		return "", err
	}

	installedRelease, err := install.RunWithContext(ctx, chartLoaded, values)
	if err != nil {
		return "", err
	}
	// Return a trimmed-down YAML view rather than the full release object.
	ret, err := yaml.Marshal(simplify(installedRelease))
	if err != nil {
		return "", err
	}
	return string(ret), nil
}
|  | ||||
// List lists the releases in the given namespace (defaulted when empty), or
// across all namespaces when allNamespaces is true. Returns a simplified
// YAML summary, or a plain message when there are no releases.
func (h *Helm) List(namespace string, allNamespaces bool) (string, error) {
	cfg, err := h.newAction(namespace, allNamespaces)
	if err != nil {
		return "", err
	}
	list := action.NewList(cfg)
	list.AllNamespaces = allNamespaces
	releases, err := list.Run()
	if err != nil {
		return "", err
	} else if len(releases) == 0 {
		return "No Helm releases found", nil
	}
	ret, err := yaml.Marshal(simplify(releases...))
	if err != nil {
		return "", err
	}
	return string(ret), nil
}
|  | ||||
// Uninstall removes the named release from the given namespace (defaulted
// when empty), waiting up to 5 minutes for deletion. A missing release is
// not an error: a "not found" message is returned instead.
func (h *Helm) Uninstall(name string, namespace string) (string, error) {
	cfg, err := h.newAction(h.kubernetes.NamespaceOrDefault(namespace), false)
	if err != nil {
		return "", err
	}
	uninstall := action.NewUninstall(cfg)
	uninstall.IgnoreNotFound = true // treat a missing release as a no-op
	uninstall.Wait = true
	uninstall.Timeout = 5 * time.Minute
	uninstalledRelease, err := uninstall.Run(name)
	// With IgnoreNotFound set, nil release + nil error means it didn't exist.
	if uninstalledRelease == nil && err == nil {
		return fmt.Sprintf("Release %s not found", name), nil
	} else if err != nil {
		return "", err
	}
	return fmt.Sprintf("Uninstalled release %s %s", uninstalledRelease.Release.Name, uninstalledRelease.Info), nil
}
|  | ||||
// newAction builds a Helm action.Configuration bound to this instance's
// Kubernetes client. When allNamespaces is true the configuration is left
// namespace-less so list operations can span the cluster.
func (h *Helm) newAction(namespace string, allNamespaces bool) (*action.Configuration, error) {
	cfg := new(action.Configuration)
	applicableNamespace := ""
	if !allNamespaces {
		applicableNamespace = h.kubernetes.NamespaceOrDefault(namespace)
	}
	// The registry client enables pulling charts from OCI registries.
	registryClient, err := registry.NewClient()
	if err != nil {
		return nil, err
	}
	cfg.RegistryClient = registryClient
	return cfg, cfg.Init(h.kubernetes, applicableNamespace, "", log.Printf)
}
|  | ||||
| func simplify(release ...*release.Release) []map[string]interface{} { | ||||
| 	ret := make([]map[string]interface{}, len(release)) | ||||
| 	for i, r := range release { | ||||
| 		ret[i] = map[string]interface{}{ | ||||
| 			"name":      r.Name, | ||||
| 			"namespace": r.Namespace, | ||||
| 			"revision":  r.Version, | ||||
| 		} | ||||
| 		if r.Chart != nil { | ||||
| 			ret[i]["chart"] = r.Chart.Metadata.Name | ||||
| 			ret[i]["chartVersion"] = r.Chart.Metadata.Version | ||||
| 			ret[i]["appVersion"] = r.Chart.Metadata.AppVersion | ||||
| 		} | ||||
| 		if r.Info != nil { | ||||
| 			ret[i]["status"] = r.Info.Status.String() | ||||
| 			if !r.Info.LastDeployed.IsZero() { | ||||
| 				ret[i]["lastDeployed"] = r.Info.LastDeployed.Format(time.RFC1123Z) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return ret | ||||
| } | ||||
							
								
								
									
										271
									
								
								pkg/http/authorization.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										271
									
								
								pkg/http/authorization.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,271 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
| 	"github.com/go-jose/go-jose/v4" | ||||
| 	"github.com/go-jose/go-jose/v4/jwt" | ||||
| 	"golang.org/x/oauth2" | ||||
| 	authenticationapiv1 "k8s.io/api/authentication/v1" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/utils/strings/slices" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/mcp" | ||||
| ) | ||||
|  | ||||
// KubernetesApiTokenVerifier validates bearer tokens against a Kubernetes
// API server and exposes the request parameter name used to select the
// target cluster for that validation.
type KubernetesApiTokenVerifier interface {
	// KubernetesApiVerifyToken TODO: clarify proper implementation
	KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error)
	// GetTargetParameterName returns the parameter name used for target identification in MCP requests
	GetTargetParameterName() string
}
|  | ||||
| // extractTargetFromRequest extracts cluster parameter from MCP request body | ||||
| func extractTargetFromRequest(r *http.Request, targetName string) (string, error) { | ||||
| 	if r.Body == nil { | ||||
| 		return "", nil | ||||
| 	} | ||||
|  | ||||
| 	// Read the body | ||||
| 	body, err := io.ReadAll(r.Body) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	// Restore the body for downstream handlers | ||||
| 	r.Body = io.NopCloser(bytes.NewBuffer(body)) | ||||
|  | ||||
| 	// Parse the MCP request | ||||
| 	var mcpRequest struct { | ||||
| 		Params struct { | ||||
| 			Arguments map[string]interface{} `json:"arguments"` | ||||
| 		} `json:"params"` | ||||
| 	} | ||||
|  | ||||
| 	if err := json.Unmarshal(body, &mcpRequest); err != nil { | ||||
| 		// If we can't parse the request, just return empty cluster (will use default) | ||||
| 		return "", nil | ||||
| 	} | ||||
|  | ||||
| 	// Extract target parameter | ||||
| 	if cluster, ok := mcpRequest.Params.Arguments[targetName].(string); ok { | ||||
| 		return cluster, nil | ||||
| 	} | ||||
|  | ||||
| 	return "", nil | ||||
| } | ||||
|  | ||||
| // write401 sends a 401/Unauthorized response with WWW-Authenticate header. | ||||
| func write401(w http.ResponseWriter, wwwAuthenticateHeader, errorType, message string) { | ||||
| 	w.Header().Set("WWW-Authenticate", wwwAuthenticateHeader+fmt.Sprintf(`, error="%s"`, errorType)) | ||||
| 	http.Error(w, message, http.StatusUnauthorized) | ||||
| } | ||||
|  | ||||
// AuthorizationMiddleware validates the OAuth flow for protected resources.
//
// The flow is skipped for unprotected resources, such as health checks and well-known endpoints.
//
//	There are several auth scenarios supported by this middleware:
//
//	 1. requireOAuth is false:
//
//	    - The OAuth flow is skipped, and the server is effectively unprotected.
//	    - The request is passed to the next handler without any validation.
//
//	    see TestAuthorizationRequireOAuthFalse
//
//	 2. requireOAuth is set to true, server is protected:
//
//	    2.1. Raw Token Validation (oidcProvider is nil):
//	         - The token is validated offline for basic sanity checks (expiration).
//	         - If OAuthAudience is set, the token is validated against the audience.
//	         - If ValidateToken is set, the token is then used against the Kubernetes API Server for TokenReview.
//
//	         see TestAuthorizationRawToken
//
//	    2.2. OIDC Provider Validation (oidcProvider is not nil):
//	         - The token is validated offline for basic sanity checks (audience and expiration).
//	         - If OAuthAudience is set, the token is validated against the audience.
//	         - The token is then validated against the OIDC Provider.
//	         - If ValidateToken is set, the token is then used against the Kubernetes API Server for TokenReview.
//
//	         see TestAuthorizationOidcToken
//
//	    2.3. OIDC Token Exchange (oidcProvider is not nil, StsClientId and StsAudience are set):
//	         - The token is validated offline for basic sanity checks (audience and expiration).
//	         - If OAuthAudience is set, the token is validated against the audience.
//	         - The token is then validated against the OIDC Provider.
//	         - If the token is valid, an external account token exchange is performed using
//	           the OIDC Provider to obtain a new token with the specified audience and scopes.
//	         - If ValidateToken is set, the exchanged token is then used against the Kubernetes API Server for TokenReview.
//
//	         see TestAuthorizationOidcTokenExchange
func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier, httpClient *http.Client) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Health checks and well-known endpoints are always unprotected.
			if r.URL.Path == healthEndpoint || slices.Contains(WellKnownEndpoints, r.URL.EscapedPath()) {
				next.ServeHTTP(w, r)
				return
			}
			if !staticConfig.RequireOAuth {
				next.ServeHTTP(w, r)
				return
			}

			// Challenge advertised on any 401 response below.
			wwwAuthenticateHeader := "Bearer realm=\"Kubernetes MCP Server\""
			if staticConfig.OAuthAudience != "" {
				wwwAuthenticateHeader += fmt.Sprintf(`, audience="%s"`, staticConfig.OAuthAudience)
			}

			authHeader := r.Header.Get("Authorization")
			if authHeader == "" || !strings.HasPrefix(authHeader, "Bearer ") {
				klog.V(1).Infof("Authentication failed - missing or invalid bearer token: %s %s from %s", r.Method, r.URL.Path, r.RemoteAddr)
				write401(w, wwwAuthenticateHeader, "missing_token", "Unauthorized: Bearer token required")
				return
			}

			token := strings.TrimPrefix(authHeader, "Bearer ")

			// The checks below form an error-gated chain: each step runs only
			// while err is still nil, and the single error check at the end
			// rejects the request if any step failed.
			claims, err := ParseJWTClaims(token)
			if err == nil && claims == nil {
				// Impossible case, but just in case
				err = fmt.Errorf("failed to parse JWT claims from token")
			}
			// Offline validation
			if err == nil {
				err = claims.ValidateOffline(staticConfig.OAuthAudience)
			}
			// Online OIDC provider validation
			if err == nil {
				err = claims.ValidateWithProvider(r.Context(), staticConfig.OAuthAudience, oidcProvider)
			}
			// Scopes propagation, they are likely to be used for authorization.
			if err == nil {
				scopes := claims.GetScopes()
				klog.V(2).Infof("JWT token validated - Scopes: %v", scopes)
				r = r.WithContext(context.WithValue(r.Context(), mcp.TokenScopesContextKey, scopes))
			}
			// Token exchange with OIDC provider
			sts := NewFromConfig(staticConfig, oidcProvider)
			// TODO: Maybe the token had already been exchanged, if it has the right audience and scopes, we can skip this step.
			if err == nil && sts.IsEnabled() {
				var exchangedToken *oauth2.Token
				// If the token is valid, we can exchange it for a new token with the specified audience and scopes.
				ctx := r.Context()
				if httpClient != nil {
					// Allow callers (e.g. tests) to inject a custom HTTP client for the exchange.
					ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient)
				}
				exchangedToken, err = sts.ExternalAccountTokenExchange(ctx, &oauth2.Token{
					AccessToken: claims.Token,
					TokenType:   "Bearer",
				})
				if err == nil {
					// Replace the original token with the exchanged token
					token = exchangedToken.AccessToken
					claims, err = ParseJWTClaims(token)
					r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) // TODO: Implement test to verify, THIS IS A CRITICAL PART
				}
			}
			// Kubernetes API Server TokenReview validation
			if err == nil && staticConfig.ValidateToken {
				// The target cluster is read from the MCP request body so the
				// TokenReview hits the cluster the tool call is aimed at.
				targetParameterName := verifier.GetTargetParameterName()
				cluster, clusterErr := extractTargetFromRequest(r, targetParameterName)
				if clusterErr != nil {
					klog.V(2).Infof("Failed to extract cluster from request, using default: %v", clusterErr)
				}
				err = claims.ValidateWithKubernetesApi(r.Context(), staticConfig.OAuthAudience, cluster, verifier)
			}
			if err != nil {
				klog.V(1).Infof("Authentication failed - JWT validation error: %s %s from %s, error: %v", r.Method, r.URL.Path, r.RemoteAddr, err)
				write401(w, wwwAuthenticateHeader, "invalid_token", "Unauthorized: Invalid token")
				return
			}

			next.ServeHTTP(w, r)
		})
	}
}
|  | ||||
// allSignatureAlgorithms lists every JOSE signature algorithm accepted when
// parsing tokens. Parsing here does not verify signatures; real verification
// is delegated to the OIDC provider and/or the Kubernetes API server.
var allSignatureAlgorithms = []jose.SignatureAlgorithm{
	jose.EdDSA,
	jose.HS256,
	jose.HS384,
	jose.HS512,
	jose.RS256,
	jose.RS384,
	jose.RS512,
	jose.ES256,
	jose.ES384,
	jose.ES512,
	jose.PS256,
	jose.PS384,
	jose.PS512,
}

// JWTClaims augments the standard JWT claims with the raw token string and
// the OAuth "scope" claim.
type JWTClaims struct {
	jwt.Claims
	// Token is the raw compact-serialized JWT these claims were parsed from;
	// it is excluded from JSON (de)serialization.
	Token string `json:"-"`
	// Scope is the space-delimited OAuth scope claim, if present.
	Scope string `json:"scope,omitempty"`
}

// GetScopes splits the space-delimited scope claim into individual scopes;
// it returns nil when the claim is absent.
func (c *JWTClaims) GetScopes() []string {
	if c.Scope == "" {
		return nil
	}
	return strings.Fields(c.Scope)
}
|  | ||||
// ValidateOffline Checks if the JWT claims are valid and if the audience matches the expected one.
// No signature verification happens here: expiry/not-before and (when
// audience is non-empty) the audience claim are checked from the already
// parsed claims.
func (c *JWTClaims) ValidateOffline(audience string) error {
	expected := jwt.Expected{}
	if audience != "" {
		// Only enforce the audience when one is configured.
		expected.AnyAudience = jwt.Audience{audience}
	}
	if err := c.Validate(expected); err != nil {
		return fmt.Errorf("JWT token validation error: %v", err)
	}
	return nil
}

// ValidateWithProvider validates the JWT claims against the OIDC provider.
// A nil provider skips the check; the configured audience is used as the
// expected OIDC client ID.
func (c *JWTClaims) ValidateWithProvider(ctx context.Context, audience string, provider *oidc.Provider) error {
	if provider != nil {
		verifier := provider.Verifier(&oidc.Config{
			ClientID: audience,
		})
		_, err := verifier.Verify(ctx, c.Token)
		if err != nil {
			return fmt.Errorf("OIDC token validation error: %v", err)
		}
	}
	return nil
}

// ValidateWithKubernetesApi validates the raw token via the given verifier
// (Kubernetes TokenReview) for the selected cluster. A nil verifier skips
// the check.
func (c *JWTClaims) ValidateWithKubernetesApi(ctx context.Context, audience, cluster string, verifier KubernetesApiTokenVerifier) error {
	if verifier != nil {
		_, _, err := verifier.KubernetesApiVerifyToken(ctx, cluster, c.Token, audience)
		if err != nil {
			return fmt.Errorf("kubernetes API token validation error: %v", err)
		}
	}
	return nil
}
|  | ||||
| func ParseJWTClaims(token string) (*JWTClaims, error) { | ||||
| 	tkn, err := jwt.ParseSigned(token, allSignatureAlgorithms) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to parse JWT token: %w", err) | ||||
| 	} | ||||
| 	claims := &JWTClaims{} | ||||
| 	err = tkn.UnsafeClaimsWithoutVerification(claims) | ||||
| 	claims.Token = token | ||||
| 	return claims, err | ||||
| } | ||||
							
								
								
									
										220
									
								
								pkg/http/authorization_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										220
									
								
								pkg/http/authorization_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,220 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/go-jose/go-jose/v4/jwt" | ||||
| ) | ||||
|  | ||||
// Static JWT fixtures used by the tests below; the jwt.io link above each
// constant decodes its payload. None of these are real secrets.
const (
	// https://jwt.io/#token=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MjUzNDAyMjk3MTk5LCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCJ9.ld9aJaQX5k44KOV1bv8MCY2RceAZ9jAjN2vKswKmINNiOpRMl0f8Y0trrq7gdRlKwGLsCUjz8hbHsGcM43QtNrcwfvH5imRnlAKANPUgswwEadCTjASihlo6ADsn9fjAWB4viplFwq8VdzcwpcyActYJi2TBFoRq204STZJIcAW_B40HOuCB2XxQ81V4_XWLzL03Bt-YmYUhliiiE5YSKS1WEEWIbdel--b7Gvp-VS1I2eeiOqV3SelMBHbF9EwKGAkyObg0JhGqr5XHLd6WOmhvLus4eCkyakQMgr2tZIdvbt2yEUDiId6r27tlgAPLmqlyYMEhyiM212_Sth3T3Q // notsecret
	tokenBasicNotExpired = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MjUzNDAyMjk3MTk5LCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCJ9.ld9aJaQX5k44KOV1bv8MCY2RceAZ9jAjN2vKswKmINNiOpRMl0f8Y0trrq7gdRlKwGLsCUjz8hbHsGcM43QtNrcwfvH5imRnlAKANPUgswwEadCTjASihlo6ADsn9fjAWB4viplFwq8VdzcwpcyActYJi2TBFoRq204STZJIcAW_B40HOuCB2XxQ81V4_XWLzL03Bt-YmYUhliiiE5YSKS1WEEWIbdel--b7Gvp-VS1I2eeiOqV3SelMBHbF9EwKGAkyObg0JhGqr5XHLd6WOmhvLus4eCkyakQMgr2tZIdvbt2yEUDiId6r27tlgAPLmqlyYMEhyiM212_Sth3T3Q" // notsecret
	// https://jwt.io/#token=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MSwiaWF0IjowLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbCIsImp0aSI6Ijk5MjIyZDU2LTM0MGUtNGViNi04NTg4LTI2MTQxMWYzNWQyNiIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoiZGVmYXVsdCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkZWZhdWx0IiwidWlkIjoiZWFjYjZhZDItODBiNy00MTc5LTg0M2QtOTJlYjFlNmJiYmE2In19LCJuYmYiOjAsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.iVrxt6glbY3Qe_mEtK-lYpx4Z3VC1a7zgGRSmfu29pMmnKhlTk56y0Wx45DQ4PSYCTwC6CJnGGZNbJyr4JS8PQ // notsecret
	tokenBasicExpired = "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MSwiaWF0IjowLCJpc3MiOiJodHRwczovL2t1YmVybmV0ZXMuZGVmYXVsdC5zdmMuY2x1c3Rlci5sb2NhbCIsImp0aSI6Ijk5MjIyZDU2LTM0MGUtNGViNi04NTg4LTI2MTQxMWYzNWQyNiIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoiZGVmYXVsdCIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJkZWZhdWx0IiwidWlkIjoiZWFjYjZhZDItODBiNy00MTc5LTg0M2QtOTJlYjFlNmJiYmE2In19LCJuYmYiOjAsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.iVrxt6glbY3Qe_mEtK-lYpx4Z3VC1a7zgGRSmfu29pMmnKhlTk56y0Wx45DQ4PSYCTwC6CJnGGZNbJyr4JS8PQ" // notsecret
	// https://jwt.io/#token=eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MjUzNDAyMjk3MTk5LCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCIsInNjb3BlIjoicmVhZCB3cml0ZSJ9.m5mFXp0TDSvgLevQ76nX65N14w1RxTClMaannLLOuBIUEsmXhMYZjGtf5mWMcxVOkSh65rLFiKugaMXgv877Mg // notsecret
	tokenMultipleAudienceNotExpired = "eyJ0eXAiOiJKV1QiLCJhbGciOiJFUzI1NiIsImtpZCI6Ijk4ZDU3YmUwNWI3ZjUzNWIwMzYyYjg2MDJhNTJlNGYxIn0.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJtY3Atc2VydmVyIl0sImV4cCI6MjUzNDAyMjk3MTk5LCJpYXQiOjAsImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwianRpIjoiOTkyMjJkNTYtMzQwZS00ZWI2LTg1ODgtMjYxNDExZjM1ZDI2Iiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJkZWZhdWx0Iiwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiJlYWNiNmFkMi04MGI3LTQxNzktODQzZC05MmViMWU2YmJiYTYifX0sIm5iZiI6MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6ZGVmYXVsdCIsInNjb3BlIjoicmVhZCB3cml0ZSJ9.m5mFXp0TDSvgLevQ76nX65N14w1RxTClMaannLLOuBIUEsmXhMYZjGtf5mWMcxVOkSh65rLFiKugaMXgv877Mg" // notsecret
)
|  | ||||
| func TestParseJWTClaimsPayloadValid(t *testing.T) { | ||||
| 	basicClaims, err := ParseJWTClaims(tokenBasicNotExpired) | ||||
| 	t.Run("Is parseable", func(t *testing.T) { | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("expected no error, got %v", err) | ||||
| 		} | ||||
| 		if basicClaims == nil { | ||||
| 			t.Fatal("expected claims, got nil") | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("Parses issuer", func(t *testing.T) { | ||||
| 		if basicClaims.Issuer != "https://kubernetes.default.svc.cluster.local" { | ||||
| 			t.Errorf("expected issuer 'https://kubernetes.default.svc.cluster.local', got %s", basicClaims.Issuer) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("Parses audience", func(t *testing.T) { | ||||
| 		expectedAudiences := []string{"https://kubernetes.default.svc.cluster.local", "mcp-server"} | ||||
| 		for _, expected := range expectedAudiences { | ||||
| 			if !basicClaims.Audience.Contains(expected) { | ||||
| 				t.Errorf("expected audience to contain %s", expected) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("Parses expiration", func(t *testing.T) { | ||||
| 		if *basicClaims.Expiry != jwt.NumericDate(253402297199) { | ||||
| 			t.Errorf("expected expiration 253402297199, got %d", basicClaims.Expiry) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("Parses scope", func(t *testing.T) { | ||||
| 		scopeClaims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("expected no error, got %v", err) | ||||
| 		} | ||||
| 		if scopeClaims == nil { | ||||
| 			t.Fatal("expected claims, got nil") | ||||
| 		} | ||||
|  | ||||
| 		scopes := scopeClaims.GetScopes() | ||||
|  | ||||
| 		expectedScopes := []string{"read", "write"} | ||||
| 		if len(scopes) != len(expectedScopes) { | ||||
| 			t.Errorf("expected %d scopes, got %d", len(expectedScopes), len(scopes)) | ||||
| 		} | ||||
| 		for i, expectedScope := range expectedScopes { | ||||
| 			if scopes[i] != expectedScope { | ||||
| 				t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("Parses expired token", func(t *testing.T) { | ||||
| 		expiredClaims, err := ParseJWTClaims(tokenBasicExpired) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("expected no error, got %v", err) | ||||
| 		} | ||||
|  | ||||
| 		if *expiredClaims.Expiry != jwt.NumericDate(1) { | ||||
| 			t.Errorf("expected expiration 1, got %d", basicClaims.Expiry) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestParseJWTClaimsPayloadInvalid(t *testing.T) { | ||||
| 	t.Run("invalid token segments", func(t *testing.T) { | ||||
| 		invalidToken := "header.payload.signature.extra" | ||||
|  | ||||
| 		_, err := ParseJWTClaims(invalidToken) | ||||
| 		if err == nil { | ||||
| 			t.Fatal("expected error for invalid token segments, got nil") | ||||
| 		} | ||||
|  | ||||
| 		if !strings.Contains(err.Error(), "compact JWS format must have three parts") { | ||||
| 			t.Errorf("expected invalid token segments error message, got %v", err) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("invalid base64 payload", func(t *testing.T) { | ||||
| 		invalidPayload := strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid") | ||||
|  | ||||
| 		_, err := ParseJWTClaims(invalidPayload) | ||||
| 		if err == nil { | ||||
| 			t.Fatal("expected error for invalid base64, got nil") | ||||
| 		} | ||||
|  | ||||
| 		if !strings.Contains(err.Error(), "illegal base64 data") { | ||||
| 			t.Errorf("expected decode error message, got %v", err) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestJWTTokenValidateOffline exercises offline claim validation: expiry
// checking and audience matching. No signature verification is involved.
func TestJWTTokenValidateOffline(t *testing.T) {
	t.Run("expired token returns error", func(t *testing.T) {
		claims, err := ParseJWTClaims(tokenBasicExpired)
		if err != nil {
			t.Fatalf("expected no error for expired token parsing, got %v", err)
		}

		err = claims.ValidateOffline("mcp-server")
		if err == nil {
			t.Fatalf("expected error for expired token, got nil")
		}

		if !strings.Contains(err.Error(), "token is expired (exp)") {
			t.Errorf("expected expiration error message, got %v", err)
		}
	})

	t.Run("multiple audiences with correct one", func(t *testing.T) {
		claims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired)
		if err != nil {
			t.Fatalf("expected no error for multiple audience token parsing, got %v", err)
		}
		if claims == nil {
			t.Fatalf("expected claims to be returned, got nil")
		}

		// Token lists two audiences; validation passes when any one matches.
		err = claims.ValidateOffline("mcp-server")
		if err != nil {
			t.Fatalf("expected no error for valid audience, got %v", err)
		}
	})

	t.Run("multiple audiences with mismatch returns error", func(t *testing.T) {
		claims, err := ParseJWTClaims(tokenMultipleAudienceNotExpired)
		if err != nil {
			t.Fatalf("expected no error for multiple audience token parsing, got %v", err)
		}
		if claims == nil {
			t.Fatalf("expected claims to be returned, got nil")
		}

		err = claims.ValidateOffline("missing-audience")
		if err == nil {
			t.Fatalf("expected error for token with wrong audience, got nil")
		}

		if !strings.Contains(err.Error(), "invalid audience claim (aud)") {
			t.Errorf("expected audience mismatch error, got %v", err)
		}
	})
}
|  | ||||
| func TestJWTClaimsGetScopes(t *testing.T) { | ||||
| 	t.Run("no scopes", func(t *testing.T) { | ||||
| 		claims, err := ParseJWTClaims(tokenBasicExpired) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("expected no error for parsing token, got %v", err) | ||||
| 		} | ||||
|  | ||||
| 		if scopes := claims.GetScopes(); len(scopes) != 0 { | ||||
| 			t.Errorf("expected no scopes, got %d", len(scopes)) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("single scope", func(t *testing.T) { | ||||
| 		claims := &JWTClaims{ | ||||
| 			Scope: "read", | ||||
| 		} | ||||
| 		scopes := claims.GetScopes() | ||||
| 		expected := []string{"read"} | ||||
|  | ||||
| 		if len(scopes) != 1 { | ||||
| 			t.Errorf("expected 1 scope, got %d", len(scopes)) | ||||
| 		} | ||||
| 		if scopes[0] != expected[0] { | ||||
| 			t.Errorf("expected scope 'read', got '%s'", scopes[0]) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	t.Run("multiple scopes", func(t *testing.T) { | ||||
| 		claims := &JWTClaims{ | ||||
| 			Scope: "read write admin", | ||||
| 		} | ||||
| 		scopes := claims.GetScopes() | ||||
| 		expected := []string{"read", "write", "admin"} | ||||
|  | ||||
| 		if len(scopes) != 3 { | ||||
| 			t.Errorf("expected 3 scopes, got %d", len(scopes)) | ||||
| 		} | ||||
|  | ||||
| 		for i, expectedScope := range expected { | ||||
| 			if i >= len(scopes) || scopes[i] != expectedScope { | ||||
| 				t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	t.Run("scopes with extra whitespace", func(t *testing.T) { | ||||
| 		claims := &JWTClaims{ | ||||
| 			Scope: "  read   write   admin  ", | ||||
| 		} | ||||
| 		scopes := claims.GetScopes() | ||||
| 		expected := []string{"read", "write", "admin"} | ||||
|  | ||||
| 		if len(scopes) != 3 { | ||||
| 			t.Errorf("expected 3 scopes, got %d", len(scopes)) | ||||
| 		} | ||||
|  | ||||
| 		for i, expectedScope := range expected { | ||||
| 			if i >= len(scopes) || scopes[i] != expectedScope { | ||||
| 				t.Errorf("expected scope[%d] to be '%s', got '%s'", i, expectedScope, scopes[i]) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
							
								
								
									
										85
									
								
								pkg/http/http.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										85
									
								
								pkg/http/http.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,85 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"os/signal" | ||||
| 	"syscall" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/mcp" | ||||
| ) | ||||
|  | ||||
// Paths registered on the HTTP mux served by Serve.
const (
	healthEndpoint     = "/healthz" // health probe, served unauthenticated
	mcpEndpoint        = "/mcp"     // streamable HTTP transport
	sseEndpoint        = "/sse"     // SSE transport: server -> client event stream
	sseMessageEndpoint = "/message" // SSE transport: client -> server messages
)
|  | ||||
| func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, httpClient *http.Client) error { | ||||
| 	mux := http.NewServeMux() | ||||
|  | ||||
| 	wrappedMux := RequestMiddleware( | ||||
| 		AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer, httpClient)(mux), | ||||
| 	) | ||||
|  | ||||
| 	httpServer := &http.Server{ | ||||
| 		Addr:    ":" + staticConfig.Port, | ||||
| 		Handler: wrappedMux, | ||||
| 	} | ||||
|  | ||||
| 	sseServer := mcpServer.ServeSse(staticConfig.SSEBaseURL, httpServer) | ||||
| 	streamableHttpServer := mcpServer.ServeHTTP(httpServer) | ||||
| 	mux.Handle(sseEndpoint, sseServer) | ||||
| 	mux.Handle(sseMessageEndpoint, sseServer) | ||||
| 	mux.Handle(mcpEndpoint, streamableHttpServer) | ||||
| 	mux.HandleFunc(healthEndpoint, func(w http.ResponseWriter, r *http.Request) { | ||||
| 		w.WriteHeader(http.StatusOK) | ||||
| 	}) | ||||
| 	mux.Handle("/.well-known/", WellKnownHandler(staticConfig, httpClient)) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	sigChan := make(chan os.Signal, 1) | ||||
| 	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM) | ||||
|  | ||||
| 	serverErr := make(chan error, 1) | ||||
| 	go func() { | ||||
| 		klog.V(0).Infof("Streaming and SSE HTTP servers starting on port %s and paths /mcp, /sse, /message", staticConfig.Port) | ||||
| 		if err := httpServer.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { | ||||
| 			serverErr <- err | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	select { | ||||
| 	case sig := <-sigChan: | ||||
| 		klog.V(0).Infof("Received signal %v, initiating graceful shutdown", sig) | ||||
| 		cancel() | ||||
| 	case <-ctx.Done(): | ||||
| 		klog.V(0).Infof("Context cancelled, initiating graceful shutdown") | ||||
| 	case err := <-serverErr: | ||||
| 		klog.Errorf("HTTP server error: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) | ||||
| 	defer shutdownCancel() | ||||
|  | ||||
| 	klog.V(0).Infof("Shutting down HTTP server gracefully...") | ||||
| 	if err := httpServer.Shutdown(shutdownCtx); err != nil { | ||||
| 		klog.Errorf("HTTP server shutdown error: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	klog.V(0).Infof("HTTP server shutdown complete") | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
										895
									
								
								pkg/http/http_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										895
									
								
								pkg/http/http_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,895 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"crypto/rand" | ||||
| 	"crypto/rsa" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| 	"net/http/httptest" | ||||
| 	"os" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
| 	"github.com/coreos/go-oidc/v3/oidc/oidctest" | ||||
| 	"golang.org/x/sync/errgroup" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/textlogger" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/mcp" | ||||
| ) | ||||
|  | ||||
// httpContext bundles everything one HTTP-server test needs: the running
// server's address, hooks to stop it and wait for shutdown, captured log
// output, and the mock Kubernetes backend. Set StaticConfig/OidcProvider
// before beforeEach to customize the server; leave them nil for defaults.
type httpContext struct {
	klogState       klog.State         // saved klog state, restored in afterEach
	mockServer      *test.MockServer   // fake Kubernetes API server backing the MCP server
	LogBuffer       bytes.Buffer       // captured klog output for log assertions
	HttpAddress     string             // HTTP server address
	timeoutCancel   context.CancelFunc // Release resources if test completes before the timeout
	StopServer      context.CancelFunc // cancels the server context to trigger graceful shutdown
	WaitForShutdown func() error       // blocks until Serve returns
	StaticConfig    *config.StaticConfig // server configuration (defaulted in beforeEach when nil)
	OidcProvider    *oidc.Provider       // optional OIDC provider passed through to Serve
}
|  | ||||
// tokenReviewSuccessful is a canned Kubernetes TokenReview response marking
// the token authenticated as "test-user" in "system:authenticated".
// NOTE(review): presumably returned by the mock API server for valid bearer
// tokens — its consumer is outside this chunk; confirm against the harness.
const tokenReviewSuccessful = `
	{
		"kind": "TokenReview",
		"apiVersion": "authentication.k8s.io/v1",
		"spec": {"token": "valid-token"},
		"status": {
			"authenticated": true,
			"user": {
				"username": "test-user",
				"groups": ["system:authenticated"]
			}
		}
	}`
|  | ||||
| func (c *httpContext) beforeEach(t *testing.T) { | ||||
| 	t.Helper() | ||||
| 	http.DefaultClient.Timeout = 10 * time.Second | ||||
| 	if c.StaticConfig == nil { | ||||
| 		c.StaticConfig = config.Default() | ||||
| 	} | ||||
| 	c.mockServer = test.NewMockServer() | ||||
| 	// Fake Kubernetes configuration | ||||
| 	c.StaticConfig.KubeConfig = c.mockServer.KubeconfigFile(t) | ||||
| 	// Capture logging | ||||
| 	c.klogState = klog.CaptureState() | ||||
| 	flags := flag.NewFlagSet("test", flag.ContinueOnError) | ||||
| 	klog.InitFlags(flags) | ||||
| 	_ = flags.Set("v", "5") | ||||
| 	klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(5), textlogger.Output(&c.LogBuffer)))) | ||||
| 	// Start server in random port | ||||
| 	ln, err := net.Listen("tcp", "0.0.0.0:0") | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to find random port for HTTP server: %v", err) | ||||
| 	} | ||||
| 	c.HttpAddress = ln.Addr().String() | ||||
| 	if randomPortErr := ln.Close(); randomPortErr != nil { | ||||
| 		t.Fatalf("Failed to close random port listener: %v", randomPortErr) | ||||
| 	} | ||||
| 	c.StaticConfig.Port = fmt.Sprintf("%d", ln.Addr().(*net.TCPAddr).Port) | ||||
| 	mcpServer, err := mcp.NewServer(mcp.Configuration{StaticConfig: c.StaticConfig}) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Failed to create MCP server: %v", err) | ||||
| 	} | ||||
| 	var timeoutCtx, cancelCtx context.Context | ||||
| 	timeoutCtx, c.timeoutCancel = context.WithTimeout(t.Context(), 10*time.Second) | ||||
| 	group, gc := errgroup.WithContext(timeoutCtx) | ||||
| 	cancelCtx, c.StopServer = context.WithCancel(gc) | ||||
| 	group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider, nil) }) | ||||
| 	c.WaitForShutdown = group.Wait | ||||
| 	// Wait for HTTP server to start (using net) | ||||
| 	for i := 0; i < 10; i++ { | ||||
| 		conn, err := net.Dial("tcp", c.HttpAddress) | ||||
| 		if err == nil { | ||||
| 			_ = conn.Close() | ||||
| 			break | ||||
| 		} | ||||
| 		time.Sleep(50 * time.Millisecond) // Wait before retrying | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *httpContext) afterEach(t *testing.T) { | ||||
| 	t.Helper() | ||||
| 	c.mockServer.Close() | ||||
| 	c.StopServer() | ||||
| 	err := c.WaitForShutdown() | ||||
| 	if err != nil { | ||||
| 		t.Errorf("HTTP server did not shut down gracefully: %v", err) | ||||
| 	} | ||||
| 	c.timeoutCancel() | ||||
| 	c.klogState.Restore() | ||||
| 	_ = os.Setenv("KUBECONFIG", "") | ||||
| } | ||||
|  | ||||
| func testCase(t *testing.T, test func(c *httpContext)) { | ||||
| 	testCaseWithContext(t, &httpContext{}, test) | ||||
| } | ||||
|  | ||||
| func testCaseWithContext(t *testing.T, httpCtx *httpContext, test func(c *httpContext)) { | ||||
| 	httpCtx.beforeEach(t) | ||||
| 	t.Cleanup(func() { httpCtx.afterEach(t) }) | ||||
| 	test(httpCtx) | ||||
| } | ||||
|  | ||||
// OidcTestServer is a fake OIDC issuer for tests. It embeds the RSA signing
// key, the discovered oidc.Provider, and the underlying httptest server.
type OidcTestServer struct {
	*rsa.PrivateKey
	*oidc.Provider
	*httptest.Server
	// TokenEndpointHandler, when non-nil, intercepts requests to /token;
	// all other requests fall through to the embedded oidctest server.
	TokenEndpointHandler http.HandlerFunc
}
|  | ||||
| func NewOidcTestServer(t *testing.T) (oidcTestServer *OidcTestServer) { | ||||
| 	t.Helper() | ||||
| 	var err error | ||||
| 	oidcTestServer = &OidcTestServer{} | ||||
| 	oidcTestServer.PrivateKey, err = rsa.GenerateKey(rand.Reader, 2048) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("failed to generate private key for oidc: %v", err) | ||||
| 	} | ||||
| 	oidcServer := &oidctest.Server{ | ||||
| 		Algorithms: []string{oidc.RS256, oidc.ES256}, | ||||
| 		PublicKeys: []oidctest.PublicKey{ | ||||
| 			{ | ||||
| 				PublicKey: oidcTestServer.Public(), | ||||
| 				KeyID:     "test-oidc-key-id", | ||||
| 				Algorithm: oidc.RS256, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	oidcTestServer.Server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if r.URL.Path == "/token" && oidcTestServer.TokenEndpointHandler != nil { | ||||
| 			oidcTestServer.TokenEndpointHandler.ServeHTTP(w, r) | ||||
| 			return | ||||
| 		} | ||||
| 		oidcServer.ServeHTTP(w, r) | ||||
| 	})) | ||||
| 	oidcServer.SetIssuer(oidcTestServer.URL) | ||||
| 	oidcTestServer.Provider, err = oidc.NewProvider(t.Context(), oidcTestServer.URL) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("failed to create OIDC provider: %v", err) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func TestGracefulShutdown(t *testing.T) { | ||||
| 	testCase(t, func(ctx *httpContext) { | ||||
| 		ctx.StopServer() | ||||
| 		err := ctx.WaitForShutdown() | ||||
| 		t.Run("Stops gracefully", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Errorf("Expected graceful shutdown, but got error: %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Stops on context cancel", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Context cancelled, initiating graceful shutdown") { | ||||
| 				t.Errorf("Context cancelled, initiating graceful shutdown, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Starts server shutdown", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Shutting down HTTP server gracefully") { | ||||
| 				t.Errorf("Expected graceful shutdown log, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Server shutdown completes", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "HTTP server shutdown complete") { | ||||
| 				t.Errorf("Expected HTTP server shutdown completed log, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestSseTransport(t *testing.T) { | ||||
| 	testCase(t, func(ctx *httpContext) { | ||||
| 		sseResp, sseErr := http.Get(fmt.Sprintf("http://%s/sse", ctx.HttpAddress)) | ||||
| 		t.Cleanup(func() { _ = sseResp.Body.Close() }) | ||||
| 		t.Run("Exposes SSE endpoint at /sse", func(t *testing.T) { | ||||
| 			if sseErr != nil { | ||||
| 				t.Fatalf("Failed to get SSE endpoint: %v", sseErr) | ||||
| 			} | ||||
| 			if sseResp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", sseResp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("SSE endpoint returns text/event-stream content type", func(t *testing.T) { | ||||
| 			if sseResp.Header.Get("Content-Type") != "text/event-stream" { | ||||
| 				t.Errorf("Expected Content-Type text/event-stream, got %s", sseResp.Header.Get("Content-Type")) | ||||
| 			} | ||||
| 		}) | ||||
| 		responseReader := bufio.NewReader(sseResp.Body) | ||||
| 		event, eventErr := responseReader.ReadString('\n') | ||||
| 		endpoint, endpointErr := responseReader.ReadString('\n') | ||||
| 		t.Run("SSE endpoint returns stream with messages endpoint", func(t *testing.T) { | ||||
| 			if eventErr != nil { | ||||
| 				t.Fatalf("Failed to read SSE response body (event): %v", eventErr) | ||||
| 			} | ||||
| 			if event != "event: endpoint\n" { | ||||
| 				t.Errorf("Expected SSE event 'endpoint', got %s", event) | ||||
| 			} | ||||
| 			if endpointErr != nil { | ||||
| 				t.Fatalf("Failed to read SSE response body (endpoint): %v", endpointErr) | ||||
| 			} | ||||
| 			if !strings.HasPrefix(endpoint, "data: /message?sessionId=") { | ||||
| 				t.Errorf("Expected SSE data: '/message', got %s", endpoint) | ||||
| 			} | ||||
| 		}) | ||||
| 		messageResp, messageErr := http.Post( | ||||
| 			fmt.Sprintf("http://%s/message?sessionId=%s", ctx.HttpAddress, strings.TrimSpace(endpoint[25:])), | ||||
| 			"application/json", | ||||
| 			bytes.NewBufferString("{}"), | ||||
| 		) | ||||
| 		t.Cleanup(func() { _ = messageResp.Body.Close() }) | ||||
| 		t.Run("Exposes message endpoint at /message", func(t *testing.T) { | ||||
| 			if messageErr != nil { | ||||
| 				t.Fatalf("Failed to get message endpoint: %v", messageErr) | ||||
| 			} | ||||
| 			if messageResp.StatusCode != http.StatusAccepted { | ||||
| 				t.Errorf("Expected HTTP 202 OK, got %d", messageResp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestStreamableHttpTransport(t *testing.T) { | ||||
| 	testCase(t, func(ctx *httpContext) { | ||||
| 		mcpGetResp, mcpGetErr := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) | ||||
| 		t.Cleanup(func() { _ = mcpGetResp.Body.Close() }) | ||||
| 		t.Run("Exposes MCP GET endpoint at /mcp", func(t *testing.T) { | ||||
| 			if mcpGetErr != nil { | ||||
| 				t.Fatalf("Failed to get MCP endpoint: %v", mcpGetErr) | ||||
| 			} | ||||
| 			if mcpGetResp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", mcpGetResp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("MCP GET endpoint returns text/event-stream content type", func(t *testing.T) { | ||||
| 			if mcpGetResp.Header.Get("Content-Type") != "text/event-stream" { | ||||
| 				t.Errorf("Expected Content-Type text/event-stream (GET), got %s", mcpGetResp.Header.Get("Content-Type")) | ||||
| 			} | ||||
| 		}) | ||||
| 		mcpPostResp, mcpPostErr := http.Post(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), "application/json", bytes.NewBufferString("{}")) | ||||
| 		t.Cleanup(func() { _ = mcpPostResp.Body.Close() }) | ||||
| 		t.Run("Exposes MCP POST endpoint at /mcp", func(t *testing.T) { | ||||
| 			if mcpPostErr != nil { | ||||
| 				t.Fatalf("Failed to post to MCP endpoint: %v", mcpPostErr) | ||||
| 			} | ||||
| 			if mcpPostResp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", mcpPostResp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("MCP POST endpoint returns application/json content type", func(t *testing.T) { | ||||
| 			if mcpPostResp.Header.Get("Content-Type") != "application/json" { | ||||
| 				t.Errorf("Expected Content-Type application/json (POST), got %s", mcpPostResp.Header.Get("Content-Type")) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestHealthCheck(t *testing.T) { | ||||
| 	testCase(t, func(ctx *httpContext) { | ||||
| 		t.Run("Exposes health check endpoint at /healthz", func(t *testing.T) { | ||||
| 			resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress)) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to get health check endpoint: %v", err) | ||||
| 			} | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 			if resp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Health exposed even when require Authorization | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		resp, err := http.Get(fmt.Sprintf("http://%s/healthz", ctx.HttpAddress)) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get health check endpoint with OAuth: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 		t.Run("Health check with OAuth returns HTTP 200 OK", func(t *testing.T) { | ||||
| 			if resp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestWellKnownReverseProxy(t *testing.T) { | ||||
| 	cases := []string{ | ||||
| 		".well-known/oauth-authorization-server", | ||||
| 		".well-known/oauth-protected-resource", | ||||
| 		".well-known/openid-configuration", | ||||
| 	} | ||||
| 	// With No Authorization URL configured | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		for _, path := range cases { | ||||
| 			resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path)) | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run("Protected resource '"+path+"' without Authorization URL returns 404 - Not Found", func(t *testing.T) { | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Failed to get %s endpoint: %v", path, err) | ||||
| 				} | ||||
| 				if resp.StatusCode != http.StatusNotFound { | ||||
| 					t.Errorf("Expected HTTP 404 Not Found, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| 	// With Authorization URL configured but invalid payload | ||||
| 	invalidPayloadServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		w.Header().Set("Content-Type", "application/json") | ||||
| 		_, _ = w.Write([]byte(`NOT A JSON PAYLOAD`)) | ||||
| 	})) | ||||
| 	t.Cleanup(invalidPayloadServer.Close) | ||||
| 	invalidPayloadConfig := &config.StaticConfig{ | ||||
| 		AuthorizationURL:        invalidPayloadServer.URL, | ||||
| 		RequireOAuth:            true, | ||||
| 		ValidateToken:           true, | ||||
| 		ClusterProviderStrategy: config.ClusterProviderKubeConfig, | ||||
| 	} | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: invalidPayloadConfig}, func(ctx *httpContext) { | ||||
| 		for _, path := range cases { | ||||
| 			resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path)) | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run("Protected resource '"+path+"' with invalid Authorization URL payload returns 500 - Internal Server Error", func(t *testing.T) { | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Failed to get %s endpoint: %v", path, err) | ||||
| 				} | ||||
| 				if resp.StatusCode != http.StatusInternalServerError { | ||||
| 					t.Errorf("Expected HTTP 500 Internal Server Error, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| 	// With Authorization URL configured and valid payload | ||||
| 	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if !strings.HasPrefix(r.URL.EscapedPath(), "/.well-known/") { | ||||
| 			http.NotFound(w, r) | ||||
| 			return | ||||
| 		} | ||||
| 		w.Header().Set("Content-Type", "application/json") | ||||
| 		_, _ = w.Write([]byte(`{"issuer": "https://example.com","scopes_supported":["mcp-server"]}`)) | ||||
| 	})) | ||||
| 	t.Cleanup(testServer.Close) | ||||
| 	staticConfig := &config.StaticConfig{ | ||||
| 		AuthorizationURL:        testServer.URL, | ||||
| 		RequireOAuth:            true, | ||||
| 		ValidateToken:           true, | ||||
| 		ClusterProviderStrategy: config.ClusterProviderKubeConfig, | ||||
| 	} | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: staticConfig}, func(ctx *httpContext) { | ||||
| 		for _, path := range cases { | ||||
| 			resp, err := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path)) | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run("Exposes "+path+" endpoint", func(t *testing.T) { | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Failed to get %s endpoint: %v", path, err) | ||||
| 				} | ||||
| 				if resp.StatusCode != http.StatusOK { | ||||
| 					t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run(path+" returns application/json content type", func(t *testing.T) { | ||||
| 				if resp.Header.Get("Content-Type") != "application/json" { | ||||
| 					t.Errorf("Expected Content-Type application/json, got %s", resp.Header.Get("Content-Type")) | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestWellKnownOverrides(t *testing.T) { | ||||
| 	cases := []string{ | ||||
| 		".well-known/oauth-authorization-server", | ||||
| 		".well-known/oauth-protected-resource", | ||||
| 		".well-known/openid-configuration", | ||||
| 	} | ||||
| 	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if !strings.HasPrefix(r.URL.EscapedPath(), "/.well-known/") { | ||||
| 			http.NotFound(w, r) | ||||
| 			return | ||||
| 		} | ||||
| 		w.Header().Set("Content-Type", "application/json") | ||||
| 		_, _ = w.Write([]byte(` | ||||
| 			{ | ||||
| 				"issuer": "https://localhost", | ||||
| 				"registration_endpoint": "https://localhost/clients-registrations/openid-connect", | ||||
| 				"require_request_uri_registration": true, | ||||
| 				"scopes_supported":["scope-1", "scope-2"] | ||||
| 			}`)) | ||||
| 	})) | ||||
| 	t.Cleanup(testServer.Close) | ||||
| 	baseConfig := config.StaticConfig{ | ||||
| 		AuthorizationURL:        testServer.URL, | ||||
| 		RequireOAuth:            true, | ||||
| 		ValidateToken:           true, | ||||
| 		ClusterProviderStrategy: config.ClusterProviderKubeConfig, | ||||
| 	} | ||||
| 	// With Dynamic Client Registration disabled | ||||
| 	disableDynamicRegistrationConfig := baseConfig | ||||
| 	disableDynamicRegistrationConfig.DisableDynamicClientRegistration = true | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &disableDynamicRegistrationConfig}, func(ctx *httpContext) { | ||||
| 		for _, path := range cases { | ||||
| 			resp, _ := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path)) | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			body, err := io.ReadAll(resp.Body) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to read response body: %v", err) | ||||
| 			} | ||||
| 			t.Run("DisableDynamicClientRegistration removes registration_endpoint field", func(t *testing.T) { | ||||
| 				if strings.Contains(string(body), "registration_endpoint") { | ||||
| 					t.Error("Expected registration_endpoint to be removed, but it was found in the response") | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run("DisableDynamicClientRegistration sets require_request_uri_registration = false", func(t *testing.T) { | ||||
| 				if !strings.Contains(string(body), `"require_request_uri_registration":false`) { | ||||
| 					t.Error("Expected require_request_uri_registration to be false, but it was not found in the response") | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run("DisableDynamicClientRegistration includes/preserves scopes_supported", func(t *testing.T) { | ||||
| 				if !strings.Contains(string(body), `"scopes_supported":["scope-1","scope-2"]`) { | ||||
| 					t.Error("Expected scopes_supported to be present, but it was not found in the response") | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| 	// With overrides for OAuth scopes (client/frontend) | ||||
| 	oAuthScopesConfig := baseConfig | ||||
| 	oAuthScopesConfig.OAuthScopes = []string{"openid", "mcp-server"} | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &oAuthScopesConfig}, func(ctx *httpContext) { | ||||
| 		for _, path := range cases { | ||||
| 			resp, _ := http.Get(fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path)) | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			body, err := io.ReadAll(resp.Body) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to read response body: %v", err) | ||||
| 			} | ||||
| 			t.Run("OAuthScopes overrides scopes_supported", func(t *testing.T) { | ||||
| 				if !strings.Contains(string(body), `"scopes_supported":["openid","mcp-server"]`) { | ||||
| 					t.Errorf("Expected scopes_supported to be overridden, but original was preserved, response: %s", string(body)) | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run("OAuthScopes preserves other fields", func(t *testing.T) { | ||||
| 				if !strings.Contains(string(body), `"issuer":"https://localhost"`) { | ||||
| 					t.Errorf("Expected issuer to be preserved, but got: %s", string(body)) | ||||
| 				} | ||||
| 				if !strings.Contains(string(body), `"registration_endpoint":"https://localhost`) { | ||||
| 					t.Errorf("Expected registration_endpoint to be preserved, but got: %s", string(body)) | ||||
| 				} | ||||
| 				if !strings.Contains(string(body), `"require_request_uri_registration":true`) { | ||||
| 					t.Error("Expected require_request_uri_registration to be true, but it was not found in the response") | ||||
| 				} | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestMiddlewareLogging(t *testing.T) { | ||||
| 	testCase(t, func(ctx *httpContext) { | ||||
| 		_, _ = http.Get(fmt.Sprintf("http://%s/.well-known/oauth-protected-resource", ctx.HttpAddress)) | ||||
| 		t.Run("Logs HTTP requests and responses", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "GET /.well-known/oauth-protected-resource 404") { | ||||
| 				t.Errorf("Expected log entry for GET /.well-known/oauth-protected-resource, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Logs HTTP request duration", func(t *testing.T) { | ||||
| 			expected := `"GET /.well-known/oauth-protected-resource 404 (.+)"` | ||||
| 			m := regexp.MustCompile(expected).FindStringSubmatch(ctx.LogBuffer.String()) | ||||
| 			if len(m) != 2 { | ||||
| 				t.Fatalf("Expected log entry to contain duration, got %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 			duration, err := time.ParseDuration(m[1]) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to parse duration from log entry: %v", err) | ||||
| 			} | ||||
| 			if duration < 0 { | ||||
| 				t.Errorf("Expected duration to be non-negative, got %v", duration) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestAuthorizationUnauthorized(t *testing.T) { | ||||
| 	// Missing Authorization header | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with MISSING Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with MISSING Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with MISSING Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { | ||||
| 				t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Authorization header without Bearer prefix | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Basic YWxhZGRpbjpvcGVuc2VzYW1l") | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with INCOMPATIBLE Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", error="missing_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INCOMPATIBLE Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - missing or invalid bearer token") { | ||||
| 				t.Errorf("Expected log entry for missing or invalid bearer token, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Invalid Authorization header | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Bearer "+strings.ReplaceAll(tokenBasicNotExpired, ".", ".invalid")) | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with INVALID Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || | ||||
| 				!strings.Contains(ctx.LogBuffer.String(), "error: failed to parse JWT token: illegal base64 data") { | ||||
| 				t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Expired Authorization Bearer token | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Bearer "+tokenBasicExpired) | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with EXPIRED Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with EXPIRED Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", error="invalid_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with EXPIRED Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || | ||||
| 				!strings.Contains(ctx.LogBuffer.String(), "validation failed, token is expired (exp)") { | ||||
| 				t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Invalid audience claim Bearer token | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "expected-audience", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Bearer "+tokenBasicExpired) | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with INVALID AUDIENCE Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID AUDIENCE Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", audience="expected-audience", error="invalid_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID AUDIENCE Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || | ||||
| 				!strings.Contains(ctx.LogBuffer.String(), "invalid audience claim (aud)") { | ||||
| 				t.Errorf("Expected log entry for JWT validation error, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Failed OIDC validation | ||||
| 	oidcTestServer := NewOidcTestServer(t) | ||||
| 	t.Cleanup(oidcTestServer.Close) | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired) | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with INVALID OIDC Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID OIDC Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID OIDC Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || | ||||
| 				!strings.Contains(ctx.LogBuffer.String(), "OIDC token validation error: failed to verify signature") { | ||||
| 				t.Errorf("Expected log entry for OIDC validation error, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| 	// Failed Kubernetes TokenReview | ||||
| 	rawClaims := `{ | ||||
| 		"iss": "` + oidcTestServer.URL + `", | ||||
| 		"exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, | ||||
| 		"aud": "mcp-server" | ||||
| 	}` | ||||
| 	validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: true, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { | ||||
| 		req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to create request: %v", err) | ||||
| 		} | ||||
| 		req.Header.Set("Authorization", "Bearer "+validOidcToken) | ||||
| 		resp, err := http.DefaultClient.Do(req) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close }) | ||||
| 		t.Run("Protected resource with INVALID KUBERNETES Authorization header returns 401 - Unauthorized", func(t *testing.T) { | ||||
| 			if resp.StatusCode != 401 { | ||||
| 				t.Errorf("Expected HTTP 401, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID KUBERNETES Authorization header returns WWW-Authenticate header", func(t *testing.T) { | ||||
| 			authHeader := resp.Header.Get("WWW-Authenticate") | ||||
| 			expected := `Bearer realm="Kubernetes MCP Server", audience="mcp-server", error="invalid_token"` | ||||
| 			if authHeader != expected { | ||||
| 				t.Errorf("Expected WWW-Authenticate header to be %q, got %q", expected, authHeader) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Protected resource with INVALID KUBERNETES Authorization header logs error", func(t *testing.T) { | ||||
| 			if !strings.Contains(ctx.LogBuffer.String(), "Authentication failed - JWT validation error") || | ||||
| 				!strings.Contains(ctx.LogBuffer.String(), "kubernetes API token validation error: failed to create token review") { | ||||
| 				t.Errorf("Expected log entry for Kubernetes TokenReview error, got: %s", ctx.LogBuffer.String()) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestAuthorizationRequireOAuthFalse(t *testing.T) { | ||||
| 	testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: false, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 		resp, err := http.Get(fmt.Sprintf("http://%s/mcp", ctx.HttpAddress)) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 		} | ||||
| 		t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 		t.Run("Protected resource with MISSING Authorization header returns 200 - OK)", func(t *testing.T) { | ||||
| 			if resp.StatusCode != http.StatusOK { | ||||
| 				t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestAuthorizationRawToken(t *testing.T) { | ||||
| 	cases := []struct { | ||||
| 		audience      string | ||||
| 		validateToken bool | ||||
| 	}{ | ||||
| 		{"", false},           // No audience, no validation | ||||
| 		{"", true},            // No audience, validation enabled | ||||
| 		{"mcp-server", false}, // Audience set, no validation | ||||
| 		{"mcp-server", true},  // Audience set, validation enabled | ||||
| 	} | ||||
| 	for _, c := range cases { | ||||
| 		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: c.audience, ValidateToken: c.validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}}, func(ctx *httpContext) { | ||||
| 			tokenReviewed := false | ||||
| 			ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 				if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { | ||||
| 					w.Header().Set("Content-Type", "application/json") | ||||
| 					_, _ = w.Write([]byte(tokenReviewSuccessful)) | ||||
| 					tokenReviewed = true | ||||
| 					return | ||||
| 				} | ||||
| 			})) | ||||
| 			req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create request: %v", err) | ||||
| 			} | ||||
| 			req.Header.Set("Authorization", "Bearer "+tokenBasicNotExpired) | ||||
| 			resp, err := http.DefaultClient.Do(req) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 			} | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header returns 200 - OK", c.audience, c.validateToken), func(t *testing.T) { | ||||
| 				if resp.StatusCode != http.StatusOK { | ||||
| 					t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with audience = '%s' and validate-token = '%t', with VALID Authorization header performs token validation accordingly", c.audience, c.validateToken), func(t *testing.T) { | ||||
| 				if tokenReviewed == true && !c.validateToken { | ||||
| 					t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") | ||||
| 				} | ||||
| 				if tokenReviewed == false && c.validateToken { | ||||
| 					t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") | ||||
| 				} | ||||
| 			}) | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| } | ||||
|  | ||||
| func TestAuthorizationOidcToken(t *testing.T) { | ||||
| 	oidcTestServer := NewOidcTestServer(t) | ||||
| 	t.Cleanup(oidcTestServer.Close) | ||||
| 	rawClaims := `{ | ||||
| 		"iss": "` + oidcTestServer.URL + `", | ||||
| 		"exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, | ||||
| 		"aud": "mcp-server" | ||||
| 	}` | ||||
| 	validOidcToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, rawClaims) | ||||
| 	cases := []bool{false, true} | ||||
| 	for _, validateToken := range cases { | ||||
| 		testCaseWithContext(t, &httpContext{StaticConfig: &config.StaticConfig{RequireOAuth: true, OAuthAudience: "mcp-server", ValidateToken: validateToken, ClusterProviderStrategy: config.ClusterProviderKubeConfig}, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { | ||||
| 			tokenReviewed := false | ||||
| 			ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 				if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { | ||||
| 					w.Header().Set("Content-Type", "application/json") | ||||
| 					_, _ = w.Write([]byte(tokenReviewSuccessful)) | ||||
| 					tokenReviewed = true | ||||
| 					return | ||||
| 				} | ||||
| 			})) | ||||
| 			req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create request: %v", err) | ||||
| 			} | ||||
| 			req.Header.Set("Authorization", "Bearer "+validOidcToken) | ||||
| 			resp, err := http.DefaultClient.Do(req) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 			} | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header returns 200 - OK", validateToken), func(t *testing.T) { | ||||
| 				if resp.StatusCode != http.StatusOK { | ||||
| 					t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC Authorization header performs token validation accordingly", validateToken), func(t *testing.T) { | ||||
| 				if tokenReviewed == true && !validateToken { | ||||
| 					t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") | ||||
| 				} | ||||
| 				if tokenReviewed == false && validateToken { | ||||
| 					t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") | ||||
| 				} | ||||
| 			}) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestAuthorizationOidcTokenExchange(t *testing.T) { | ||||
| 	oidcTestServer := NewOidcTestServer(t) | ||||
| 	t.Cleanup(oidcTestServer.Close) | ||||
| 	rawClaims := `{ | ||||
| 		"iss": "` + oidcTestServer.URL + `", | ||||
| 		"exp": ` + strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 10) + `, | ||||
| 		"aud": "%s" | ||||
| 	}` | ||||
| 	validOidcClientToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, | ||||
| 		fmt.Sprintf(rawClaims, "mcp-server")) | ||||
| 	validOidcBackendToken := oidctest.SignIDToken(oidcTestServer.PrivateKey, "test-oidc-key-id", oidc.RS256, | ||||
| 		fmt.Sprintf(rawClaims, "backend-audience")) | ||||
| 	oidcTestServer.TokenEndpointHandler = func(w http.ResponseWriter, r *http.Request) { | ||||
| 		w.Header().Set("Content-Type", "application/json") | ||||
| 		_, _ = fmt.Fprintf(w, `{"access_token":"%s","token_type":"Bearer","expires_in":253402297199}`, validOidcBackendToken) | ||||
| 	} | ||||
| 	cases := []bool{false, true} | ||||
| 	for _, validateToken := range cases { | ||||
| 		staticConfig := &config.StaticConfig{ | ||||
| 			RequireOAuth:            true, | ||||
| 			OAuthAudience:           "mcp-server", | ||||
| 			ValidateToken:           validateToken, | ||||
| 			StsClientId:             "test-sts-client-id", | ||||
| 			StsClientSecret:         "test-sts-client-secret", | ||||
| 			StsAudience:             "backend-audience", | ||||
| 			StsScopes:               []string{"backend-scope"}, | ||||
| 			ClusterProviderStrategy: config.ClusterProviderKubeConfig, | ||||
| 		} | ||||
| 		testCaseWithContext(t, &httpContext{StaticConfig: staticConfig, OidcProvider: oidcTestServer.Provider}, func(ctx *httpContext) { | ||||
| 			tokenReviewed := false | ||||
| 			ctx.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 				if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { | ||||
| 					w.Header().Set("Content-Type", "application/json") | ||||
| 					_, _ = w.Write([]byte(tokenReviewSuccessful)) | ||||
| 					tokenReviewed = true | ||||
| 					return | ||||
| 				} | ||||
| 			})) | ||||
| 			req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/mcp", ctx.HttpAddress), nil) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to create request: %v", err) | ||||
| 			} | ||||
| 			req.Header.Set("Authorization", "Bearer "+validOidcClientToken) | ||||
| 			resp, err := http.DefaultClient.Do(req) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to get protected endpoint: %v", err) | ||||
| 			} | ||||
| 			t.Cleanup(func() { _ = resp.Body.Close() }) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header returns 200 - OK", validateToken), func(t *testing.T) { | ||||
| 				if resp.StatusCode != http.StatusOK { | ||||
| 					t.Errorf("Expected HTTP 200 OK, got %d", resp.StatusCode) | ||||
| 				} | ||||
| 			}) | ||||
| 			t.Run(fmt.Sprintf("Protected resource with validate-token='%t' with VALID OIDC EXCHANGE Authorization header performs token validation accordingly", validateToken), func(t *testing.T) { | ||||
| 				if tokenReviewed == true && !validateToken { | ||||
| 					t.Errorf("Expected token review to be skipped when validate-token is false, but it was performed") | ||||
| 				} | ||||
| 				if tokenReviewed == false && validateToken { | ||||
| 					t.Errorf("Expected token review to be performed when validate-token is true, but it was skipped") | ||||
| 				} | ||||
| 			}) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										66
									
								
								pkg/http/middleware.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										66
									
								
								pkg/http/middleware.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,66 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| 	"time" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| func RequestMiddleware(next http.Handler) http.Handler { | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if r.URL.Path == "/healthz" { | ||||
| 			next.ServeHTTP(w, r) | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		start := time.Now() | ||||
|  | ||||
| 		lrw := &loggingResponseWriter{ | ||||
| 			ResponseWriter: w, | ||||
| 			statusCode:     http.StatusOK, | ||||
| 		} | ||||
|  | ||||
| 		next.ServeHTTP(lrw, r) | ||||
|  | ||||
| 		duration := time.Since(start) | ||||
| 		klog.V(5).Infof("%s %s %d %v", r.Method, r.URL.Path, lrw.statusCode, duration) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// loggingResponseWriter wraps an http.ResponseWriter to record the response
// status code for request logging, while forwarding Flush and Hijack to the
// underlying writer so streaming and connection upgrades keep working.
type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode    int  // first status code written; initialized to 200 by RequestMiddleware
	headerWritten bool // true once WriteHeader/Write has been observed
}
|  | ||||
| func (lrw *loggingResponseWriter) WriteHeader(code int) { | ||||
| 	if !lrw.headerWritten { | ||||
| 		lrw.statusCode = code | ||||
| 		lrw.headerWritten = true | ||||
| 		lrw.ResponseWriter.WriteHeader(code) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (lrw *loggingResponseWriter) Write(b []byte) (int, error) { | ||||
| 	if !lrw.headerWritten { | ||||
| 		lrw.statusCode = http.StatusOK | ||||
| 		lrw.headerWritten = true | ||||
| 	} | ||||
| 	return lrw.ResponseWriter.Write(b) | ||||
| } | ||||
|  | ||||
| func (lrw *loggingResponseWriter) Flush() { | ||||
| 	if flusher, ok := lrw.ResponseWriter.(http.Flusher); ok { | ||||
| 		flusher.Flush() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (lrw *loggingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { | ||||
| 	if hijacker, ok := lrw.ResponseWriter.(http.Hijacker); ok { | ||||
| 		return hijacker.Hijack() | ||||
| 	} | ||||
| 	return nil, nil, http.ErrNotSupported | ||||
| } | ||||
							
								
								
									
										59
									
								
								pkg/http/sts.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										59
									
								
								pkg/http/sts.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,59 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
| 	"golang.org/x/oauth2" | ||||
| 	"golang.org/x/oauth2/google/externalaccount" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
// staticSubjectTokenSupplier satisfies externalaccount.SubjectTokenSupplier by
// always returning the same, pre-acquired subject token.
type staticSubjectTokenSupplier struct {
	token string
}

// SubjectToken returns the fixed token; the context and supplier options are
// intentionally unused.
func (s *staticSubjectTokenSupplier) SubjectToken(_ context.Context, _ externalaccount.SupplierOptions) (string, error) {
	return s.token, nil
}

// Compile-time check that the interface is implemented.
var _ externalaccount.SubjectTokenSupplier = &staticSubjectTokenSupplier{}
|  | ||||
// SecurityTokenService performs an external-account token exchange against the
// embedded OIDC provider's token endpoint.
type SecurityTokenService struct {
	*oidc.Provider
	ClientId                string   // OAuth client ID presented to the token endpoint
	ClientSecret            string   // client secret; not required by IsEnabled
	ExternalAccountAudience string   // audience requested for the exchanged token
	ExternalAccountScopes   []string // scopes requested for the exchanged token
}
|  | ||||
// NewFromConfig builds a SecurityTokenService from the STS fields of the
// static configuration, using the supplied OIDC provider for the token
// endpoint. The provider may be nil, in which case IsEnabled reports false.
func NewFromConfig(config *config.StaticConfig, provider *oidc.Provider) *SecurityTokenService {
	return &SecurityTokenService{
		Provider:                provider,
		ClientId:                config.StsClientId,
		ClientSecret:            config.StsClientSecret,
		ExternalAccountAudience: config.StsAudience,
		ExternalAccountScopes:   config.StsScopes,
	}
}
|  | ||||
| func (sts *SecurityTokenService) IsEnabled() bool { | ||||
| 	return sts.Provider != nil && sts.ClientId != "" && sts.ExternalAccountAudience != "" | ||||
| } | ||||
|  | ||||
| func (sts *SecurityTokenService) ExternalAccountTokenExchange(ctx context.Context, originalToken *oauth2.Token) (*oauth2.Token, error) { | ||||
| 	ts, err := externalaccount.NewTokenSource(ctx, externalaccount.Config{ | ||||
| 		TokenURL:             sts.Endpoint().TokenURL, | ||||
| 		ClientID:             sts.ClientId, | ||||
| 		ClientSecret:         sts.ClientSecret, | ||||
| 		Audience:             sts.ExternalAccountAudience, | ||||
| 		SubjectTokenType:     "urn:ietf:params:oauth:token-type:access_token", | ||||
| 		SubjectTokenSupplier: &staticSubjectTokenSupplier{token: originalToken.AccessToken}, | ||||
| 		Scopes:               sts.ExternalAccountScopes, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return ts.Token() | ||||
| } | ||||
							
								
								
									
										153
									
								
								pkg/http/sts_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										153
									
								
								pkg/http/sts_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,153 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
| 	"golang.org/x/oauth2" | ||||
| ) | ||||
|  | ||||
| func TestIsEnabled(t *testing.T) { | ||||
| 	disabledCases := []SecurityTokenService{ | ||||
| 		{}, | ||||
| 		{Provider: nil}, | ||||
| 		{Provider: &oidc.Provider{}}, | ||||
| 		{Provider: &oidc.Provider{}, ClientId: "test-client-id", ClientSecret: "test-client-secret"}, | ||||
| 		{ClientId: "test-client-id", ClientSecret: "test-client-secret", ExternalAccountAudience: "test-audience"}, | ||||
| 		{Provider: &oidc.Provider{}, ClientSecret: "test-client-secret", ExternalAccountAudience: "test-audience"}, | ||||
| 	} | ||||
| 	for _, sts := range disabledCases { | ||||
| 		t.Run(fmt.Sprintf("SecurityTokenService{%+v}.IsEnabled() = false", sts), func(t *testing.T) { | ||||
| 			if sts.IsEnabled() { | ||||
| 				t.Errorf("SecurityTokenService{%+v}.IsEnabled() = true; want false", sts) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| 	enabledCases := []SecurityTokenService{ | ||||
| 		{Provider: &oidc.Provider{}, ClientId: "test-client-id", ExternalAccountAudience: "test-audience"}, | ||||
| 		{Provider: &oidc.Provider{}, ClientId: "test-client-id", ExternalAccountAudience: "test-audience", ClientSecret: "test-client-secret"}, | ||||
| 		{Provider: &oidc.Provider{}, ClientId: "test-client-id", ExternalAccountAudience: "test-audience", ClientSecret: "test-client-secret", ExternalAccountScopes: []string{"test-scope"}}, | ||||
| 	} | ||||
| 	for _, sts := range enabledCases { | ||||
| 		t.Run(fmt.Sprintf("SecurityTokenService{%+v}.IsEnabled() = true", sts), func(t *testing.T) { | ||||
| 			if !sts.IsEnabled() { | ||||
| 				t.Errorf("SecurityTokenService{%+v}.IsEnabled() = false; want true", sts) | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// TestExternalAccountTokenExchange drives the full STS exchange against a mock
// OIDC discovery + token endpoint, asserting on the returned token and on the
// raw HTTP request the exchange sends to the token endpoint.
func TestExternalAccountTokenExchange(t *testing.T) {
	mockServer := test.NewMockServer()
	authServer := mockServer.Config().Host
	// Captured by the /token handler so later subtests can inspect the exact
	// request produced by the exchange.
	var tokenExchangeRequest *http.Request
	mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Serve OIDC discovery pointing back at this mock server's /token.
		if req.URL.Path == "/.well-known/openid-configuration" {
			w.Header().Set("Content-Type", "application/json")
			_, _ = fmt.Fprintf(w, `{
				"issuer": "%s",
				"authorization_endpoint": "https://mock-oidc-provider/authorize",
				"token_endpoint": "%s/token"
			}`, authServer, authServer)
			return
		}
		// Token endpoint: accept only the expected subject token.
		if req.URL.Path == "/token" {
			tokenExchangeRequest = req
			_ = tokenExchangeRequest.ParseForm()
			if tokenExchangeRequest.PostForm.Get("subject_token") != "the-original-access-token" {
				http.Error(w, "Invalid subject_token", http.StatusUnauthorized)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"access_token":"exchanged-access-token","token_type":"Bearer","expires_in":253402297199}`))
			return
		}
	}))
	t.Cleanup(mockServer.Close)
	provider, err := oidc.NewProvider(t.Context(), authServer)
	if err != nil {
		t.Fatalf("oidc.NewProvider() error = %v; want nil", err)
	}
	// With missing Token Source information
	_, err = (&SecurityTokenService{Provider: provider}).ExternalAccountTokenExchange(t.Context(), &oauth2.Token{})
	t.Run("ExternalAccountTokenExchange with missing token source returns error", func(t *testing.T) {
		if err == nil {
			t.Fatalf("ExternalAccountTokenExchange() error = nil; want error")
		}
		if !strings.Contains(err.Error(), "must be set") {
			t.Errorf("ExternalAccountTokenExchange() error = %v; want missing required field", err)
		}
	})
	// With valid Token Source information
	sts := SecurityTokenService{
		Provider:                provider,
		ClientId:                "test-client-id",
		ClientSecret:            "test-client-secret",
		ExternalAccountAudience: "test-audience",
		ExternalAccountScopes:   []string{"test-scope"},
	}
	// With Invalid token (the mock /token endpoint answers 401)
	_, err = sts.ExternalAccountTokenExchange(t.Context(), &oauth2.Token{
		AccessToken: "invalid-access-token",
		TokenType:   "Bearer",
	})
	t.Run("ExternalAccountTokenExchange with invalid token returns error", func(t *testing.T) {
		if err == nil {
			t.Fatalf("ExternalAccountTokenExchange() error = nil; want error")
		}
		if !strings.Contains(err.Error(), "status code 401: Invalid subject_token") {
			t.Errorf("ExternalAccountTokenExchange() error = %v; want invalid_grant: Invalid subject_token", err)
		}
	})
	// With Valid token
	exchangeToken, err := sts.ExternalAccountTokenExchange(t.Context(), &oauth2.Token{
		AccessToken: "the-original-access-token",
		TokenType:   "Bearer",
	})
	t.Run("ExternalAccountTokenExchange with valid token returns new token", func(t *testing.T) {
		if err != nil {
			t.Errorf("ExternalAccountTokenExchange() error = %v; want nil", err)
		}
		if exchangeToken == nil {
			t.Fatal("ExternalAccountTokenExchange() = nil; want token")
		}
		if exchangeToken.AccessToken != "exchanged-access-token" {
			t.Errorf("exchangeToken.AccessToken = %s; want exchanged-access-token", exchangeToken.AccessToken)
		}
	})
	// The remaining subtests assert on the captured request itself.
	t.Run("ExternalAccountTokenExchange with valid token sends POST request", func(t *testing.T) {
		if tokenExchangeRequest == nil {
			t.Fatal("tokenExchangeRequest is nil; want request")
		}
		if tokenExchangeRequest.Method != "POST" {
			t.Errorf("tokenExchangeRequest.Method = %s; want POST", tokenExchangeRequest.Method)
		}
	})
	t.Run("ExternalAccountTokenExchange with valid token has correct form data", func(t *testing.T) {
		if tokenExchangeRequest.Header.Get("Content-Type") != "application/x-www-form-urlencoded" {
			t.Errorf("tokenExchangeRequest.Content-Type = %s; want application/x-www-form-urlencoded", tokenExchangeRequest.Header.Get("Content-Type"))
		}
		if tokenExchangeRequest.PostForm.Get("audience") != "test-audience" {
			t.Errorf("tokenExchangeRequest.PostForm[audience] = %s; want test-audience", tokenExchangeRequest.PostForm.Get("audience"))
		}
		if tokenExchangeRequest.PostForm.Get("subject_token_type") != "urn:ietf:params:oauth:token-type:access_token" {
			t.Errorf("tokenExchangeRequest.PostForm[subject_token_type] = %s; want urn:ietf:params:oauth:token-type:access_token", tokenExchangeRequest.PostForm.Get("subject_token_type"))
		}
		if tokenExchangeRequest.PostForm.Get("subject_token") != "the-original-access-token" {
			t.Errorf("tokenExchangeRequest.PostForm[subject_token] = %s; want the-original-access-token", tokenExchangeRequest.PostForm.Get("subject_token"))
		}
		if len(tokenExchangeRequest.PostForm["scope"]) == 0 || tokenExchangeRequest.PostForm["scope"][0] != "test-scope" {
			t.Errorf("tokenExchangeRequest.PostForm[scope] = %v; want [test-scope]", tokenExchangeRequest.PostForm["scope"])
		}
	})
	t.Run("ExternalAccountTokenExchange with valid token sends correct client credentials header", func(t *testing.T) {
		if tokenExchangeRequest.Header.Get("Authorization") != "Basic "+base64.StdEncoding.EncodeToString([]byte("test-client-id:test-client-secret")) {
			t.Errorf("tokenExchangeRequest.Header[Authorization] = %s; want Basic base64(test-client-id:test-client-secret)", tokenExchangeRequest.Header.Get("Authorization"))
		}
	})
}
							
								
								
									
										91
									
								
								pkg/http/wellknown.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										91
									
								
								pkg/http/wellknown.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,91 @@ | ||||
| package http | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
// Well-known OAuth/OIDC discovery paths that this server proxies to the
// upstream authorization server.
const (
	oauthAuthorizationServerEndpoint = "/.well-known/oauth-authorization-server"
	oauthProtectedResourceEndpoint   = "/.well-known/oauth-protected-resource"
	openIDConfigurationEndpoint      = "/.well-known/openid-configuration"
)

// WellKnownEndpoints lists every discovery endpoint handled by WellKnown.
var WellKnownEndpoints = []string{
	oauthAuthorizationServerEndpoint,
	oauthProtectedResourceEndpoint,
	openIDConfigurationEndpoint,
}

// WellKnown reverse-proxies well-known metadata requests to an upstream
// authorization server, optionally rewriting parts of the response.
type WellKnown struct {
	authorizationUrl                 string       // upstream base URL (no trailing slash expected)
	scopesSupported                  []string     // if non-empty, overrides upstream "scopes_supported"
	disableDynamicClientRegistration bool         // if true, strip "registration_endpoint" from responses
	httpClient                       *http.Client // client used for upstream requests
}

// Compile-time check that WellKnown satisfies http.Handler.
var _ http.Handler = &WellKnown{}
|  | ||||
| func WellKnownHandler(staticConfig *config.StaticConfig, httpClient *http.Client) http.Handler { | ||||
| 	authorizationUrl := staticConfig.AuthorizationURL | ||||
| 	if authorizationUrl != "" && strings.HasSuffix("authorizationUrl", "/") { | ||||
| 		authorizationUrl = strings.TrimSuffix(authorizationUrl, "/") | ||||
| 	} | ||||
| 	if httpClient == nil { | ||||
| 		httpClient = http.DefaultClient | ||||
| 	} | ||||
| 	return &WellKnown{ | ||||
| 		authorizationUrl:                 authorizationUrl, | ||||
| 		disableDynamicClientRegistration: staticConfig.DisableDynamicClientRegistration, | ||||
| 		scopesSupported:                  staticConfig.OAuthScopes, | ||||
| 		httpClient:                       httpClient, | ||||
| 	} | ||||
| } | ||||
|  | ||||
// ServeHTTP forwards the incoming well-known metadata request to the
// upstream authorization server and relays the (possibly rewritten)
// JSON response. Rewrites applied before relaying:
//   - when dynamic client registration is disabled, "registration_endpoint"
//     is removed and "require_request_uri_registration" is forced to false;
//   - when explicit scopes are configured, they replace "scopes_supported".
//
// Upstream headers and status code are preserved; Content-Length is
// recomputed because re-marshalling may change the body size.
func (w WellKnown) ServeHTTP(writer http.ResponseWriter, request *http.Request) {
	if w.authorizationUrl == "" {
		http.Error(writer, "Authorization URL is not configured", http.StatusNotFound)
		return
	}
	// Mirror the caller's method and path onto the upstream server.
	req, err := http.NewRequest(request.Method, w.authorizationUrl+request.URL.EscapedPath(), nil)
	if err != nil {
		http.Error(writer, "Failed to create request: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Propagate the client's context so upstream calls are cancelled with it.
	resp, err := w.httpClient.Do(req.WithContext(request.Context()))
	if err != nil {
		http.Error(writer, "Failed to perform request: "+err.Error(), http.StatusInternalServerError)
		return
	}
	defer func() { _ = resp.Body.Close() }()
	// Decode into a generic map so individual fields can be added/removed
	// without knowing the full metadata schema.
	var resourceMetadata map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&resourceMetadata)
	if err != nil {
		http.Error(writer, "Failed to read response body: "+err.Error(), http.StatusInternalServerError)
		return
	}
	if w.disableDynamicClientRegistration {
		delete(resourceMetadata, "registration_endpoint")
		resourceMetadata["require_request_uri_registration"] = false
	}
	if len(w.scopesSupported) > 0 {
		resourceMetadata["scopes_supported"] = w.scopesSupported
	}
	body, err := json.Marshal(resourceMetadata)
	if err != nil {
		http.Error(writer, "Failed to marshal response body: "+err.Error(), http.StatusInternalServerError)
		return
	}
	// Copy upstream headers verbatim (including any stale Content-Length,
	// which is overridden below).
	for key, values := range resp.Header {
		for _, value := range values {
			writer.Header().Add(key, value)
		}
	}
	writer.Header().Set("Content-Length", fmt.Sprintf("%d", len(body)))
	writer.WriteHeader(resp.StatusCode)
	_, _ = writer.Write(body)
}
| @@ -1,46 +1,353 @@ | ||||
| package cmd | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"crypto/x509" | ||||
| 	"errors" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"github.com/manusa/kubernetes-mcp-server/pkg/mcp" | ||||
| 	"github.com/manusa/kubernetes-mcp-server/pkg/version" | ||||
| 	"net/http" | ||||
| 	"net/url" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/coreos/go-oidc/v3/oidc" | ||||
| 	"github.com/spf13/cobra" | ||||
| 	"github.com/spf13/viper" | ||||
| 	"golang.org/x/net/context" | ||||
|  | ||||
| 	"k8s.io/cli-runtime/pkg/genericiooptions" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/textlogger" | ||||
| 	"k8s.io/kubectl/pkg/util/i18n" | ||||
| 	"k8s.io/kubectl/pkg/util/templates" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	internalhttp "github.com/containers/kubernetes-mcp-server/pkg/http" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/mcp" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/output" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/toolsets" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/version" | ||||
| ) | ||||
|  | ||||
| var rootCmd = &cobra.Command{ | ||||
| 	Use:   "kubernetes-mcp-server [command] [options]", | ||||
| 	Short: "Kubernetes Model Context Protocol (MCP) server", | ||||
| 	Long: ` | ||||
| Kubernetes Model Context Protocol (MCP) server | ||||
| var ( | ||||
| 	long     = templates.LongDesc(i18n.T("Kubernetes Model Context Protocol (MCP) server")) | ||||
| 	examples = templates.Examples(i18n.T(` | ||||
| # show this help | ||||
| kubernetes-mcp-server -h | ||||
|  | ||||
|   # show this help | ||||
|   kubernetes-mcp-server -h | ||||
| # shows version information | ||||
| kubernetes-mcp-server --version | ||||
|  | ||||
|   # shows version information | ||||
|   kubernetes-mcp-server --version | ||||
| # start STDIO server | ||||
| kubernetes-mcp-server | ||||
|  | ||||
|   # TODO: add more examples`, | ||||
| 	Run: func(cmd *cobra.Command, args []string) { | ||||
| 		if viper.GetBool("version") { | ||||
| 			fmt.Println(version.Version) | ||||
| 			return | ||||
| 		} | ||||
| 		if err := mcp.NewSever().ServeStdio(); err != nil && !errors.Is(err, context.Canceled) { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	}, | ||||
| # start a SSE server on port 8080 | ||||
| kubernetes-mcp-server --port 8080 | ||||
|  | ||||
| # start a SSE server on port 8443 with a public HTTPS host of example.com | ||||
| kubernetes-mcp-server --port 8443 --sse-base-url https://example.com:8443 | ||||
|  | ||||
| # start a SSE server on port 8080 with multi-cluster tools disabled | ||||
| kubernetes-mcp-server --port 8080 --disable-multi-cluster | ||||
| `)) | ||||
| ) | ||||
|  | ||||
// Command-line flag names. Kept as constants so flag registration in
// NewMCPServer and the Changed-checks in loadFlags cannot drift apart.
const (
	flagVersion              = "version"
	flagLogLevel             = "log-level"
	flagConfig               = "config"
	flagSSEPort              = "sse-port"  // deprecated, use --port
	flagHttpPort             = "http-port" // deprecated, use --port
	flagPort                 = "port"
	flagSSEBaseUrl           = "sse-base-url"
	flagKubeconfig           = "kubeconfig"
	flagToolsets             = "toolsets"
	flagListOutput           = "list-output"
	flagReadOnly             = "read-only"
	flagDisableDestructive   = "disable-destructive"
	flagRequireOAuth         = "require-oauth"
	flagOAuthAudience        = "oauth-audience"
	flagValidateToken        = "validate-token"
	flagAuthorizationURL     = "authorization-url"
	flagServerUrl            = "server-url"
	flagCertificateAuthority = "certificate-authority"
	flagDisableMultiCluster  = "disable-multi-cluster"
)
|  | ||||
// MCPServerOptions holds the raw command-line flag values for the
// kubernetes-mcp-server command. Flag values are merged into StaticConfig
// by loadFlags (flags take precedence over the config file); the rest of
// the program reads StaticConfig, not these fields.
type MCPServerOptions struct {
	Version              bool
	LogLevel             int
	Port                 string
	SSEPort              int
	HttpPort             int
	SSEBaseUrl           string
	Kubeconfig           string
	Toolsets             []string
	ListOutput           string
	ReadOnly             bool
	DisableDestructive   bool
	RequireOAuth         bool
	OAuthAudience        string
	ValidateToken        bool
	AuthorizationURL     string
	CertificateAuthority string
	ServerURL            string
	DisableMultiCluster  bool

	// ConfigPath is the --config file path; StaticConfig is the effective
	// configuration after file load plus flag overrides.
	ConfigPath   string
	StaticConfig *config.StaticConfig

	genericiooptions.IOStreams
}
|  | ||||
| func init() { | ||||
| 	rootCmd.Flags().BoolP("version", "v", false, "Print version information and quit") | ||||
| 	_ = viper.BindPFlags(rootCmd.Flags()) | ||||
| } | ||||
|  | ||||
| func Execute() { | ||||
| 	if err := rootCmd.Execute(); err != nil { | ||||
| 		panic(err) | ||||
// NewMCPServerOptions returns options wired to the given IO streams and
// pre-populated with the default static configuration.
func NewMCPServerOptions(streams genericiooptions.IOStreams) *MCPServerOptions {
	return &MCPServerOptions{
		IOStreams:    streams,
		StaticConfig: config.Default(),
	}
}
|  | ||||
// NewMCPServer builds the root cobra command for kubernetes-mcp-server:
// it registers all flags against a fresh MCPServerOptions and runs the
// standard Complete → Validate → Run pipeline on execution.
func NewMCPServer(streams genericiooptions.IOStreams) *cobra.Command {
	o := NewMCPServerOptions(streams)
	cmd := &cobra.Command{
		Use:     "kubernetes-mcp-server [command] [options]",
		Short:   "Kubernetes Model Context Protocol (MCP) server",
		Long:    long,
		Example: examples,
		RunE: func(c *cobra.Command, args []string) error {
			if err := o.Complete(c); err != nil {
				return err
			}
			if err := o.Validate(); err != nil {
				return err
			}
			if err := o.Run(); err != nil {
				return err
			}

			return nil
		},
	}

	cmd.Flags().BoolVar(&o.Version, flagVersion, o.Version, "Print version information and quit")
	cmd.Flags().IntVar(&o.LogLevel, flagLogLevel, o.LogLevel, "Set the log level (from 0 to 9)")
	cmd.Flags().StringVar(&o.ConfigPath, flagConfig, o.ConfigPath, "Path of the config file.")
	// --sse-port and --http-port are kept for backward compatibility but
	// marked deprecated in favor of the unified --port flag.
	cmd.Flags().IntVar(&o.SSEPort, flagSSEPort, o.SSEPort, "Start a SSE server on the specified port")
	cmd.Flag(flagSSEPort).Deprecated = "Use --port instead"
	cmd.Flags().IntVar(&o.HttpPort, flagHttpPort, o.HttpPort, "Start a streamable HTTP server on the specified port")
	cmd.Flag(flagHttpPort).Deprecated = "Use --port instead"
	cmd.Flags().StringVar(&o.Port, flagPort, o.Port, "Start a streamable HTTP and SSE HTTP server on the specified port (e.g. 8080)")
	cmd.Flags().StringVar(&o.SSEBaseUrl, flagSSEBaseUrl, o.SSEBaseUrl, "SSE public base URL to use when sending the endpoint message (e.g. https://example.com)")
	cmd.Flags().StringVar(&o.Kubeconfig, flagKubeconfig, o.Kubeconfig, "Path to the kubeconfig file to use for authentication")
	cmd.Flags().StringSliceVar(&o.Toolsets, flagToolsets, o.Toolsets, "Comma-separated list of MCP toolsets to use (available toolsets: "+strings.Join(toolsets.ToolsetNames(), ", ")+"). Defaults to "+strings.Join(o.StaticConfig.Toolsets, ", ")+".")
	cmd.Flags().StringVar(&o.ListOutput, flagListOutput, o.ListOutput, "Output format for resource list operations (one of: "+strings.Join(output.Names, ", ")+"). Defaults to "+o.StaticConfig.ListOutput+".")
	cmd.Flags().BoolVar(&o.ReadOnly, flagReadOnly, o.ReadOnly, "If true, only tools annotated with readOnlyHint=true are exposed")
	cmd.Flags().BoolVar(&o.DisableDestructive, flagDisableDestructive, o.DisableDestructive, "If true, tools annotated with destructiveHint=true are disabled")
	// The OAuth-related flags are hidden: they are experimental/advanced and
	// only take effect together with --require-oauth (see Validate).
	cmd.Flags().BoolVar(&o.RequireOAuth, flagRequireOAuth, o.RequireOAuth, "If true, requires OAuth authorization as defined in the Model Context Protocol (MCP) specification. This flag is ignored if transport type is stdio")
	_ = cmd.Flags().MarkHidden(flagRequireOAuth)
	cmd.Flags().StringVar(&o.OAuthAudience, flagOAuthAudience, o.OAuthAudience, "OAuth audience for token claims validation. Optional. If not set, the audience is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagOAuthAudience)
	cmd.Flags().BoolVar(&o.ValidateToken, flagValidateToken, o.ValidateToken, "If true, validates the token against the Kubernetes API Server using TokenReview. Optional. If not set, the token is not validated. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagValidateToken)
	cmd.Flags().StringVar(&o.AuthorizationURL, flagAuthorizationURL, o.AuthorizationURL, "OAuth authorization server URL for protected resource endpoint. If not provided, the Kubernetes API server host will be used. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagAuthorizationURL)
	cmd.Flags().StringVar(&o.ServerURL, flagServerUrl, o.ServerURL, "Server URL of this application. Optional. If set, this url will be served in protected resource metadata endpoint and tokens will be validated with this audience. If not set, expected audience is kubernetes-mcp-server. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagServerUrl)
	cmd.Flags().StringVar(&o.CertificateAuthority, flagCertificateAuthority, o.CertificateAuthority, "Certificate authority path to verify certificates. Optional. Only valid if require-oauth is enabled.")
	_ = cmd.Flags().MarkHidden(flagCertificateAuthority)
	cmd.Flags().BoolVar(&o.DisableMultiCluster, flagDisableMultiCluster, o.DisableMultiCluster, "Disable multi cluster tools. Optional. If true, all tools will be run against the default cluster/context.")

	return cmd
}
|  | ||||
// Complete finalizes the effective configuration: it loads the optional
// --config file, overlays any explicitly-set flags on top of it, and
// initializes logging. OAuth enforcement is dropped for the stdio
// transport, where the MCP authorization flow does not apply.
func (m *MCPServerOptions) Complete(cmd *cobra.Command) error {
	if m.ConfigPath != "" {
		cnf, err := config.Read(m.ConfigPath)
		if err != nil {
			return err
		}
		// The file replaces the defaults wholesale; flags re-override below.
		m.StaticConfig = cnf
	}

	m.loadFlags(cmd)

	m.initializeLogging()

	if m.StaticConfig.RequireOAuth && m.StaticConfig.Port == "" {
		// RequireOAuth is not relevant flow for STDIO transport
		m.StaticConfig.RequireOAuth = false
	}

	return nil
}
|  | ||||
// loadFlags copies every flag the user explicitly set (Changed == true)
// into StaticConfig, so flags take precedence over config-file values
// while untouched flags leave the file/default values intact.
func (m *MCPServerOptions) loadFlags(cmd *cobra.Command) {
	if cmd.Flag(flagLogLevel).Changed {
		m.StaticConfig.LogLevel = m.LogLevel
	}
	// --port wins over the deprecated --sse-port / --http-port aliases.
	if cmd.Flag(flagPort).Changed {
		m.StaticConfig.Port = m.Port
	} else if cmd.Flag(flagSSEPort).Changed {
		m.StaticConfig.Port = strconv.Itoa(m.SSEPort)
	} else if cmd.Flag(flagHttpPort).Changed {
		m.StaticConfig.Port = strconv.Itoa(m.HttpPort)
	}
	if cmd.Flag(flagSSEBaseUrl).Changed {
		m.StaticConfig.SSEBaseURL = m.SSEBaseUrl
	}
	if cmd.Flag(flagKubeconfig).Changed {
		m.StaticConfig.KubeConfig = m.Kubeconfig
	}
	if cmd.Flag(flagListOutput).Changed {
		m.StaticConfig.ListOutput = m.ListOutput
	}
	if cmd.Flag(flagReadOnly).Changed {
		m.StaticConfig.ReadOnly = m.ReadOnly
	}
	if cmd.Flag(flagDisableDestructive).Changed {
		m.StaticConfig.DisableDestructive = m.DisableDestructive
	}
	if cmd.Flag(flagToolsets).Changed {
		m.StaticConfig.Toolsets = m.Toolsets
	}
	if cmd.Flag(flagRequireOAuth).Changed {
		m.StaticConfig.RequireOAuth = m.RequireOAuth
	}
	if cmd.Flag(flagOAuthAudience).Changed {
		m.StaticConfig.OAuthAudience = m.OAuthAudience
	}
	if cmd.Flag(flagValidateToken).Changed {
		m.StaticConfig.ValidateToken = m.ValidateToken
	}
	if cmd.Flag(flagAuthorizationURL).Changed {
		m.StaticConfig.AuthorizationURL = m.AuthorizationURL
	}
	if cmd.Flag(flagServerUrl).Changed {
		m.StaticConfig.ServerURL = m.ServerURL
	}
	if cmd.Flag(flagCertificateAuthority).Changed {
		m.StaticConfig.CertificateAuthority = m.CertificateAuthority
	}
	// Only an explicit --disable-multi-cluster=true forces the disabled
	// strategy; passing false leaves the configured strategy untouched.
	if cmd.Flag(flagDisableMultiCluster).Changed && m.DisableMultiCluster {
		m.StaticConfig.ClusterProviderStrategy = config.ClusterProviderDisabled
	}
}
|  | ||||
// initializeLogging configures klog. In stdio mode (no port) all klog
// output is suppressed so log lines cannot corrupt the MCP protocol
// stream; otherwise a textlogger writing to m.Out is installed at the
// configured verbosity.
func (m *MCPServerOptions) initializeLogging() {
	flagSet := flag.NewFlagSet("klog", flag.ContinueOnError)
	klog.InitFlags(flagSet)
	if m.StaticConfig.Port == "" {
		// disable klog output for stdio mode
		// this is needed to avoid klog writing to stderr and breaking the protocol
		_ = flagSet.Parse([]string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=FATAL"})
		return
	}
	loggerOptions := []textlogger.ConfigOption{textlogger.Output(m.Out)}
	if m.StaticConfig.LogLevel >= 0 {
		loggerOptions = append(loggerOptions, textlogger.Verbosity(m.StaticConfig.LogLevel))
		// Keep klog's own -v flag in sync with the textlogger verbosity.
		_ = flagSet.Parse([]string{"--v", strconv.Itoa(m.StaticConfig.LogLevel)})
	}
	logger := textlogger.NewLogger(textlogger.NewConfig(loggerOptions...))
	klog.SetLoggerWithOptions(logger)
}
|  | ||||
// Validate checks cross-flag consistency after Complete has merged the
// config file and flags: mutually exclusive port flags, a known list
// output, valid toolsets, OAuth sub-flags only with require-oauth, and a
// well-formed http(s) authorization URL (warning on plain http).
func (m *MCPServerOptions) Validate() error {
	if m.Port != "" && (m.SSEPort > 0 || m.HttpPort > 0) {
		return fmt.Errorf("--port is mutually exclusive with deprecated --http-port and --sse-port flags")
	}
	if output.FromString(m.StaticConfig.ListOutput) == nil {
		return fmt.Errorf("invalid output name: %s, valid names are: %s", m.StaticConfig.ListOutput, strings.Join(output.Names, ", "))
	}
	if err := toolsets.Validate(m.StaticConfig.Toolsets); err != nil {
		return err
	}
	// Complete() clears RequireOAuth in stdio mode, hence the hint about
	// a missing --port in the error message.
	if !m.StaticConfig.RequireOAuth && (m.StaticConfig.ValidateToken || m.StaticConfig.OAuthAudience != "" || m.StaticConfig.AuthorizationURL != "" || m.StaticConfig.ServerURL != "" || m.StaticConfig.CertificateAuthority != "") {
		return fmt.Errorf("validate-token, oauth-audience, authorization-url, server-url and certificate-authority are only valid if require-oauth is enabled. Missing --port may implicitly set require-oauth to false")
	}
	if m.StaticConfig.AuthorizationURL != "" {
		u, err := url.Parse(m.StaticConfig.AuthorizationURL)
		if err != nil {
			return err
		}
		if u.Scheme != "https" && u.Scheme != "http" {
			return fmt.Errorf("--authorization-url must be a valid URL")
		}
		if u.Scheme == "http" {
			klog.Warningf("authorization-url is using http://, this is not recommended production use")
		}
	}
	return nil
}
|  | ||||
// Run executes the server: logs the effective configuration, handles
// --version, optionally sets up an OIDC provider (with a custom CA
// bundle when --certificate-authority is given), builds the MCP server,
// and serves either over HTTP (when a port is configured) or stdio.
func (m *MCPServerOptions) Run() error {
	klog.V(1).Info("Starting kubernetes-mcp-server")
	klog.V(1).Infof(" - Config: %s", m.ConfigPath)
	klog.V(1).Infof(" - Toolsets: %s", strings.Join(m.StaticConfig.Toolsets, ", "))
	klog.V(1).Infof(" - ListOutput: %s", m.StaticConfig.ListOutput)
	klog.V(1).Infof(" - Read-only mode: %t", m.StaticConfig.ReadOnly)
	klog.V(1).Infof(" - Disable destructive tools: %t", m.StaticConfig.DisableDestructive)

	strategy := m.StaticConfig.ClusterProviderStrategy
	if strategy == "" {
		strategy = "auto-detect (it is recommended to set this explicitly in your Config)"
	}

	klog.V(1).Infof(" - ClusterProviderStrategy: %s", strategy)

	if m.Version {
		_, _ = fmt.Fprintf(m.Out, "%s\n", version.Version)
		return nil
	}

	var oidcProvider *oidc.Provider
	var httpClient *http.Client
	if m.StaticConfig.AuthorizationURL != "" {
		ctx := context.Background()
		if m.StaticConfig.CertificateAuthority != "" {
			httpClient = &http.Client{}
			caCert, err := os.ReadFile(m.StaticConfig.CertificateAuthority)
			if err != nil {
				return fmt.Errorf("failed to read CA certificate from %s: %w", m.StaticConfig.CertificateAuthority, err)
			}
			caCertPool := x509.NewCertPool()
			if !caCertPool.AppendCertsFromPEM(caCert) {
				return fmt.Errorf("failed to append CA certificate from %s to pool", m.StaticConfig.CertificateAuthority)
			}

			// NOTE(review): this branch looks unreachable — AppendCertsFromPEM
			// returned true above, so the pool cannot still equal an empty
			// pool. Consider removing; confirm intent with the author.
			if caCertPool.Equal(x509.NewCertPool()) {
				caCertPool = nil
			}

			transport := &http.Transport{
				TLSClientConfig: &tls.Config{
					RootCAs: caCertPool,
				},
			}
			httpClient.Transport = transport
			// go-oidc picks the HTTP client up from the context.
			ctx = oidc.ClientContext(ctx, httpClient)
		}
		provider, err := oidc.NewProvider(ctx, m.StaticConfig.AuthorizationURL)
		if err != nil {
			return fmt.Errorf("unable to setup OIDC provider: %w", err)
		}
		oidcProvider = provider
	}

	mcpServer, err := mcp.NewServer(mcp.Configuration{StaticConfig: m.StaticConfig})
	if err != nil {
		return fmt.Errorf("failed to initialize MCP server: %w", err)
	}
	defer mcpServer.Close()

	// HTTP transport when a port is configured; stdio otherwise.
	if m.StaticConfig.Port != "" {
		ctx := context.Background()
		return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider, httpClient)
	}

	// context.Canceled is the normal shutdown path for stdio, not an error.
	if err := mcpServer.ServeStdio(); err != nil && !errors.Is(err, context.Canceled) {
		return err
	}

	return nil
}
|   | ||||
| @@ -1,9 +1,18 @@ | ||||
| package cmd | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/stretchr/testify/assert" | ||||
| 	"github.com/stretchr/testify/require" | ||||
| 	"k8s.io/cli-runtime/pkg/genericiooptions" | ||||
| ) | ||||
|  | ||||
| func captureOutput(f func() error) (string, error) { | ||||
| @@ -19,11 +28,272 @@ func captureOutput(f func() error) (string, error) { | ||||
| 	return string(out), err | ||||
| } | ||||
|  | ||||
| func testStream() (genericiooptions.IOStreams, *bytes.Buffer) { | ||||
| 	out := &bytes.Buffer{} | ||||
| 	return genericiooptions.IOStreams{ | ||||
| 		In:     &bytes.Buffer{}, | ||||
| 		Out:    out, | ||||
| 		ErrOut: io.Discard, | ||||
| 	}, out | ||||
| } | ||||
|  | ||||
| func TestVersion(t *testing.T) { | ||||
| 	ioStreams, out := testStream() | ||||
| 	rootCmd := NewMCPServer(ioStreams) | ||||
| 	rootCmd.SetArgs([]string{"--version"}) | ||||
| 	version, err := captureOutput(rootCmd.Execute) | ||||
| 	if version != "0.0.0\n" { | ||||
| 		t.Fatalf("Expected version 0.0.0, got %s %v", version, err) | ||||
| 		return | ||||
| 	if err := rootCmd.Execute(); out.String() != "0.0.0\n" { | ||||
| 		t.Fatalf("Expected version 0.0.0, got %s %v", out.String(), err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// TestConfig verifies --config handling: the default (no file), loading a
// file, error reporting for a missing file, and flag-over-file precedence.
// Each subtest runs with --version plus --port/--log-level so the klog
// startup lines that report the effective config are emitted to out.
func TestConfig(t *testing.T) {
	t.Run("defaults to none", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"})
		// Empty config path in the startup log when --config is not given.
		expectedConfig := `" - Config: "`
		if err := rootCmd.Execute(); !strings.Contains(out.String(), expectedConfig) {
			t.Fatalf("Expected config to be %s, got %s %v", expectedConfig, out.String(), err)
		}
	})
	t.Run("set with --config", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		// Locate testdata relative to this source file.
		_, file, _, _ := runtime.Caller(0)
		emptyConfigPath := filepath.Join(filepath.Dir(file), "testdata", "empty-config.toml")
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--config", emptyConfigPath})
		_ = rootCmd.Execute()
		expected := `(?m)\" - Config\:[^\"]+empty-config\.toml\"`
		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expected, out.String(), err)
		}
	})
	t.Run("invalid path throws error", func(t *testing.T) {
		ioStreams, _ := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--config", "invalid-path-to-config.toml"})
		err := rootCmd.Execute()
		if err == nil {
			t.Fatal("Expected error for invalid config path, got nil")
		}
		// Prefix-match only: the os-specific suffix of the error varies.
		expected := "open invalid-path-to-config.toml: "
		if !strings.HasPrefix(err.Error(), expected) {
			t.Fatalf("Expected error to be %s, got %s", expected, err.Error())
		}
	})
	t.Run("set with valid --config", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		_, file, _, _ := runtime.Caller(0)
		validConfigPath := filepath.Join(filepath.Dir(file), "testdata", "valid-config.toml")
		rootCmd.SetArgs([]string{"--version", "--config", validConfigPath})
		_ = rootCmd.Execute()
		expectedConfig := `(?m)\" - Config\:[^\"]+valid-config\.toml\"`
		if m, err := regexp.MatchString(expectedConfig, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedConfig, out.String(), err)
		}
		// Values below come from valid-config.toml and must appear in the log.
		expectedListOutput := `(?m)\" - ListOutput\: yaml"`
		if m, err := regexp.MatchString(expectedListOutput, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedListOutput, out.String(), err)
		}
		expectedReadOnly := `(?m)\" - Read-only mode: true"`
		if m, err := regexp.MatchString(expectedReadOnly, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedReadOnly, out.String(), err)
		}
		expectedDisableDestruction := `(?m)\" - Disable destructive tools: true"`
		if m, err := regexp.MatchString(expectedDisableDestruction, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedDisableDestruction, out.String(), err)
		}
	})
	t.Run("set with valid --config, flags take precedence", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		_, file, _, _ := runtime.Caller(0)
		validConfigPath := filepath.Join(filepath.Dir(file), "testdata", "valid-config.toml")
		// Explicit flags contradict the file; flags must win.
		rootCmd.SetArgs([]string{"--version", "--list-output=table", "--disable-destructive=false", "--read-only=false", "--config", validConfigPath})
		_ = rootCmd.Execute()
		expected := `(?m)\" - Config\:[^\"]+valid-config\.toml\"`
		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expected, out.String(), err)
		}
		expectedListOutput := `(?m)\" - ListOutput\: table"`
		if m, err := regexp.MatchString(expectedListOutput, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedListOutput, out.String(), err)
		}
		expectedReadOnly := `(?m)\" - Read-only mode: false"`
		if m, err := regexp.MatchString(expectedReadOnly, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedReadOnly, out.String(), err)
		}
		expectedDisableDestruction := `(?m)\" - Disable destructive tools: false"`
		if m, err := regexp.MatchString(expectedDisableDestruction, out.String()); !m || err != nil {
			t.Fatalf("Expected config to be %s, got %s %v", expectedDisableDestruction, out.String(), err)
		}
	})
}
|  | ||||
// TestToolsets verifies the --toolsets flag: the help text lists all
// available toolsets, the default set is used when the flag is absent,
// and an explicit value is honored (and logged in the given order).
func TestToolsets(t *testing.T) {
	t.Run("available", func(t *testing.T) {
		ioStreams, _ := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--help"})
		o, err := captureOutput(rootCmd.Execute) // --help doesn't use logger/klog, cobra prints directly to stdout
		if !strings.Contains(o, "Comma-separated list of MCP toolsets to use (available toolsets: config, core, helm).") {
			t.Fatalf("Expected all available toolsets, got %s %v", o, err)
		}
	})
	t.Run("default", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"})
		if err := rootCmd.Execute(); !strings.Contains(out.String(), "- Toolsets: core, config, helm") {
			t.Fatalf("Expected toolsets 'full', got %s %v", out, err)
		}
	})
	t.Run("set with --toolsets", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--toolsets", "helm,config"})
		_ = rootCmd.Execute()
		expected := `(?m)\" - Toolsets\: helm, config\"`
		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
			t.Fatalf("Expected toolset to be %s, got %s %v", expected, out.String(), err)
		}
	})
}
|  | ||||
// TestListOutput verifies the --list-output flag: the help text lists the
// available formats, "table" is the default, and an explicit "yaml" value
// is reflected in the startup log.
func TestListOutput(t *testing.T) {
	t.Run("available", func(t *testing.T) {
		ioStreams, _ := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--help"})
		o, err := captureOutput(rootCmd.Execute) // --help doesn't use logger/klog, cobra prints directly to stdout
		if !strings.Contains(o, "Output format for resource list operations (one of: yaml, table)") {
			t.Fatalf("Expected all available outputs, got %s %v", o, err)
		}
	})
	t.Run("defaults to table", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"})
		if err := rootCmd.Execute(); !strings.Contains(out.String(), "- ListOutput: table") {
			t.Fatalf("Expected list-output 'table', got %s %v", out, err)
		}
	})
	t.Run("set with --list-output", func(t *testing.T) {
		ioStreams, out := testStream()
		rootCmd := NewMCPServer(ioStreams)
		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--list-output", "yaml"})
		_ = rootCmd.Execute()
		expected := `(?m)\" - ListOutput\: yaml\"`
		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil {
			t.Fatalf("Expected list-output to be %s, got %s %v", expected, out.String(), err)
		}
	})
}
|  | ||||
| // TestReadOnly verifies the startup banner reflects the read-only flag: | ||||
| // off by default, on when --read-only is passed. | ||||
| func TestReadOnly(t *testing.T) { | ||||
| 	t.Run("defaults to false", func(t *testing.T) { | ||||
| 		streams, buf := testStream() | ||||
| 		cmd := NewMCPServer(streams) | ||||
| 		cmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"}) | ||||
| 		execErr := cmd.Execute() | ||||
| 		if !strings.Contains(buf.String(), " - Read-only mode: false") { | ||||
| 			t.Fatalf("Expected read-only mode false, got %s %v", buf, execErr) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("set with --read-only", func(t *testing.T) { | ||||
| 		streams, buf := testStream() | ||||
| 		cmd := NewMCPServer(streams) | ||||
| 		cmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--read-only"}) | ||||
| 		_ = cmd.Execute() | ||||
| 		// klog quotes the whole message, hence the escaped quotes. | ||||
| 		expected := `(?m)\" - Read-only mode\: true\"` | ||||
| 		matched, matchErr := regexp.MatchString(expected, buf.String()) | ||||
| 		if matchErr != nil || !matched { | ||||
| 			t.Fatalf("Expected read-only mode to be %s, got %s %v", expected, buf.String(), matchErr) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // TestDisableDestructive verifies the startup banner reflects the | ||||
| // --disable-destructive flag: off by default, on when the flag is passed. | ||||
| func TestDisableDestructive(t *testing.T) { | ||||
| 	t.Run("defaults to false", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"}) | ||||
| 		if err := rootCmd.Execute(); !strings.Contains(out.String(), " - Disable destructive tools: false") { | ||||
| 			t.Fatalf("Expected disable destructive false, got %s %v", out, err) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("set with --disable-destructive", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--disable-destructive"}) | ||||
| 		_ = rootCmd.Execute() | ||||
| 		// klog quotes the whole log message, hence the escaped quotes. | ||||
| 		expected := `(?m)\" - Disable destructive tools\: true\"` | ||||
| 		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil { | ||||
| 			t.Fatalf("Expected disable-destructive mode to be %s, got %s %v", expected, out.String(), err) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // TestAuthorizationURL validates the --authorization-url flag under | ||||
| // --require-oauth: a scheme-less URL is rejected, an https URL is accepted. | ||||
| func TestAuthorizationURL(t *testing.T) { | ||||
| 	t.Run("invalid authorization-url without protocol", func(t *testing.T) { | ||||
| 		ioStreams, _ := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--require-oauth", "--port=8080", "--authorization-url", "example.com/auth", "--server-url", "https://example.com:8080"}) | ||||
| 		err := rootCmd.Execute() | ||||
| 		if err == nil { | ||||
| 			t.Fatal("Expected error for invalid authorization-url without protocol, got nil") | ||||
| 		} | ||||
| 		expected := "--authorization-url must be a valid URL" | ||||
| 		if !strings.Contains(err.Error(), expected) { | ||||
| 			t.Fatalf("Expected error to contain %s, got %s", expected, err.Error()) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("valid authorization-url with https", func(t *testing.T) { | ||||
| 		ioStreams, _ := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--require-oauth", "--port=8080", "--authorization-url", "https://example.com/auth", "--server-url", "https://example.com:8080"}) | ||||
| 		err := rootCmd.Execute() | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error for valid https authorization-url, got %s", err.Error()) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // TestStdioLogging checks that klog output is suppressed in stdio mode (only | ||||
| // the version is printed) but emitted when an HTTP port is configured. | ||||
| func TestStdioLogging(t *testing.T) { | ||||
| 	t.Run("stdio disables klog", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		// No --port: the server runs in stdio mode where logs would corrupt the protocol. | ||||
| 		rootCmd.SetArgs([]string{"--version", "--log-level=1"}) | ||||
| 		err := rootCmd.Execute() | ||||
| 		require.NoErrorf(t, err, "Expected no error executing command, got %v", err) | ||||
| 		assert.Equalf(t, "0.0.0\n", out.String(), "Expected only version output, got %s", out.String()) | ||||
| 	}) | ||||
| 	t.Run("http mode enables klog", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--log-level=1", "--port=1337"}) | ||||
| 		err := rootCmd.Execute() | ||||
| 		require.NoErrorf(t, err, "Expected no error executing command, got %v", err) | ||||
| 		assert.Containsf(t, out.String(), "Starting kubernetes-mcp-server", "Expected klog output, got %s", out.String()) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // TestDisableMultiCluster verifies the cluster-provider strategy reported at | ||||
| // startup: "auto-detect" by default, "disabled" with --disable-multi-cluster. | ||||
| func TestDisableMultiCluster(t *testing.T) { | ||||
| 	t.Run("defaults to false", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1"}) | ||||
| 		if err := rootCmd.Execute(); !strings.Contains(out.String(), " - ClusterProviderStrategy: auto-detect (it is recommended to set this explicitly in your Config)") { | ||||
| 			// Fix: the failure message now names the strategy the assertion | ||||
| 			// actually expects (auto-detect), not "kubeconfig". | ||||
| 			t.Fatalf("Expected ClusterProviderStrategy auto-detect, got %s %v", out, err) | ||||
| 		} | ||||
| 	}) | ||||
| 	t.Run("set with --disable-multi-cluster", func(t *testing.T) { | ||||
| 		ioStreams, out := testStream() | ||||
| 		rootCmd := NewMCPServer(ioStreams) | ||||
| 		rootCmd.SetArgs([]string{"--version", "--port=1337", "--log-level=1", "--disable-multi-cluster"}) | ||||
| 		_ = rootCmd.Execute() | ||||
| 		// klog quotes the whole log message, hence the escaped quotes. | ||||
| 		expected := `(?m)\" - ClusterProviderStrategy\: disabled\"` | ||||
| 		if m, err := regexp.MatchString(expected, out.String()); !m || err != nil { | ||||
| 			t.Fatalf("Expected ClusterProviderStrategy %s, got %s %v", expected, out.String(), err) | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|   | ||||
							
								
								
									
										0
									
								
								pkg/kubernetes-mcp-server/cmd/testdata/empty-config.toml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										0
									
								
								pkg/kubernetes-mcp-server/cmd/testdata/empty-config.toml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
								
								
									
										15
									
								
								pkg/kubernetes-mcp-server/cmd/testdata/valid-config.toml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								pkg/kubernetes-mcp-server/cmd/testdata/valid-config.toml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| log_level = 1 | ||||
| port = "9999" | ||||
| kubeconfig = "test" | ||||
| list_output = "yaml" | ||||
| read_only = true | ||||
| disable_destructive = true | ||||
|  | ||||
| denied_resources = [ | ||||
|     {group = "apps", version = "v1", kind = "Deployment"}, | ||||
|     {group = "rbac.authorization.k8s.io", version = "v1", kind = "Role"} | ||||
| ] | ||||
|  | ||||
| enabled_tools = ["configuration_view", "events_list", "namespaces_list", "pods_list", "resources_list", "resources_get", "resources_create_or_update", "resources_delete"] | ||||
| disabled_tools = ["pods_delete", "pods_top", "pods_log", "pods_run", "pods_exec"] | ||||
|  | ||||
							
								
								
									
										40
									
								
								pkg/kubernetes/accesscontrol.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								pkg/kubernetes/accesscontrol.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,40 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| // isAllowed checks the resource is in denied list or not. | ||||
| // If it is in denied list, this function returns false. | ||||
| // isAllowed reports whether the given GroupVersionKind may be accessed, i.e. | ||||
| // it does not match any entry of the configured denied-resources list. | ||||
| // A nil configuration denies nothing. | ||||
| func isAllowed( | ||||
| 	staticConfig *config.StaticConfig, // TODO: maybe just use the denied resource slice | ||||
| 	gvk *schema.GroupVersionKind, | ||||
| ) bool { | ||||
| 	if staticConfig == nil { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	for _, denied := range staticConfig.DeniedResources { | ||||
| 		if gvk.Group != denied.Group || gvk.Version != denied.Version { | ||||
| 			continue | ||||
| 		} | ||||
| 		// An empty Kind denies the whole Group/Version pair. | ||||
| 		if denied.Kind == "" || gvk.Kind == denied.Kind { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // isNotAllowedError builds the error returned when access to the given | ||||
| // GroupVersionKind is denied by the static configuration. | ||||
| func isNotAllowedError(gvk *schema.GroupVersionKind) error { | ||||
| 	return fmt.Errorf("resource not allowed: %s", gvk.String()) | ||||
| } | ||||
							
								
								
									
										141
									
								
								pkg/kubernetes/accesscontrol_clientset.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										141
									
								
								pkg/kubernetes/accesscontrol_clientset.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,141 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	authenticationv1api "k8s.io/api/authentication/v1" | ||||
| 	authorizationv1api "k8s.io/api/authorization/v1" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/apimachinery/pkg/util/httpstream" | ||||
| 	"k8s.io/client-go/discovery" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" | ||||
| 	authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" | ||||
| 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/tools/remotecommand" | ||||
| 	"k8s.io/metrics/pkg/apis/metrics" | ||||
| 	metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" | ||||
| 	metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| // AccessControlClientset is a limited clientset delegating interface to the standard kubernetes.Clientset | ||||
| // Only a limited set of functions are implemented with a single point of access to the kubernetes API where | ||||
| // apiVersion and kinds are checked for allowed access | ||||
| type AccessControlClientset struct { | ||||
| 	// cfg is retained to build exec (SPDY/WebSocket) transports in PodsExec. | ||||
| 	cfg             *rest.Config | ||||
| 	// delegate is the standard clientset every call forwards to after the access check. | ||||
| 	delegate        kubernetes.Interface | ||||
| 	discoveryClient discovery.DiscoveryInterface | ||||
| 	// metricsV1beta1 serves the metrics.k8s.io queries in PodsMetricses. | ||||
| 	metricsV1beta1  *metricsv1beta1.MetricsV1beta1Client | ||||
| 	staticConfig    *config.StaticConfig // TODO: maybe just store the denied resource slice | ||||
| } | ||||
|  | ||||
| // DiscoveryClient returns the discovery client; discovery is not subject to | ||||
| // the denied-resources check. | ||||
| func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface { | ||||
| 	return a.discoveryClient | ||||
| } | ||||
|  | ||||
| // Pods returns the PodInterface for the given namespace, after verifying that | ||||
| // access to core/v1 Pods has not been denied by configuration. | ||||
| func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) { | ||||
| 	podGVK := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} | ||||
| 	if isAllowed(a.staticConfig, podGVK) { | ||||
| 		return a.delegate.CoreV1().Pods(namespace), nil | ||||
| 	} | ||||
| 	return nil, isNotAllowedError(podGVK) | ||||
| } | ||||
|  | ||||
| // PodsExec prepares a remotecommand.Executor for running a command in a pod, | ||||
| // honoring the denied-resources configuration for core/v1 Pods. | ||||
| // It builds a WebSocket executor and an SPDY one, preferring WebSocket and | ||||
| // falling back to SPDY when the connection upgrade fails. | ||||
| func (a *AccessControlClientset) PodsExec(namespace, name string, podExecOptions *v1.PodExecOptions) (remotecommand.Executor, error) { | ||||
| 	gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} | ||||
| 	if !isAllowed(a.staticConfig, gvk) { | ||||
| 		return nil, isNotAllowedError(gvk) | ||||
| 	} | ||||
| 	// Compute URL | ||||
| 	// https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L382-L397 | ||||
| 	execRequest := a.delegate.CoreV1().RESTClient(). | ||||
| 		Post(). | ||||
| 		Resource("pods"). | ||||
| 		Namespace(namespace). | ||||
| 		Name(name). | ||||
| 		SubResource("exec") | ||||
| 	execRequest.VersionedParams(podExecOptions, ParameterCodec) | ||||
| 	// SPDY uses POST, WebSocket uses GET — both against the same exec URL. | ||||
| 	spdyExec, err := remotecommand.NewSPDYExecutor(a.cfg, "POST", execRequest.URL()) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	webSocketExec, err := remotecommand.NewWebSocketExecutor(a.cfg, "GET", execRequest.URL().String()) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Fall back to SPDY only for upgrade/proxy failures; other errors surface as-is. | ||||
| 	return remotecommand.NewFallbackExecutor(webSocketExec, spdyExec, func(err error) bool { | ||||
| 		return httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // PodsMetricses fetches pod metrics (metrics.k8s.io/v1beta1 PodMetrics) and | ||||
| // converts them to the internal metrics.PodMetricsList type. | ||||
| // When name is non-empty a single pod's metrics are wrapped in a list; | ||||
| // otherwise the namespace is listed with listOptions. | ||||
| func (a *AccessControlClientset) PodsMetricses(ctx context.Context, namespace, name string, listOptions metav1.ListOptions) (*metrics.PodMetricsList, error) { | ||||
| 	gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "PodMetrics"} | ||||
| 	if !isAllowed(a.staticConfig, gvk) { | ||||
| 		return nil, isNotAllowedError(gvk) | ||||
| 	} | ||||
| 	versionedMetrics := &metricsv1beta1api.PodMetricsList{} | ||||
| 	var err error | ||||
| 	if name != "" { | ||||
| 		m, err := a.metricsV1beta1.PodMetricses(namespace).Get(ctx, name, metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("failed to get metrics for pod %s/%s: %w", namespace, name, err) | ||||
| 		} | ||||
| 		versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m} | ||||
| 	} else { | ||||
| 		versionedMetrics, err = a.metricsV1beta1.PodMetricses(namespace).List(ctx, listOptions) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("failed to list pod metrics in namespace %s: %w", namespace, err) | ||||
| 		} | ||||
| 	} | ||||
| 	// Convert the versioned (v1beta1) response to the internal representation. | ||||
| 	convertedMetrics := &metrics.PodMetricsList{} | ||||
| 	return convertedMetrics, metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, convertedMetrics, nil) | ||||
| } | ||||
|  | ||||
| // Services returns the ServiceInterface for the given namespace, after | ||||
| // verifying that access to core/v1 Services has not been denied. | ||||
| func (a *AccessControlClientset) Services(namespace string) (corev1.ServiceInterface, error) { | ||||
| 	serviceGVK := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"} | ||||
| 	if isAllowed(a.staticConfig, serviceGVK) { | ||||
| 		return a.delegate.CoreV1().Services(namespace), nil | ||||
| 	} | ||||
| 	return nil, isNotAllowedError(serviceGVK) | ||||
| } | ||||
|  | ||||
| // SelfSubjectAccessReviews returns the SelfSubjectAccessReview client, after | ||||
| // verifying that authorization.k8s.io/v1 SelfSubjectAccessReview is not denied. | ||||
| func (a *AccessControlClientset) SelfSubjectAccessReviews() (authorizationv1.SelfSubjectAccessReviewInterface, error) { | ||||
| 	gvk := &schema.GroupVersionKind{Group: authorizationv1api.GroupName, Version: authorizationv1api.SchemeGroupVersion.Version, Kind: "SelfSubjectAccessReview"} | ||||
| 	if !isAllowed(a.staticConfig, gvk) { | ||||
| 		return nil, isNotAllowedError(gvk) | ||||
| 	} | ||||
| 	return a.delegate.AuthorizationV1().SelfSubjectAccessReviews(), nil | ||||
| } | ||||
|  | ||||
| // TokenReview returns TokenReviewInterface after verifying that | ||||
| // authentication.k8s.io/v1 TokenReview is not denied by configuration. | ||||
| func (a *AccessControlClientset) TokenReview() (authenticationv1.TokenReviewInterface, error) { | ||||
| 	// Fix: take the version from the authentication API group (the group being | ||||
| 	// checked), not from the authorization group as before. | ||||
| 	gvk := &schema.GroupVersionKind{Group: authenticationv1api.GroupName, Version: authenticationv1api.SchemeGroupVersion.Version, Kind: "TokenReview"} | ||||
| 	if !isAllowed(a.staticConfig, gvk) { | ||||
| 		return nil, isNotAllowedError(gvk) | ||||
| 	} | ||||
| 	return a.delegate.AuthenticationV1().TokenReviews(), nil | ||||
| } | ||||
|  | ||||
| // NewAccessControlClientset builds an AccessControlClientset from the given | ||||
| // rest.Config, wiring the standard clientset, its discovery client, and a | ||||
| // metrics client, all guarded by the denied-resources list in staticConfig. | ||||
| func NewAccessControlClientset(cfg *rest.Config, staticConfig *config.StaticConfig) (*AccessControlClientset, error) { | ||||
| 	clientSet, err := kubernetes.NewForConfig(cfg) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	metricsClient, err := metricsv1beta1.NewForConfig(cfg) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &AccessControlClientset{ | ||||
| 		cfg:             cfg, | ||||
| 		delegate:        clientSet, | ||||
| 		discoveryClient: clientSet.DiscoveryClient, | ||||
| 		metricsV1beta1:  metricsClient, | ||||
| 		staticConfig:    staticConfig, | ||||
| 	}, nil | ||||
| } | ||||
							
								
								
									
										80
									
								
								pkg/kubernetes/accesscontrol_restmapper.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										80
									
								
								pkg/kubernetes/accesscontrol_restmapper.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,80 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"k8s.io/apimachinery/pkg/api/meta" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/client-go/restmapper" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| // AccessControlRESTMapper wraps a DeferredDiscoveryRESTMapper and rejects | ||||
| // lookups that resolve to a GroupVersionKind in the configured denied list. | ||||
| type AccessControlRESTMapper struct { | ||||
| 	delegate     *restmapper.DeferredDiscoveryRESTMapper | ||||
| 	staticConfig *config.StaticConfig // TODO: maybe just store the denied resource slice | ||||
| } | ||||
|  | ||||
| // Compile-time check that the wrapper satisfies meta.RESTMapper. | ||||
| var _ meta.RESTMapper = &AccessControlRESTMapper{} | ||||
|  | ||||
| // KindFor resolves the resource through the delegate and returns the kind | ||||
| // only when it is not denied by configuration. | ||||
| func (a AccessControlRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { | ||||
| 	resolved, err := a.delegate.KindFor(resource) | ||||
| 	if err != nil { | ||||
| 		return schema.GroupVersionKind{}, err | ||||
| 	} | ||||
| 	if isAllowed(a.staticConfig, &resolved) { | ||||
| 		return resolved, nil | ||||
| 	} | ||||
| 	return schema.GroupVersionKind{}, isNotAllowedError(&resolved) | ||||
| } | ||||
|  | ||||
| // KindsFor resolves all candidate kinds for the resource through the | ||||
| // delegate, failing if any candidate is denied by configuration. | ||||
| func (a AccessControlRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { | ||||
| 	gvks, err := a.delegate.KindsFor(resource) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	for i := range gvks { | ||||
| 		if !isAllowed(a.staticConfig, &gvks[i]) { | ||||
| 			return nil, isNotAllowedError(&gvks[i]) | ||||
| 		} | ||||
| 	} | ||||
| 	return gvks, nil | ||||
| } | ||||
|  | ||||
| // ResourceFor delegates directly; the access check here is kind-based, so | ||||
| // resource-to-resource lookups are not filtered. | ||||
| func (a AccessControlRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { | ||||
| 	return a.delegate.ResourceFor(input) | ||||
| } | ||||
|  | ||||
| // ResourcesFor delegates directly; not filtered for the same reason as ResourceFor. | ||||
| func (a AccessControlRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { | ||||
| 	return a.delegate.ResourcesFor(input) | ||||
| } | ||||
|  | ||||
| // RESTMapping checks every requested version against the denied-resources | ||||
| // list, then verifies the mapping the delegate actually resolves. | ||||
| func (a AccessControlRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { | ||||
| 	for _, version := range versions { | ||||
| 		gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} | ||||
| 		if !isAllowed(a.staticConfig, gvk) { | ||||
| 			return nil, isNotAllowedError(gvk) | ||||
| 		} | ||||
| 	} | ||||
| 	mapping, err := a.delegate.RESTMapping(gk, versions...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Fix: with no explicit versions the loop above checks nothing and the | ||||
| 	// delegate picks a version itself; check the resolved GVK too so denied | ||||
| 	// resources cannot bypass access control through that path. | ||||
| 	if !isAllowed(a.staticConfig, &mapping.GroupVersionKind) { | ||||
| 		return nil, isNotAllowedError(&mapping.GroupVersionKind) | ||||
| 	} | ||||
| 	return mapping, nil | ||||
| } | ||||
|  | ||||
| // RESTMappings checks every requested version against the denied-resources | ||||
| // list, then verifies each mapping the delegate actually resolves. | ||||
| func (a AccessControlRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { | ||||
| 	for _, version := range versions { | ||||
| 		gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} | ||||
| 		if !isAllowed(a.staticConfig, gvk) { | ||||
| 			return nil, isNotAllowedError(gvk) | ||||
| 		} | ||||
| 	} | ||||
| 	mappings, err := a.delegate.RESTMappings(gk, versions...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Fix: with no explicit versions the loop above checks nothing; verify the | ||||
| 	// resolved mappings so denied resources cannot bypass access control. | ||||
| 	for _, mapping := range mappings { | ||||
| 		if !isAllowed(a.staticConfig, &mapping.GroupVersionKind) { | ||||
| 			return nil, isNotAllowedError(&mapping.GroupVersionKind) | ||||
| 		} | ||||
| 	} | ||||
| 	return mappings, nil | ||||
| } | ||||
|  | ||||
| // ResourceSingularizer delegates directly; singularization carries no access | ||||
| // control implications. | ||||
| func (a AccessControlRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { | ||||
| 	return a.delegate.ResourceSingularizer(resource) | ||||
| } | ||||
|  | ||||
| // Reset clears the delegate's cached discovery information. | ||||
| func (a AccessControlRESTMapper) Reset() { | ||||
| 	a.delegate.Reset() | ||||
| } | ||||
|  | ||||
| // NewAccessControlRESTMapper wraps the given deferred discovery RESTMapper | ||||
| // with denied-resources enforcement from staticConfig. | ||||
| func NewAccessControlRESTMapper(delegate *restmapper.DeferredDiscoveryRESTMapper, staticConfig *config.StaticConfig) *AccessControlRESTMapper { | ||||
| 	return &AccessControlRESTMapper{delegate: delegate, staticConfig: staticConfig} | ||||
| } | ||||
							
								
								
									
										19
									
								
								pkg/kubernetes/common_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								pkg/kubernetes/common_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"testing" | ||||
| ) | ||||
|  | ||||
| // TestMain isolates the package tests from the host environment: KUBECONFIG | ||||
| // points at /dev/null and the in-cluster service variables are blanked so no | ||||
| // real cluster configuration leaks into the tests. | ||||
| func TestMain(m *testing.M) { | ||||
| 	// Set up | ||||
| 	_ = os.Setenv("KUBECONFIG", "/dev/null")     // Avoid interference from existing kubeconfig | ||||
| 	_ = os.Setenv("KUBERNETES_SERVICE_HOST", "") // Avoid interference from in-cluster config | ||||
| 	_ = os.Setenv("KUBERNETES_SERVICE_PORT", "") // Avoid interference from in-cluster config | ||||
|  | ||||
| 	// Run tests | ||||
| 	code := m.Run() | ||||
|  | ||||
| 	// Tear down | ||||
| 	os.Exit(code) | ||||
| } | ||||
| @@ -1,33 +1,88 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"k8s.io/cli-runtime/pkg/genericiooptions" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	"k8s.io/component-base/cli/flag" | ||||
| 	"k8s.io/kubectl/pkg/cmd/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| 	"k8s.io/client-go/tools/clientcmd/api/latest" | ||||
| ) | ||||
|  | ||||
| func ConfigurationView() (string, error) { | ||||
| 	outBuffer := &bytes.Buffer{} | ||||
| 	pathOptions := clientcmd.NewDefaultPathOptions() | ||||
| 	ioStreams := genericiooptions.IOStreams{In: nil, Out: outBuffer, ErrOut: outBuffer} | ||||
| 	o := &config.ViewOptions{ | ||||
| 		IOStreams:    ioStreams, | ||||
| 		ConfigAccess: pathOptions, | ||||
| 		PrintFlags:   defaultPrintFlags(), | ||||
| 		Flatten:      true, | ||||
| 		Minify:       true, | ||||
| 		Merge:        flag.True, | ||||
| const inClusterKubeConfigDefaultContext = "in-cluster" | ||||
|  | ||||
| // InClusterConfig is a variable that holds the function to get the in-cluster config | ||||
| // Exposed for testing | ||||
| var InClusterConfig = func() (*rest.Config, error) { | ||||
| 	// TODO use kubernetes.default.svc instead of resolved server | ||||
| 	// Currently running into: `http: server gave HTTP response to HTTPS client` | ||||
| 	inClusterConfig, err := rest.InClusterConfig() | ||||
| 	if inClusterConfig != nil { | ||||
| 		inClusterConfig.Host = "https://kubernetes.default.svc" | ||||
| 	} | ||||
| 	printer, err := o.PrintFlags.ToPrinter() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	o.PrintObject = printer.PrintObj | ||||
| 	err = o.Run() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return outBuffer.String(), nil | ||||
| 	return inClusterConfig, err | ||||
| } | ||||
|  | ||||
| // IsInCluster reports whether the server should treat itself as running | ||||
| // inside a cluster: an in-cluster rest.Config must be obtainable and no | ||||
| // explicit kubeconfig may be configured. | ||||
| func IsInCluster(cfg *config.StaticConfig) bool { | ||||
| 	// Even if running in-cluster, if a kubeconfig is provided, we consider it as out-of-cluster | ||||
| 	if cfg != nil && cfg.KubeConfig != "" { | ||||
| 		return false | ||||
| 	} | ||||
| 	restConfig, err := InClusterConfig() | ||||
| 	return err == nil && restConfig != nil | ||||
| } | ||||
|  | ||||
| // NamespaceOrDefault delegates to the manager's namespace resolution, | ||||
| // substituting its default when namespace is empty. | ||||
| func (k *Kubernetes) NamespaceOrDefault(namespace string) string { | ||||
| 	return k.manager.NamespaceOrDefault(namespace) | ||||
| } | ||||
|  | ||||
| // ConfigurationContextsDefault returns the current context name | ||||
| // as recorded in the raw kubeconfig held by the manager. | ||||
| // TODO: Should be moved to the Provider level ? | ||||
| func (k *Kubernetes) ConfigurationContextsDefault() (string, error) { | ||||
| 	cfg, err := k.manager.clientCmdConfig.RawConfig() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return cfg.CurrentContext, nil | ||||
| } | ||||
|  | ||||
| // ConfigurationContextsList returns the list of available context names | ||||
| // mapped to their cluster's server URL ("unknown" when the referenced | ||||
| // cluster is missing or has no server set). | ||||
| // TODO: Should be moved to the Provider level ? | ||||
| func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) { | ||||
| 	raw, err := k.manager.clientCmdConfig.RawConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	servers := make(map[string]string, len(raw.Contexts)) | ||||
| 	for name, kubeCtx := range raw.Contexts { | ||||
| 		server := "unknown" | ||||
| 		if cluster, found := raw.Clusters[kubeCtx.Cluster]; found && cluster.Server != "" { | ||||
| 			server = cluster.Server | ||||
| 		} | ||||
| 		servers[name] = server | ||||
| 	} | ||||
| 	return servers, nil | ||||
| } | ||||
|  | ||||
| // ConfigurationView returns the current kubeconfig content as a kubeconfig YAML | ||||
| // If minify is true, keeps only the current-context and the relevant pieces of the configuration for that context. | ||||
| // If minify is false, all contexts, clusters, auth-infos, and users are returned in the configuration. | ||||
| // TODO: Should be moved to the Provider level ? | ||||
| func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) { | ||||
| 	var cfg clientcmdapi.Config | ||||
| 	var err error | ||||
| 	if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if minify { | ||||
| 		if err = clientcmdapi.MinifyConfig(&cfg); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	// Flattening inlines file references; its error is deliberately ignored so | ||||
| 	// a partially flattenable config is still returned (best effort). | ||||
| 	//nolint:staticcheck | ||||
| 	if err = clientcmdapi.FlattenConfig(&cfg); err != nil { | ||||
| 		// ignore error | ||||
| 		//return "", err | ||||
| 	} | ||||
| 	return latest.Scheme.ConvertToVersion(&cfg, latest.ExternalVersion) | ||||
| } | ||||
|   | ||||
							
								
								
									
										51
									
								
								pkg/kubernetes/events.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										51
									
								
								pkg/kubernetes/events.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,51 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| ) | ||||
|  | ||||
| // EventsList lists core/v1 Events in the given namespace and flattens each | ||||
| // event into a small map (Namespace, Timestamp, Type, Reason, InvolvedObject, | ||||
| // Message) suitable for serialization. | ||||
| func (k *Kubernetes) EventsList(ctx context.Context, namespace string) ([]map[string]any, error) { | ||||
| 	var eventMap []map[string]any | ||||
| 	raw, err := k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Event", | ||||
| 	}, namespace, ResourceListOptions{}) | ||||
| 	if err != nil { | ||||
| 		return eventMap, err | ||||
| 	} | ||||
| 	// Fix: guard the type assertion instead of panicking if ResourcesList ever | ||||
| 	// returns something other than an *unstructured.UnstructuredList. | ||||
| 	unstructuredList, ok := raw.(*unstructured.UnstructuredList) | ||||
| 	if !ok { | ||||
| 		return eventMap, fmt.Errorf("unexpected list type for events: %T", raw) | ||||
| 	} | ||||
| 	if len(unstructuredList.Items) == 0 { | ||||
| 		return eventMap, nil | ||||
| 	} | ||||
| 	for _, item := range unstructuredList.Items { | ||||
| 		event := &v1.Event{} | ||||
| 		if err = runtime.DefaultUnstructuredConverter.FromUnstructured(item.Object, event); err != nil { | ||||
| 			return eventMap, err | ||||
| 		} | ||||
| 		// Prefer EventTime; fall back to the series' last observation, then the | ||||
| 		// last/first timestamps for repeated and single legacy events. | ||||
| 		timestamp := event.EventTime.Time | ||||
| 		if timestamp.IsZero() && event.Series != nil { | ||||
| 			timestamp = event.Series.LastObservedTime.Time | ||||
| 		} else if timestamp.IsZero() && event.Count > 1 { | ||||
| 			timestamp = event.LastTimestamp.Time | ||||
| 		} else if timestamp.IsZero() { | ||||
| 			timestamp = event.FirstTimestamp.Time | ||||
| 		} | ||||
| 		eventMap = append(eventMap, map[string]any{ | ||||
| 			"Namespace": event.Namespace, | ||||
| 			"Timestamp": timestamp.String(), | ||||
| 			"Type":      event.Type, | ||||
| 			"Reason":    event.Reason, | ||||
| 			"InvolvedObject": map[string]string{ | ||||
| 				"apiVersion": event.InvolvedObject.APIVersion, | ||||
| 				"Kind":       event.InvolvedObject.Kind, | ||||
| 				"Name":       event.InvolvedObject.Name, | ||||
| 			}, | ||||
| 			"Message": strings.TrimSpace(event.Message), | ||||
| 		}) | ||||
| 	} | ||||
| 	return eventMap, nil | ||||
| } | ||||
							
								
								
									
										17
									
								
								pkg/kubernetes/impersonate_roundtripper.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										17
									
								
								pkg/kubernetes/impersonate_roundtripper.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,17 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import "net/http" | ||||
|  | ||||
| // impersonateRoundTripper copies an Authorization header from the request | ||||
| // context into the outgoing request before delegating. | ||||
| // nolint:unused | ||||
| type impersonateRoundTripper struct { | ||||
| 	delegate http.RoundTripper | ||||
| } | ||||
|  | ||||
| // RoundTrip forwards the request, overriding the Authorization header with | ||||
| // the OAuthAuthorizationHeader value from the context when present. | ||||
| // nolint:unused | ||||
| func (irt *impersonateRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { | ||||
| 	// TODO: Solution won't work with discoveryclient which uses context.TODO() instead of the passed-in context | ||||
| 	if v, ok := req.Context().Value(OAuthAuthorizationHeader).(string); ok { | ||||
| 		req.Header.Set("Authorization", v) | ||||
| 	} | ||||
| 	return irt.delegate.RoundTrip(req) | ||||
| } | ||||
| @@ -1,37 +1,39 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"k8s.io/cli-runtime/pkg/genericclioptions" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/restmapper" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	"k8s.io/kubectl/pkg/scheme" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/helm" | ||||
| 	"k8s.io/client-go/kubernetes/scheme" | ||||
|  | ||||
| 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc" | ||||
| ) | ||||
|  | ||||
| type HeaderKey string | ||||
|  | ||||
| const ( | ||||
| 	CustomAuthorizationHeader = HeaderKey("kubernetes-authorization") | ||||
| 	OAuthAuthorizationHeader  = HeaderKey("Authorization") | ||||
|  | ||||
| 	CustomUserAgent = "kubernetes-mcp-server/bearer-token-auth" | ||||
| ) | ||||
|  | ||||
| type CloseWatchKubeConfig func() error | ||||
|  | ||||
| type Kubernetes struct { | ||||
| 	cfg                         *rest.Config | ||||
| 	deferredDiscoveryRESTMapper *restmapper.DeferredDiscoveryRESTMapper | ||||
| 	manager *Manager | ||||
| } | ||||
|  | ||||
| func NewKubernetes() (*Kubernetes, error) { | ||||
| 	cfg, err := resolveClientConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &Kubernetes{cfg: cfg}, nil | ||||
| // AccessControlClientset returns the access-controlled clientset | ||||
| // This ensures that any denied resources configured in the system are properly enforced | ||||
| func (k *Kubernetes) AccessControlClientset() *AccessControlClientset { | ||||
| 	return k.manager.accessControlClientSet | ||||
| } | ||||
|  | ||||
| func defaultPrintFlags() *genericclioptions.PrintFlags { | ||||
| 	return genericclioptions.NewPrintFlags(""). | ||||
| 		WithTypeSetter(scheme.Scheme). | ||||
| 		WithDefaultOutput("yaml") | ||||
| } | ||||
| var Scheme = scheme.Scheme | ||||
| var ParameterCodec = runtime.NewParameterCodec(Scheme) | ||||
|  | ||||
| func resolveClientConfig() (*rest.Config, error) { | ||||
| 	inClusterConfig, err := rest.InClusterConfig() | ||||
| 	if err == nil && inClusterConfig != nil { | ||||
| 		return inClusterConfig, nil | ||||
| 	} | ||||
| 	pathOptions := clientcmd.NewDefaultPathOptions() | ||||
| 	return clientcmd.BuildConfigFromFlags("", pathOptions.GetDefaultFilename()) | ||||
// NewHelm returns a Helm accessor backed by this Kubernetes' manager.
func (k *Kubernetes) NewHelm() *helm.Helm {
	// This is a derived Kubernetes, so it already has the Helm initialized
	return helm.NewHelm(k.manager)
}
|   | ||||
							
								
								
									
										185
									
								
								pkg/kubernetes/kubernetes_derived_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										185
									
								
								pkg/kubernetes/kubernetes_derived_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,185 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| ) | ||||
|  | ||||
// DerivedTestSuite exercises Manager.Derived, which builds a per-request
// Kubernetes accessor from the Authorization header found in the context.
type DerivedTestSuite struct {
	suite.Suite
}
|  | ||||
// TestKubeConfig verifies Manager.Derived against a throwaway kubeconfig that
// uses basic-auth credentials, so the test can assert that the derived
// rest.Config strips all inherited credentials and keeps only the bearer
// token from the Authorization header.
func (s *DerivedTestSuite) TestKubeConfig() {
	// Create a temporary kubeconfig file for testing
	tempDir := s.T().TempDir()
	kubeconfigPath := filepath.Join(tempDir, "config")
	kubeconfigContent := `
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://test-cluster.example.com
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
  name: test-context
current-context: test-context
users:
- name: test-user
  user:
    username: test-username
    password: test-password
`
	err := os.WriteFile(kubeconfigPath, []byte(kubeconfigContent), 0644)
	s.Require().NoError(err, "failed to create kubeconfig file")

	s.Run("with no RequireOAuth (default) config", func() {
		// Backslashes are escaped so Windows paths survive TOML parsing.
		testStaticConfig := test.Must(config.ReadToml([]byte(`
			kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
		`)))
		s.Run("without authorization header returns original manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			derived, err := testManager.Derived(s.T().Context())
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.Equal(derived.manager, testManager, "expected original manager, got different manager")
		})

		s.Run("with invalid authorization header returns original manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			// "invalid-token" has no "Bearer " prefix, so Derived falls back.
			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.Equal(derived.manager, testManager, "expected original manager, got different manager")
		})

		s.Run("with valid bearer token creates derived manager with correct configuration", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
			s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")

			s.Run("RestConfig is correctly copied and sensitive fields are omitted", func() {
				derivedCfg := derived.manager.cfg
				s.Require().NotNil(derivedCfg, "derived config is nil")

				originalCfg := testManager.cfg
				s.Equalf(originalCfg.Host, derivedCfg.Host, "expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
				s.Equalf(originalCfg.APIPath, derivedCfg.APIPath, "expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
				s.Equalf(originalCfg.QPS, derivedCfg.QPS, "expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
				s.Equalf(originalCfg.Burst, derivedCfg.Burst, "expected Burst %d, got %d", originalCfg.Burst, derivedCfg.Burst)
				s.Equalf(originalCfg.Timeout, derivedCfg.Timeout, "expected Timeout %v, got %v", originalCfg.Timeout, derivedCfg.Timeout)

				s.Equalf(originalCfg.Insecure, derivedCfg.Insecure, "expected TLS Insecure %v, got %v", originalCfg.Insecure, derivedCfg.Insecure)
				s.Equalf(originalCfg.ServerName, derivedCfg.ServerName, "expected TLS ServerName %s, got %s", originalCfg.ServerName, derivedCfg.ServerName)
				s.Equalf(originalCfg.CAFile, derivedCfg.CAFile, "expected TLS CAFile %s, got %s", originalCfg.CAFile, derivedCfg.CAFile)
				s.Equalf(string(originalCfg.CAData), string(derivedCfg.CAData), "expected TLS CAData %s, got %s", string(originalCfg.CAData), string(derivedCfg.CAData))

				s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
				s.Equalf("kubernetes-mcp-server/bearer-token-auth", derivedCfg.UserAgent, "expected UserAgent \"kubernetes-mcp-server/bearer-token-auth\", got %s", derivedCfg.UserAgent)

				// Verify that sensitive fields are NOT copied to prevent credential leakage
				// The derived config should only use the bearer token from the Authorization header
				// and not inherit any authentication credentials from the original kubeconfig
				s.Emptyf(derivedCfg.CertFile, "expected TLS CertFile to be empty, got %s", derivedCfg.CertFile)
				s.Emptyf(derivedCfg.KeyFile, "expected TLS KeyFile to be empty, got %s", derivedCfg.KeyFile)
				s.Emptyf(len(derivedCfg.CertData), "expected TLS CertData to be empty, got %v", derivedCfg.CertData)
				s.Emptyf(len(derivedCfg.KeyData), "expected TLS KeyData to be empty, got %v", derivedCfg.KeyData)

				s.Emptyf(derivedCfg.Username, "expected Username to be empty, got %s", derivedCfg.Username)
				s.Emptyf(derivedCfg.Password, "expected Password to be empty, got %s", derivedCfg.Password)
				s.Nilf(derivedCfg.AuthProvider, "expected AuthProvider to be nil, got %v", derivedCfg.AuthProvider)
				s.Nilf(derivedCfg.ExecProvider, "expected ExecProvider to be nil, got %v", derivedCfg.ExecProvider)
				s.Emptyf(derivedCfg.BearerTokenFile, "expected BearerTokenFile to be empty, got %s", derivedCfg.BearerTokenFile)
				s.Emptyf(derivedCfg.Impersonate.UserName, "expected Impersonate.UserName to be empty, got %s", derivedCfg.Impersonate.UserName)

				// Verify that the original manager still has the sensitive data
				s.Falsef(originalCfg.Username == "" && originalCfg.Password == "", "original kubeconfig shouldn't be modified")

			})
			s.Run("derived manager has initialized clients", func() {
				// Verify that the derived manager has proper clients initialized
				s.NotNilf(derived.manager.accessControlClientSet, "expected accessControlClientSet to be initialized")
				s.Equalf(testStaticConfig, derived.manager.accessControlClientSet.staticConfig, "staticConfig not properly wired to derived manager")
				s.NotNilf(derived.manager.discoveryClient, "expected discoveryClient to be initialized")
				s.NotNilf(derived.manager.accessControlRESTMapper, "expected accessControlRESTMapper to be initialized")
				s.Equalf(testStaticConfig, derived.manager.accessControlRESTMapper.staticConfig, "staticConfig not properly wired to derived manager")
				s.NotNilf(derived.manager.dynamicClient, "expected dynamicClient to be initialized")
			})
		})
	})

	// With require_oauth enabled, a missing or malformed bearer token must be
	// rejected instead of silently falling back to the original manager.
	s.Run("with RequireOAuth=true", func() {
		testStaticConfig := test.Must(config.ReadToml([]byte(`
			kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `"
			require_oauth = true
		`)))

		s.Run("with no authorization header returns oauth token required error", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			derived, err := testManager.Derived(s.T().Context())
			s.Require().Error(err, "expected error for missing oauth token, got nil")
			s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
			s.Nil(derived, "expected nil derived manager when oauth token required")
		})

		s.Run("with invalid authorization header returns oauth token required error", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "invalid-token")
			derived, err := testManager.Derived(ctx)
			s.Require().Error(err, "expected error for invalid oauth token, got nil")
			s.EqualError(err, "oauth token required", "expected error 'oauth token required', got %s", err.Error())
			s.Nil(derived, "expected nil derived manager when oauth token required")
		})

		s.Run("with valid bearer token creates derived manager", func() {
			testManager, err := NewKubeconfigManager(testStaticConfig, "")
			s.Require().NoErrorf(err, "failed to create test manager: %v", err)
			s.T().Cleanup(testManager.Close)

			ctx := context.WithValue(s.T().Context(), HeaderKey("Authorization"), "Bearer aiTana-julIA")
			derived, err := testManager.Derived(ctx)
			s.Require().NoErrorf(err, "failed to create derived manager: %v", err)

			s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
			s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")

			derivedCfg := derived.manager.cfg
			s.Require().NotNil(derivedCfg, "derived config is nil")

			s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
		})
	})
}
|  | ||||
// TestDerived runs the DerivedTestSuite under the standard test runner.
func TestDerived(t *testing.T) {
	suite.Run(t, new(DerivedTestSuite))
}
							
								
								
									
										301
									
								
								pkg/kubernetes/manager.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										301
									
								
								pkg/kubernetes/manager.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,301 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/helm" | ||||
| 	"github.com/fsnotify/fsnotify" | ||||
| 	authenticationv1api "k8s.io/api/authentication/v1" | ||||
| 	"k8s.io/apimachinery/pkg/api/meta" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/client-go/discovery" | ||||
| 	"k8s.io/client-go/discovery/cached/memory" | ||||
| 	"k8s.io/client-go/dynamic" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/restmapper" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
// Manager bundles the Kubernetes client configuration with the set of
// access-controlled clients derived from it. It is constructed via
// NewKubeconfigManager or NewInClusterManager and can spawn per-request
// variants through Derived.
type Manager struct {
	cfg                     *rest.Config                       // REST configuration all clients are built from
	clientCmdConfig         clientcmd.ClientConfig             // kubeconfig loader (real file or synthetic in-cluster stand-in)
	discoveryClient         discovery.CachedDiscoveryInterface // memory-cached discovery client
	accessControlClientSet  *AccessControlClientset            // clientset enforcing the configured resource denials
	accessControlRESTMapper *AccessControlRESTMapper           // REST mapper enforcing the configured resource denials
	dynamicClient           *dynamic.DynamicClient

	staticConfig *config.StaticConfig // server-wide static configuration
	// CloseWatchKubeConfig stops the kubeconfig file watcher started by WatchKubeConfig, if any.
	CloseWatchKubeConfig CloseWatchKubeConfig
}
|  | ||||
// Compile-time interface satisfaction checks.
var _ helm.Kubernetes = (*Manager)(nil)
var _ Openshift = (*Manager)(nil)

// Sentinel errors for constructing a Manager in the wrong environment;
// callers can detect them with errors.Is.
var (
	ErrorKubeconfigInClusterNotAllowed = errors.New("kubeconfig manager cannot be used in in-cluster deployments")
	ErrorInClusterNotInCluster         = errors.New("in-cluster manager cannot be used outside of a cluster")
)
|  | ||||
| func NewKubeconfigManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) { | ||||
| 	if IsInCluster(config) { | ||||
| 		return nil, ErrorKubeconfigInClusterNotAllowed | ||||
| 	} | ||||
|  | ||||
| 	pathOptions := clientcmd.NewDefaultPathOptions() | ||||
| 	if config.KubeConfig != "" { | ||||
| 		pathOptions.LoadingRules.ExplicitPath = config.KubeConfig | ||||
| 	} | ||||
| 	clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( | ||||
| 		pathOptions.LoadingRules, | ||||
| 		&clientcmd.ConfigOverrides{ | ||||
| 			ClusterInfo:    clientcmdapi.Cluster{Server: ""}, | ||||
| 			CurrentContext: kubeconfigContext, | ||||
| 		}) | ||||
|  | ||||
| 	restConfig, err := clientCmdConfig.ClientConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to create kubernetes rest config from kubeconfig: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	return newManager(config, restConfig, clientCmdConfig) | ||||
| } | ||||
|  | ||||
| func NewInClusterManager(config *config.StaticConfig) (*Manager, error) { | ||||
| 	if config.KubeConfig != "" { | ||||
| 		return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster deployments: %v", config.KubeConfig, ErrorKubeconfigInClusterNotAllowed) | ||||
| 	} | ||||
|  | ||||
| 	if !IsInCluster(config) { | ||||
| 		return nil, ErrorInClusterNotInCluster | ||||
| 	} | ||||
|  | ||||
| 	restConfig, err := InClusterConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("failed to create in-cluster kubernetes rest config: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	// Create a dummy kubeconfig clientcmdapi.Config for in-cluster config to be used in places where clientcmd.ClientConfig is required | ||||
| 	clientCmdConfig := clientcmdapi.NewConfig() | ||||
| 	clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{ | ||||
| 		Server:                restConfig.Host, | ||||
| 		InsecureSkipTLSVerify: restConfig.Insecure, | ||||
| 	} | ||||
| 	clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{ | ||||
| 		Token: restConfig.BearerToken, | ||||
| 	} | ||||
| 	clientCmdConfig.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{ | ||||
| 		Cluster:  "cluster", | ||||
| 		AuthInfo: "user", | ||||
| 	} | ||||
| 	clientCmdConfig.CurrentContext = inClusterKubeConfigDefaultContext | ||||
|  | ||||
| 	return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil)) | ||||
| } | ||||
|  | ||||
// newManager wires up the shared client stack (access-controlled clientset,
// cached discovery, access-controlled REST mapper, dynamic client) on top of
// the given rest.Config and kubeconfig loader. All clients share the same
// rest.Config, so access-control denials are enforced consistently.
func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) {
	k8s := &Manager{
		staticConfig:    config,
		cfg:             restConfig,
		clientCmdConfig: clientCmdConfig,
	}
	if k8s.cfg.UserAgent == "" {
		k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
	}
	var err error
	// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
	//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
	//	return &impersonateRoundTripper{original}
	//})
	k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
	if err != nil {
		return nil, err
	}
	// Discovery results are memory-cached; the REST mapper defers discovery
	// until first use and goes through the same access-control filter.
	k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
	k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
		restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
		k8s.staticConfig,
	)
	k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
	if err != nil {
		return nil, err
	}
	return k8s, nil
}
|  | ||||
// WatchKubeConfig starts watching every kubeconfig file in the loading
// precedence and invokes onKubeConfigChange on any filesystem event. It is a
// no-op when there is no kubeconfig loader or no files to watch, and it
// silently gives up if the fsnotify watcher cannot be created (best-effort
// feature). A previously started watcher is closed before the new one is
// registered; the new watcher's Close is stored in CloseWatchKubeConfig.
func (m *Manager) WatchKubeConfig(onKubeConfigChange func() error) {
	if m.clientCmdConfig == nil {
		return
	}
	kubeConfigFiles := m.clientCmdConfig.ConfigAccess().GetLoadingPrecedence()
	if len(kubeConfigFiles) == 0 {
		return
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return
	}
	for _, file := range kubeConfigFiles {
		// Best-effort: files that cannot be watched (e.g. missing) are skipped.
		_ = watcher.Add(file)
	}
	// The goroutine exits when the watcher is closed (both channels close).
	go func() {
		for {
			select {
			case _, ok := <-watcher.Events:
				if !ok {
					return
				}
				// The callback's error is intentionally ignored; the watch keeps running.
				_ = onKubeConfigChange()
			case _, ok := <-watcher.Errors:
				if !ok {
					return
				}
			}
		}
	}()
	if m.CloseWatchKubeConfig != nil {
		_ = m.CloseWatchKubeConfig()
	}
	m.CloseWatchKubeConfig = watcher.Close
}
|  | ||||
| func (m *Manager) Close() { | ||||
| 	if m.CloseWatchKubeConfig != nil { | ||||
| 		_ = m.CloseWatchKubeConfig() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *Manager) configuredNamespace() string { | ||||
| 	if ns, _, nsErr := m.clientCmdConfig.Namespace(); nsErr == nil { | ||||
| 		return ns | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func (m *Manager) NamespaceOrDefault(namespace string) string { | ||||
| 	if namespace == "" { | ||||
| 		return m.configuredNamespace() | ||||
| 	} | ||||
| 	return namespace | ||||
| } | ||||
|  | ||||
// ToDiscoveryClient returns the cached discovery client (genericclioptions.RESTClientGetter).
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
	return m.discoveryClient, nil
}
|  | ||||
// ToRESTMapper returns the access-controlled REST mapper (genericclioptions.RESTClientGetter).
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
	return m.accessControlRESTMapper, nil
}
|  | ||||
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter).
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
	return m.cfg, nil
}
|  | ||||
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter).
func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
	return m.clientCmdConfig
}
|  | ||||
| func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) { | ||||
| 	tokenReviewClient, err := m.accessControlClientSet.TokenReview() | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 	tokenReview := &authenticationv1api.TokenReview{ | ||||
| 		TypeMeta: metav1.TypeMeta{ | ||||
| 			APIVersion: "authentication.k8s.io/v1", | ||||
| 			Kind:       "TokenReview", | ||||
| 		}, | ||||
| 		Spec: authenticationv1api.TokenReviewSpec{ | ||||
| 			Token:     token, | ||||
| 			Audiences: []string{audience}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	result, err := tokenReviewClient.Create(ctx, tokenReview, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, fmt.Errorf("failed to create token review: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	if !result.Status.Authenticated { | ||||
| 		if result.Status.Error != "" { | ||||
| 			return nil, nil, fmt.Errorf("token authentication failed: %s", result.Status.Error) | ||||
| 		} | ||||
| 		return nil, nil, fmt.Errorf("token authentication failed") | ||||
| 	} | ||||
|  | ||||
| 	return &result.Status.User, result.Status.Audiences, nil | ||||
| } | ||||
|  | ||||
| func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) { | ||||
| 	authorization, ok := ctx.Value(OAuthAuthorizationHeader).(string) | ||||
| 	if !ok || !strings.HasPrefix(authorization, "Bearer ") { | ||||
| 		if m.staticConfig.RequireOAuth { | ||||
| 			return nil, errors.New("oauth token required") | ||||
| 		} | ||||
| 		return &Kubernetes{manager: m}, nil | ||||
| 	} | ||||
| 	klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader) | ||||
| 	derivedCfg := &rest.Config{ | ||||
| 		Host:    m.cfg.Host, | ||||
| 		APIPath: m.cfg.APIPath, | ||||
| 		// Copy only server verification TLS settings (CA bundle and server name) | ||||
| 		TLSClientConfig: rest.TLSClientConfig{ | ||||
| 			Insecure:   m.cfg.Insecure, | ||||
| 			ServerName: m.cfg.ServerName, | ||||
| 			CAFile:     m.cfg.CAFile, | ||||
| 			CAData:     m.cfg.CAData, | ||||
| 		}, | ||||
| 		BearerToken: strings.TrimPrefix(authorization, "Bearer "), | ||||
| 		// pass custom UserAgent to identify the client | ||||
| 		UserAgent:   CustomUserAgent, | ||||
| 		QPS:         m.cfg.QPS, | ||||
| 		Burst:       m.cfg.Burst, | ||||
| 		Timeout:     m.cfg.Timeout, | ||||
| 		Impersonate: rest.ImpersonationConfig{}, | ||||
| 	} | ||||
| 	clientCmdApiConfig, err := m.clientCmdConfig.RawConfig() | ||||
| 	if err != nil { | ||||
| 		if m.staticConfig.RequireOAuth { | ||||
| 			klog.Errorf("failed to get kubeconfig: %v", err) | ||||
| 			return nil, errors.New("failed to get kubeconfig") | ||||
| 		} | ||||
| 		return &Kubernetes{manager: m}, nil | ||||
| 	} | ||||
| 	clientCmdApiConfig.AuthInfos = make(map[string]*clientcmdapi.AuthInfo) | ||||
| 	derived := &Kubernetes{ | ||||
| 		manager: &Manager{ | ||||
| 			clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil), | ||||
| 			cfg:             derivedCfg, | ||||
| 			staticConfig:    m.staticConfig, | ||||
| 		}, | ||||
| 	} | ||||
| 	derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig) | ||||
| 	if err != nil { | ||||
| 		if m.staticConfig.RequireOAuth { | ||||
| 			klog.Errorf("failed to get kubeconfig: %v", err) | ||||
| 			return nil, errors.New("failed to get kubeconfig") | ||||
| 		} | ||||
| 		return &Kubernetes{manager: m}, nil | ||||
| 	} | ||||
| 	derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient()) | ||||
| 	derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper( | ||||
| 		restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient), | ||||
| 		derived.manager.staticConfig, | ||||
| 	) | ||||
| 	derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg) | ||||
| 	if err != nil { | ||||
| 		if m.staticConfig.RequireOAuth { | ||||
| 			klog.Errorf("failed to initialize dynamic client: %v", err) | ||||
| 			return nil, errors.New("failed to initialize dynamic client") | ||||
| 		} | ||||
| 		return &Kubernetes{manager: m}, nil | ||||
| 	} | ||||
| 	return derived, nil | ||||
| } | ||||
							
								
								
									
										202
									
								
								pkg/kubernetes/manager_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								pkg/kubernetes/manager_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| ) | ||||
|  | ||||
// ManagerTestSuite exercises the Manager constructors. It snapshots the
// process environment and the InClusterConfig hook so each test can stub
// them and have them restored afterwards.
type ManagerTestSuite struct {
	suite.Suite
	originalEnv             []string                      // environment snapshot taken in SetupTest
	originalInClusterConfig func() (*rest.Config, error)  // original InClusterConfig hook
	mockServer              *test.MockServer              // fake API server backing kubeconfig fixtures
}
|  | ||||
// SetupTest snapshots mutable global state (environment, InClusterConfig)
// and starts a fresh mock API server before each test.
func (s *ManagerTestSuite) SetupTest() {
	s.originalEnv = os.Environ()
	s.originalInClusterConfig = InClusterConfig
	s.mockServer = test.NewMockServer()
}
|  | ||||
// TearDownTest restores the environment and the InClusterConfig hook, and
// shuts down the mock API server started in SetupTest.
func (s *ManagerTestSuite) TearDownTest() {
	test.RestoreEnv(s.originalEnv)
	InClusterConfig = s.originalInClusterConfig
	if s.mockServer != nil {
		s.mockServer.Close()
	}
}
|  | ||||
// TestNewInClusterManager covers both sides of the in-cluster check by
// stubbing InClusterConfig: success inside a cluster (with and without an
// explicit kubeconfig) and rejection outside a cluster.
func (s *ManagerTestSuite) TestNewInClusterManager() {
	s.Run("In cluster", func() {
		InClusterConfig = func() (*rest.Config, error) {
			return &rest.Config{}, nil
		}
		s.Run("with default StaticConfig (empty kubeconfig)", func() {
			manager, err := NewInClusterManager(&config.StaticConfig{})
			s.Require().NoError(err)
			s.Require().NotNil(manager)
			s.Run("behaves as in cluster", func() {
				rawConfig, err := manager.clientCmdConfig.RawConfig()
				s.Require().NoError(err)
				s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'")
			})
			s.Run("sets default user-agent", func() {
				// The default client-go user agent embeds "(GOOS/GOARCH)".
				s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
			})
		})
		s.Run("with explicit kubeconfig", func() {
			manager, err := NewInClusterManager(&config.StaticConfig{
				KubeConfig: s.mockServer.KubeconfigFile(s.T()),
			})
			s.Run("returns error", func() {
				s.Error(err)
				s.Nil(manager)
				s.Regexp("kubeconfig file .+ cannot be used with the in-cluster deployments", err.Error())
			})
		})
	})
	s.Run("Out of cluster", func() {
		InClusterConfig = func() (*rest.Config, error) {
			return nil, rest.ErrNotInCluster
		}
		manager, err := NewInClusterManager(&config.StaticConfig{})
		s.Run("returns error", func() {
			s.Error(err)
			s.Nil(manager)
			s.ErrorIs(err, ErrorInClusterNotInCluster)
			s.ErrorContains(err, "in-cluster manager cannot be used outside of a cluster")
		})
	})
}
|  | ||||
| func (s *ManagerTestSuite) TestNewKubeconfigManager() { | ||||
| 	s.Run("Out of cluster", func() { | ||||
| 		InClusterConfig = func() (*rest.Config, error) { | ||||
| 			return nil, rest.ErrNotInCluster | ||||
| 		} | ||||
| 		s.Run("with valid kubeconfig in env", func() { | ||||
| 			kubeconfig := s.mockServer.KubeconfigFile(s.T()) | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig)) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") | ||||
| 			s.Require().NoError(err) | ||||
| 			s.Require().NotNil(manager) | ||||
| 			s.Run("behaves as NOT in cluster", func() { | ||||
| 				rawConfig, err := manager.clientCmdConfig.RawConfig() | ||||
| 				s.Require().NoError(err) | ||||
| 				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") | ||||
| 				s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig") | ||||
| 			}) | ||||
| 			s.Run("loads correct config", func() { | ||||
| 				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match") | ||||
| 			}) | ||||
| 			s.Run("sets default user-agent", func() { | ||||
| 				s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")") | ||||
| 			}) | ||||
| 			s.Run("rest config host points to mock server", func() { | ||||
| 				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") | ||||
| 			}) | ||||
| 		}) | ||||
| 		s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() { | ||||
| 			kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) | ||||
| 			kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T()) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{ | ||||
| 				KubeConfig: kubeconfigExplicit, | ||||
| 			}, "") | ||||
| 			s.Require().NoError(err) | ||||
| 			s.Require().NotNil(manager) | ||||
| 			s.Run("behaves as NOT in cluster", func() { | ||||
| 				rawConfig, err := manager.clientCmdConfig.RawConfig() | ||||
| 				s.Require().NoError(err) | ||||
| 				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") | ||||
| 				s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig") | ||||
| 			}) | ||||
| 			s.Run("loads correct config (explicit)", func() { | ||||
| 				s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env") | ||||
| 				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit") | ||||
| 			}) | ||||
| 			s.Run("rest config host points to mock server", func() { | ||||
| 				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") | ||||
| 			}) | ||||
| 		}) | ||||
| 		s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() { | ||||
| 			kubeconfig := s.mockServer.Kubeconfig() | ||||
| 			kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext() | ||||
| 			kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server" | ||||
| 			kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster() | ||||
| 			kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this | ||||
| 			kubeconfig.CurrentContext = "not-the-mock-server" | ||||
| 			kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig) | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile)) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves | ||||
| 			s.Require().NoError(err) | ||||
| 			s.Require().NotNil(manager) | ||||
| 			s.Run("behaves as NOT in cluster", func() { | ||||
| 				rawConfig, err := manager.clientCmdConfig.RawConfig() | ||||
| 				s.Require().NoError(err) | ||||
| 				s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") | ||||
| 				s.Equal("not-the-mock-server", rawConfig.CurrentContext, "expected current context to be 'not-the-mock-server' as in explicit context") | ||||
| 			}) | ||||
| 			s.Run("loads correct config", func() { | ||||
| 				s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match") | ||||
| 			}) | ||||
| 			s.Run("rest config host points to mock server", func() { | ||||
| 				s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") | ||||
| 			}) | ||||
| 		}) | ||||
| 		s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() { | ||||
| 			kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "i-do-not-exist") | ||||
| 			s.Run("returns error", func() { | ||||
| 				s.Error(err) | ||||
| 				s.Nil(manager) | ||||
| 				s.ErrorContains(err, `failed to create kubernetes rest config from kubeconfig: context "i-do-not-exist" does not exist`) | ||||
| 			}) | ||||
| 		}) | ||||
| 		s.Run("with invalid path kubeconfig in env", func() { | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist")) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") | ||||
| 			s.Run("returns error", func() { | ||||
| 				s.Error(err) | ||||
| 				s.Nil(manager) | ||||
| 				s.ErrorContains(err, "failed to create kubernetes rest config") | ||||
| 			}) | ||||
| 		}) | ||||
| 		s.Run("with empty kubeconfig in env", func() { | ||||
| 			kubeconfigPath := filepath.Join(s.T().TempDir(), "config") | ||||
| 			s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644)) | ||||
| 			s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath)) | ||||
| 			manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") | ||||
| 			s.Run("returns error", func() { | ||||
| 				s.Error(err) | ||||
| 				s.Nil(manager) | ||||
| 				s.ErrorContains(err, "no configuration has been provided") | ||||
| 			}) | ||||
| 		}) | ||||
| 	}) | ||||
| 	s.Run("In cluster", func() { | ||||
| 		InClusterConfig = func() (*rest.Config, error) { | ||||
| 			return &rest.Config{}, nil | ||||
| 		} | ||||
| 		manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") | ||||
| 		s.Run("returns error", func() { | ||||
| 			s.Error(err) | ||||
| 			s.Nil(manager) | ||||
| 			s.ErrorIs(err, ErrorKubeconfigInClusterNotAllowed) | ||||
| 			s.ErrorContains(err, "kubeconfig manager cannot be used in in-cluster deployments") | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestManager runs the ManagerTestSuite via testify's suite runner.
func TestManager(t *testing.T) {
	suite.Run(t, new(ManagerTestSuite))
}
							
								
								
									
										19
									
								
								pkg/kubernetes/namespaces.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								pkg/kubernetes/namespaces.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| ) | ||||
|  | ||||
| func (k *Kubernetes) NamespacesList(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	return k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Namespace", | ||||
| 	}, "", options) | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) ProjectsList(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	return k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "project.openshift.io", Version: "v1", Kind: "Project", | ||||
| 	}, "", options) | ||||
| } | ||||
							
								
								
									
										20
									
								
								pkg/kubernetes/openshift.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								pkg/kubernetes/openshift.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| ) | ||||
|  | ||||
// Openshift abstracts OpenShift detection for a cluster connection.
type Openshift interface {
	// IsOpenShift reports whether the target cluster is an OpenShift cluster.
	IsOpenShift(context.Context) bool
}
|  | ||||
| func (m *Manager) IsOpenShift(_ context.Context) bool { | ||||
| 	// This method should be fast and not block (it's called at startup) | ||||
| 	_, err := m.discoveryClient.ServerResourcesForGroupVersion(schema.GroupVersion{ | ||||
| 		Group:   "project.openshift.io", | ||||
| 		Version: "v1", | ||||
| 	}.String()) | ||||
| 	return err == nil | ||||
| } | ||||
| @@ -1,18 +1,265 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	labelutil "k8s.io/apimachinery/pkg/labels" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/apimachinery/pkg/util/intstr" | ||||
| 	"k8s.io/apimachinery/pkg/util/rand" | ||||
| 	"k8s.io/client-go/tools/remotecommand" | ||||
| 	"k8s.io/metrics/pkg/apis/metrics" | ||||
| 	metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" | ||||
| 	"k8s.io/utils/ptr" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/version" | ||||
| ) | ||||
|  | ||||
| func (k *Kubernetes) PodsListInAllNamespaces(ctx context.Context) (string, error) { | ||||
| 	return k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Pod", | ||||
| 	}, "") | ||||
// Default number of lines to retrieve from the end of the logs
const DefaultTailLines = int64(100)

// PodsTopOptions configures a PodsTop (pod resource metrics) query.
type PodsTopOptions struct {
	// ListOptions is forwarded to the metrics list call (selectors, etc.).
	metav1.ListOptions
	// AllNamespaces requests metrics across every namespace; only effective
	// when Namespace is empty (see PodsTop).
	AllNamespaces bool
	// Namespace limits the query to one namespace; defaulted when empty
	// unless AllNamespaces is set.
	Namespace string
	// Name limits the query to a single pod; empty means all pods.
	Name string
}
|  | ||||
| func (k *Kubernetes) PodsListInNamespace(ctx context.Context, namespace string) (string, error) { | ||||
| func (k *Kubernetes) PodsListInAllNamespaces(ctx context.Context, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	return k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Pod", | ||||
| 	}, namespace) | ||||
| 	}, "", options) | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) PodsListInNamespace(ctx context.Context, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	return k.ResourcesList(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Pod", | ||||
| 	}, namespace, options) | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) PodsGet(ctx context.Context, namespace, name string) (*unstructured.Unstructured, error) { | ||||
| 	return k.ResourcesGet(ctx, &schema.GroupVersionKind{ | ||||
| 		Group: "", Version: "v1", Kind: "Pod", | ||||
| 	}, k.NamespaceOrDefault(namespace), name) | ||||
| } | ||||
|  | ||||
// PodsDelete deletes the named pod. If the pod is managed by this binary
// (its managed-by label equals version.BinaryName), any Services and, on
// OpenShift, Routes carrying the matching managed labels are deleted first
// on a best-effort basis (their deletion errors are deliberately ignored).
// Returns a success message alongside the error (if any) from the pod
// deletion itself.
func (k *Kubernetes) PodsDelete(ctx context.Context, namespace, name string) (string, error) {
	namespace = k.NamespaceOrDefault(namespace)
	pod, err := k.ResourcesGet(ctx, &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, namespace, name)
	if err != nil {
		return "", err
	}

	// A pod is "managed" when it was created by this binary (see PodsRun).
	isManaged := pod.GetLabels()[AppKubernetesManagedBy] == version.BinaryName
	// Selector matching the companion resources created alongside the pod.
	managedLabelSelector := labelutil.Set{
		AppKubernetesManagedBy: version.BinaryName,
		AppKubernetesName:      pod.GetLabels()[AppKubernetesName],
	}.AsSelector()

	// Delete managed service
	if isManaged {
		services, err := k.manager.accessControlClientSet.Services(namespace)
		if err != nil {
			return "", err
		}
		// Best-effort: list/delete errors are intentionally ignored.
		if sl, _ := services.List(ctx, metav1.ListOptions{
			LabelSelector: managedLabelSelector.String(),
		}); sl != nil {
			for _, svc := range sl.Items {
				_ = services.Delete(ctx, svc.Name, metav1.DeleteOptions{})
			}
		}
	}

	// Delete managed Route (only meaningful on OpenShift clusters)
	if isManaged && k.supportsGroupVersion("route.openshift.io/v1") {
		routeResources := k.manager.dynamicClient.
			Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}).
			Namespace(namespace)
		// Best-effort: list/delete errors are intentionally ignored.
		if rl, _ := routeResources.List(ctx, metav1.ListOptions{
			LabelSelector: managedLabelSelector.String(),
		}); rl != nil {
			for _, route := range rl.Items {
				_ = routeResources.Delete(ctx, route.GetName(), metav1.DeleteOptions{})
			}
		}

	}
	return "Pod deleted successfully",
		k.ResourcesDelete(ctx, &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, namespace, name)
}
|  | ||||
| func (k *Kubernetes) PodsLog(ctx context.Context, namespace, name, container string, previous bool, tail int64) (string, error) { | ||||
| 	pods, err := k.manager.accessControlClientSet.Pods(k.NamespaceOrDefault(namespace)) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	logOptions := &v1.PodLogOptions{ | ||||
| 		Container: container, | ||||
| 		Previous:  previous, | ||||
| 	} | ||||
|  | ||||
| 	// Only set tailLines if a value is provided (non-zero) | ||||
| 	if tail > 0 { | ||||
| 		logOptions.TailLines = &tail | ||||
| 	} else { | ||||
| 		// Default to DefaultTailLines lines when not specified | ||||
| 		logOptions.TailLines = ptr.To(DefaultTailLines) | ||||
| 	} | ||||
|  | ||||
| 	req := pods.GetLogs(name, logOptions) | ||||
| 	res := req.Do(ctx) | ||||
| 	if res.Error() != nil { | ||||
| 		return "", res.Error() | ||||
| 	} | ||||
| 	rawData, err := res.Raw() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return string(rawData), nil | ||||
| } | ||||
|  | ||||
// PodsRun creates a sandbox pod running the given image, plus a ClusterIP
// Service when port > 0 and, on OpenShift, an edge-terminated Route exposing
// that service. An empty name is replaced with a generated one. All created
// objects carry managed-by labels so PodsDelete can later clean them up
// together. Returns the created (or updated) objects as unstructured values.
func (k *Kubernetes) PodsRun(ctx context.Context, namespace, name, image string, port int32) ([]*unstructured.Unstructured, error) {
	if name == "" {
		// Generate a unique sandbox name when none was provided.
		name = version.BinaryName + "-run-" + rand.String(5)
	}
	// Shared labels: identify the sandbox and mark it as managed by this binary.
	labels := map[string]string{
		AppKubernetesName:      name,
		AppKubernetesComponent: name,
		AppKubernetesManagedBy: version.BinaryName,
		AppKubernetesPartOf:    version.BinaryName + "-run-sandbox",
	}
	// NewPod
	var resources []any
	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: k.NamespaceOrDefault(namespace), Labels: labels},
		Spec: v1.PodSpec{Containers: []v1.Container{{
			Name:            name,
			Image:           image,
			ImagePullPolicy: v1.PullAlways,
		}}},
	}
	resources = append(resources, pod)
	if port > 0 {
		// Expose the container port through a ClusterIP service.
		pod.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: port}}
		resources = append(resources, &v1.Service{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Service"},
			ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: k.NamespaceOrDefault(namespace), Labels: labels},
			Spec: v1.ServiceSpec{
				Selector: labels,
				Type:     v1.ServiceTypeClusterIP,
				Ports:    []v1.ServicePort{{Port: port, TargetPort: intstr.FromInt32(port)}},
			},
		})
	}
	if port > 0 && k.supportsGroupVersion("route.openshift.io/v1") {
		// On OpenShift additionally expose the service through an
		// edge-terminated Route that redirects insecure traffic.
		resources = append(resources, &unstructured.Unstructured{
			Object: map[string]interface{}{
				"apiVersion": "route.openshift.io/v1",
				"kind":       "Route",
				"metadata": map[string]interface{}{
					"name":      name,
					"namespace": k.NamespaceOrDefault(namespace),
					"labels":    labels,
				},
				"spec": map[string]interface{}{
					"to": map[string]interface{}{
						"kind":   "Service",
						"name":   name,
						"weight": 100,
					},
					"port": map[string]interface{}{
						"targetPort": intstr.FromInt32(port),
					},
					"tls": map[string]interface{}{
						"termination":                   "edge",
						"insecureEdgeTerminationPolicy": "Redirect",
					},
				},
			},
		})

	}

	// Convert the objects to Unstructured and reuse resourcesCreateOrUpdate functionality
	converter := runtime.DefaultUnstructuredConverter
	var toCreate []*unstructured.Unstructured
	for _, obj := range resources {
		m, err := converter.ToUnstructured(obj)
		if err != nil {
			return nil, err
		}
		u := &unstructured.Unstructured{}
		if err = converter.FromUnstructured(m, u); err != nil {
			return nil, err
		}
		toCreate = append(toCreate, u)
	}
	return k.resourcesCreateOrUpdate(ctx, toCreate)
}
|  | ||||
| func (k *Kubernetes) PodsTop(ctx context.Context, options PodsTopOptions) (*metrics.PodMetricsList, error) { | ||||
| 	// TODO, maybe move to mcp Tools setup and omit in case metrics aren't available in the target cluster | ||||
| 	if !k.supportsGroupVersion(metrics.GroupName + "/" + metricsv1beta1api.SchemeGroupVersion.Version) { | ||||
| 		return nil, errors.New("metrics API is not available") | ||||
| 	} | ||||
| 	namespace := options.Namespace | ||||
| 	if options.AllNamespaces && namespace == "" { | ||||
| 		namespace = "" | ||||
| 	} else { | ||||
| 		namespace = k.NamespaceOrDefault(namespace) | ||||
| 	} | ||||
| 	return k.manager.accessControlClientSet.PodsMetricses(ctx, namespace, options.Name, options.ListOptions) | ||||
| } | ||||
|  | ||||
// PodsExec executes command inside a container of the named pod and returns
// the captured output: stdout when non-empty, otherwise stderr, otherwise an
// empty string. When container is empty, the pod's first container is used.
// Execution into completed (Succeeded/Failed) pods is rejected.
func (k *Kubernetes) PodsExec(ctx context.Context, namespace, name, container string, command []string) (string, error) {
	namespace = k.NamespaceOrDefault(namespace)
	pods, err := k.manager.accessControlClientSet.Pods(namespace)
	if err != nil {
		return "", err
	}
	pod, err := pods.Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L350-L352
	if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
		return "", fmt.Errorf("cannot exec into a container in a completed pod; current phase is %s", pod.Status.Phase)
	}
	if container == "" {
		// Default to the pod's first container when none is specified.
		container = pod.Spec.Containers[0].Name
	}
	podExecOptions := &v1.PodExecOptions{
		Container: container,
		Command:   command,
		Stdout:    true,
		Stderr:    true,
	}
	executor, err := k.manager.accessControlClientSet.PodsExec(namespace, name, podExecOptions)
	if err != nil {
		return "", err
	}
	stdout := bytes.NewBuffer(make([]byte, 0))
	stderr := bytes.NewBuffer(make([]byte, 0))
	if err = executor.StreamWithContext(ctx, remotecommand.StreamOptions{
		Stdout: stdout, Stderr: stderr, Tty: false,
	}); err != nil {
		return "", err
	}
	// Prefer stdout; fall back to stderr so error output is not lost.
	if stdout.Len() > 0 {
		return stdout.String(), nil
	}
	if stderr.Len() > 0 {
		return stderr.String(), nil
	}
	return "", nil
}
|   | ||||
							
								
								
									
										50
									
								
								pkg/kubernetes/provider.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										50
									
								
								pkg/kubernetes/provider.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,50 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
// Provider abstracts access to one or more Kubernetes clusters (targets).
type Provider interface {
	// Openshift extends the Openshift interface to provide OpenShift specific functionality to toolset providers
	// TODO: with the configurable toolset implementation and especially the multi-cluster approach
	// extending this interface might not be a good idea anymore.
	// For the kubecontext case, a user might be targeting both an OpenShift flavored cluster and a vanilla Kubernetes cluster.
	// See: https://github.com/containers/kubernetes-mcp-server/pull/372#discussion_r2421592315
	Openshift
	TokenVerifier
	// GetTargets lists the names of all targets (e.g. kubeconfig contexts).
	GetTargets(ctx context.Context) ([]string, error)
	// GetDerivedKubernetes returns a Kubernetes client for the given target.
	GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error)
	// GetDefaultTarget returns the name of the default target.
	GetDefaultTarget() string
	// GetTargetParameterName returns the tool parameter used to select a target.
	GetTargetParameterName() string
	// WatchTargets registers a callback invoked when the target set may have changed.
	WatchTargets(func() error)
	// Close releases resources held by the provider.
	Close()
}
|  | ||||
| func NewProvider(cfg *config.StaticConfig) (Provider, error) { | ||||
| 	strategy := resolveStrategy(cfg) | ||||
|  | ||||
| 	factory, err := getProviderFactory(strategy) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return factory(cfg) | ||||
| } | ||||
|  | ||||
| func resolveStrategy(cfg *config.StaticConfig) string { | ||||
| 	if cfg.ClusterProviderStrategy != "" { | ||||
| 		return cfg.ClusterProviderStrategy | ||||
| 	} | ||||
|  | ||||
| 	if cfg.KubeConfig != "" { | ||||
| 		return config.ClusterProviderKubeConfig | ||||
| 	} | ||||
|  | ||||
| 	if _, inClusterConfigErr := InClusterConfig(); inClusterConfigErr == nil { | ||||
| 		return config.ClusterProviderInCluster | ||||
| 	} | ||||
|  | ||||
| 	return config.ClusterProviderKubeConfig | ||||
| } | ||||
							
								
								
									
										131
									
								
								pkg/kubernetes/provider_kubeconfig.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										131
									
								
								pkg/kubernetes/provider_kubeconfig.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,131 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	authenticationv1api "k8s.io/api/authentication/v1" | ||||
| ) | ||||
|  | ||||
// KubeConfigTargetParameterName is the parameter name used to specify
// the kubeconfig context when using the kubeconfig cluster provider strategy.
const KubeConfigTargetParameterName = "context"

// kubeConfigClusterProvider implements Provider for managing multiple
// Kubernetes clusters using different contexts from a kubeconfig file.
// It lazily initializes managers for each context as they are requested.
type kubeConfigClusterProvider struct {
	// defaultContext is the kubeconfig current-context; its manager is
	// always initialized eagerly.
	defaultContext string
	// managers maps context name to its Manager; a nil value is a
	// placeholder for a context not yet initialized.
	managers map[string]*Manager
}

// Compile-time assertion that kubeConfigClusterProvider satisfies Provider.
var _ Provider = &kubeConfigClusterProvider{}

// init registers this implementation under the kubeconfig strategy name.
func init() {
	RegisterProvider(config.ClusterProviderKubeConfig, newKubeConfigClusterProvider)
}
|  | ||||
| // newKubeConfigClusterProvider creates a provider that manages multiple clusters | ||||
| // via kubeconfig contexts. | ||||
| // Internally, it leverages a KubeconfigManager for each context, initializing them | ||||
| // lazily when requested. | ||||
| func newKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) { | ||||
| 	m, err := NewKubeconfigManager(cfg, "") | ||||
| 	if err != nil { | ||||
| 		if errors.Is(err, ErrorKubeconfigInClusterNotAllowed) { | ||||
| 			return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments: %v", err) | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	rawConfig, err := m.clientCmdConfig.RawConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	allClusterManagers := map[string]*Manager{ | ||||
| 		rawConfig.CurrentContext: m, // we already initialized a manager for the default context, let's use it | ||||
| 	} | ||||
|  | ||||
| 	for name := range rawConfig.Contexts { | ||||
| 		if name == rawConfig.CurrentContext { | ||||
| 			continue // already initialized this, don't want to set it to nil | ||||
| 		} | ||||
|  | ||||
| 		allClusterManagers[name] = nil | ||||
| 	} | ||||
|  | ||||
| 	return &kubeConfigClusterProvider{ | ||||
| 		defaultContext: rawConfig.CurrentContext, | ||||
| 		managers:       allClusterManagers, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, error) { | ||||
| 	m, ok := p.managers[context] | ||||
| 	if ok && m != nil { | ||||
| 		return m, nil | ||||
| 	} | ||||
|  | ||||
| 	baseManager := p.managers[p.defaultContext] | ||||
|  | ||||
| 	m, err := NewKubeconfigManager(baseManager.staticConfig, context) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	p.managers[context] = m | ||||
|  | ||||
| 	return m, nil | ||||
| } | ||||
|  | ||||
// IsOpenShift reports whether the default context's cluster is OpenShift.
// Note: only the default context is consulted; other contexts may differ.
func (p *kubeConfigClusterProvider) IsOpenShift(ctx context.Context) bool {
	return p.managers[p.defaultContext].IsOpenShift(ctx)
}
|  | ||||
| func (p *kubeConfigClusterProvider) VerifyToken(ctx context.Context, context, token, audience string) (*authenticationv1api.UserInfo, []string, error) { | ||||
| 	m, err := p.managerForContext(context) | ||||
| 	if err != nil { | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 	return m.VerifyToken(ctx, token, audience) | ||||
| } | ||||
|  | ||||
| func (p *kubeConfigClusterProvider) GetTargets(_ context.Context) ([]string, error) { | ||||
| 	contextNames := make([]string, 0, len(p.managers)) | ||||
| 	for contextName := range p.managers { | ||||
| 		contextNames = append(contextNames, contextName) | ||||
| 	} | ||||
|  | ||||
| 	return contextNames, nil | ||||
| } | ||||
|  | ||||
// GetTargetParameterName returns the tool parameter name ("context") used to
// select a kubeconfig context.
func (p *kubeConfigClusterProvider) GetTargetParameterName() string {
	return KubeConfigTargetParameterName
}
|  | ||||
| func (p *kubeConfigClusterProvider) GetDerivedKubernetes(ctx context.Context, context string) (*Kubernetes, error) { | ||||
| 	m, err := p.managerForContext(context) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return m.Derived(ctx) | ||||
| } | ||||
|  | ||||
// GetDefaultTarget returns the kubeconfig current-context name.
func (p *kubeConfigClusterProvider) GetDefaultTarget() string {
	return p.defaultContext
}
|  | ||||
| func (p *kubeConfigClusterProvider) WatchTargets(onKubeConfigChanged func() error) { | ||||
| 	m := p.managers[p.defaultContext] | ||||
|  | ||||
| 	m.WatchKubeConfig(onKubeConfigChanged) | ||||
| } | ||||
|  | ||||
| func (p *kubeConfigClusterProvider) Close() { | ||||
| 	m := p.managers[p.defaultContext] | ||||
|  | ||||
| 	m.Close() | ||||
| } | ||||
							
								
								
									
										151
									
								
								pkg/kubernetes/provider_kubeconfig_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										151
									
								
								pkg/kubernetes/provider_kubeconfig_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,151 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| ) | ||||
|  | ||||
// ProviderKubeconfigTestSuite exercises the kubeconfig-strategy Provider
// against a mock API server with multiple kubeconfig contexts.
type ProviderKubeconfigTestSuite struct {
	BaseProviderSuite
	// mockServer is the fake Kubernetes API server backing the default context.
	mockServer *test.MockServer
	// provider is the Provider under test, rebuilt for each test in SetupTest.
	provider Provider
}
|  | ||||
// SetupTest starts a fresh mock server and builds a provider from a
// kubeconfig containing the mock server's context plus ten fake ones.
func (s *ProviderKubeconfigTestSuite) SetupTest() {
	// Kubeconfig provider is used when the multi-cluster feature is enabled with the kubeconfig strategy.
	// For this test suite we simulate a kubeconfig with multiple contexts.
	s.mockServer = test.NewMockServer()
	kubeconfig := s.mockServer.Kubeconfig()
	for i := 0; i < 10; i++ {
		// Add multiple fake contexts to force multi-cluster behavior
		kubeconfig.Contexts[fmt.Sprintf("context-%d", i)] = clientcmdapi.NewContext()
	}
	provider, err := NewProvider(&config.StaticConfig{KubeConfig: test.KubeconfigFile(s.T(), kubeconfig)})
	s.Require().NoError(err, "Expected no error creating provider with kubeconfig")
	s.provider = provider
}
|  | ||||
| func (s *ProviderKubeconfigTestSuite) TearDownTest() { | ||||
| 	if s.mockServer != nil { | ||||
| 		s.mockServer.Close() | ||||
| 	} | ||||
| } | ||||
|  | ||||
// TestType verifies NewProvider selected the kubeconfig provider implementation.
func (s *ProviderKubeconfigTestSuite) TestType() {
	s.IsType(&kubeConfigClusterProvider{}, s.provider)
}
|  | ||||
// TestWithNonOpenShiftCluster checks IsOpenShift is false against the plain mock server.
func (s *ProviderKubeconfigTestSuite) TestWithNonOpenShiftCluster() {
	s.Run("IsOpenShift returns false", func() {
		inOpenShift := s.provider.IsOpenShift(s.T().Context())
		s.False(inOpenShift, "Expected InOpenShift to return false")
	})
}
|  | ||||
// TestWithOpenShiftCluster checks IsOpenShift is true once the mock server
// serves the OpenShift marker API group.
func (s *ProviderKubeconfigTestSuite) TestWithOpenShiftCluster() {
	s.mockServer.Handle(&test.InOpenShiftHandler{})
	s.Run("IsOpenShift returns true", func() {
		inOpenShift := s.provider.IsOpenShift(s.T().Context())
		s.True(inOpenShift, "Expected InOpenShift to return true")
	})
}
|  | ||||
| func (s *ProviderKubeconfigTestSuite) TestVerifyToken() { | ||||
| 	s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 		if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" { | ||||
| 			w.Header().Set("Content-Type", "application/json") | ||||
| 			_, _ = w.Write([]byte(` | ||||
| 				{ | ||||
| 					"kind": "TokenReview", | ||||
| 					"apiVersion": "authentication.k8s.io/v1", | ||||
| 					"spec": {"token": "the-token"}, | ||||
| 					"status": { | ||||
| 						"authenticated": true, | ||||
| 						"user": { | ||||
| 							"username": "test-user", | ||||
| 							"groups": ["system:authenticated"] | ||||
| 						}, | ||||
| 						"audiences": ["the-audience"] | ||||
| 					} | ||||
| 				}`)) | ||||
| 		} | ||||
| 	})) | ||||
| 	s.Run("VerifyToken returns UserInfo for non-empty context", func() { | ||||
| 		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "fake-context", "some-token", "the-audience") | ||||
| 		s.Require().NoError(err, "Expected no error from VerifyToken with empty target") | ||||
| 		s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target") | ||||
| 		s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username) | ||||
| 		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups) | ||||
| 		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target") | ||||
| 		s.Len(audiences, 1, "Expected audiences from VerifyToken with empty target") | ||||
| 		s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences) | ||||
| 	}) | ||||
| 	s.Run("VerifyToken returns UserInfo for empty context (default context)", func() { | ||||
| 		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience") | ||||
| 		s.Require().NoError(err, "Expected no error from VerifyToken with empty target") | ||||
| 		s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target") | ||||
| 		s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username) | ||||
| 		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups) | ||||
| 		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target") | ||||
| 		s.Len(audiences, 1, "Expected audiences from VerifyToken with empty target") | ||||
| 		s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences) | ||||
| 	}) | ||||
| 	s.Run("VerifyToken returns error for invalid context", func() { | ||||
| 		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "invalid-context", "some-token", "the-audience") | ||||
| 		s.Require().Error(err, "Expected error from VerifyToken with invalid target") | ||||
| 		s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err) | ||||
| 		s.Nil(userInfo, "Expected no UserInfo from VerifyToken with invalid target") | ||||
| 		s.Nil(audiences, "Expected no audiences from VerifyToken with invalid target") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestGetTargets verifies every kubeconfig context (the mock-server one plus
// the ten fakes added in SetupTest) is reported as a target.
func (s *ProviderKubeconfigTestSuite) TestGetTargets() {
	s.Run("GetTargets returns all contexts defined in kubeconfig", func() {
		targets, err := s.provider.GetTargets(s.T().Context())
		s.Require().NoError(err, "Expected no error from GetTargets")
		s.Len(targets, 11, "Expected 11 targets from GetTargets")
		s.Contains(targets, "fake-context", "Expected fake-context in targets from GetTargets")
		for i := 0; i < 10; i++ {
			s.Contains(targets, fmt.Sprintf("context-%d", i), "Expected context-%d in targets from GetTargets", i)
		}
	})
}
|  | ||||
// TestGetDerivedKubernetes verifies client derivation for a valid context,
// the default (empty) context, and an invalid context.
func (s *ProviderKubeconfigTestSuite) TestGetDerivedKubernetes() {
	s.Run("GetDerivedKubernetes returns Kubernetes for valid context", func() {
		k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "fake-context")
		s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with valid context")
		s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with valid context")
	})
	s.Run("GetDerivedKubernetes returns Kubernetes for empty context (default)", func() {
		k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "")
		s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty context")
		s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty context")
	})
	s.Run("GetDerivedKubernetes returns error for invalid context", func() {
		k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "invalid-context")
		s.Require().Error(err, "Expected error from GetDerivedKubernetes with invalid context")
		s.ErrorContainsf(err, `context "invalid-context" does not exist`, "Expected context does not exist error, got: %v", err)
		s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with invalid context")
	})
}
|  | ||||
| func (s *ProviderKubeconfigTestSuite) TestGetDefaultTarget() { | ||||
| 	s.Run("GetDefaultTarget returns current-context defined in kubeconfig", func() { | ||||
| 		s.Equal("fake-context", s.provider.GetDefaultTarget(), "Expected fake-context as default target") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderKubeconfigTestSuite) TestGetTargetParameterName() { | ||||
| 	s.Equal("context", s.provider.GetTargetParameterName(), "Expected context as target parameter name") | ||||
| } | ||||
|  | ||||
| func TestProviderKubeconfig(t *testing.T) { | ||||
| 	suite.Run(t, new(ProviderKubeconfigTestSuite)) | ||||
| } | ||||
							
								
								
									
										47
									
								
								pkg/kubernetes/provider_registry.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								pkg/kubernetes/provider_registry.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,47 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
// ProviderFactory creates a new Provider instance for a given strategy.
// Implementations should validate that the Manager is compatible with their strategy
// (e.g., kubeconfig provider should reject in-cluster managers).
type ProviderFactory func(cfg *config.StaticConfig) (Provider, error)

// providerFactories maps strategy names to their factories. It is populated
// by RegisterProvider from init() functions; as a plain map it is not safe
// for concurrent mutation after init.
var providerFactories = make(map[string]ProviderFactory)
|  | ||||
| // RegisterProvider registers a provider factory for a given strategy name. | ||||
| // This should be called from init() functions in provider implementation files. | ||||
| // Panics if a provider is already registered for the given strategy. | ||||
| func RegisterProvider(strategy string, factory ProviderFactory) { | ||||
| 	if _, exists := providerFactories[strategy]; exists { | ||||
| 		panic(fmt.Sprintf("provider already registered for strategy '%s'", strategy)) | ||||
| 	} | ||||
| 	providerFactories[strategy] = factory | ||||
| } | ||||
|  | ||||
| // getProviderFactory retrieves a registered provider factory by strategy name. | ||||
| // Returns an error if no provider is registered for the given strategy. | ||||
| func getProviderFactory(strategy string) (ProviderFactory, error) { | ||||
| 	factory, ok := providerFactories[strategy] | ||||
| 	if !ok { | ||||
| 		available := GetRegisteredStrategies() | ||||
| 		return nil, fmt.Errorf("no provider registered for strategy '%s', available strategies: %v", strategy, available) | ||||
| 	} | ||||
| 	return factory, nil | ||||
| } | ||||
|  | ||||
| // GetRegisteredStrategies returns a sorted list of all registered strategy names. | ||||
| // This is useful for error messages and debugging. | ||||
| func GetRegisteredStrategies() []string { | ||||
| 	strategies := make([]string, 0, len(providerFactories)) | ||||
| 	for strategy := range providerFactories { | ||||
| 		strategies = append(strategies, strategy) | ||||
| 	} | ||||
| 	sort.Strings(strategies) | ||||
| 	return strategies | ||||
| } | ||||
							
								
								
									
										56
									
								
								pkg/kubernetes/provider_registry_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								pkg/kubernetes/provider_registry_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,56 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| ) | ||||
|  | ||||
// ProviderRegistryTestSuite exercises the strategy -> factory registry.
// It embeds BaseProviderSuite, which snapshots and restores the global
// providerFactories map around each test.
type ProviderRegistryTestSuite struct {
	BaseProviderSuite
}
|  | ||||
| func (s *ProviderRegistryTestSuite) TestRegisterProvider() { | ||||
| 	s.Run("With no pre-existing provider, registers the provider", func() { | ||||
| 		RegisterProvider("test-strategy", func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 			return nil, nil | ||||
| 		}) | ||||
| 		_, exists := providerFactories["test-strategy"] | ||||
| 		s.True(exists, "Provider should be registered") | ||||
| 	}) | ||||
| 	s.Run("With pre-existing provider, panics", func() { | ||||
| 		RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 			return nil, nil | ||||
| 		}) | ||||
| 		s.Panics(func() { | ||||
| 			RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 				return nil, nil | ||||
| 			}) | ||||
| 		}, "Registering a provider with an existing strategy should panic") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() { | ||||
| 	s.Run("With no registered providers, returns empty list", func() { | ||||
| 		providerFactories = make(map[string]ProviderFactory) | ||||
| 		strategies := GetRegisteredStrategies() | ||||
| 		s.Empty(strategies, "No strategies should be registered") | ||||
| 	}) | ||||
| 	s.Run("With multiple registered providers, returns sorted list", func() { | ||||
| 		providerFactories = make(map[string]ProviderFactory) | ||||
| 		RegisterProvider("foo-strategy", func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 			return nil, nil | ||||
| 		}) | ||||
| 		RegisterProvider("bar-strategy", func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 			return nil, nil | ||||
| 		}) | ||||
| 		strategies := GetRegisteredStrategies() | ||||
| 		expected := []string{"bar-strategy", "foo-strategy"} | ||||
| 		s.Equal(expected, strategies, "Strategies should be sorted alphabetically") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestProviderRegistry(t *testing.T) { | ||||
| 	suite.Run(t, new(ProviderRegistryTestSuite)) | ||||
| } | ||||
							
								
								
									
										94
									
								
								pkg/kubernetes/provider_single.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										94
									
								
								pkg/kubernetes/provider_single.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,94 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	authenticationv1api "k8s.io/api/authentication/v1" | ||||
| ) | ||||
|  | ||||
// singleClusterProvider implements Provider for managing a single
// Kubernetes cluster. Used for in-cluster deployments or when multi-cluster
// support is disabled.
type singleClusterProvider struct {
	strategy string // strategy name this instance was registered under (in-cluster or disabled)
	manager  *Manager
}

// Compile-time assertion that singleClusterProvider satisfies Provider.
var _ Provider = &singleClusterProvider{}

// init registers the single-cluster factory for both the in-cluster and
// disabled strategies; both resolve to the same implementation.
func init() {
	RegisterProvider(config.ClusterProviderInCluster, newSingleClusterProvider(config.ClusterProviderInCluster))
	RegisterProvider(config.ClusterProviderDisabled, newSingleClusterProvider(config.ClusterProviderDisabled))
}
|  | ||||
| // newSingleClusterProvider creates a provider that manages a single cluster. | ||||
| // When used within a cluster or with an 'in-cluster' strategy, it uses an InClusterManager. | ||||
| // Otherwise, it uses a KubeconfigManager. | ||||
| func newSingleClusterProvider(strategy string) ProviderFactory { | ||||
| 	return func(cfg *config.StaticConfig) (Provider, error) { | ||||
| 		if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster { | ||||
| 			return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig) | ||||
| 		} | ||||
|  | ||||
| 		var m *Manager | ||||
| 		var err error | ||||
| 		if strategy == config.ClusterProviderInCluster || IsInCluster(cfg) { | ||||
| 			m, err = NewInClusterManager(cfg) | ||||
| 		} else { | ||||
| 			m, err = NewKubeconfigManager(cfg, "") | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			if errors.Is(err, ErrorInClusterNotInCluster) { | ||||
| 				return nil, fmt.Errorf("server must be deployed in cluster for the %s ClusterProviderStrategy: %v", strategy, err) | ||||
| 			} | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		return &singleClusterProvider{ | ||||
| 			manager:  m, | ||||
| 			strategy: strategy, | ||||
| 		}, nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
// IsOpenShift reports whether the managed cluster is an OpenShift cluster,
// delegating to the underlying Manager.
func (p *singleClusterProvider) IsOpenShift(ctx context.Context) bool {
	return p.manager.IsOpenShift(ctx)
}
|  | ||||
| func (p *singleClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) { | ||||
| 	if target != "" { | ||||
| 		return nil, nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy) | ||||
| 	} | ||||
| 	return p.manager.VerifyToken(ctx, token, audience) | ||||
| } | ||||
|  | ||||
// GetTargets returns the single addressable target, identified by the empty
// string.
func (p *singleClusterProvider) GetTargets(_ context.Context) ([]string, error) {
	return []string{""}, nil
}
|  | ||||
| func (p *singleClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) { | ||||
| 	if target != "" { | ||||
| 		return nil, fmt.Errorf("unable to get manager for other context/cluster with %s strategy", p.strategy) | ||||
| 	} | ||||
|  | ||||
| 	return p.manager.Derived(ctx) | ||||
| } | ||||
|  | ||||
// GetDefaultTarget returns the default target identifier; the single managed
// cluster is always addressed by the empty string.
func (p *singleClusterProvider) GetDefaultTarget() string {
	return ""
}
|  | ||||
// GetTargetParameterName returns the tool parameter name used to select a
// target; empty because this provider exposes no target selection.
func (p *singleClusterProvider) GetTargetParameterName() string {
	return ""
}
|  | ||||
// WatchTargets registers a callback to be invoked on kubeconfig changes,
// delegating to the Manager's kubeconfig watcher.
func (p *singleClusterProvider) WatchTargets(watch func() error) {
	p.manager.WatchKubeConfig(watch)
}
|  | ||||
// Close releases resources held by the underlying Manager.
func (p *singleClusterProvider) Close() {
	p.manager.Close()
}
							
								
								
									
										133
									
								
								pkg/kubernetes/provider_single_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										133
									
								
								pkg/kubernetes/provider_single_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,133 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"k8s.io/client-go/rest" | ||||
| ) | ||||
|  | ||||
// ProviderSingleTestSuite exercises the singleClusterProvider against a mock
// API server while simulating an in-cluster deployment.
type ProviderSingleTestSuite struct {
	BaseProviderSuite
	mockServer                *test.MockServer
	originalIsInClusterConfig func() (*rest.Config, error) // saved so TearDownTest can restore the stubbed InClusterConfig
	provider                  Provider
}
|  | ||||
| func (s *ProviderSingleTestSuite) SetupTest() { | ||||
| 	// Single cluster provider is used when in-cluster or when the multi-cluster feature is disabled. | ||||
| 	// For this test suite we simulate an in-cluster deployment. | ||||
| 	s.originalIsInClusterConfig = InClusterConfig | ||||
| 	s.mockServer = test.NewMockServer() | ||||
| 	InClusterConfig = func() (*rest.Config, error) { | ||||
| 		return s.mockServer.Config(), nil | ||||
| 	} | ||||
| 	provider, err := NewProvider(&config.StaticConfig{}) | ||||
| 	s.Require().NoError(err, "Expected no error creating provider with kubeconfig") | ||||
| 	s.provider = provider | ||||
| } | ||||
|  | ||||
// TearDownTest restores the original InClusterConfig and shuts down the mock
// API server started in SetupTest.
func (s *ProviderSingleTestSuite) TearDownTest() {
	InClusterConfig = s.originalIsInClusterConfig
	if s.mockServer != nil {
		s.mockServer.Close()
	}
}
|  | ||||
// TestType asserts that NewProvider resolved to the single-cluster
// implementation for this environment.
func (s *ProviderSingleTestSuite) TestType() {
	s.IsType(&singleClusterProvider{}, s.provider)
}
|  | ||||
| func (s *ProviderSingleTestSuite) TestWithNonOpenShiftCluster() { | ||||
| 	s.Run("IsOpenShift returns false", func() { | ||||
| 		inOpenShift := s.provider.IsOpenShift(s.T().Context()) | ||||
| 		s.False(inOpenShift, "Expected InOpenShift to return false") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderSingleTestSuite) TestWithOpenShiftCluster() { | ||||
| 	s.mockServer.Handle(&test.InOpenShiftHandler{}) | ||||
| 	s.Run("IsOpenShift returns true", func() { | ||||
| 		inOpenShift := s.provider.IsOpenShift(s.T().Context()) | ||||
| 		s.True(inOpenShift, "Expected InOpenShift to return true") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestVerifyToken exercises token verification against the mock API server's
// TokenReview endpoint: the empty (default) target succeeds, and any
// non-empty target is rejected by the single-cluster provider.
func (s *ProviderSingleTestSuite) TestVerifyToken() {
	// Stub the TokenReview endpoint to report an authenticated user with one audience.
	s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if req.URL.EscapedPath() == "/apis/authentication.k8s.io/v1/tokenreviews" {
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`
				{
					"kind": "TokenReview",
					"apiVersion": "authentication.k8s.io/v1",
					"spec": {"token": "the-token"},
					"status": {
						"authenticated": true,
						"user": {
							"username": "test-user",
							"groups": ["system:authenticated"]
						},
						"audiences": ["the-audience"]
					}
				}`))
		}
	}))
	s.Run("VerifyToken returns UserInfo for empty target (default target)", func() {
		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "", "the-token", "the-audience")
		s.Require().NoError(err, "Expected no error from VerifyToken with empty target")
		s.Require().NotNil(userInfo, "Expected UserInfo from VerifyToken with empty target")
		s.Equalf(userInfo.Username, "test-user", "Expected username test-user, got: %s", userInfo.Username)
		s.Containsf(userInfo.Groups, "system:authenticated", "Expected group system:authenticated in %v", userInfo.Groups)
		s.Require().NotNil(audiences, "Expected audiences from VerifyToken with empty target")
		s.Len(audiences, 1, "Expected audiences from VerifyToken with empty target")
		s.Containsf(audiences, "the-audience", "Expected audience the-audience in %v", audiences)
	})
	// The single-cluster provider cannot address other clusters, so a
	// non-empty target must fail before reaching the API server.
	s.Run("VerifyToken returns error for non-empty context", func() {
		userInfo, audiences, err := s.provider.VerifyToken(s.T().Context(), "non-empty", "the-token", "the-audience")
		s.Require().Error(err, "Expected error from VerifyToken with non-empty target")
		s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster")
		s.Nil(userInfo, "Expected no UserInfo from VerifyToken with non-empty target")
		s.Nil(audiences, "Expected no audiences from VerifyToken with non-empty target")
	})
}
|  | ||||
| func (s *ProviderSingleTestSuite) TestGetTargets() { | ||||
| 	s.Run("GetTargets returns single empty target", func() { | ||||
| 		targets, err := s.provider.GetTargets(s.T().Context()) | ||||
| 		s.Require().NoError(err, "Expected no error from GetTargets") | ||||
| 		s.Len(targets, 1, "Expected 1 targets from GetTargets") | ||||
| 		s.Contains(targets, "", "Expected empty target from GetTargets") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderSingleTestSuite) TestGetDerivedKubernetes() { | ||||
| 	s.Run("GetDerivedKubernetes returns Kubernetes for empty target", func() { | ||||
| 		k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "") | ||||
| 		s.Require().NoError(err, "Expected no error from GetDerivedKubernetes with empty target") | ||||
| 		s.NotNil(k8s, "Expected Kubernetes from GetDerivedKubernetes with empty target") | ||||
| 	}) | ||||
| 	s.Run("GetDerivedKubernetes returns error for non-empty target", func() { | ||||
| 		k8s, err := s.provider.GetDerivedKubernetes(s.T().Context(), "non-empty-target") | ||||
| 		s.Require().Error(err, "Expected error from GetDerivedKubernetes with non-empty target") | ||||
| 		s.ErrorContains(err, "unable to get manager for other context/cluster with in-cluster strategy", "Expected error about trying to get other cluster") | ||||
| 		s.Nil(k8s, "Expected no Kubernetes from GetDerivedKubernetes with non-empty target") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *ProviderSingleTestSuite) TestGetDefaultTarget() { | ||||
| 	s.Run("GetDefaultTarget returns empty string", func() { | ||||
| 		s.Empty(s.provider.GetDefaultTarget(), "Expected fake-context as default target") | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestGetTargetParameterName verifies the single-cluster provider exposes no
// target-selection parameter.
func (s *ProviderSingleTestSuite) TestGetTargetParameterName() {
	s.Empty(s.provider.GetTargetParameterName(), "Expected empty string as target parameter name")
}
|  | ||||
| func TestProviderSingle(t *testing.T) { | ||||
| 	suite.Run(t, new(ProviderSingleTestSuite)) | ||||
| } | ||||
							
								
								
									
										170
									
								
								pkg/kubernetes/provider_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										170
									
								
								pkg/kubernetes/provider_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,170 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"k8s.io/client-go/rest" | ||||
| ) | ||||
|  | ||||
// BaseProviderSuite snapshots the global providerFactories registry in
// SetupTest and restores it in TearDownTest, so tests may freely mutate the
// registry without leaking state into other suites.
type BaseProviderSuite struct {
	suite.Suite
	originalProviderFactories map[string]ProviderFactory // snapshot taken in SetupTest
}
|  | ||||
| func (s *BaseProviderSuite) SetupTest() { | ||||
| 	s.originalProviderFactories = make(map[string]ProviderFactory) | ||||
| 	for k, v := range providerFactories { | ||||
| 		s.originalProviderFactories[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *BaseProviderSuite) TearDownTest() { | ||||
| 	providerFactories = make(map[string]ProviderFactory) | ||||
| 	for k, v := range s.originalProviderFactories { | ||||
| 		providerFactories[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
// ProviderTestSuite exercises NewProvider strategy resolution for both
// in-cluster and local (kubeconfig) environments.
type ProviderTestSuite struct {
	BaseProviderSuite
	originalEnv             []string                     // process environment snapshot, restored in TearDownTest
	originalInClusterConfig func() (*rest.Config, error) // saved so the InClusterConfig stub can be undone
	mockServer              *test.MockServer
	kubeconfigPath          string // path to a kubeconfig pointing at mockServer, escaped for TOML embedding
}
|  | ||||
// SetupTest snapshots the factory registry (via the base suite), the process
// environment, and InClusterConfig, then starts a mock API server and writes
// a kubeconfig file pointing at it.
func (s *ProviderTestSuite) SetupTest() {
	s.BaseProviderSuite.SetupTest()
	s.originalEnv = os.Environ()
	s.originalInClusterConfig = InClusterConfig
	s.mockServer = test.NewMockServer()
	// Escape backslashes so Windows paths survive embedding in TOML strings.
	s.kubeconfigPath = strings.ReplaceAll(s.mockServer.KubeconfigFile(s.T()), `\`, `\\`)
}
|  | ||||
// TearDownTest restores the factory registry, the process environment, and
// InClusterConfig, then shuts down the mock API server.
func (s *ProviderTestSuite) TearDownTest() {
	s.BaseProviderSuite.TearDownTest()
	test.RestoreEnv(s.originalEnv)
	InClusterConfig = s.originalInClusterConfig
	if s.mockServer != nil {
		s.mockServer.Close()
	}
}
|  | ||||
// TestNewProviderInCluster verifies strategy resolution when InClusterConfig
// succeeds (i.e. the server believes it runs inside a cluster): the default
// and explicit in-cluster strategies yield the single-cluster provider, the
// kubeconfig strategy needs an explicit kubeconfig path, and unknown
// strategies are rejected.
func (s *ProviderTestSuite) TestNewProviderInCluster() {
	// Stub InClusterConfig to simulate an in-cluster deployment.
	InClusterConfig = func() (*rest.Config, error) {
		return &rest.Config{}, nil
	}
	s.Run("With no cluster_provider_strategy, returns single-cluster provider", func() {
		cfg := test.Must(config.ReadToml([]byte{}))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for in-cluster provider")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
	})
	s.Run("With cluster_provider_strategy=in-cluster, returns single-cluster provider", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "in-cluster"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for single-cluster strategy")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
	})
	// Without an explicit kubeconfig path, the kubeconfig strategy is invalid in-cluster.
	s.Run("With cluster_provider_strategy=kubeconfig, returns error", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "kubeconfig"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().Error(err, "Expected error for kubeconfig strategy")
		s.ErrorContains(err, "kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments")
		s.Nilf(provider, "Expected no provider instance, got %v", provider)
	})
	s.Run("With cluster_provider_strategy=kubeconfig and kubeconfig set to valid path, returns kubeconfig provider", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "kubeconfig"
			kubeconfig = "` + s.kubeconfigPath + `"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for kubeconfig strategy")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
	})
	s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "i-do-not-exist"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().Error(err, "Expected error for non-existent strategy")
		s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
		s.Nilf(provider, "Expected no provider instance, got %v", provider)
	})
}
|  | ||||
// TestNewProviderLocal verifies strategy resolution when InClusterConfig
// fails (a local deployment with KUBECONFIG set): the default and kubeconfig
// strategies yield the kubeconfig provider, disabled yields the
// single-cluster provider, and in-cluster or unknown strategies error out.
func (s *ProviderTestSuite) TestNewProviderLocal() {
	// Stub InClusterConfig to simulate running outside a cluster.
	InClusterConfig = func() (*rest.Config, error) {
		return nil, rest.ErrNotInCluster
	}
	// Point KUBECONFIG at the mock server's kubeconfig (restored by TearDownTest via RestoreEnv).
	s.Require().NoError(os.Setenv("KUBECONFIG", s.kubeconfigPath))
	s.Run("With no cluster_provider_strategy, returns kubeconfig provider", func() {
		cfg := test.Must(config.ReadToml([]byte{}))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for kubeconfig provider")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
	})
	s.Run("With cluster_provider_strategy=kubeconfig, returns kubeconfig provider", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "kubeconfig"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for kubeconfig provider")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type")
	})
	s.Run("With cluster_provider_strategy=disabled, returns single-cluster provider", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "disabled"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().NoError(err, "Expected no error for disabled strategy")
		s.NotNil(provider, "Expected provider instance")
		s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type")
	})
	s.Run("With cluster_provider_strategy=in-cluster, returns error", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "in-cluster"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().Error(err, "Expected error for in-cluster strategy")
		s.ErrorContains(err, "server must be deployed in cluster for the in-cluster ClusterProviderStrategy")
		s.Nilf(provider, "Expected no provider instance, got %v", provider)
	})
	s.Run("With cluster_provider_strategy=in-cluster and kubeconfig set to valid path, returns error", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			kubeconfig = "` + s.kubeconfigPath + `"
			cluster_provider_strategy = "in-cluster"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().Error(err, "Expected error for in-cluster strategy")
		s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error())
		s.Nilf(provider, "Expected no provider instance, got %v", provider)
	})
	s.Run("With cluster_provider_strategy=non-existent, returns error", func() {
		cfg := test.Must(config.ReadToml([]byte(`
			cluster_provider_strategy = "i-do-not-exist"
		`)))
		provider, err := NewProvider(cfg)
		s.Require().Error(err, "Expected error for non-existent strategy")
		s.ErrorContains(err, "no provider registered for strategy 'i-do-not-exist'")
		s.Nilf(provider, "Expected no provider instance, got %v", provider)
	})
}
|  | ||||
| func TestProvider(t *testing.T) { | ||||
| 	suite.Run(t, new(ProviderTestSuite)) | ||||
| } | ||||
| @@ -2,51 +2,208 @@ package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/version" | ||||
| 	authv1 "k8s.io/api/authorization/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/client-go/discovery" | ||||
| 	memory "k8s.io/client-go/discovery/cached" | ||||
| 	"k8s.io/client-go/dynamic" | ||||
| 	"k8s.io/client-go/restmapper" | ||||
| 	"k8s.io/apimachinery/pkg/util/yaml" | ||||
| ) | ||||
|  | ||||
| // TODO: WIP | ||||
| func (k *Kubernetes) ResourcesList(ctx context.Context, gvk *schema.GroupVersionKind, namespace string) (string, error) { | ||||
| 	client, err := dynamic.NewForConfig(k.cfg) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	gvr, err := k.resourceFor(gvk) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	rl, err := client.Resource(*gvr).Namespace(namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return marshal(rl.Items) | ||||
| const ( | ||||
| 	AppKubernetesComponent = "app.kubernetes.io/component" | ||||
| 	AppKubernetesManagedBy = "app.kubernetes.io/managed-by" | ||||
| 	AppKubernetesName      = "app.kubernetes.io/name" | ||||
| 	AppKubernetesPartOf    = "app.kubernetes.io/part-of" | ||||
| ) | ||||
|  | ||||
| type ResourceListOptions struct { | ||||
| 	metav1.ListOptions | ||||
| 	AsTable bool | ||||
| } | ||||
|  | ||||
| func marshal(v any) (string, error) { | ||||
| 	ret, err := json.Marshal(v) | ||||
| func (k *Kubernetes) ResourcesList(ctx context.Context, gvk *schema.GroupVersionKind, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	gvr, err := k.resourceFor(gvk) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return string(ret), nil | ||||
|  | ||||
| 	// Check if operation is allowed for all namespaces (applicable for namespaced resources) | ||||
| 	isNamespaced, _ := k.isNamespaced(gvk) | ||||
| 	if isNamespaced && !k.canIUse(ctx, gvr, namespace, "list") && namespace == "" { | ||||
| 		namespace = k.manager.configuredNamespace() | ||||
| 	} | ||||
| 	if options.AsTable { | ||||
| 		return k.resourcesListAsTable(ctx, gvk, gvr, namespace, options) | ||||
| 	} | ||||
| 	return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).List(ctx, options.ListOptions) | ||||
| } | ||||
|  | ||||
// ResourcesGet fetches a single resource by GroupVersionKind, namespace, and
// name using the dynamic client. For namespaced resources an empty namespace
// is replaced with the configured default.
func (k *Kubernetes) ResourcesGet(ctx context.Context, gvk *schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) {
	gvr, err := k.resourceFor(gvk)
	if err != nil {
		return nil, err
	}

	// If it's a namespaced resource and namespace wasn't provided, try to use the default configured one
	if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced {
		namespace = k.NamespaceOrDefault(namespace)
	}
	return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
}
|  | ||||
| func (k *Kubernetes) ResourcesCreateOrUpdate(ctx context.Context, resource string) ([]*unstructured.Unstructured, error) { | ||||
| 	separator := regexp.MustCompile(`\r?\n---\r?\n`) | ||||
| 	resources := separator.Split(resource, -1) | ||||
| 	var parsedResources []*unstructured.Unstructured | ||||
| 	for _, r := range resources { | ||||
| 		var obj unstructured.Unstructured | ||||
| 		if err := yaml.NewYAMLToJSONDecoder(strings.NewReader(r)).Decode(&obj); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		parsedResources = append(parsedResources, &obj) | ||||
| 	} | ||||
| 	return k.resourcesCreateOrUpdate(ctx, parsedResources) | ||||
| } | ||||
|  | ||||
// ResourcesDelete deletes a resource by GroupVersionKind, namespace, and name
// using the dynamic client. For namespaced resources an empty namespace is
// replaced with the configured default.
func (k *Kubernetes) ResourcesDelete(ctx context.Context, gvk *schema.GroupVersionKind, namespace, name string) error {
	gvr, err := k.resourceFor(gvk)
	if err != nil {
		return err
	}

	// If it's a namespaced resource and namespace wasn't provided, try to use the default configured one
	if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced {
		namespace = k.NamespaceOrDefault(namespace)
	}
	return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}
|  | ||||
| // resourcesListAsTable retrieves a list of resources in a table format. | ||||
| // It's almost identical to the dynamic.DynamicClient implementation, but it uses a specific Accept header to request the table format. | ||||
| // dynamic.DynamicClient does not provide a way to set the HTTP header (TODO: create an issue to request this feature) | ||||
| func (k *Kubernetes) resourcesListAsTable(ctx context.Context, gvk *schema.GroupVersionKind, gvr *schema.GroupVersionResource, namespace string, options ResourceListOptions) (runtime.Unstructured, error) { | ||||
| 	var url []string | ||||
| 	if len(gvr.Group) == 0 { | ||||
| 		url = append(url, "api") | ||||
| 	} else { | ||||
| 		url = append(url, "apis", gvr.Group) | ||||
| 	} | ||||
| 	url = append(url, gvr.Version) | ||||
| 	if len(namespace) > 0 { | ||||
| 		url = append(url, "namespaces", namespace) | ||||
| 	} | ||||
| 	url = append(url, gvr.Resource) | ||||
| 	var table metav1.Table | ||||
| 	err := k.manager.discoveryClient.RESTClient(). | ||||
| 		Get(). | ||||
| 		SetHeader("Accept", strings.Join([]string{ | ||||
| 			fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), | ||||
| 			fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName), | ||||
| 			"application/json", | ||||
| 		}, ",")). | ||||
| 		AbsPath(url...). | ||||
| 		SpecificallyVersionedParams(&options.ListOptions, ParameterCodec, schema.GroupVersion{Version: "v1"}). | ||||
| 		Do(ctx).Into(&table) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Add metav1.Table apiVersion and kind to the unstructured object (server may not return these fields) | ||||
| 	table.SetGroupVersionKind(metav1.SchemeGroupVersion.WithKind("Table")) | ||||
| 	// Add additional columns for fields that aren't returned by the server | ||||
| 	table.ColumnDefinitions = append([]metav1.TableColumnDefinition{ | ||||
| 		{Name: "apiVersion", Type: "string"}, | ||||
| 		{Name: "kind", Type: "string"}, | ||||
| 	}, table.ColumnDefinitions...) | ||||
| 	for i := range table.Rows { | ||||
| 		row := &table.Rows[i] | ||||
| 		row.Cells = append([]interface{}{ | ||||
| 			gvr.GroupVersion().String(), | ||||
| 			gvk.Kind, | ||||
| 		}, row.Cells...) | ||||
| 	} | ||||
| 	unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&table) | ||||
| 	return &unstructured.Unstructured{Object: unstructuredObject}, err | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) resourcesCreateOrUpdate(ctx context.Context, resources []*unstructured.Unstructured) ([]*unstructured.Unstructured, error) { | ||||
| 	for i, obj := range resources { | ||||
| 		gvk := obj.GroupVersionKind() | ||||
| 		gvr, rErr := k.resourceFor(&gvk) | ||||
| 		if rErr != nil { | ||||
| 			return nil, rErr | ||||
| 		} | ||||
|  | ||||
| 		namespace := obj.GetNamespace() | ||||
| 		// If it's a namespaced resource and namespace wasn't provided, try to use the default configured one | ||||
| 		if namespaced, nsErr := k.isNamespaced(&gvk); nsErr == nil && namespaced { | ||||
| 			namespace = k.NamespaceOrDefault(namespace) | ||||
| 		} | ||||
| 		resources[i], rErr = k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{ | ||||
| 			FieldManager: version.BinaryName, | ||||
| 		}) | ||||
| 		if rErr != nil { | ||||
| 			return nil, rErr | ||||
| 		} | ||||
| 		// Clear the cache to ensure the next operation is performed on the latest exposed APIs (will change after the CRD creation) | ||||
| 		if gvk.Kind == "CustomResourceDefinition" { | ||||
| 			k.manager.accessControlRESTMapper.Reset() | ||||
| 		} | ||||
| 	} | ||||
| 	return resources, nil | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) resourceFor(gvk *schema.GroupVersionKind) (*schema.GroupVersionResource, error) { | ||||
| 	if k.deferredDiscoveryRESTMapper == nil { | ||||
| 		d, err := discovery.NewDiscoveryClientForConfig(k.cfg) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		k.deferredDiscoveryRESTMapper = restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(d)) | ||||
| 	} | ||||
| 	m, err := k.deferredDiscoveryRESTMapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) | ||||
| 	m, err := k.manager.accessControlRESTMapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &m.Resource, nil | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) isNamespaced(gvk *schema.GroupVersionKind) (bool, error) { | ||||
| 	apiResourceList, err := k.manager.discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 	} | ||||
| 	for _, apiResource := range apiResourceList.APIResources { | ||||
| 		if apiResource.Kind == gvk.Kind { | ||||
| 			return apiResource.Namespaced, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return false, nil | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) supportsGroupVersion(groupVersion string) bool { | ||||
| 	if _, err := k.manager.discoveryClient.ServerResourcesForGroupVersion(groupVersion); err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func (k *Kubernetes) canIUse(ctx context.Context, gvr *schema.GroupVersionResource, namespace, verb string) bool { | ||||
| 	accessReviews, err := k.manager.accessControlClientSet.SelfSubjectAccessReviews() | ||||
| 	if err != nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	response, err := accessReviews.Create(ctx, &authv1.SelfSubjectAccessReview{ | ||||
| 		Spec: authv1.SelfSubjectAccessReviewSpec{ResourceAttributes: &authv1.ResourceAttributes{ | ||||
| 			Namespace: namespace, | ||||
| 			Verb:      verb, | ||||
| 			Group:     gvr.Group, | ||||
| 			Version:   gvr.Version, | ||||
| 			Resource:  gvr.Resource, | ||||
| 		}}, | ||||
| 	}, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		// TODO: maybe return the error too | ||||
| 		return false | ||||
| 	} | ||||
| 	return response.Status.Allowed | ||||
| } | ||||
|   | ||||
							
								
								
									
										11
									
								
								pkg/kubernetes/token.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								pkg/kubernetes/token.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| package kubernetes | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	authenticationv1api "k8s.io/api/authentication/v1" | ||||
| ) | ||||
|  | ||||
| type TokenVerifier interface { | ||||
| 	VerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationv1api.UserInfo, []string, error) | ||||
| } | ||||
| @@ -1,47 +1,164 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"github.com/mark3labs/mcp-go/client" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/mark3labs/mcp-go/server" | ||||
| 	"github.com/spf13/afero" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	"k8s.io/client-go/tools/clientcmd/api" | ||||
| 	"encoding/json" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"net/http/httptest" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/client" | ||||
| 	"github.com/mark3labs/mcp-go/client/transport" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/mark3labs/mcp-go/server" | ||||
| 	"github.com/pkg/errors" | ||||
| 	"github.com/spf13/afero" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"golang.org/x/sync/errgroup" | ||||
| 	corev1 "k8s.io/api/core/v1" | ||||
| 	rbacv1 "k8s.io/api/rbac/v1" | ||||
| 	apiextensionsv1spec "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" | ||||
| 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/watch" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	"k8s.io/client-go/tools/clientcmd" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| 	toolswatch "k8s.io/client-go/tools/watch" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/klog/v2/textlogger" | ||||
| 	"k8s.io/utils/ptr" | ||||
| 	"sigs.k8s.io/controller-runtime/pkg/envtest" | ||||
| 	"sigs.k8s.io/controller-runtime/tools/setup-envtest/env" | ||||
| 	"sigs.k8s.io/controller-runtime/tools/setup-envtest/remote" | ||||
| 	"sigs.k8s.io/controller-runtime/tools/setup-envtest/store" | ||||
| 	"sigs.k8s.io/controller-runtime/tools/setup-envtest/versions" | ||||
| 	"sigs.k8s.io/controller-runtime/tools/setup-envtest/workflows" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/output" | ||||
| ) | ||||
|  | ||||
| // envTest has an expensive setup, so we only want to do it once per entire test run. | ||||
| var envTest *envtest.Environment | ||||
| var envTestRestConfig *rest.Config | ||||
| var envTestUser = envtest.User{Name: "test-user", Groups: []string{"test:users"}} | ||||
|  | ||||
| func TestMain(m *testing.M) { | ||||
| 	// Set up | ||||
| 	_ = os.Setenv("KUBECONFIG", "/dev/null")     // Avoid interference from existing kubeconfig | ||||
| 	_ = os.Setenv("KUBERNETES_SERVICE_HOST", "") // Avoid interference from in-cluster config | ||||
| 	_ = os.Setenv("KUBERNETES_SERVICE_PORT", "") // Avoid interference from in-cluster config | ||||
| 	envTestDir, err := store.DefaultStoreDir() | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	envTestEnv := &env.Env{ | ||||
| 		FS:  afero.Afero{Fs: afero.NewOsFs()}, | ||||
| 		Out: os.Stdout, | ||||
| 		Client: &remote.HTTPClient{ | ||||
| 			IndexURL: remote.DefaultIndexURL, | ||||
| 		}, | ||||
| 		Platform: versions.PlatformItem{ | ||||
| 			Platform: versions.Platform{ | ||||
| 				OS:   runtime.GOOS, | ||||
| 				Arch: runtime.GOARCH, | ||||
| 			}, | ||||
| 		}, | ||||
| 		Version: versions.AnyVersion, | ||||
| 		Store:   store.NewAt(envTestDir), | ||||
| 	} | ||||
| 	envTestEnv.CheckCoherence() | ||||
| 	workflows.Use{}.Do(envTestEnv) | ||||
| 	versionDir := envTestEnv.Platform.BaseName(*envTestEnv.Version.AsConcrete()) | ||||
| 	envTest = &envtest.Environment{ | ||||
| 		BinaryAssetsDirectory: filepath.Join(envTestDir, "k8s", versionDir), | ||||
| 	} | ||||
| 	adminSystemMasterBaseConfig, _ := envTest.Start() | ||||
| 	au := test.Must(envTest.AddUser(envTestUser, adminSystemMasterBaseConfig)) | ||||
| 	envTestRestConfig = au.Config() | ||||
| 	envTest.KubeConfig = test.Must(au.KubeConfig()) | ||||
|  | ||||
| 	//Create test data as administrator | ||||
| 	ctx := context.Background() | ||||
| 	restoreAuth(ctx) | ||||
| 	createTestData(ctx) | ||||
|  | ||||
| 	// Test! | ||||
| 	code := m.Run() | ||||
|  | ||||
| 	// Tear down | ||||
| 	if envTest != nil { | ||||
| 		_ = envTest.Stop() | ||||
| 	} | ||||
| 	os.Exit(code) | ||||
| } | ||||
|  | ||||
| type mcpContext struct { | ||||
| 	ctx        context.Context | ||||
| 	tempDir    string | ||||
| 	testServer *httptest.Server | ||||
| 	cancel     context.CancelFunc | ||||
| 	mcpClient  *client.SSEMCPClient | ||||
| 	envTest    *envtest.Environment | ||||
| 	toolsets   []string | ||||
| 	listOutput output.Output | ||||
| 	logLevel   int | ||||
|  | ||||
| 	staticConfig  *config.StaticConfig | ||||
| 	clientOptions []transport.ClientOption | ||||
| 	before        func(*mcpContext) | ||||
| 	after         func(*mcpContext) | ||||
| 	ctx           context.Context | ||||
| 	tempDir       string | ||||
| 	cancel        context.CancelFunc | ||||
| 	mcpServer     *Server | ||||
| 	mcpHttpServer *httptest.Server | ||||
| 	mcpClient     *client.Client | ||||
| 	klogState     klog.State | ||||
| 	logBuffer     bytes.Buffer | ||||
| } | ||||
|  | ||||
| func (c *mcpContext) beforeEach(t *testing.T) { | ||||
| 	var err error | ||||
| 	c.ctx, c.cancel = context.WithCancel(context.Background()) | ||||
| 	c.ctx, c.cancel = context.WithCancel(t.Context()) | ||||
| 	c.tempDir = t.TempDir() | ||||
| 	c.withKubeConfig(nil) | ||||
| 	c.testServer = server.NewTestServer(NewSever().server) | ||||
| 	if c.mcpClient, err = client.NewSSEMCPClient(c.testServer.URL + "/sse"); err != nil { | ||||
| 	if c.staticConfig == nil { | ||||
| 		c.staticConfig = config.Default() | ||||
| 		// Default to use YAML output for lists (previously the default) | ||||
| 		c.staticConfig.ListOutput = "yaml" | ||||
| 	} | ||||
| 	if c.toolsets != nil { | ||||
| 		c.staticConfig.Toolsets = c.toolsets | ||||
|  | ||||
| 	} | ||||
| 	if c.listOutput != nil { | ||||
| 		c.staticConfig.ListOutput = c.listOutput.GetName() | ||||
| 	} | ||||
| 	if c.before != nil { | ||||
| 		c.before(c) | ||||
| 	} | ||||
| 	// Set up logging | ||||
| 	c.klogState = klog.CaptureState() | ||||
| 	flags := flag.NewFlagSet("test", flag.ContinueOnError) | ||||
| 	klog.InitFlags(flags) | ||||
| 	_ = flags.Set("v", strconv.Itoa(c.logLevel)) | ||||
| 	klog.SetLogger(textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(c.logLevel), textlogger.Output(&c.logBuffer)))) | ||||
| 	// MCP Server | ||||
| 	if c.mcpServer, err = NewServer(Configuration{StaticConfig: c.staticConfig}); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 		return | ||||
| 	} | ||||
| 	c.mcpHttpServer = server.NewTestServer(c.mcpServer.server, server.WithSSEContextFunc(contextFunc)) | ||||
| 	if c.mcpClient, err = client.NewSSEMCPClient(c.mcpHttpServer.URL+"/sse", c.clientOptions...); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 		return | ||||
| 	} | ||||
| 	// MCP Client | ||||
| 	if err = c.mcpClient.Start(c.ctx); err != nil { | ||||
| 		t.Fatal(err) | ||||
| 		return | ||||
| @@ -57,84 +174,278 @@ func (c *mcpContext) beforeEach(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func (c *mcpContext) afterEach() { | ||||
| 	if c.envTest != nil { | ||||
| 		_ = c.envTest.Stop() | ||||
| 	if c.after != nil { | ||||
| 		c.after(c) | ||||
| 	} | ||||
| 	c.cancel() | ||||
| 	c.mcpServer.Close() | ||||
| 	_ = c.mcpClient.Close() | ||||
| 	c.testServer.Close() | ||||
| 	c.mcpHttpServer.Close() | ||||
| 	c.klogState.Restore() | ||||
| } | ||||
|  | ||||
| func testCase(test func(t *testing.T, c *mcpContext)) func(*testing.T) { | ||||
| 	return func(t *testing.T) { | ||||
| 		mcpCtx := &mcpContext{} | ||||
| 		mcpCtx.beforeEach(t) | ||||
| 		defer mcpCtx.afterEach() | ||||
| 		test(t, mcpCtx) | ||||
| 	} | ||||
| func testCase(t *testing.T, test func(c *mcpContext)) { | ||||
| 	testCaseWithContext(t, &mcpContext{}, test) | ||||
| } | ||||
|  | ||||
| func (c *mcpContext) withKubeConfig(rc *rest.Config) *api.Config { | ||||
| 	fakeConfig := api.NewConfig() | ||||
| 	fakeConfig.CurrentContext = "fake-context" | ||||
| 	fakeConfig.Contexts["fake-context"] = api.NewContext() | ||||
| 	fakeConfig.Contexts["fake-context"].Cluster = "fake" | ||||
| 	fakeConfig.Contexts["fake-context"].AuthInfo = "fake" | ||||
| 	fakeConfig.Clusters["fake"] = api.NewCluster() | ||||
| 	fakeConfig.Clusters["fake"].Server = "https://example.com" | ||||
| 	fakeConfig.AuthInfos["fake"] = api.NewAuthInfo() | ||||
| func testCaseWithContext(t *testing.T, mcpCtx *mcpContext, test func(c *mcpContext)) { | ||||
| 	mcpCtx.beforeEach(t) | ||||
| 	defer mcpCtx.afterEach() | ||||
| 	test(mcpCtx) | ||||
| } | ||||
|  | ||||
| // withKubeConfig sets up a fake kubeconfig in the temp directory based on the provided rest.Config | ||||
| func (c *mcpContext) withKubeConfig(rc *rest.Config) *clientcmdapi.Config { | ||||
| 	fakeConfig := clientcmdapi.NewConfig() | ||||
| 	fakeConfig.Clusters["fake"] = clientcmdapi.NewCluster() | ||||
| 	fakeConfig.Clusters["fake"].Server = "https://127.0.0.1:6443" | ||||
| 	fakeConfig.Clusters["additional-cluster"] = clientcmdapi.NewCluster() | ||||
| 	fakeConfig.AuthInfos["fake"] = clientcmdapi.NewAuthInfo() | ||||
| 	fakeConfig.AuthInfos["additional-auth"] = clientcmdapi.NewAuthInfo() | ||||
| 	if rc != nil { | ||||
| 		fakeConfig.Clusters["fake"].Server = rc.Host | ||||
| 		fakeConfig.Clusters["fake"].CertificateAuthorityData = rc.TLSClientConfig.CAData | ||||
| 		fakeConfig.AuthInfos["fake"].ClientKeyData = rc.TLSClientConfig.KeyData | ||||
| 		fakeConfig.AuthInfos["fake"].ClientCertificateData = rc.TLSClientConfig.CertData | ||||
| 		fakeConfig.Clusters["fake"].CertificateAuthorityData = rc.CAData | ||||
| 		fakeConfig.AuthInfos["fake"].ClientKeyData = rc.KeyData | ||||
| 		fakeConfig.AuthInfos["fake"].ClientCertificateData = rc.CertData | ||||
| 	} | ||||
| 	fakeConfig.Contexts["fake-context"] = clientcmdapi.NewContext() | ||||
| 	fakeConfig.Contexts["fake-context"].Cluster = "fake" | ||||
| 	fakeConfig.Contexts["fake-context"].AuthInfo = "fake" | ||||
| 	fakeConfig.Contexts["additional-context"] = clientcmdapi.NewContext() | ||||
| 	fakeConfig.Contexts["additional-context"].Cluster = "additional-cluster" | ||||
| 	fakeConfig.Contexts["additional-context"].AuthInfo = "additional-auth" | ||||
| 	fakeConfig.CurrentContext = "fake-context" | ||||
| 	kubeConfig := filepath.Join(c.tempDir, "config") | ||||
| 	_ = clientcmd.WriteToFile(*fakeConfig, kubeConfig) | ||||
| 	_ = os.Setenv("KUBECONFIG", kubeConfig) | ||||
| 	if c.mcpServer != nil { | ||||
| 		if err := c.mcpServer.reloadKubernetesClusterProvider(); err != nil { | ||||
| 			panic(err) | ||||
| 		} | ||||
| 	} | ||||
| 	return fakeConfig | ||||
| } | ||||
|  | ||||
| // withEnvTest sets up the environment for kubeconfig to be used with envTest | ||||
| func (c *mcpContext) withEnvTest() { | ||||
| 	if c.envTest != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	envTestDir, err := store.DefaultStoreDir() | ||||
| 	if err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	envTest := &env.Env{ | ||||
| 		FS:  afero.Afero{Fs: afero.NewOsFs()}, | ||||
| 		Out: os.Stdout, | ||||
| 		Client: &remote.HTTPClient{ | ||||
| 			IndexURL: remote.DefaultIndexURL, | ||||
| 		}, | ||||
| 		Platform: versions.PlatformItem{ | ||||
| 			Platform: versions.Platform{ | ||||
| 				OS:   runtime.GOOS, | ||||
| 				Arch: runtime.GOARCH, | ||||
| 			}, | ||||
| 		}, | ||||
| 		Version: versions.AnyVersion, | ||||
| 		Store:   store.NewAt(envTestDir), | ||||
| 	} | ||||
| 	envTest.CheckCoherence() | ||||
| 	workflows.Use{}.Do(envTest) | ||||
| 	versionDir := envTest.Platform.Platform.BaseName(*envTest.Version.AsConcrete()) | ||||
| 	c.envTest = &envtest.Environment{ | ||||
| 		BinaryAssetsDirectory: filepath.Join(envTestDir, "k8s", versionDir), | ||||
| 	} | ||||
| 	restConfig, _ := c.envTest.Start() | ||||
| 	c.withKubeConfig(restConfig) | ||||
| 	c.withKubeConfig(envTestRestConfig) | ||||
| } | ||||
|  | ||||
| func (c *mcpContext) newKubernetesClient() *kubernetes.Clientset { | ||||
| // inOpenShift sets up the kubernetes environment to seem to be running OpenShift | ||||
| func inOpenShift(c *mcpContext) { | ||||
| 	c.withEnvTest() | ||||
| 	pathOptions := clientcmd.NewDefaultPathOptions() | ||||
| 	cfg, _ := clientcmd.BuildConfigFromFlags("", pathOptions.GetDefaultFilename()) | ||||
| 	kubernetesClient, err := kubernetes.NewForConfig(cfg) | ||||
| 	if err != nil { | ||||
| 	crdTemplate := ` | ||||
|           { | ||||
|             "apiVersion": "apiextensions.k8s.io/v1", | ||||
|             "kind": "CustomResourceDefinition", | ||||
|             "metadata": {"name": "%s"}, | ||||
|             "spec": { | ||||
|               "group": "%s", | ||||
|               "versions": [{ | ||||
|                 "name": "v1","served": true,"storage": true, | ||||
|                 "schema": {"openAPIV3Schema": {"type": "object","x-kubernetes-preserve-unknown-fields": true}} | ||||
|               }], | ||||
|               "scope": "%s", | ||||
|               "names": {"plural": "%s","singular": "%s","kind": "%s"} | ||||
|             } | ||||
|           }` | ||||
| 	tasks, _ := errgroup.WithContext(c.ctx) | ||||
| 	tasks.Go(func() error { | ||||
| 		return c.crdApply(fmt.Sprintf(crdTemplate, "projects.project.openshift.io", "project.openshift.io", | ||||
| 			"Cluster", "projects", "project", "Project")) | ||||
| 	}) | ||||
| 	tasks.Go(func() error { | ||||
| 		return c.crdApply(fmt.Sprintf(crdTemplate, "routes.route.openshift.io", "route.openshift.io", | ||||
| 			"Namespaced", "routes", "route", "Route")) | ||||
| 	}) | ||||
| 	if err := tasks.Wait(); err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return kubernetesClient | ||||
| } | ||||
|  | ||||
| // inOpenShiftClear clears the kubernetes environment so it no longer seems to be running OpenShift | ||||
| func inOpenShiftClear(c *mcpContext) { | ||||
| 	tasks, _ := errgroup.WithContext(c.ctx) | ||||
| 	tasks.Go(func() error { return c.crdDelete("projects.project.openshift.io") }) | ||||
| 	tasks.Go(func() error { return c.crdDelete("routes.route.openshift.io") }) | ||||
| 	if err := tasks.Wait(); err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // newKubernetesClient creates a new Kubernetes client with the envTest kubeconfig | ||||
| func (c *mcpContext) newKubernetesClient() *kubernetes.Clientset { | ||||
| 	return kubernetes.NewForConfigOrDie(envTestRestConfig) | ||||
| } | ||||
|  | ||||
| // newApiExtensionsClient creates a new ApiExtensions client with the envTest kubeconfig | ||||
| func (c *mcpContext) newApiExtensionsClient() *apiextensionsv1.ApiextensionsV1Client { | ||||
| 	return apiextensionsv1.NewForConfigOrDie(envTestRestConfig) | ||||
| } | ||||
|  | ||||
| // crdApply creates a CRD from the provided resource string and waits for it to be established | ||||
| func (c *mcpContext) crdApply(resource string) error { | ||||
| 	apiExtensionsV1Client := c.newApiExtensionsClient() | ||||
| 	var crd = &apiextensionsv1spec.CustomResourceDefinition{} | ||||
| 	err := json.Unmarshal([]byte(resource), crd) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to create CRD %v", err) | ||||
| 	} | ||||
| 	_, err = apiExtensionsV1Client.CustomResourceDefinitions().Create(c.ctx, crd, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to create CRD %v", err) | ||||
| 	} | ||||
| 	c.crdWaitUntilReady(crd.Name) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // crdDelete deletes a CRD by name and waits for it to be removed | ||||
| func (c *mcpContext) crdDelete(name string) error { | ||||
| 	apiExtensionsV1Client := c.newApiExtensionsClient() | ||||
| 	err := apiExtensionsV1Client.CustomResourceDefinitions().Delete(c.ctx, name, metav1.DeleteOptions{ | ||||
| 		GracePeriodSeconds: ptr.To(int64(0)), | ||||
| 	}) | ||||
| 	iteration := 0 | ||||
| 	for iteration < 100 { | ||||
| 		if _, derr := apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, name, metav1.GetOptions{}); derr != nil { | ||||
| 			break | ||||
| 		} | ||||
| 		time.Sleep(5 * time.Millisecond) | ||||
| 		iteration++ | ||||
| 	} | ||||
| 	if err != nil { | ||||
| 		return errors.Wrap(err, "failed to delete CRD") | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // crdWaitUntilReady waits for a CRD to be established | ||||
| func (c *mcpContext) crdWaitUntilReady(name string) { | ||||
| 	watcher, err := c.newApiExtensionsClient().CustomResourceDefinitions().Watch(c.ctx, metav1.ListOptions{ | ||||
| 		FieldSelector: "metadata.name=" + name, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		panic(fmt.Errorf("failed to watch CRD %v", err)) | ||||
| 	} | ||||
| 	_, err = toolswatch.UntilWithoutRetry(c.ctx, watcher, func(event watch.Event) (bool, error) { | ||||
| 		for _, c := range event.Object.(*apiextensionsv1spec.CustomResourceDefinition).Status.Conditions { | ||||
| 			if c.Type == apiextensionsv1spec.Established && c.Status == apiextensionsv1spec.ConditionTrue { | ||||
| 				return true, nil | ||||
| 			} | ||||
| 		} | ||||
| 		return false, nil | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		panic(fmt.Errorf("failed to wait for CRD %v", err)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // callTool helper function to call a tool by name with arguments | ||||
| func (c *mcpContext) callTool(name string, args map[string]interface{}) (*mcp.CallToolResult, error) { | ||||
| 	callToolRequest := mcp.CallToolRequest{} | ||||
| 	callToolRequest.Params.Name = name | ||||
| 	callToolRequest.Params.Arguments = args | ||||
| 	return c.mcpClient.CallTool(c.ctx, callToolRequest) | ||||
| } | ||||
|  | ||||
| func restoreAuth(ctx context.Context) { | ||||
| 	kubernetesAdmin := kubernetes.NewForConfigOrDie(envTest.Config) | ||||
| 	// Authorization | ||||
| 	_, _ = kubernetesAdmin.RbacV1().ClusterRoles().Update(ctx, &rbacv1.ClusterRole{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "allow-all"}, | ||||
| 		Rules: []rbacv1.PolicyRule{{ | ||||
| 			Verbs:     []string{"*"}, | ||||
| 			APIGroups: []string{"*"}, | ||||
| 			Resources: []string{"*"}, | ||||
| 		}}, | ||||
| 	}, metav1.UpdateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.RbacV1().ClusterRoleBindings().Update(ctx, &rbacv1.ClusterRoleBinding{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{Name: "allow-all"}, | ||||
| 		Subjects:   []rbacv1.Subject{{Kind: "Group", Name: envTestUser.Groups[0]}}, | ||||
| 		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "allow-all"}, | ||||
| 	}, metav1.UpdateOptions{}) | ||||
| } | ||||
|  | ||||
| func createTestData(ctx context.Context) { | ||||
| 	kubernetesAdmin := kubernetes.NewForConfigOrDie(envTestRestConfig) | ||||
| 	// Namespaces | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Namespaces(). | ||||
| 		Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-1"}}, metav1.CreateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Namespaces(). | ||||
| 		Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-2"}}, metav1.CreateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Namespaces(). | ||||
| 		Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns-to-delete"}}, metav1.CreateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Pods("default").Create(ctx, &corev1.Pod{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name:   "a-pod-in-default", | ||||
| 			Labels: map[string]string{"app": "nginx"}, | ||||
| 		}, | ||||
| 		Spec: corev1.PodSpec{ | ||||
| 			Containers: []corev1.Container{ | ||||
| 				{ | ||||
| 					Name:  "nginx", | ||||
| 					Image: "nginx", | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	}, metav1.CreateOptions{}) | ||||
| 	// Pods for listing | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Pods("ns-1").Create(ctx, &corev1.Pod{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name: "a-pod-in-ns-1", | ||||
| 		}, | ||||
| 		Spec: corev1.PodSpec{ | ||||
| 			Containers: []corev1.Container{ | ||||
| 				{ | ||||
| 					Name:  "nginx", | ||||
| 					Image: "nginx", | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	}, metav1.CreateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.CoreV1().Pods("ns-2").Create(ctx, &corev1.Pod{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name: "a-pod-in-ns-2", | ||||
| 		}, | ||||
| 		Spec: corev1.PodSpec{ | ||||
| 			Containers: []corev1.Container{ | ||||
| 				{ | ||||
| 					Name:  "nginx", | ||||
| 					Image: "nginx", | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	}, metav1.CreateOptions{}) | ||||
| 	_, _ = kubernetesAdmin.CoreV1().ConfigMaps("default"). | ||||
| 		Create(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-delete"}}, metav1.CreateOptions{}) | ||||
| } | ||||
|  | ||||
| type BaseMcpSuite struct { | ||||
| 	suite.Suite | ||||
| 	*test.McpClient | ||||
| 	mcpServer *Server | ||||
| 	Cfg       *config.StaticConfig | ||||
| } | ||||
|  | ||||
| func (s *BaseMcpSuite) SetupTest() { | ||||
| 	s.Cfg = config.Default() | ||||
| 	s.Cfg.ListOutput = "yaml" | ||||
| 	s.Cfg.KubeConfig = filepath.Join(s.T().TempDir(), "config") | ||||
| 	s.Require().NoError(os.WriteFile(s.Cfg.KubeConfig, envTest.KubeConfig, 0600), "Expected to write kubeconfig") | ||||
| } | ||||
|  | ||||
| func (s *BaseMcpSuite) TearDownTest() { | ||||
| 	if s.McpClient != nil { | ||||
| 		s.Close() | ||||
| 	} | ||||
| 	if s.mcpServer != nil { | ||||
| 		s.mcpServer.Close() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *BaseMcpSuite) InitMcpClient() { | ||||
| 	var err error | ||||
| 	s.mcpServer, err = NewServer(Configuration{StaticConfig: s.Cfg}) | ||||
| 	s.Require().NoError(err, "Expected no error creating MCP server") | ||||
| 	s.McpClient = test.NewMcpClient(s.T(), s.mcpServer.ServeHTTP(nil)) | ||||
| } | ||||
|   | ||||
| @@ -1,23 +0,0 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"github.com/manusa/kubernetes-mcp-server/pkg/kubernetes" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| ) | ||||
|  | ||||
| func (s *Sever) initConfiguration() { | ||||
| 	s.server.AddTool(mcp.NewTool( | ||||
| 		"configuration_view", | ||||
| 		mcp.WithDescription("Get the current Kubernetes configuration content as a kubeconfig YAML"), | ||||
| 	), configurationView) | ||||
| } | ||||
|  | ||||
| func configurationView(_ context.Context, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 	ret, err := kubernetes.ConfigurationView() | ||||
| 	if err != nil { | ||||
| 		err = fmt.Errorf("failed to get configuration view: %v", err) | ||||
| 	} | ||||
| 	return NewTextResult(ret, err), nil | ||||
| } | ||||
| @@ -1,25 +1,170 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"strings" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	"k8s.io/client-go/rest" | ||||
| 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api" | ||||
| 	v1 "k8s.io/client-go/tools/clientcmd/api/v1" | ||||
| 	"sigs.k8s.io/yaml" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/kubernetes" | ||||
| ) | ||||
|  | ||||
| func TestConfigurationView(t *testing.T) { | ||||
| 	t.Run("configuration_view returns configuration", testCase(func(t *testing.T, c *mcpContext) { | ||||
| 		configurationGet := mcp.CallToolRequest{} | ||||
| 		configurationGet.Params.Name = "configuration_view" | ||||
| 		configurationGet.Params.Arguments = map[string]interface{}{} | ||||
| 		tools, err := c.mcpClient.CallTool(c.ctx, configurationGet) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("call tool failed %v", err) | ||||
| 			return | ||||
| 		} | ||||
| 		resultContent := tools.Content[0].(map[string]interface{})["text"].(string) | ||||
| 		if !strings.Contains(resultContent, "cluster: fake\n") { | ||||
| 			t.Fatalf("mismatch in kube config: %s", resultContent) | ||||
| 			return | ||||
| 		} | ||||
| 	})) | ||||
| type ConfigurationSuite struct { | ||||
| 	BaseMcpSuite | ||||
| } | ||||
|  | ||||
| func (s *ConfigurationSuite) SetupTest() { | ||||
| 	s.BaseMcpSuite.SetupTest() | ||||
| 	// Use mock server for predictable kubeconfig content | ||||
| 	mockServer := test.NewMockServer() | ||||
| 	s.T().Cleanup(mockServer.Close) | ||||
| 	kubeconfig := mockServer.Kubeconfig() | ||||
| 	for i := 0; i < 10; i++ { | ||||
| 		// Add multiple fake contexts to force configuration_contexts_list tool to appear | ||||
| 		// and test minification in configuration_view tool | ||||
| 		name := fmt.Sprintf("cluster-%d", i) | ||||
| 		kubeconfig.Contexts[name] = clientcmdapi.NewContext() | ||||
| 		kubeconfig.Clusters[name+"-cluster"] = clientcmdapi.NewCluster() | ||||
| 		kubeconfig.AuthInfos[name+"-auth"] = clientcmdapi.NewAuthInfo() | ||||
| 		kubeconfig.Contexts[name].Cluster = name + "-cluster" | ||||
| 		kubeconfig.Contexts[name].AuthInfo = name + "-auth" | ||||
| 	} | ||||
| 	s.Cfg.KubeConfig = test.KubeconfigFile(s.T(), kubeconfig) | ||||
| } | ||||
|  | ||||
// TestContextsList verifies that configuration_contexts_list reports the
// total context count (10 fake contexts from SetupTest + the mock server's
// default) and singles out the default context with its server URL.
func (s *ConfigurationSuite) TestContextsList() {
	s.InitMcpClient()
	s.Run("configuration_contexts_list", func() {
		toolResult, err := s.CallTool("configuration_contexts_list", map[string]interface{}{})
		s.Run("returns contexts", func() {
			s.Nilf(err, "call tool failed %v", err)
		})
		s.Require().NotNil(toolResult, "Expected tool result from call")
		s.Lenf(toolResult.Content, 1, "invalid tool result content length %v", len(toolResult.Content))
		s.Run("contains context count", func() {
			// 11 = 10 contexts added in SetupTest + the mock server's default context.
			s.Regexpf(`^Available Kubernetes contexts \(11 total`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool count result content %v", toolResult.Content[0].(mcp.TextContent).Text)
		})
		s.Run("contains default context name", func() {
			s.Regexpf(`^Available Kubernetes contexts \(\d+ total, default: fake-context\)`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
			// The default context line is prefixed with '*' and points at the mock server address.
			s.Regexpf(`(?m)^\*fake-context -> http:\/\/127\.0\.0\.1:\d*$`, toolResult.Content[0].(mcp.TextContent).Text, "invalid tool context default result content %v", toolResult.Content[0].(mcp.TextContent).Text)
		})
	})
}
|  | ||||
| func (s *ConfigurationSuite) TestConfigurationView() { | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("configuration_view", func() { | ||||
| 		toolResult, err := s.CallTool("configuration_view", map[string]interface{}{}) | ||||
| 		s.Run("returns configuration", func() { | ||||
| 			s.Nilf(err, "call tool failed %v", err) | ||||
| 		}) | ||||
| 		s.Require().NotNil(toolResult, "Expected tool result from call") | ||||
| 		var decoded *v1.Config | ||||
| 		err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) | ||||
| 		s.Run("has yaml content", func() { | ||||
| 			s.Nilf(err, "invalid tool result content %v", err) | ||||
| 		}) | ||||
| 		s.Run("returns current-context", func() { | ||||
| 			s.Equalf("fake-context", decoded.CurrentContext, "fake-context not found: %v", decoded.CurrentContext) | ||||
| 		}) | ||||
| 		s.Run("returns context info", func() { | ||||
| 			s.Lenf(decoded.Contexts, 1, "invalid context count, expected 1, got %v", len(decoded.Contexts)) | ||||
| 			s.Equalf("fake-context", decoded.Contexts[0].Name, "fake-context not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("fake", decoded.Contexts[0].Context.Cluster, "fake-cluster not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("fake", decoded.Contexts[0].Context.AuthInfo, "fake-auth not found: %v", decoded.Contexts) | ||||
| 		}) | ||||
| 		s.Run("returns cluster info", func() { | ||||
| 			s.Lenf(decoded.Clusters, 1, "invalid cluster count, expected 1, got %v", len(decoded.Clusters)) | ||||
| 			s.Equalf("fake", decoded.Clusters[0].Name, "fake-cluster not found: %v", decoded.Clusters) | ||||
| 			s.Regexpf(`^https?://(127\.0\.0\.1|localhost):\d{1,5}$`, decoded.Clusters[0].Cluster.Server, "fake-server not found: %v", decoded.Clusters) | ||||
| 		}) | ||||
| 		s.Run("returns auth info", func() { | ||||
| 			s.Lenf(decoded.AuthInfos, 1, "invalid auth info count, expected 1, got %v", len(decoded.AuthInfos)) | ||||
| 			s.Equalf("fake", decoded.AuthInfos[0].Name, "fake-auth not found: %v", decoded.AuthInfos) | ||||
| 		}) | ||||
| 	}) | ||||
| 	s.Run("configuration_view(minified=false)", func() { | ||||
| 		toolResult, err := s.CallTool("configuration_view", map[string]interface{}{ | ||||
| 			"minified": false, | ||||
| 		}) | ||||
| 		s.Run("returns configuration", func() { | ||||
| 			s.Nilf(err, "call tool failed %v", err) | ||||
| 		}) | ||||
| 		var decoded *v1.Config | ||||
| 		err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) | ||||
| 		s.Run("has yaml content", func() { | ||||
| 			s.Nilf(err, "invalid tool result content %v", err) | ||||
| 		}) | ||||
| 		s.Run("returns additional context info", func() { | ||||
| 			s.Lenf(decoded.Contexts, 11, "invalid context count, expected 12, got %v", len(decoded.Contexts)) | ||||
| 			s.Equalf("cluster-0", decoded.Contexts[0].Name, "cluster-0 not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("cluster-0-cluster", decoded.Contexts[0].Context.Cluster, "cluster-0-cluster not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("cluster-0-auth", decoded.Contexts[0].Context.AuthInfo, "cluster-0-auth not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("fake", decoded.Contexts[10].Context.Cluster, "fake not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("fake", decoded.Contexts[10].Context.AuthInfo, "fake not found: %v", decoded.Contexts) | ||||
| 			s.Equalf("fake-context", decoded.Contexts[10].Name, "fake-context not found: %v", decoded.Contexts) | ||||
| 		}) | ||||
| 		s.Run("returns cluster info", func() { | ||||
| 			s.Lenf(decoded.Clusters, 11, "invalid cluster count, expected 2, got %v", len(decoded.Clusters)) | ||||
| 			s.Equalf("cluster-0-cluster", decoded.Clusters[0].Name, "cluster-0-cluster not found: %v", decoded.Clusters) | ||||
| 			s.Equalf("fake", decoded.Clusters[10].Name, "fake not found: %v", decoded.Clusters) | ||||
| 		}) | ||||
| 		s.Run("configuration_view with minified=false returns auth info", func() { | ||||
| 			s.Lenf(decoded.AuthInfos, 11, "invalid auth info count, expected 2, got %v", len(decoded.AuthInfos)) | ||||
| 			s.Equalf("cluster-0-auth", decoded.AuthInfos[0].Name, "cluster-0-auth not found: %v", decoded.AuthInfos) | ||||
| 			s.Equalf("fake", decoded.AuthInfos[10].Name, "fake not found: %v", decoded.AuthInfos) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestConfigurationViewInCluster verifies that configuration_view synthesizes
// a kubeconfig (in-cluster context, "cluster" cluster, "user" auth info) when
// no kubeconfig is configured and in-cluster config is available.
func (s *ConfigurationSuite) TestConfigurationViewInCluster() {
	s.Cfg.KubeConfig = "" // Force in-cluster
	// Stub the package-level in-cluster config loader; restored in Cleanup below.
	kubernetes.InClusterConfig = func() (*rest.Config, error) {
		return &rest.Config{
			Host:        "https://kubernetes.default.svc",
			BearerToken: "fake-token",
		}, nil
	}
	s.T().Cleanup(func() { kubernetes.InClusterConfig = rest.InClusterConfig })
	s.InitMcpClient()
	s.Run("configuration_view", func() {
		toolResult, err := s.CallTool("configuration_view", map[string]interface{}{})
		s.Run("returns configuration", func() {
			s.Nilf(err, "call tool failed %v", err)
		})
		s.Require().NotNil(toolResult, "Expected tool result from call")
		var decoded *v1.Config
		err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
		s.Run("has yaml content", func() {
			s.Nilf(err, "invalid tool result content %v", err)
		})
		s.Run("returns current-context", func() {
			s.Equalf("in-cluster", decoded.CurrentContext, "context not found: %v", decoded.CurrentContext)
		})
		s.Run("returns context info", func() {
			s.Lenf(decoded.Contexts, 1, "invalid context count, expected 1, got %v", len(decoded.Contexts))
			s.Equalf("in-cluster", decoded.Contexts[0].Name, "context not found: %v", decoded.Contexts)
			s.Equalf("cluster", decoded.Contexts[0].Context.Cluster, "cluster not found: %v", decoded.Contexts)
			s.Equalf("user", decoded.Contexts[0].Context.AuthInfo, "user not found: %v", decoded.Contexts)
		})
		s.Run("returns cluster info", func() {
			s.Lenf(decoded.Clusters, 1, "invalid cluster count, expected 1, got %v", len(decoded.Clusters))
			s.Equalf("cluster", decoded.Clusters[0].Name, "cluster not found: %v", decoded.Clusters)
			s.Equalf("https://kubernetes.default.svc", decoded.Clusters[0].Cluster.Server, "server not found: %v", decoded.Clusters)
		})
		s.Run("returns auth info", func() {
			s.Lenf(decoded.AuthInfos, 1, "invalid auth info count, expected 1, got %v", len(decoded.AuthInfos))
			s.Equalf("user", decoded.AuthInfos[0].Name, "user not found: %v", decoded.AuthInfos)
		})
	})
}
|  | ||||
| func TestConfiguration(t *testing.T) { | ||||
| 	suite.Run(t, new(ConfigurationSuite)) | ||||
| } | ||||
|   | ||||
							
								
								
									
										143
									
								
								pkg/mcp/events_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										143
									
								
								pkg/mcp/events_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,143 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	"sigs.k8s.io/yaml" | ||||
| ) | ||||
|  | ||||
// EventsSuite exercises the events_list MCP tool against the shared envtest
// cluster.
type EventsSuite struct {
	BaseMcpSuite
}
|  | ||||
// TestEventsList verifies events_list output for an empty cluster, for events
// across all namespaces, and for events filtered by namespace.
func (s *EventsSuite) TestEventsList() {
	s.InitMcpClient()
	s.Run("events_list (no events)", func() {
		toolResult, err := s.CallTool("events_list", map[string]interface{}{})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Run("returns no events message", func() {
			s.Equal("# No events found", toolResult.Content[0].(mcp.TextContent).Text)
		})
	})
	s.Run("events_list (with events)", func() {
		// Seed one event per namespace directly through client-go. Create errors
		// are deliberately ignored (best-effort; assertions below would surface
		// a missing event anyway).
		client := kubernetes.NewForConfigOrDie(envTestRestConfig)
		for _, ns := range []string{"default", "ns-1"} {
			_, _ = client.CoreV1().Events(ns).Create(s.T().Context(), &v1.Event{
				ObjectMeta: metav1.ObjectMeta{
					Name: "an-event-in-" + ns,
				},
				InvolvedObject: v1.ObjectReference{
					APIVersion: "v1",
					Kind:       "Pod",
					Name:       "a-pod",
					Namespace:  ns,
				},
				Type:    "Normal",
				Message: "The event message",
			}, metav1.CreateOptions{})
		}
		s.Run("events_list()", func() {
			toolResult, err := s.CallTool("events_list", map[string]interface{}{})
			s.Run("no error", func() {
				s.Nilf(err, "call tool failed %v", err)
				s.Falsef(toolResult.IsError, "call tool failed")
			})
			s.Run("has yaml comment indicating output format", func() {
				s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "# The following events (YAML format) were found:\n"), "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
			})
			var decoded []v1.Event
			err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
			s.Run("has yaml content", func() {
				s.Nilf(err, "unmarshal failed %v", err)
			})
			// Without a namespace argument, both seeded events are returned.
			s.Run("returns all events", func() {
				s.YAMLEqf(""+
					"- InvolvedObject:\n"+
					"    Kind: Pod\n"+
					"    Name: a-pod\n"+
					"    apiVersion: v1\n"+
					"  Message: The event message\n"+
					"  Namespace: default\n"+
					"  Reason: \"\"\n"+
					"  Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+
					"  Type: Normal\n"+
					"- InvolvedObject:\n"+
					"    Kind: Pod\n"+
					"    Name: a-pod\n"+
					"    apiVersion: v1\n"+
					"  Message: The event message\n"+
					"  Namespace: ns-1\n"+
					"  Reason: \"\"\n"+
					"  Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+
					"  Type: Normal\n",
					toolResult.Content[0].(mcp.TextContent).Text,
					"unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)

			})
		})
		s.Run("events_list(namespace=ns-1)", func() {
			toolResult, err := s.CallTool("events_list", map[string]interface{}{
				"namespace": "ns-1",
			})
			s.Run("no error", func() {
				s.Nilf(err, "call tool failed %v", err)
				s.Falsef(toolResult.IsError, "call tool failed")
			})
			s.Run("has yaml comment indicating output format", func() {
				s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "# The following events (YAML format) were found:\n"), "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
			})
			var decoded []v1.Event
			err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
			s.Run("has yaml content", func() {
				s.Nilf(err, "unmarshal failed %v", err)
			})
			// Namespace filter excludes the event created in "default".
			s.Run("returns events from namespace", func() {
				s.YAMLEqf(""+
					"- InvolvedObject:\n"+
					"    Kind: Pod\n"+
					"    Name: a-pod\n"+
					"    apiVersion: v1\n"+
					"  Message: The event message\n"+
					"  Namespace: ns-1\n"+
					"  Reason: \"\"\n"+
					"  Timestamp: 0001-01-01 00:00:00 +0000 UTC\n"+
					"  Type: Normal\n",
					toolResult.Content[0].(mcp.TextContent).Text,
					"unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
			})
		})
	})
}
|  | ||||
| func (s *EventsSuite) TestEventsListDenied() { | ||||
| 	s.Require().NoError(toml.Unmarshal([]byte(` | ||||
| 		denied_resources = [ { version = "v1", kind = "Event" } ] | ||||
| 	`), s.Cfg), "Expected to parse denied resources config") | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("events_list (denied)", func() { | ||||
| 		toolResult, err := s.CallTool("events_list", map[string]interface{}{}) | ||||
| 		s.Run("has error", func() { | ||||
| 			s.Truef(toolResult.IsError, "call tool should fail") | ||||
| 			s.Nilf(err, "call tool should not return error object") | ||||
| 		}) | ||||
| 		s.Run("describes denial", func() { | ||||
| 			expectedMessage := "failed to list events in all namespaces: resource not allowed: /v1, Kind=Event" | ||||
| 			s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, | ||||
| 				"expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestEvents(t *testing.T) { | ||||
| 	suite.Run(t, new(EventsSuite)) | ||||
| } | ||||
							
								
								
									
										280
									
								
								pkg/mcp/helm_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										280
									
								
								pkg/mcp/helm_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,280 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/base64" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	corev1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/client-go/kubernetes" | ||||
| 	"sigs.k8s.io/yaml" | ||||
| ) | ||||
|  | ||||
// HelmSuite exercises the helm_install, helm_list, and helm_uninstall MCP
// tools against the shared envtest cluster.
type HelmSuite struct {
	BaseMcpSuite
}
|  | ||||
// SetupTest resets base suite state and removes any leftover Helm release
// Secrets so every test starts from a cluster with no releases.
func (s *HelmSuite) SetupTest() {
	s.BaseMcpSuite.SetupTest()
	clearHelmReleases(s.T().Context(), kubernetes.NewForConfigOrDie(envTestRestConfig))
}
|  | ||||
| func (s *HelmSuite) TestHelmInstall() { | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("helm_install(chart=helm-chart-no-op)", func() { | ||||
| 		_, file, _, _ := runtime.Caller(0) | ||||
| 		chartPath := filepath.Join(filepath.Dir(file), "testdata", "helm-chart-no-op") | ||||
| 		toolResult, err := s.CallTool("helm_install", map[string]interface{}{ | ||||
| 			"chart": chartPath, | ||||
| 		}) | ||||
| 		s.Run("no error", func() { | ||||
| 			s.Nilf(err, "call tool failed %v", err) | ||||
| 			s.Falsef(toolResult.IsError, "call tool failed") | ||||
| 		}) | ||||
| 		s.Run("returns installed chart", func() { | ||||
| 			var decoded []map[string]interface{} | ||||
| 			err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) | ||||
| 			s.Run("has yaml content", func() { | ||||
| 				s.Nilf(err, "invalid tool result content %v", err) | ||||
| 			}) | ||||
| 			s.Run("has 1 item", func() { | ||||
| 				s.Lenf(decoded, 1, "invalid helm install count, expected 1, got %v", len(decoded)) | ||||
| 			}) | ||||
| 			s.Run("has valid name", func() { | ||||
| 				s.Truef(strings.HasPrefix(decoded[0]["name"].(string), "helm-chart-no-op-"), "invalid helm install name, expected no-op-*, got %v", decoded[0]["name"]) | ||||
| 			}) | ||||
| 			s.Run("has valid namespace", func() { | ||||
| 				s.Equalf("default", decoded[0]["namespace"], "invalid helm install namespace, expected default, got %v", decoded[0]["namespace"]) | ||||
| 			}) | ||||
| 			s.Run("has valid chart", func() { | ||||
| 				s.Equalf("no-op", decoded[0]["chart"], "invalid helm install name, expected release name, got empty") | ||||
| 			}) | ||||
| 			s.Run("has valid chartVersion", func() { | ||||
| 				s.Equalf("1.33.7", decoded[0]["chartVersion"], "invalid helm install version, expected 1.33.7, got empty") | ||||
| 			}) | ||||
| 			s.Run("has valid status", func() { | ||||
| 				s.Equalf("deployed", decoded[0]["status"], "invalid helm install status, expected deployed, got %v", decoded[0]["status"]) | ||||
| 			}) | ||||
| 			s.Run("has valid revision", func() { | ||||
| 				s.Equalf(float64(1), decoded[0]["revision"], "invalid helm install revision, expected 1, got %v", decoded[0]["revision"]) | ||||
| 			}) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *HelmSuite) TestHelmInstallDenied() { | ||||
| 	s.Require().NoError(toml.Unmarshal([]byte(` | ||||
| 		denied_resources = [ { version = "v1", kind = "Secret" } ] | ||||
| 	`), s.Cfg), "Expected to parse denied resources config") | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("helm_install(chart=helm-chart-secret, denied)", func() { | ||||
| 		_, file, _, _ := runtime.Caller(0) | ||||
| 		chartPath := filepath.Join(filepath.Dir(file), "testdata", "helm-chart-secret") | ||||
| 		toolResult, err := s.CallTool("helm_install", map[string]interface{}{ | ||||
| 			"chart": chartPath, | ||||
| 		}) | ||||
| 		s.Run("has error", func() { | ||||
| 			s.Truef(toolResult.IsError, "call tool should fail") | ||||
| 			s.Nilf(err, "call tool should not return error object") | ||||
| 		}) | ||||
| 		s.Run("describes denial", func() { | ||||
| 			s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "failed to install helm chart"), "expected descriptive error, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 			expectedMessage := ": resource not allowed: /v1, Kind=Secret" | ||||
| 			s.Truef(strings.HasSuffix(toolResult.Content[0].(mcp.TextContent).Text, expectedMessage), "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (s *HelmSuite) TestHelmListNoReleases() { | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("helm_list() with no releases", func() { | ||||
| 		toolResult, err := s.CallTool("helm_list", map[string]interface{}{}) | ||||
| 		s.Run("no error", func() { | ||||
| 			s.Nilf(err, "call tool failed %v", err) | ||||
| 			s.Falsef(toolResult.IsError, "call tool failed") | ||||
| 		}) | ||||
| 		s.Run("returns not found", func() { | ||||
| 			s.Equalf("No Helm releases found", toolResult.Content[0].(mcp.TextContent).Text, "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestHelmList verifies helm_list against a hand-crafted Helm release Secret:
// the release is visible in its own namespace, hidden when another namespace
// is queried, and visible again with all_namespaces=true.
func (s *HelmSuite) TestHelmList() {
	// Simulate a deployed release by creating the Secret Helm would store:
	// owner/name labels identify it, and the "release" key holds the
	// base64-encoded release JSON payload.
	kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
	_, err := kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "sh.helm.release.v1.release-to-list",
			Labels: map[string]string{"owner": "helm", "name": "release-to-list"},
		},
		Data: map[string][]byte{
			"release": []byte(base64.StdEncoding.EncodeToString([]byte("{" +
				"\"name\":\"release-to-list\"," +
				"\"info\":{\"status\":\"deployed\"}" +
				"}"))),
		},
	}, metav1.CreateOptions{})
	s.Require().NoError(err)
	s.InitMcpClient()
	s.Run("helm_list() with deployed release", func() {
		toolResult, err := s.CallTool("helm_list", map[string]interface{}{})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Run("returns release", func() {
			var decoded []map[string]interface{}
			err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
			s.Run("has yaml content", func() {
				s.Nilf(err, "invalid tool result content %v", err)
			})
			s.Run("has 1 item", func() {
				s.Lenf(decoded, 1, "invalid helm list count, expected 1, got %v", len(decoded))
			})
			s.Run("has valid name", func() {
				s.Equalf("release-to-list", decoded[0]["name"], "invalid helm list name, expected release-to-list, got %v", decoded[0]["name"])
			})
			s.Run("has valid status", func() {
				s.Equalf("deployed", decoded[0]["status"], "invalid helm list status, expected deployed, got %v", decoded[0]["status"])
			})
		})
	})
	// The release lives in "default", so querying ns-1 must find nothing.
	s.Run("helm_list(namespace=ns-1) with deployed release in other namespaces", func() {
		toolResult, err := s.CallTool("helm_list", map[string]interface{}{"namespace": "ns-1"})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Run("returns not found", func() {
			s.Equalf("No Helm releases found", toolResult.Content[0].(mcp.TextContent).Text, "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
		})
	})
	// all_namespaces=true overrides the namespace argument.
	s.Run("helm_list(namespace=ns-1, all_namespaces=true) with deployed release in all namespaces", func() {
		toolResult, err := s.CallTool("helm_list", map[string]interface{}{"namespace": "ns-1", "all_namespaces": true})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Run("returns release", func() {
			var decoded []map[string]interface{}
			err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
			s.Run("has yaml content", func() {
				s.Nilf(err, "invalid tool result content %v", err)
			})
			s.Run("has 1 item", func() {
				s.Lenf(decoded, 1, "invalid helm list count, expected 1, got %v", len(decoded))
			})
			s.Run("has valid name", func() {
				s.Equalf("release-to-list", decoded[0]["name"], "invalid helm list name, expected release-to-list, got %v", decoded[0]["name"])
			})
			s.Run("has valid status", func() {
				s.Equalf("deployed", decoded[0]["status"], "invalid helm list status, expected deployed, got %v", decoded[0]["status"])
			})
		})
	})
}
|  | ||||
| func (s *HelmSuite) TestHelmUninstallNoReleases() { | ||||
| 	s.InitMcpClient() | ||||
| 	s.Run("helm_uninstall(name=release-to-uninstall) with no releases", func() { | ||||
| 		toolResult, err := s.CallTool("helm_uninstall", map[string]interface{}{ | ||||
| 			"name": "release-to-uninstall", | ||||
| 		}) | ||||
| 		s.Run("no error", func() { | ||||
| 			s.Nilf(err, "call tool failed %v", err) | ||||
| 			s.Falsef(toolResult.IsError, "call tool failed") | ||||
| 		}) | ||||
| 		s.Run("returns not found", func() { | ||||
| 			s.Equalf("Release release-to-uninstall not found", toolResult.Content[0].(mcp.TextContent).Text, "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestHelmUninstall verifies helm_uninstall removes a deployed release: the
// tool reports success and the backing Helm release Secret is deleted.
func (s *HelmSuite) TestHelmUninstall() {
	// Simulate a deployed release by creating the Secret Helm would store
	// (owner/name labels plus base64-encoded release JSON under "release").
	kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
	_, err := kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "sh.helm.release.v1.existent-release-to-uninstall.v0",
			Labels: map[string]string{"owner": "helm", "name": "existent-release-to-uninstall"},
		},
		Data: map[string][]byte{
			"release": []byte(base64.StdEncoding.EncodeToString([]byte("{" +
				"\"name\":\"existent-release-to-uninstall\"," +
				"\"info\":{\"status\":\"deployed\"}" +
				"}"))),
		},
	}, metav1.CreateOptions{})
	s.Require().NoError(err)
	s.InitMcpClient()
	s.Run("helm_uninstall(name=existent-release-to-uninstall) with deployed release", func() {
		toolResult, err := s.CallTool("helm_uninstall", map[string]interface{}{
			"name": "existent-release-to-uninstall",
		})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Run("returns uninstalled", func() {
			s.Truef(strings.HasPrefix(toolResult.Content[0].(mcp.TextContent).Text, "Uninstalled release existent-release-to-uninstall"), "unexpected result %v", toolResult.Content[0].(mcp.TextContent).Text)
			// The release Secret must be gone after a successful uninstall.
			_, err = kc.CoreV1().Secrets("default").Get(s.T().Context(), "sh.helm.release.v1.existent-release-to-uninstall.v0", metav1.GetOptions{})
			s.Truef(errors.IsNotFound(err), "expected release to be deleted, but it still exists")
		})

	})
}
|  | ||||
// TestHelmUninstallDenied verifies helm_uninstall fails when the release's
// manifest contains a resource kind (Secret) denied via configuration. The
// exact error text is not asserted (skipped) because Helm does not report
// which underlying resource caused the failure.
func (s *HelmSuite) TestHelmUninstallDenied() {
	s.Require().NoError(toml.Unmarshal([]byte(`
		denied_resources = [ { version = "v1", kind = "Secret" } ]
	`), s.Cfg), "Expected to parse denied resources config")
	// Simulate a deployed release whose manifest includes a Secret, so the
	// uninstall has to touch a denied resource kind.
	kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
	_, err := kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "sh.helm.release.v1.existent-release-to-uninstall.v0",
			Labels: map[string]string{"owner": "helm", "name": "existent-release-to-uninstall"},
		},
		Data: map[string][]byte{
			"release": []byte(base64.StdEncoding.EncodeToString([]byte("{" +
				"\"name\":\"existent-release-to-uninstall\"," +
				"\"info\":{\"status\":\"deployed\"}," +
				"\"manifest\":\"apiVersion: v1\\nkind: Secret\\nmetadata:\\n  name: secret-to-deny\\n  namespace: default\\n\"" +
				"}"))),
		},
	}, metav1.CreateOptions{})
	s.Require().NoError(err)
	s.InitMcpClient()
	s.Run("helm_uninstall(name=existent-release-to-uninstall) with deployed release (denied)", func() {
		toolResult, err := s.CallTool("helm_uninstall", map[string]interface{}{
			"name": "existent-release-to-uninstall",
		})
		s.Run("has error", func() {
			s.Truef(toolResult.IsError, "call tool should fail")
			s.Nilf(err, "call tool should not return error object")
		})
		s.Run("describes denial", func() {
			s.T().Skipf("Helm won't report what underlying resource caused the failure, so we can't assert on it")
			expectedMessage := "failed to uninstall release: resource not allowed: /v1, Kind=Secret"
			s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
		})
	})
}
|  | ||||
| func clearHelmReleases(ctx context.Context, kc *kubernetes.Clientset) { | ||||
| 	secrets, _ := kc.CoreV1().Secrets("default").List(ctx, metav1.ListOptions{}) | ||||
| 	for _, secret := range secrets.Items { | ||||
| 		if strings.HasPrefix(secret.Name, "sh.helm.release.v1.") { | ||||
| 			_ = kc.CoreV1().Secrets("default").Delete(ctx, secret.Name, metav1.DeleteOptions{}) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestHelm(t *testing.T) { | ||||
| 	suite.Run(t, new(HelmSuite)) | ||||
| } | ||||
							
								
								
									
										63
									
								
								pkg/mcp/m3labs.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										63
									
								
								pkg/mcp/m3labs.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,63 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/mark3labs/mcp-go/server" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/api" | ||||
| ) | ||||
|  | ||||
| func ServerToolToM3LabsServerTool(s *Server, tools []api.ServerTool) ([]server.ServerTool, error) { | ||||
| 	m3labTools := make([]server.ServerTool, 0) | ||||
| 	for _, tool := range tools { | ||||
| 		m3labTool := mcp.Tool{ | ||||
| 			Name:        tool.Tool.Name, | ||||
| 			Description: tool.Tool.Description, | ||||
| 			Annotations: mcp.ToolAnnotation{ | ||||
| 				Title:           tool.Tool.Annotations.Title, | ||||
| 				ReadOnlyHint:    tool.Tool.Annotations.ReadOnlyHint, | ||||
| 				DestructiveHint: tool.Tool.Annotations.DestructiveHint, | ||||
| 				IdempotentHint:  tool.Tool.Annotations.IdempotentHint, | ||||
| 				OpenWorldHint:   tool.Tool.Annotations.OpenWorldHint, | ||||
| 			}, | ||||
| 		} | ||||
| 		if tool.Tool.InputSchema != nil { | ||||
| 			schema, err := json.Marshal(tool.Tool.InputSchema) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("failed to marshal tool input schema for tool %s: %v", tool.Tool.Name, err) | ||||
| 			} | ||||
| 			// TODO: temporary fix to append an empty properties object (some client have trouble parsing a schema without properties) | ||||
| 			// As opposed, Gemini had trouble for a while when properties was present but empty. | ||||
| 			// https://github.com/containers/kubernetes-mcp-server/issues/340 | ||||
| 			if string(schema) == `{"type":"object"}` { | ||||
| 				schema = []byte(`{"type":"object","properties":{}}`) | ||||
| 			} | ||||
| 			m3labTool.RawInputSchema = schema | ||||
| 		} | ||||
| 		m3labHandler := func(ctx context.Context, request mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 			// get the correct derived Kubernetes client for the target specified in the request | ||||
| 			cluster := request.GetString(s.p.GetTargetParameterName(), s.p.GetDefaultTarget()) | ||||
| 			k, err := s.p.GetDerivedKubernetes(ctx, cluster) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			result, err := tool.Handler(api.ToolHandlerParams{ | ||||
| 				Context:         ctx, | ||||
| 				Kubernetes:      k, | ||||
| 				ToolCallRequest: request, | ||||
| 				ListOutput:      s.configuration.ListOutput(), | ||||
| 			}) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			return NewTextResult(result.Content, result.Error), nil | ||||
| 		} | ||||
| 		m3labTools = append(m3labTools, server.ServerTool{Tool: m3labTool, Handler: m3labHandler}) | ||||
| 	} | ||||
| 	return m3labTools, nil | ||||
| } | ||||
							
								
								
									
										243
									
								
								pkg/mcp/mcp.go
									
									
									
									
									
								
							
							
						
						
									
										243
									
								
								pkg/mcp/mcp.go
									
									
									
									
									
								
							| @@ -1,39 +1,212 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"github.com/manusa/kubernetes-mcp-server/pkg/version" | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"slices" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/mark3labs/mcp-go/server" | ||||
| 	authenticationapiv1 "k8s.io/api/authentication/v1" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/utils/ptr" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/api" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/output" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/toolsets" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/version" | ||||
| ) | ||||
|  | ||||
| type Sever struct { | ||||
| 	server *server.MCPServer | ||||
| type ContextKey string | ||||
|  | ||||
| const TokenScopesContextKey = ContextKey("TokenScopesContextKey") | ||||
|  | ||||
| type Configuration struct { | ||||
| 	*config.StaticConfig | ||||
| 	listOutput output.Output | ||||
| 	toolsets   []api.Toolset | ||||
| } | ||||
|  | ||||
| func NewSever() *Sever { | ||||
| 	s := &Sever{ | ||||
| func (c *Configuration) Toolsets() []api.Toolset { | ||||
| 	if c.toolsets == nil { | ||||
| 		for _, toolset := range c.StaticConfig.Toolsets { | ||||
| 			c.toolsets = append(c.toolsets, toolsets.ToolsetFromString(toolset)) | ||||
| 		} | ||||
| 	} | ||||
| 	return c.toolsets | ||||
| } | ||||
|  | ||||
| func (c *Configuration) ListOutput() output.Output { | ||||
| 	if c.listOutput == nil { | ||||
| 		c.listOutput = output.FromString(c.StaticConfig.ListOutput) | ||||
| 	} | ||||
| 	return c.listOutput | ||||
| } | ||||
|  | ||||
| func (c *Configuration) isToolApplicable(tool api.ServerTool) bool { | ||||
| 	if c.ReadOnly && !ptr.Deref(tool.Tool.Annotations.ReadOnlyHint, false) { | ||||
| 		return false | ||||
| 	} | ||||
| 	if c.DisableDestructive && ptr.Deref(tool.Tool.Annotations.DestructiveHint, false) { | ||||
| 		return false | ||||
| 	} | ||||
| 	if c.EnabledTools != nil && !slices.Contains(c.EnabledTools, tool.Tool.Name) { | ||||
| 		return false | ||||
| 	} | ||||
| 	if c.DisabledTools != nil && slices.Contains(c.DisabledTools, tool.Tool.Name) { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
// Server bridges the MCP protocol server with a Kubernetes cluster provider
// and the resolved runtime configuration.
type Server struct {
	// configuration holds the resolved static config plus cached derived values.
	configuration *Configuration
	// server is the underlying mark3labs MCP server instance.
	server *server.MCPServer
	// enabledTools records the names of the tools currently registered.
	enabledTools []string
	// p resolves Kubernetes clients/targets; replaced on kubeconfig reloads.
	p internalk8s.Provider
}
|  | ||||
| func NewServer(configuration Configuration) (*Server, error) { | ||||
| 	var serverOptions []server.ServerOption | ||||
| 	serverOptions = append(serverOptions, | ||||
| 		server.WithResourceCapabilities(true, true), | ||||
| 		server.WithPromptCapabilities(true), | ||||
| 		server.WithToolCapabilities(true), | ||||
| 		server.WithLogging(), | ||||
| 		server.WithToolHandlerMiddleware(toolCallLoggingMiddleware), | ||||
| 	) | ||||
| 	if configuration.RequireOAuth && false { // TODO: Disabled scope auth validation for now | ||||
| 		serverOptions = append(serverOptions, server.WithToolHandlerMiddleware(toolScopedAuthorizationMiddleware)) | ||||
| 	} | ||||
|  | ||||
| 	s := &Server{ | ||||
| 		configuration: &configuration, | ||||
| 		server: server.NewMCPServer( | ||||
| 			version.BinaryName, | ||||
| 			version.Version, | ||||
| 			server.WithResourceCapabilities(true, true), | ||||
| 			server.WithPromptCapabilities(true), | ||||
| 			server.WithLogging(), | ||||
| 			serverOptions..., | ||||
| 		), | ||||
| 	} | ||||
| 	s.initConfiguration() | ||||
| 	s.initPods() | ||||
| 	return s | ||||
| 	if err := s.reloadKubernetesClusterProvider(); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	s.p.WatchTargets(s.reloadKubernetesClusterProvider) | ||||
|  | ||||
| 	return s, nil | ||||
| } | ||||
|  | ||||
| func (s *Sever) ServeStdio() error { | ||||
| func (s *Server) reloadKubernetesClusterProvider() error { | ||||
| 	ctx := context.Background() | ||||
| 	p, err := internalk8s.NewProvider(s.configuration.StaticConfig) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// close the old provider | ||||
| 	if s.p != nil { | ||||
| 		s.p.Close() | ||||
| 	} | ||||
|  | ||||
| 	s.p = p | ||||
|  | ||||
| 	targets, err := p.GetTargets(ctx) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	filter := CompositeFilter( | ||||
| 		s.configuration.isToolApplicable, | ||||
| 		ShouldIncludeTargetListTool(p.GetTargetParameterName(), targets), | ||||
| 	) | ||||
|  | ||||
| 	mutator := WithTargetParameter( | ||||
| 		p.GetDefaultTarget(), | ||||
| 		p.GetTargetParameterName(), | ||||
| 		targets, | ||||
| 	) | ||||
|  | ||||
| 	applicableTools := make([]api.ServerTool, 0) | ||||
| 	for _, toolset := range s.configuration.Toolsets() { | ||||
| 		for _, tool := range toolset.GetTools(p) { | ||||
| 			tool := mutator(tool) | ||||
| 			if !filter(tool) { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			applicableTools = append(applicableTools, tool) | ||||
| 			s.enabledTools = append(s.enabledTools, tool.Tool.Name) | ||||
| 		} | ||||
| 	} | ||||
| 	m3labsServerTools, err := ServerToolToM3LabsServerTool(s, applicableTools) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("failed to convert tools: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	s.server.SetTools(m3labsServerTools...) | ||||
|  | ||||
| 	// start new watch | ||||
| 	s.p.WatchTargets(s.reloadKubernetesClusterProvider) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// ServeStdio serves the MCP server over standard input/output, blocking until
// the stream is closed or an error occurs.
func (s *Server) ServeStdio() error {
	return server.ServeStdio(s.server)
}
|  | ||||
| func (s *Server) ServeSse(baseUrl string, httpServer *http.Server) *server.SSEServer { | ||||
| 	options := make([]server.SSEOption, 0) | ||||
| 	options = append(options, server.WithSSEContextFunc(contextFunc), server.WithHTTPServer(httpServer)) | ||||
| 	if baseUrl != "" { | ||||
| 		options = append(options, server.WithBaseURL(baseUrl)) | ||||
| 	} | ||||
| 	return server.NewSSEServer(s.server, options...) | ||||
| } | ||||
|  | ||||
| func (s *Server) ServeHTTP(httpServer *http.Server) *server.StreamableHTTPServer { | ||||
| 	options := []server.StreamableHTTPOption{ | ||||
| 		server.WithHTTPContextFunc(contextFunc), | ||||
| 		server.WithStreamableHTTPServer(httpServer), | ||||
| 		server.WithStateLess(true), | ||||
| 	} | ||||
| 	return server.NewStreamableHTTPServer(s.server, options...) | ||||
| } | ||||
|  | ||||
// KubernetesApiVerifyToken verifies the given token with the audience by
// sending a TokenReview request to the API Server for the specified cluster.
// It returns the authenticated user's info and audiences, or an error when
// the provider is not initialized or verification fails.
func (s *Server) KubernetesApiVerifyToken(ctx context.Context, cluster, token, audience string) (*authenticationapiv1.UserInfo, []string, error) {
	if s.p == nil {
		return nil, nil, fmt.Errorf("kubernetes cluster provider is not initialized")
	}
	return s.p.VerifyToken(ctx, cluster, token, audience)
}
|  | ||||
// GetTargetParameterName returns the parameter name used for target identification in MCP requests.
// It returns the empty string when the provider has not been initialized yet.
func (s *Server) GetTargetParameterName() string {
	if s.p == nil {
		return "" // fallback for uninitialized provider
	}
	return s.p.GetTargetParameterName()
}
|  | ||||
// GetEnabledTools returns the names of the tools currently registered on the
// server. Note: the returned slice aliases internal state; callers must not
// mutate it.
func (s *Server) GetEnabledTools() []string {
	return s.enabledTools
}
|  | ||||
// Close releases the Kubernetes cluster provider, if one was initialized.
func (s *Server) Close() {
	if s.p != nil {
		s.p.Close()
	}
}
|  | ||||
| func NewTextResult(content string, err error) *mcp.CallToolResult { | ||||
| 	if err != nil { | ||||
| 		return &mcp.CallToolResult{ | ||||
| 			IsError: true, | ||||
| 			Content: []interface{}{ | ||||
| 			Content: []mcp.Content{ | ||||
| 				mcp.TextContent{ | ||||
| 					Type: "text", | ||||
| 					Text: err.Error(), | ||||
| @@ -42,7 +215,7 @@ func NewTextResult(content string, err error) *mcp.CallToolResult { | ||||
| 		} | ||||
| 	} | ||||
| 	return &mcp.CallToolResult{ | ||||
| 		Content: []interface{}{ | ||||
| 		Content: []mcp.Content{ | ||||
| 			mcp.TextContent{ | ||||
| 				Type: "text", | ||||
| 				Text: content, | ||||
| @@ -50,3 +223,45 @@ func NewTextResult(content string, err error) *mcp.CallToolResult { | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func contextFunc(ctx context.Context, r *http.Request) context.Context { | ||||
| 	// Get the standard Authorization header (OAuth compliant) | ||||
| 	authHeader := r.Header.Get(string(internalk8s.OAuthAuthorizationHeader)) | ||||
| 	if authHeader != "" { | ||||
| 		return context.WithValue(ctx, internalk8s.OAuthAuthorizationHeader, authHeader) | ||||
| 	} | ||||
|  | ||||
| 	// Fallback to custom header for backward compatibility | ||||
| 	customAuthHeader := r.Header.Get(string(internalk8s.CustomAuthorizationHeader)) | ||||
| 	if customAuthHeader != "" { | ||||
| 		return context.WithValue(ctx, internalk8s.OAuthAuthorizationHeader, customAuthHeader) | ||||
| 	} | ||||
|  | ||||
| 	return ctx | ||||
| } | ||||
|  | ||||
| func toolCallLoggingMiddleware(next server.ToolHandlerFunc) server.ToolHandlerFunc { | ||||
| 	return func(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 		klog.V(5).Infof("mcp tool call: %s(%v)", ctr.Params.Name, ctr.Params.Arguments) | ||||
| 		if ctr.Header != nil { | ||||
| 			buffer := bytes.NewBuffer(make([]byte, 0)) | ||||
| 			if err := ctr.Header.WriteSubset(buffer, map[string]bool{"Authorization": true, "authorization": true}); err == nil { | ||||
| 				klog.V(7).Infof("mcp tool call headers: %s", buffer) | ||||
| 			} | ||||
| 		} | ||||
| 		return next(ctx, ctr) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func toolScopedAuthorizationMiddleware(next server.ToolHandlerFunc) server.ToolHandlerFunc { | ||||
| 	return func(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 		scopes, ok := ctx.Value(TokenScopesContextKey).([]string) | ||||
| 		if !ok { | ||||
| 			return NewTextResult("", fmt.Errorf("authorization failed: Access denied: Tool '%s' requires scope 'mcp:%s' but no scope is available", ctr.Params.Name, ctr.Params.Name)), nil | ||||
| 		} | ||||
| 		if !slices.Contains(scopes, "mcp:"+ctr.Params.Name) && !slices.Contains(scopes, ctr.Params.Name) { | ||||
| 			return NewTextResult("", fmt.Errorf("authorization failed: Access denied: Tool '%s' requires scope 'mcp:%s' but only scopes %s are available", ctr.Params.Name, ctr.Params.Name, scopes)), nil | ||||
| 		} | ||||
| 		return next(ctx, ctr) | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -1,23 +1,127 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"context" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/mark3labs/mcp-go/client" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| ) | ||||
|  | ||||
| func TestTools(t *testing.T) { | ||||
| 	expectedNames := []string{"pods_list", "pods_list_in_namespace", "configuration_view"} | ||||
| 	t.Run("Has configuration_view tool", testCase(func(t *testing.T, c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		nameSet := make(map[string]bool) | ||||
| 		for _, tool := range tools.Tools { | ||||
| 			nameSet[tool.Name] = true | ||||
| 		} | ||||
| 		for _, name := range expectedNames { | ||||
| 			if nameSet[name] != true { | ||||
| 				t.Fatalf("tool name mismatch %v", err) | ||||
| 				return | ||||
| func TestWatchKubeConfig(t *testing.T) { | ||||
| 	if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { | ||||
| 		t.Skip("Skipping test on non-Unix-like platforms") | ||||
| 	} | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		// Given | ||||
| 		withTimeout, cancel := context.WithTimeout(c.ctx, 5*time.Second) | ||||
| 		defer cancel() | ||||
| 		var notification *mcp.JSONRPCNotification | ||||
| 		c.mcpClient.OnNotification(func(n mcp.JSONRPCNotification) { | ||||
| 			notification = &n | ||||
| 		}) | ||||
| 		// When | ||||
| 		f, _ := os.OpenFile(filepath.Join(c.tempDir, "config"), os.O_APPEND|os.O_WRONLY, 0644) | ||||
| 		_, _ = f.WriteString("\n") | ||||
| 		for notification == nil { | ||||
| 			select { | ||||
| 			case <-withTimeout.Done(): | ||||
| 			default: | ||||
| 				time.Sleep(100 * time.Millisecond) | ||||
| 			} | ||||
| 		} | ||||
| 	})) | ||||
| 		// Then | ||||
| 		t.Run("WatchKubeConfig notifies tools change", func(t *testing.T) { | ||||
| 			if notification == nil { | ||||
| 				t.Fatalf("WatchKubeConfig did not notify") | ||||
| 			} | ||||
| 			if notification.Method != "notifications/tools/list_changed" { | ||||
| 				t.Fatalf("WatchKubeConfig did not notify tools change, got %s", notification.Method) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestSseHeaders verifies that an Authorization header supplied by the MCP
// client (via the custom "kubernetes-authorization" header) is propagated to
// every Kube API request made through the Discovery, Dynamic, and typed
// kubernetes.Interface clients. A mock Kube API server records the headers
// seen on each request path.
func TestSseHeaders(t *testing.T) {
	mockServer := test.NewMockServer()
	defer mockServer.Close()
	before := func(c *mcpContext) {
		c.withKubeConfig(mockServer.Config())
		c.clientOptions = append(c.clientOptions, client.WithHeaders(map[string]string{"kubernetes-authorization": "Bearer a-token-from-mcp-client"}))
	}
	// pathHeaders captures, per request path, the headers the mock API saw.
	pathHeaders := make(map[string]http.Header, 0)
	mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		pathHeaders[req.URL.Path] = req.Header.Clone()
		// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-)
		if req.URL.Path == "/api" {
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["v1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`))
			return
		}
		// Request Performed by DiscoveryClient to Kube API (Get API Groups)
		if req.URL.Path == "/apis" {
			w.Header().Set("Content-Type", "application/json")
			//w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[{"name":"apps","versions":[{"groupVersion":"apps/v1","version":"v1"}],"preferredVersion":{"groupVersion":"apps/v1","version":"v1"}}]}`))
			_, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`))
			return
		}
		// Request Performed by DiscoveryClient to Kube API (Get API Resources)
		if req.URL.Path == "/api/v1" {
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"Pod","verbs":["get","list","watch","create","update","patch","delete"]}]}`))
			return
		}
		// Request Performed by DynamicClient
		if req.URL.Path == "/api/v1/namespaces/default/pods" {
			w.Header().Set("Content-Type", "application/json")
			_, _ = w.Write([]byte(`{"kind":"PodList","apiVersion":"v1","items":[]}`))
			return
		}
		// Request Performed by kubernetes.Interface
		if req.URL.Path == "/api/v1/namespaces/default/pods/a-pod-to-delete" {
			w.WriteHeader(200)
			return
		}
		w.WriteHeader(404)
	}))
	testCaseWithContext(t, &mcpContext{before: before}, func(c *mcpContext) {
		// pods_list exercises the discovery and dynamic client paths.
		_, _ = c.callTool("pods_list", map[string]interface{}{})
		t.Run("DiscoveryClient propagates headers to Kube API", func(t *testing.T) {
			if len(pathHeaders) == 0 {
				t.Fatalf("No requests were made to Kube API")
			}
			if pathHeaders["/api"] == nil || pathHeaders["/api"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
				t.Fatalf("Overridden header Authorization not found in request to /api")
			}
			if pathHeaders["/apis"] == nil || pathHeaders["/apis"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
				t.Fatalf("Overridden header Authorization not found in request to /apis")
			}
			if pathHeaders["/api/v1"] == nil || pathHeaders["/api/v1"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
				t.Fatalf("Overridden header Authorization not found in request to /api/v1")
			}
		})
		t.Run("DynamicClient propagates headers to Kube API", func(t *testing.T) {
			if len(pathHeaders) == 0 {
				t.Fatalf("No requests were made to Kube API")
			}
			if pathHeaders["/api/v1/namespaces/default/pods"] == nil || pathHeaders["/api/v1/namespaces/default/pods"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
				t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods")
			}
		})
		// pods_delete exercises the typed kubernetes.Interface path.
		_, _ = c.callTool("pods_delete", map[string]interface{}{"name": "a-pod-to-delete"})
		t.Run("kubernetes.Interface propagates headers to Kube API", func(t *testing.T) {
			if len(pathHeaders) == 0 {
				t.Fatalf("No requests were made to Kube API")
			}
			if pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"] == nil || pathHeaders["/api/v1/namespaces/default/pods/a-pod-to-delete"].Get("Authorization") != "Bearer a-token-from-mcp-client" {
				t.Fatalf("Overridden header Authorization not found in request to /api/v1/namespaces/default/pods/a-pod-to-delete")
			}
		})
	})
}
|   | ||||
							
								
								
									
										180
									
								
								pkg/mcp/mcp_tools_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										180
									
								
								pkg/mcp/mcp_tools_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,180 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/client/transport" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"k8s.io/utils/ptr" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| func TestUnrestricted(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		t.Run("ListTools returns tools", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call ListTools failed %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("Destructive tools ARE NOT read only", func(t *testing.T) { | ||||
| 			for _, tool := range tools.Tools { | ||||
| 				readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false) | ||||
| 				destructive := ptr.Deref(tool.Annotations.DestructiveHint, false) | ||||
| 				if readOnly && destructive { | ||||
| 					t.Errorf("Tool %s is read-only and destructive, which is not allowed", tool.Name) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestReadOnly(t *testing.T) { | ||||
| 	readOnlyServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{ReadOnly: true} } | ||||
| 	testCaseWithContext(t, &mcpContext{before: readOnlyServer}, func(c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		t.Run("ListTools returns tools", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call ListTools failed %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("ListTools returns only read-only tools", func(t *testing.T) { | ||||
| 			for _, tool := range tools.Tools { | ||||
| 				if tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint { | ||||
| 					t.Errorf("Tool %s is not read-only but should be", tool.Name) | ||||
| 				} | ||||
| 				if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { | ||||
| 					t.Errorf("Tool %s is destructive but should not be in read-only mode", tool.Name) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestDisableDestructive(t *testing.T) { | ||||
| 	disableDestructiveServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{DisableDestructive: true} } | ||||
| 	testCaseWithContext(t, &mcpContext{before: disableDestructiveServer}, func(c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		t.Run("ListTools returns tools", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call ListTools failed %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("ListTools does not return destructive tools", func(t *testing.T) { | ||||
| 			for _, tool := range tools.Tools { | ||||
| 				if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { | ||||
| 					t.Errorf("Tool %s is destructive but should not be", tool.Name) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestEnabledTools(t *testing.T) { | ||||
| 	enabledToolsServer := test.Must(config.ReadToml([]byte(` | ||||
| 		enabled_tools = [ "namespaces_list", "events_list" ] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: enabledToolsServer}, func(c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		t.Run("ListTools returns tools", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call ListTools failed %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("ListTools returns only explicitly enabled tools", func(t *testing.T) { | ||||
| 			if len(tools.Tools) != 2 { | ||||
| 				t.Fatalf("ListTools should return 2 tools, got %d", len(tools.Tools)) | ||||
| 			} | ||||
| 			for _, tool := range tools.Tools { | ||||
| 				if tool.Name != "namespaces_list" && tool.Name != "events_list" { | ||||
| 					t.Errorf("Tool %s is not enabled but should be", tool.Name) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestDisabledTools(t *testing.T) { | ||||
| 	testCaseWithContext(t, &mcpContext{ | ||||
| 		staticConfig: &config.StaticConfig{ | ||||
| 			DisabledTools: []string{"namespaces_list", "events_list"}, | ||||
| 		}, | ||||
| 	}, func(c *mcpContext) { | ||||
| 		tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) | ||||
| 		t.Run("ListTools returns tools", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call ListTools failed %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("ListTools does not return disabled tools", func(t *testing.T) { | ||||
| 			for _, tool := range tools.Tools { | ||||
| 				if tool.Name == "namespaces_list" || tool.Name == "events_list" { | ||||
| 					t.Errorf("Tool %s is not disabled but should be", tool.Name) | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
// TestToolCallLogging verifies the tool-call logging middleware: at log level
// 5 the tool name and arguments are logged; at level 7 request headers are
// logged with Authorization headers redacted (neither the header name nor its
// value may appear in the log buffer).
func TestToolCallLogging(t *testing.T) {
	testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) {
		_, _ = c.callTool("configuration_view", map[string]interface{}{
			"minified": false,
		})
		t.Run("Logs tool name", func(t *testing.T) {
			expectedLog := "mcp tool call: configuration_view("
			if !strings.Contains(c.logBuffer.String(), expectedLog) {
				t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String())
			}
		})
		t.Run("Logs tool call arguments", func(t *testing.T) {
			// Capture the argument portion between the parentheses of the log entry.
			expected := `"mcp tool call: configuration_view\((.+)\)"`
			m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String())
			if len(m) != 2 {
				t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String())
			}
			if m[1] != "map[minified:false]" {
				t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1])
			}
		})
	})
	// Second scenario: level 7 with both loggable and sensitive headers set.
	before := func(c *mcpContext) {
		c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{
			"Accept-Encoding":   "gzip",
			"Authorization":     "Bearer should-not-be-logged",
			"authorization":     "Bearer should-not-be-logged",
			"a-loggable-header": "should-be-logged",
		}))
	}
	testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) {
		_, _ = c.callTool("configuration_view", map[string]interface{}{
			"minified": false,
		})
		t.Run("Logs tool call headers", func(t *testing.T) {
			expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged"
			if !strings.Contains(c.logBuffer.String(), expectedLog) {
				t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String())
			}
		})
		sensitiveHeaders := []string{
			"Authorization:",
			// TODO: Add more sensitive headers as needed
		}
		t.Run("Does not log sensitive headers", func(t *testing.T) {
			for _, header := range sensitiveHeaders {
				if strings.Contains(c.logBuffer.String(), header) {
					t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String())
				}
			}
		})
		t.Run("Does not log sensitive header values", func(t *testing.T) {
			if strings.Contains(c.logBuffer.String(), "should-not-be-logged") {
				t.Errorf("Log should not contain sensitive header value 'should-not-be-logged', got: %s", c.logBuffer.String())
			}
		})
	})
}
							
								
								
									
										5
									
								
								pkg/mcp/modules.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								pkg/mcp/modules.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| package mcp | ||||
|  | ||||
| import _ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/config" | ||||
| import _ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/core" | ||||
| import _ "github.com/containers/kubernetes-mcp-server/pkg/toolsets/helm" | ||||
							
								
								
									
										175
									
								
								pkg/mcp/namespaces_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										175
									
								
								pkg/mcp/namespaces_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,175 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"slices" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	"github.com/stretchr/testify/suite" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/client-go/dynamic" | ||||
| 	"sigs.k8s.io/yaml" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
// NamespacesSuite groups the namespace-related MCP tool tests on top of the
// shared MCP test harness.
type NamespacesSuite struct {
	BaseMcpSuite
}
|  | ||||
// TestNamespacesList calls the namespaces_list tool and checks that the YAML
// result contains at least the default, ns-1, and ns-2 namespaces.
func (s *NamespacesSuite) TestNamespacesList() {
	s.InitMcpClient()
	s.Run("namespaces_list", func() {
		toolResult, err := s.CallTool("namespaces_list", map[string]interface{}{})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Require().NotNil(toolResult, "Expected tool result from call")
		// The tool returns YAML text; decode it into unstructured objects.
		var decoded []unstructured.Unstructured
		err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded)
		s.Run("has yaml content", func() {
			s.Nilf(err, "invalid tool result content %v", err)
		})
		s.Run("returns at least 3 items", func() {
			s.Truef(len(decoded) >= 3, "expected at least 3 items, got %v", len(decoded))
			for _, expectedNamespace := range []string{"default", "ns-1", "ns-2"} {
				s.Truef(slices.ContainsFunc(decoded, func(ns unstructured.Unstructured) bool {
					return ns.GetName() == expectedNamespace
				}), "namespace %s not found in the list", expectedNamespace)
			}
		})
	})
}
|  | ||||
// TestNamespacesListDenied configures a denied_resources rule for v1
// Namespace and verifies the namespaces_list tool fails with a descriptive
// "resource not allowed" error.
func (s *NamespacesSuite) TestNamespacesListDenied() {
	s.Require().NoError(toml.Unmarshal([]byte(`
		denied_resources = [ { version = "v1", kind = "Namespace" } ]
	`), s.Cfg), "Expected to parse denied resources config")
	s.InitMcpClient()
	s.Run("namespaces_list (denied)", func() {
		toolResult, err := s.CallTool("namespaces_list", map[string]interface{}{})
		s.Run("has error", func() {
			// Denial surfaces as a tool-level error result, not a call error.
			s.Truef(toolResult.IsError, "call tool should fail")
			s.Nilf(err, "call tool should not return error object")
		})
		s.Run("describes denial", func() {
			expectedMessage := "failed to list namespaces: resource not allowed: /v1, Kind=Namespace"
			s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text,
				"expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
		})
	})
}
|  | ||||
// TestNamespacesListAsTable sets list_output=table and verifies the
// namespaces_list tool renders column headers and one formatted row per
// namespace.
// NOTE(review): the `(?<name>...)` group syntax requires Go 1.22+ regexp
// (earlier versions only accept `(?P<name>...)`) — confirm the module's
// minimum Go version.
func (s *NamespacesSuite) TestNamespacesListAsTable() {
	s.Cfg.ListOutput = "table"
	s.InitMcpClient()
	s.Run("namespaces_list (list_output=table)", func() {
		toolResult, err := s.CallTool("namespaces_list", map[string]interface{}{})
		s.Run("no error", func() {
			s.Nilf(err, "call tool failed %v", err)
			s.Falsef(toolResult.IsError, "call tool failed")
		})
		s.Require().NotNil(toolResult, "Expected tool result from call")
		out := toolResult.Content[0].(mcp.TextContent).Text
		s.Run("returns column headers", func() {
			expectedHeaders := "APIVERSION\\s+KIND\\s+NAME\\s+STATUS\\s+AGE\\s+LABELS"
			m, e := regexp.MatchString(expectedHeaders, out)
			s.Truef(m, "Expected headers '%s' not found in output:\n%s", expectedHeaders, out)
			s.NoErrorf(e, "Error matching headers regex: %v", e)
		})
		s.Run("returns formatted row for ns-1", func() {
			// AGE is freshly-created, so it matches minutes and/or seconds only.
			expectedRow := "(?<apiVersion>v1)\\s+" +
				"(?<kind>Namespace)\\s+" +
				"(?<name>ns-1)\\s+" +
				"(?<status>Active)\\s+" +
				"(?<age>(\\d+m)?(\\d+s)?)\\s+" +
				"(?<labels>kubernetes.io/metadata.name=ns-1)"
			m, e := regexp.MatchString(expectedRow, out)
			s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, out)
			s.NoErrorf(e, "Error matching ns-1 regex: %v", e)
		})
		s.Run("returns formatted row for ns-2", func() {
			expectedRow := "(?<apiVersion>v1)\\s+" +
				"(?<kind>Namespace)\\s+" +
				"(?<name>ns-2)\\s+" +
				"(?<status>Active)\\s+" +
				"(?<age>(\\d+m)?(\\d+s)?)\\s+" +
				"(?<labels>kubernetes.io/metadata.name=ns-2)"
			m, e := regexp.MatchString(expectedRow, out)
			s.Truef(m, "Expected row '%s' not found in output:\n%s", expectedRow, out)
			s.NoErrorf(e, "Error matching ns-2 regex: %v", e)
		})
	})
}
|  | ||||
| func TestNamespaces(t *testing.T) { | ||||
| 	suite.Run(t, new(NamespacesSuite)) | ||||
| } | ||||
|  | ||||
| func TestProjectsListInOpenShift(t *testing.T) { | ||||
| 	testCaseWithContext(t, &mcpContext{before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { | ||||
| 		dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) | ||||
| 		_, _ = dynamicClient.Resource(schema.GroupVersionResource{Group: "project.openshift.io", Version: "v1", Resource: "projects"}). | ||||
| 			Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ | ||||
| 				"apiVersion": "project.openshift.io/v1", | ||||
| 				"kind":       "Project", | ||||
| 				"metadata": map[string]interface{}{ | ||||
| 					"name": "an-openshift-project", | ||||
| 				}, | ||||
| 			}}, metav1.CreateOptions{}) | ||||
| 		toolResult, err := c.callTool("projects_list", map[string]interface{}{}) | ||||
| 		t.Run("projects_list returns project list", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if toolResult.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 			} | ||||
| 		}) | ||||
| 		var decoded []unstructured.Unstructured | ||||
| 		err = yaml.Unmarshal([]byte(toolResult.Content[0].(mcp.TextContent).Text), &decoded) | ||||
| 		t.Run("projects_list has yaml content", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("invalid tool result content %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("projects_list returns at least 1 items", func(t *testing.T) { | ||||
| 			if len(decoded) < 1 { | ||||
| 				t.Errorf("invalid project count, expected at least 1, got %v", len(decoded)) | ||||
| 			} | ||||
| 			idx := slices.IndexFunc(decoded, func(ns unstructured.Unstructured) bool { | ||||
| 				return ns.GetName() == "an-openshift-project" | ||||
| 			}) | ||||
| 			if idx == -1 { | ||||
| 				t.Errorf("namespace %s not found in the list", "an-openshift-project") | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestProjectsListInOpenShiftDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ { group = "project.openshift.io", version = "v1" } ] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		projectsList, _ := c.callTool("projects_list", map[string]interface{}{}) | ||||
| 		t.Run("projects_list has error", func(t *testing.T) { | ||||
| 			if !projectsList.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("projects_list describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to list projects: resource not allowed: project.openshift.io/v1, Kind=Project" | ||||
| 			if projectsList.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, projectsList.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
| @@ -1,52 +0,0 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"github.com/manusa/kubernetes-mcp-server/pkg/kubernetes" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| ) | ||||
|  | ||||
| func (s *Sever) initPods() { | ||||
| 	s.server.AddTool(mcp.NewTool( | ||||
| 		"pods_list", | ||||
| 		mcp.WithDescription("List all the Kubernetes pods in the current cluster from all namespaces"), | ||||
| 	), podsListInAllNamespaces) | ||||
| 	s.server.AddTool(mcp.NewTool( | ||||
| 		"pods_list_in_namespace", | ||||
| 		mcp.WithDescription("List all the Kubernetes pods in the specified namespace in the current cluster"), | ||||
| 		mcp.WithString("namespace", | ||||
| 			mcp.Description("Namespace to list pods from"), | ||||
| 			mcp.Required(), | ||||
| 		), | ||||
| 	), podsListInNamespace) | ||||
| } | ||||
|  | ||||
| func podsListInAllNamespaces(ctx context.Context, _ mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 	k, err := kubernetes.NewKubernetes() | ||||
| 	if err != nil { | ||||
| 		return NewTextResult("", fmt.Errorf("failed to list pods in all namespaces: %v", err)), nil | ||||
| 	} | ||||
| 	ret, err := k.PodsListInAllNamespaces(ctx) | ||||
| 	if err != nil { | ||||
| 		return NewTextResult("", fmt.Errorf("failed to list pods in all namespaces: %v", err)), nil | ||||
| 	} | ||||
| 	return NewTextResult(ret, err), nil | ||||
| } | ||||
|  | ||||
| func podsListInNamespace(ctx context.Context, ctr mcp.CallToolRequest) (*mcp.CallToolResult, error) { | ||||
| 	k, err := kubernetes.NewKubernetes() | ||||
| 	if err != nil { | ||||
| 		return NewTextResult("", fmt.Errorf("failed to list pods in namespace: %v", err)), nil | ||||
| 	} | ||||
| 	ns := ctr.Params.Arguments["namespace"] | ||||
| 	if ns == nil { | ||||
| 		return NewTextResult("", errors.New("failed to list pods in namespace, missing argument namespace")), nil | ||||
| 	} | ||||
| 	ret, err := k.PodsListInNamespace(ctx, ns.(string)) | ||||
| 	if err != nil { | ||||
| 		return NewTextResult("", fmt.Errorf("failed to list pods in namespace %s: %v", ns, err)), nil | ||||
| 	} | ||||
| 	return NewTextResult(ret, err), nil | ||||
| } | ||||
							
								
								
									
										131
									
								
								pkg/mcp/pods_exec_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										131
									
								
								pkg/mcp/pods_exec_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,131 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| func TestPodsExec(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		mockServer := test.NewMockServer() | ||||
| 		defer mockServer.Close() | ||||
| 		c.withKubeConfig(mockServer.Config()) | ||||
| 		mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 			if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" { | ||||
| 				return | ||||
| 			} | ||||
| 			var stdin, stdout bytes.Buffer | ||||
| 			ctx, err := test.CreateHTTPStreams(w, req, &test.StreamOptions{ | ||||
| 				Stdin:  &stdin, | ||||
| 				Stdout: &stdout, | ||||
| 			}) | ||||
| 			if err != nil { | ||||
| 				w.WriteHeader(http.StatusInternalServerError) | ||||
| 				_, _ = w.Write([]byte(err.Error())) | ||||
| 				return | ||||
| 			} | ||||
| 			defer func(conn io.Closer) { _ = conn.Close() }(ctx.Closer) | ||||
| 			_, _ = io.WriteString(ctx.StdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n") | ||||
| 			_, _ = io.WriteString(ctx.StdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n") | ||||
| 		})) | ||||
| 		mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 			if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" { | ||||
| 				return | ||||
| 			} | ||||
| 			test.WriteObject(w, &v1.Pod{ | ||||
| 				ObjectMeta: metav1.ObjectMeta{ | ||||
| 					Namespace: "default", | ||||
| 					Name:      "pod-to-exec", | ||||
| 				}, | ||||
| 				Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}}, | ||||
| 			}) | ||||
| 		})) | ||||
| 		podsExecNilNamespace, err := c.callTool("pods_exec", map[string]interface{}{ | ||||
| 			"name":    "pod-to-exec", | ||||
| 			"command": []interface{}{"ls", "-l"}, | ||||
| 		}) | ||||
| 		t.Run("pods_exec with name and nil namespace returns command output", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if podsExecNilNamespace.IsError { | ||||
| 				t.Fatalf("call tool failed: %v", podsExecNilNamespace.Content) | ||||
| 			} | ||||
| 			if !strings.Contains(podsExecNilNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { | ||||
| 				t.Errorf("unexpected result %v", podsExecNilNamespace.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsExecInNamespace, err := c.callTool("pods_exec", map[string]interface{}{ | ||||
| 			"namespace": "default", | ||||
| 			"name":      "pod-to-exec", | ||||
| 			"command":   []interface{}{"ls", "-l"}, | ||||
| 		}) | ||||
| 		t.Run("pods_exec with name and namespace returns command output", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if podsExecInNamespace.IsError { | ||||
| 				t.Fatalf("call tool failed: %v", podsExecInNamespace.Content) | ||||
| 			} | ||||
| 			if !strings.Contains(podsExecInNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { | ||||
| 				t.Errorf("unexpected result %v", podsExecInNamespace.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsExecInNamespaceAndContainer, err := c.callTool("pods_exec", map[string]interface{}{ | ||||
| 			"namespace": "default", | ||||
| 			"name":      "pod-to-exec", | ||||
| 			"command":   []interface{}{"ls", "-l"}, | ||||
| 			"container": "a-specific-container", | ||||
| 		}) | ||||
| 		t.Run("pods_exec with name, namespace, and container returns command output", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if podsExecInNamespaceAndContainer.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 			} | ||||
| 			if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { | ||||
| 				t.Errorf("unexpected result %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 			if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "container:a-specific-container\n") { | ||||
| 				t.Errorf("expected container name not found %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestPodsExecDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ { version = "v1", kind = "Pod" } ] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		podsRun, _ := c.callTool("pods_exec", map[string]interface{}{ | ||||
| 			"namespace": "default", | ||||
| 			"name":      "pod-to-exec", | ||||
| 			"command":   []interface{}{"ls", "-l"}, | ||||
| 			"container": "a-specific-container", | ||||
| 		}) | ||||
| 		t.Run("pods_exec has error", func(t *testing.T) { | ||||
| 			if !podsRun.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("pods_exec describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to exec in pod pod-to-exec in namespace default: resource not allowed: /v1, Kind=Pod" | ||||
| 			if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
							
								
								
									
										1172
									
								
								pkg/mcp/pods_test.go
									
									
									
									
									
								
							
							
						
						
									
										1172
									
								
								pkg/mcp/pods_test.go
									
									
									
									
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										251
									
								
								pkg/mcp/pods_top_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										251
									
								
								pkg/mcp/pods_top_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,251 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"regexp" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| ) | ||||
|  | ||||
| func TestPodsTopMetricsUnavailable(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		mockServer := test.NewMockServer() | ||||
| 		defer mockServer.Close() | ||||
| 		c.withKubeConfig(mockServer.Config()) | ||||
| 		mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 			w.Header().Set("Content-Type", "application/json") | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) | ||||
| 			if req.URL.Path == "/api" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups) | ||||
| 			if req.URL.Path == "/apis" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 		})) | ||||
| 		podsTopMetricsApiUnavailable, err := c.callTool("pods_top", map[string]interface{}{}) | ||||
| 		t.Run("pods_top with metrics API not available", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if !podsTopMetricsApiUnavailable.IsError { | ||||
| 				t.Errorf("call tool should have returned an error") | ||||
| 			} | ||||
| 			if podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text != "failed to get pods top: metrics API is not available" { | ||||
| 				t.Errorf("call tool returned unexpected content: %s", podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestPodsTopMetricsAvailable(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		mockServer := test.NewMockServer() | ||||
| 		defer mockServer.Close() | ||||
| 		c.withKubeConfig(mockServer.Config()) | ||||
| 		mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 			println("Request received:", req.Method, req.URL.Path) // TODO: REMOVE LINE | ||||
| 			w.Header().Set("Content-Type", "application/json") | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) | ||||
| 			if req.URL.Path == "/api" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups) | ||||
| 			if req.URL.Path == "/apis" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Resources) | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Pod Metrics from all namespaces | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" { | ||||
| 				if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" { | ||||
| 					_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + | ||||
| 						`{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi","swap":"42Mi"}}]}` + | ||||
| 						`]}`)) | ||||
| 				} else { | ||||
| 					_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + | ||||
| 						`{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi","swap":"37Mi"}}]},` + | ||||
| 						`{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi","swap":"42Mi"}}]}` + | ||||
| 						`]}`)) | ||||
|  | ||||
| 				} | ||||
| 				return | ||||
| 			} | ||||
| 			// Pod Metrics from configured namespace | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + | ||||
| 					`{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi","swap":"37Mi"}}]}` + | ||||
| 					`]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Pod Metrics from ns-5 namespace | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + | ||||
| 					`{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"42Mi"}}]}` + | ||||
| 					`]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` + | ||||
| 					`"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` + | ||||
| 					`"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi","swap":"42Mi"}}]` + | ||||
| 					`}`)) | ||||
| 			} | ||||
| 		})) | ||||
| 		podsTopDefaults, err := c.callTool("pods_top", map[string]interface{}{}) | ||||
| 		t.Run("pods_top defaults returns pod metrics from all namespaces", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			textContent := podsTopDefaults.Content[0].(mcp.TextContent).Text | ||||
| 			if podsTopDefaults.IsError { | ||||
| 				t.Fatalf("call tool failed %s", textContent) | ||||
| 			} | ||||
| 			expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s+SWAP\(bytes\)\s*$`) | ||||
| 			if !expectedHeaders.MatchString(textContent) { | ||||
| 				t.Errorf("Expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent) | ||||
| 			} | ||||
| 			expectedRows := []string{ | ||||
| 				"default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi\\s+13Mi", | ||||
| 				"default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi\\s+37Mi", | ||||
| 				"ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi\\s+42Mi", | ||||
| 			} | ||||
| 			for _, row := range expectedRows { | ||||
| 				if !regexp.MustCompile(row).MatchString(textContent) { | ||||
| 					t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) | ||||
| 				} | ||||
| 			} | ||||
| 			expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s+92Mi\s*$`) | ||||
| 			if !expectedTotal.MatchString(textContent) { | ||||
| 				t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsTopConfiguredNamespace, err := c.callTool("pods_top", map[string]interface{}{ | ||||
| 			"all_namespaces": false, | ||||
| 		}) | ||||
| 		t.Run("pods_top[allNamespaces=false] returns pod metrics from configured namespace", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			textContent := podsTopConfiguredNamespace.Content[0].(mcp.TextContent).Text | ||||
| 			expectedRows := []string{ | ||||
| 				"default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi\\s+13Mi", | ||||
| 				"default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi\\s+37Mi", | ||||
| 			} | ||||
| 			for _, row := range expectedRows { | ||||
| 				if !regexp.MustCompile(row).MatchString(textContent) { | ||||
| 					t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) | ||||
| 				} | ||||
| 			} | ||||
| 			expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s+50Mi\s*$`) | ||||
| 			if !expectedTotal.MatchString(textContent) { | ||||
| 				t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsTopNamespace, err := c.callTool("pods_top", map[string]interface{}{ | ||||
| 			"namespace": "ns-5", | ||||
| 		}) | ||||
| 		t.Run("pods_top[namespace=ns-5] returns pod metrics from provided namespace", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			textContent := podsTopNamespace.Content[0].(mcp.TextContent).Text | ||||
| 			expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi\s+42Mi`) | ||||
| 			if !expectedRow.MatchString(textContent) { | ||||
| 				t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) | ||||
| 			} | ||||
| 			expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s+42Mi\s*$`) | ||||
| 			if !expectedTotal.MatchString(textContent) { | ||||
| 				t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsTopNamespaceName, err := c.callTool("pods_top", map[string]interface{}{ | ||||
| 			"namespace": "ns-5", | ||||
| 			"name":      "pod-ns-5-5", | ||||
| 		}) | ||||
| 		t.Run("pods_top[namespace=ns-5,name=pod-ns-5-5] returns pod metrics from provided namespace and name", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			textContent := podsTopNamespaceName.Content[0].(mcp.TextContent).Text | ||||
| 			expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi\s+42Mi`) | ||||
| 			if !expectedRow.MatchString(textContent) { | ||||
| 				t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) | ||||
| 			} | ||||
| 			expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s+42Mi\s*$`) | ||||
| 			if !expectedTotal.MatchString(textContent) { | ||||
| 				t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) | ||||
| 			} | ||||
| 		}) | ||||
| 		podsTopNamespaceLabelSelector, err := c.callTool("pods_top", map[string]interface{}{ | ||||
| 			"label_selector": "app=pod-ns-5-42", | ||||
| 		}) | ||||
| 		t.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			textContent := podsTopNamespaceLabelSelector.Content[0].(mcp.TextContent).Text | ||||
| 			expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`) | ||||
| 			if !expectedRow.MatchString(textContent) { | ||||
| 				t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) | ||||
| 			} | ||||
| 			expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s+42Mi\s*$`) | ||||
| 			if !expectedTotal.MatchString(textContent) { | ||||
| 				t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestPodsTopDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ { group = "metrics.k8s.io", version = "v1beta1" } ] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		mockServer := test.NewMockServer() | ||||
| 		defer mockServer.Close() | ||||
| 		c.withKubeConfig(mockServer.Config()) | ||||
| 		mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { | ||||
| 			w.Header().Set("Content-Type", "application/json") | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) | ||||
| 			if req.URL.Path == "/api" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Groups) | ||||
| 			if req.URL.Path == "/apis" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 			// Request Performed by DiscoveryClient to Kube API (Get API Resources) | ||||
| 			if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { | ||||
| 				_, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) | ||||
| 				return | ||||
| 			} | ||||
| 		})) | ||||
| 		podsTop, _ := c.callTool("pods_top", map[string]interface{}{}) | ||||
| 		t.Run("pods_run has error", func(t *testing.T) { | ||||
| 			if !podsTop.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("pods_run describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to get pods top: resource not allowed: metrics.k8s.io/v1beta1, Kind=PodMetrics" | ||||
| 			if podsTop.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsTop.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
							
								
								
									
										792
									
								
								pkg/mcp/resources_test.go
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										792
									
								
								pkg/mcp/resources_test.go
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,792 @@ | ||||
| package mcp | ||||
|  | ||||
| import ( | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/mark3labs/mcp-go/mcp" | ||||
| 	corev1 "k8s.io/api/core/v1" | ||||
| 	v1 "k8s.io/api/rbac/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||||
| 	"k8s.io/apimachinery/pkg/runtime/schema" | ||||
| 	"k8s.io/client-go/dynamic" | ||||
| 	"sigs.k8s.io/yaml" | ||||
|  | ||||
| 	"github.com/containers/kubernetes-mcp-server/internal/test" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/config" | ||||
| 	"github.com/containers/kubernetes-mcp-server/pkg/output" | ||||
| ) | ||||
|  | ||||
| func TestResourcesList(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		t.Run("resources_list with missing apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_list", map[string]interface{}{}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list with missing kind returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, missing argument kind" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list with invalid apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to list resources, invalid argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list with nonexistent apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != `failed to list resources: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		namespaces, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) | ||||
| 		t.Run("resources_list returns namespaces", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if namespaces.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		var decodedNamespaces []unstructured.Unstructured | ||||
| 		err = yaml.Unmarshal([]byte(namespaces.Content[0].(mcp.TextContent).Text), &decodedNamespaces) | ||||
| 		t.Run("resources_list has yaml content", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("invalid tool result content %v", err) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list returns more than 2 items", func(t *testing.T) { | ||||
| 			if len(decodedNamespaces) < 3 { | ||||
| 				t.Fatalf("invalid namespace count, expected >2, got %v", len(decodedNamespaces)) | ||||
| 			} | ||||
| 		}) | ||||
|  | ||||
| 		// Test label selector functionality | ||||
| 		t.Run("resources_list with label selector returns filtered pods", func(t *testing.T) { | ||||
|  | ||||
| 			// List pods with label selector | ||||
| 			result, err := c.callTool("resources_list", map[string]interface{}{ | ||||
| 				"apiVersion":    "v1", | ||||
| 				"kind":          "Pod", | ||||
| 				"namespace":     "default", | ||||
| 				"labelSelector": "app=nginx", | ||||
| 			}) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if result.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			var decodedPods []unstructured.Unstructured | ||||
| 			err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("invalid tool result content %v", err) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			// Verify only the pod with matching label is returned | ||||
| 			if len(decodedPods) != 1 { | ||||
| 				t.Fatalf("expected 1 pod, got %d", len(decodedPods)) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			if decodedPods[0].GetName() != "a-pod-in-default" { | ||||
| 				t.Fatalf("expected pod-with-label, got %s", decodedPods[0].GetName()) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			// Test that multiple label selectors work | ||||
| 			result, err = c.callTool("resources_list", map[string]interface{}{ | ||||
| 				"apiVersion":    "v1", | ||||
| 				"kind":          "Pod", | ||||
| 				"namespace":     "default", | ||||
| 				"labelSelector": "test-label=test-value,another=value", | ||||
| 			}) | ||||
|  | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if result.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			err = yaml.Unmarshal([]byte(result.Content[0].(mcp.TextContent).Text), &decodedPods) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("invalid tool result content %v", err) | ||||
| 				return | ||||
| 			} | ||||
|  | ||||
| 			// Verify no pods match multiple label selector | ||||
| 			if len(decodedPods) != 0 { | ||||
| 				t.Fatalf("expected 0 pods, got %d", len(decodedPods)) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesListDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ | ||||
| 			{ version = "v1", kind = "Secret" }, | ||||
| 			{ group = "rbac.authorization.k8s.io", version = "v1" } | ||||
| 		] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		deniedByKind, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Secret"}) | ||||
| 		t.Run("resources_list (denied by kind) has error", func(t *testing.T) { | ||||
| 			if !deniedByKind.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list (denied by kind) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to list resources: resource not allowed: /v1, Kind=Secret" | ||||
| 			if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		deniedByGroup, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role"}) | ||||
| 		t.Run("resources_list (denied by group) has error", func(t *testing.T) { | ||||
| 			if !deniedByGroup.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list (denied by group) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to list resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" | ||||
| 			if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		allowedResource, _ := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) | ||||
| 		t.Run("resources_list (not denied) returns list", func(t *testing.T) { | ||||
| 			if allowedResource.IsError { | ||||
| 				t.Fatalf("call tool should not fail") | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesListAsTable(t *testing.T) { | ||||
| 	testCaseWithContext(t, &mcpContext{listOutput: output.Table, before: inOpenShift, after: inOpenShiftClear}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		kc := c.newKubernetesClient() | ||||
| 		_, _ = kc.CoreV1().ConfigMaps("default").Create(t.Context(), &corev1.ConfigMap{ | ||||
| 			ObjectMeta: metav1.ObjectMeta{Name: "a-configmap-to-list-as-table", Labels: map[string]string{"resource": "config-map"}}, | ||||
| 			Data:       map[string]string{"key": "value"}, | ||||
| 		}, metav1.CreateOptions{}) | ||||
| 		configMapList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap"}) | ||||
| 		t.Run("resources_list returns ConfigMap list", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if configMapList.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 			} | ||||
| 		}) | ||||
| 		outConfigMapList := configMapList.Content[0].(mcp.TextContent).Text | ||||
| 		t.Run("resources_list returns column headers for ConfigMap list", func(t *testing.T) { | ||||
| 			expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+DATA\\s+AGE\\s+LABELS" | ||||
| 			if m, e := regexp.MatchString(expectedHeaders, outConfigMapList); !m || e != nil { | ||||
| 				t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outConfigMapList) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list returns formatted row for a-configmap-to-list-as-table", func(t *testing.T) { | ||||
| 			expectedRow := "(?<namespace>default)\\s+" + | ||||
| 				"(?<apiVersion>v1)\\s+" + | ||||
| 				"(?<kind>ConfigMap)\\s+" + | ||||
| 				"(?<name>a-configmap-to-list-as-table)\\s+" + | ||||
| 				"(?<data>1)\\s+" + | ||||
| 				"(?<age>(\\d+m)?(\\d+s)?)\\s+" + | ||||
| 				"(?<labels>resource=config-map)" | ||||
| 			if m, e := regexp.MatchString(expectedRow, outConfigMapList); !m || e != nil { | ||||
| 				t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outConfigMapList) | ||||
| 			} | ||||
| 		}) | ||||
| 		// Custom Resource List | ||||
| 		_, _ = dynamic.NewForConfigOrDie(envTestRestConfig). | ||||
| 			Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}). | ||||
| 			Namespace("default"). | ||||
| 			Create(c.ctx, &unstructured.Unstructured{Object: map[string]interface{}{ | ||||
| 				"apiVersion": "route.openshift.io/v1", | ||||
| 				"kind":       "Route", | ||||
| 				"metadata": map[string]interface{}{ | ||||
| 					"name": "an-openshift-route-to-list-as-table", | ||||
| 				}, | ||||
| 			}}, metav1.CreateOptions{}) | ||||
| 		routeList, err := c.callTool("resources_list", map[string]interface{}{"apiVersion": "route.openshift.io/v1", "kind": "Route"}) | ||||
| 		t.Run("resources_list returns Route list", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 			} | ||||
| 			if routeList.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 			} | ||||
| 		}) | ||||
| 		outRouteList := routeList.Content[0].(mcp.TextContent).Text | ||||
| 		t.Run("resources_list returns column headers for Route list", func(t *testing.T) { | ||||
| 			expectedHeaders := "NAMESPACE\\s+APIVERSION\\s+KIND\\s+NAME\\s+AGE\\s+LABELS" | ||||
| 			if m, e := regexp.MatchString(expectedHeaders, outRouteList); !m || e != nil { | ||||
| 				t.Fatalf("Expected headers '%s' not found in output:\n%s", expectedHeaders, outRouteList) | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_list returns formatted row for an-openshift-route-to-list-as-table", func(t *testing.T) { | ||||
| 			expectedRow := "(?<namespace>default)\\s+" + | ||||
| 				"(?<apiVersion>route.openshift.io/v1)\\s+" + | ||||
| 				"(?<kind>Route)\\s+" + | ||||
| 				"(?<name>an-openshift-route-to-list-as-table)\\s+" + | ||||
| 				"(?<age>(\\d+m)?(\\d+s)?)\\s+" + | ||||
| 				"(?<labels><none>)" | ||||
| 			if m, e := regexp.MatchString(expectedRow, outRouteList); !m || e != nil { | ||||
| 				t.Fatalf("Expected row '%s' not found in output:\n%s", expectedRow, outRouteList) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesGet(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		t.Run("resources_get with missing apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_get", map[string]interface{}{}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get with missing kind returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument kind" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get with invalid apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, invalid argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get with nonexistent apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != `failed to get resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get with missing name returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to get resource, missing argument name" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		namespace, err := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) | ||||
| 		t.Run("resources_get returns namespace", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if namespace.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		var decodedNamespace unstructured.Unstructured | ||||
| 		err = yaml.Unmarshal([]byte(namespace.Content[0].(mcp.TextContent).Text), &decodedNamespace) | ||||
| 		t.Run("resources_get has yaml content", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("invalid tool result content %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get returns default namespace", func(t *testing.T) { | ||||
| 			if decodedNamespace.GetName() != "default" { | ||||
| 				t.Fatalf("invalid namespace name, expected default, got %v", decodedNamespace.GetName()) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesGetDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ | ||||
| 			{ version = "v1", kind = "Secret" }, | ||||
| 			{ group = "rbac.authorization.k8s.io", version = "v1" } | ||||
| 		] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		kc := c.newKubernetesClient() | ||||
| 		_, _ = kc.CoreV1().Secrets("default").Create(c.ctx, &corev1.Secret{ | ||||
| 			ObjectMeta: metav1.ObjectMeta{Name: "denied-secret"}, | ||||
| 		}, metav1.CreateOptions{}) | ||||
| 		_, _ = kc.RbacV1().Roles("default").Create(c.ctx, &v1.Role{ | ||||
| 			ObjectMeta: metav1.ObjectMeta{Name: "denied-role"}, | ||||
| 		}, metav1.CreateOptions{}) | ||||
| 		deniedByKind, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) | ||||
| 		t.Run("resources_get (denied by kind) has error", func(t *testing.T) { | ||||
| 			if !deniedByKind.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get (denied by kind) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to get resource: resource not allowed: /v1, Kind=Secret" | ||||
| 			if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		deniedByGroup, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) | ||||
| 		t.Run("resources_get (denied by group) has error", func(t *testing.T) { | ||||
| 			if !deniedByGroup.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_get (denied by group) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to get resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" | ||||
| 			if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		allowedResource, _ := c.callTool("resources_get", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "default"}) | ||||
| 		t.Run("resources_get (not denied) returns resource", func(t *testing.T) { | ||||
| 			if allowedResource.IsError { | ||||
| 				t.Fatalf("call tool should not fail") | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesCreateOrUpdate(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		t.Run("resources_create_or_update with nil resource returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{}) | ||||
| 			if toolResult.IsError != true { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with empty resource returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": ""}) | ||||
| 			if toolResult.IsError != true { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to create or update resources, missing argument resource" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		client := c.newKubernetesClient() | ||||
| 		configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: a-cm-created-or-updated\n  namespace: default\n" | ||||
| 		resourcesCreateOrUpdateCm1, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced yaml resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesCreateOrUpdateCm1.IsError { | ||||
| 				t.Errorf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		var decodedCreateOrUpdateCm1 []unstructured.Unstructured | ||||
| 		err = yaml.Unmarshal([]byte(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text), &decodedCreateOrUpdateCm1) | ||||
| 		t.Run("resources_create_or_update with valid namespaced yaml resource returns yaml content", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Errorf("invalid tool result content %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if !strings.HasPrefix(resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text, "# The following resources (YAML) have been created or updated successfully") { | ||||
| 				t.Errorf("Excpected success message, got %v", resourcesCreateOrUpdateCm1.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 			if len(decodedCreateOrUpdateCm1) != 1 { | ||||
| 				t.Errorf("invalid resource count, expected 1, got %v", len(decodedCreateOrUpdateCm1)) | ||||
| 				return | ||||
| 			} | ||||
| 			if decodedCreateOrUpdateCm1[0].GetName() != "a-cm-created-or-updated" { | ||||
| 				t.Errorf("invalid resource name, expected a-cm-created-or-updated, got %v", decodedCreateOrUpdateCm1[0].GetName()) | ||||
| 				return | ||||
| 			} | ||||
| 			if decodedCreateOrUpdateCm1[0].GetUID() == "" { | ||||
| 				t.Errorf("invalid uid, got %v", decodedCreateOrUpdateCm1[0].GetUID()) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced yaml resource creates ConfigMap", func(t *testing.T) { | ||||
| 			cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated", metav1.GetOptions{}) | ||||
| 			if cm == nil { | ||||
| 				t.Fatalf("ConfigMap not found") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		configMapJson := "{\"apiVersion\": \"v1\", \"kind\": \"ConfigMap\", \"metadata\": {\"name\": \"a-cm-created-or-updated-2\", \"namespace\": \"default\"}}" | ||||
| 		resourcesCreateOrUpdateCm2, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapJson}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesCreateOrUpdateCm2.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource creates config map", func(t *testing.T) { | ||||
| 			cm, _ := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-cm-created-or-updated-2", metav1.GetOptions{}) | ||||
| 			if cm == nil { | ||||
| 				t.Fatalf("ConfigMap not found") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		customResourceDefinitionJson := ` | ||||
|           { | ||||
|             "apiVersion": "apiextensions.k8s.io/v1", | ||||
|             "kind": "CustomResourceDefinition", | ||||
|             "metadata": {"name": "customs.example.com"}, | ||||
|             "spec": { | ||||
|               "group": "example.com", | ||||
|               "versions": [{ | ||||
|                 "name": "v1","served": true,"storage": true, | ||||
|                 "schema": {"openAPIV3Schema": {"type": "object"}} | ||||
|               }], | ||||
|               "scope": "Namespaced", | ||||
|               "names": {"plural": "customs","singular": "custom","kind": "Custom"} | ||||
|             } | ||||
|           }` | ||||
| 		resourcesCreateOrUpdateCrd, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customResourceDefinitionJson}) | ||||
| 		t.Run("resources_create_or_update with valid cluster-scoped json resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesCreateOrUpdateCrd.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with valid cluster-scoped json resource creates custom resource definition", func(t *testing.T) { | ||||
| 			apiExtensionsV1Client := c.newApiExtensionsClient() | ||||
| 			_, err = apiExtensionsV1Client.CustomResourceDefinitions().Get(c.ctx, "customs.example.com", metav1.GetOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("custom resource definition not found") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		c.crdWaitUntilReady("customs.example.com") | ||||
| 		customJson := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\"}}" | ||||
| 		resourcesCreateOrUpdateCustom, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJson}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesCreateOrUpdateCustom.IsError { | ||||
| 				t.Fatalf("call tool failed, got: %v", resourcesCreateOrUpdateCustom.Content) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource creates custom resource", func(t *testing.T) { | ||||
| 			dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) | ||||
| 			_, err = dynamicClient. | ||||
| 				Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). | ||||
| 				Namespace("default"). | ||||
| 				Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("custom resource not found") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		customJsonUpdated := "{\"apiVersion\": \"example.com/v1\", \"kind\": \"Custom\", \"metadata\": {\"name\": \"a-custom-resource\",\"annotations\": {\"updated\": \"true\"}}}" | ||||
| 		resourcesCreateOrUpdateCustomUpdated, err := c.callTool("resources_create_or_update", map[string]interface{}{"resource": customJsonUpdated}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesCreateOrUpdateCustomUpdated.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update with valid namespaced json resource updates custom resource", func(t *testing.T) { | ||||
| 			dynamicClient := dynamic.NewForConfigOrDie(envTestRestConfig) | ||||
| 			customResource, _ := dynamicClient. | ||||
| 				Resource(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "customs"}). | ||||
| 				Namespace("default"). | ||||
| 				Get(c.ctx, "a-custom-resource", metav1.GetOptions{}) | ||||
| 			if customResource == nil { | ||||
| 				t.Fatalf("custom resource not found") | ||||
| 				return | ||||
| 			} | ||||
| 			annotations := customResource.GetAnnotations() | ||||
| 			if annotations == nil || annotations["updated"] != "true" { | ||||
| 				t.Fatalf("custom resource not updated") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesCreateOrUpdateDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ | ||||
| 			{ version = "v1", kind = "Secret" }, | ||||
| 			{ group = "rbac.authorization.k8s.io", version = "v1" } | ||||
| 		] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		secretYaml := "apiVersion: v1\nkind: Secret\nmetadata:\n  name: a-denied-secret\n  namespace: default\n" | ||||
| 		deniedByKind, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": secretYaml}) | ||||
| 		t.Run("resources_create_or_update (denied by kind) has error", func(t *testing.T) { | ||||
| 			if !deniedByKind.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update (denied by kind) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to create or update resources: resource not allowed: /v1, Kind=Secret" | ||||
| 			if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		roleYaml := "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: a-denied-role\n  namespace: default\n" | ||||
| 		deniedByGroup, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": roleYaml}) | ||||
| 		t.Run("resources_create_or_update (denied by group) has error", func(t *testing.T) { | ||||
| 			if !deniedByGroup.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_create_or_update (denied by group) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to create or update resources: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" | ||||
| 			if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		configMapYaml := "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: a-cm-created-or-updated\n  namespace: default\n" | ||||
| 		allowedResource, _ := c.callTool("resources_create_or_update", map[string]interface{}{"resource": configMapYaml}) | ||||
| 		t.Run("resources_create_or_update (not denied) creates or updates resource", func(t *testing.T) { | ||||
| 			if allowedResource.IsError { | ||||
| 				t.Fatalf("call tool should not fail") | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesDelete(t *testing.T) { | ||||
| 	testCase(t, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		t.Run("resources_delete with missing apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with missing kind returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument kind" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with invalid apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "invalid/api/version", "kind": "Pod", "name": "a-pod"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, invalid argument apiVersion" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with nonexistent apiVersion returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "custom.non.existent.example.com/v1", "kind": "Custom", "name": "a-custom"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: no matches for kind "Custom" in version "custom.non.existent.example.com/v1"` { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with missing name returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != "failed to delete resource, missing argument name" { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with nonexistent resource returns error", func(t *testing.T) { | ||||
| 			toolResult, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "nonexistent-configmap"}) | ||||
| 			if !toolResult.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 				return | ||||
| 			} | ||||
| 			if toolResult.Content[0].(mcp.TextContent).Text != `failed to delete resource: configmaps "nonexistent-configmap" not found` { | ||||
| 				t.Fatalf("invalid error message, got %v", toolResult.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		resourcesDeleteCm, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "a-configmap-to-delete"}) | ||||
| 		t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesDeleteCm.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesDeleteCm.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { | ||||
| 				t.Fatalf("invalid tool result content got: %v", resourcesDeleteCm.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		client := c.newKubernetesClient() | ||||
| 		t.Run("resources_delete with valid namespaced resource deletes ConfigMap", func(t *testing.T) { | ||||
| 			_, err := client.CoreV1().ConfigMaps("default").Get(c.ctx, "a-configmap-to-delete", metav1.GetOptions{}) | ||||
| 			if err == nil { | ||||
| 				t.Fatalf("ConfigMap not deleted") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		resourcesDeleteNamespace, err := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Namespace", "name": "ns-to-delete"}) | ||||
| 		t.Run("resources_delete with valid namespaced resource returns success", func(t *testing.T) { | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("call tool failed %v", err) | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesDeleteNamespace.IsError { | ||||
| 				t.Fatalf("call tool failed") | ||||
| 				return | ||||
| 			} | ||||
| 			if resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text != "Resource deleted successfully" { | ||||
| 				t.Fatalf("invalid tool result content got: %v", resourcesDeleteNamespace.Content[0].(mcp.TextContent).Text) | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete with valid namespaced resource deletes Namespace", func(t *testing.T) { | ||||
| 			ns, err := client.CoreV1().Namespaces().Get(c.ctx, "ns-to-delete", metav1.GetOptions{}) | ||||
| 			if err == nil && ns != nil && ns.DeletionTimestamp == nil { | ||||
| 				t.Fatalf("Namespace not deleted") | ||||
| 				return | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func TestResourcesDeleteDenied(t *testing.T) { | ||||
| 	deniedResourcesServer := test.Must(config.ReadToml([]byte(` | ||||
| 		denied_resources = [ | ||||
| 			{ version = "v1", kind = "Secret" }, | ||||
| 			{ group = "rbac.authorization.k8s.io", version = "v1" } | ||||
| 		] | ||||
| 	`))) | ||||
| 	testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { | ||||
| 		c.withEnvTest() | ||||
| 		kc := c.newKubernetesClient() | ||||
| 		_, _ = kc.CoreV1().ConfigMaps("default").Create(c.ctx, &corev1.ConfigMap{ | ||||
| 			ObjectMeta: metav1.ObjectMeta{Name: "allowed-configmap-to-delete"}, | ||||
| 		}, metav1.CreateOptions{}) | ||||
| 		deniedByKind, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "Secret", "namespace": "default", "name": "denied-secret"}) | ||||
| 		t.Run("resources_delete (denied by kind) has error", func(t *testing.T) { | ||||
| 			if !deniedByKind.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete (denied by kind) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to delete resource: resource not allowed: /v1, Kind=Secret" | ||||
| 			if deniedByKind.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		deniedByGroup, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "rbac.authorization.k8s.io/v1", "kind": "Role", "namespace": "default", "name": "denied-role"}) | ||||
| 		t.Run("resources_delete (denied by group) has error", func(t *testing.T) { | ||||
| 			if !deniedByGroup.IsError { | ||||
| 				t.Fatalf("call tool should fail") | ||||
| 			} | ||||
| 		}) | ||||
| 		t.Run("resources_delete (denied by group) describes denial", func(t *testing.T) { | ||||
| 			expectedMessage := "failed to delete resource: resource not allowed: rbac.authorization.k8s.io/v1, Kind=Role" | ||||
| 			if deniedByGroup.Content[0].(mcp.TextContent).Text != expectedMessage { | ||||
| 				t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, deniedByKind.Content[0].(mcp.TextContent).Text) | ||||
| 			} | ||||
| 		}) | ||||
| 		allowedResource, _ := c.callTool("resources_delete", map[string]interface{}{"apiVersion": "v1", "kind": "ConfigMap", "name": "allowed-configmap-to-delete"}) | ||||
| 		t.Run("resources_delete (not denied) deletes resource", func(t *testing.T) { | ||||
| 			if allowedResource.IsError { | ||||
| 				t.Fatalf("call tool should not fail") | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
| } | ||||
							
								
								
									
										3
									
								
								pkg/mcp/testdata/helm-chart-no-op/Chart.yaml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								pkg/mcp/testdata/helm-chart-no-op/Chart.yaml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | ||||
| apiVersion: v1 | ||||
| name: no-op | ||||
| version: 1.33.7 | ||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user