Add client API key validation

This commit is contained in:
openhands
2025-07-09 04:05:51 +00:00
committed by Holegots
parent 5027acb606
commit 0abb850405
5 changed files with 60 additions and 4 deletions

View File

@@ -1,6 +1,10 @@
# Required: Your OpenAI API key
OPENAI_API_KEY="sk-your-openai-api-key-here"
# Optional: Expected Anthropic API key for client validation
# If set, clients must provide this exact API key to access the proxy
ANTHROPIC_API_KEY="your-expected-anthropic-api-key"
# Optional: OpenAI API base URL (default: https://api.openai.com/v1)
# You can change this to use other providers like Azure OpenAI, local models, etc.
OPENAI_BASE_URL="https://api.openai.com/v1"

View File

@@ -46,7 +46,11 @@ uv run claude-code-proxy
### 4. Use with Claude Code
```bash
ANTHROPIC_BASE_URL=http://localhost:8082 ANTHROPIC_AUTH_TOKEN="some-api-key" claude
# If ANTHROPIC_API_KEY is not set in the proxy:
ANTHROPIC_BASE_URL=http://localhost:8082 ANTHROPIC_API_KEY="any-value" claude
# If ANTHROPIC_API_KEY is set in the proxy:
ANTHROPIC_BASE_URL=http://localhost:8082 ANTHROPIC_API_KEY="exact-matching-key" claude
```
## Configuration
@@ -57,6 +61,12 @@ ANTHROPIC_BASE_URL=http://localhost:8082 ANTHROPIC_AUTH_TOKEN="some-api-key" cla
- `OPENAI_API_KEY` - Your API key for the target provider
**Security:**
- `ANTHROPIC_API_KEY` - Expected Anthropic API key for client validation
- If set, clients must provide this exact API key to access the proxy
- If not set, any API key will be accepted
**Model Configuration:**
- `BIG_MODEL` - Model for Claude opus requests (default: `gpt-4o`)

View File

@@ -1,7 +1,8 @@
from fastapi import APIRouter, HTTPException, Request
from fastapi import APIRouter, HTTPException, Request, Header, Depends
from fastapi.responses import JSONResponse, StreamingResponse
from datetime import datetime
import uuid
from typing import Optional
from src.core.config import config
from src.core.logging import logger
@@ -23,8 +24,30 @@ openai_client = OpenAIClient(
api_version=config.azure_api_version,
)
async def validate_api_key(x_api_key: Optional[str] = Header(None), authorization: Optional[str] = Header(None)):
    """Validate the client's API key taken from either the ``x-api-key``
    header or the ``Authorization: Bearer ...`` header.

    Intended for use as a FastAPI dependency (``Depends(validate_api_key)``).

    Raises:
        HTTPException: 401 when ``ANTHROPIC_API_KEY`` is configured and the
            client supplied no key or a non-matching key.
    """
    # Validation is opt-in: when no expected key is configured in the
    # environment, every client is accepted without inspecting headers.
    if not config.anthropic_api_key:
        return

    # Prefer the Anthropic-style x-api-key header; fall back to a Bearer token.
    client_api_key = None
    if x_api_key:
        client_api_key = x_api_key
    elif authorization and authorization.startswith("Bearer "):
        # Strip only the leading "Bearer " prefix; str.replace would also
        # mangle any later occurrence of "Bearer " inside the token itself.
        client_api_key = authorization[len("Bearer "):]

    if not client_api_key or not config.validate_client_api_key(client_api_key):
        # Plain string: the message has no interpolated values.
        logger.warning("Invalid API key provided by client")
        raise HTTPException(
            status_code=401,
            detail="Invalid API key. Please provide a valid Anthropic API key.",
        )
@router.post("/v1/messages")
async def create_message(request: ClaudeMessagesRequest, http_request: Request):
async def create_message(request: ClaudeMessagesRequest, http_request: Request, _: None = Depends(validate_api_key)):
try:
logger.debug(
f"Processing Claude request: model={request.model}, stream={request.stream}"
@@ -96,7 +119,7 @@ async def create_message(request: ClaudeMessagesRequest, http_request: Request):
@router.post("/v1/messages/count_tokens")
async def count_tokens(request: ClaudeTokenCountRequest):
async def count_tokens(request: ClaudeTokenCountRequest, _: None = Depends(validate_api_key)):
try:
# For token counting, we'll use a simple estimation
# In a real implementation, you might want to use tiktoken or similar
@@ -141,6 +164,7 @@ async def health_check():
"timestamp": datetime.now().isoformat(),
"openai_api_configured": bool(config.openai_api_key),
"api_key_valid": config.validate_api_key(),
"client_api_key_validation": bool(config.anthropic_api_key),
}
@@ -193,6 +217,7 @@ async def root():
"openai_base_url": config.openai_base_url,
"max_tokens_limit": config.max_tokens_limit,
"api_key_configured": bool(config.openai_api_key),
"client_api_key_validation": bool(config.anthropic_api_key),
"big_model": config.big_model,
"small_model": config.small_model,
},

View File

@@ -8,6 +8,11 @@ class Config:
if not self.openai_api_key:
raise ValueError("OPENAI_API_KEY not found in environment variables")
# Add Anthropic API key for client validation
self.anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
if not self.anthropic_api_key:
print("Warning: ANTHROPIC_API_KEY not set. Client API key validation will be disabled.")
self.openai_base_url = os.environ.get("OPENAI_BASE_URL", "https://api.openai.com/v1")
self.azure_api_version = os.environ.get("AZURE_API_VERSION") # For Azure OpenAI
self.host = os.environ.get("HOST", "0.0.0.0")
@@ -33,6 +38,15 @@ class Config:
if not self.openai_api_key.startswith('sk-'):
return False
return True
def validate_client_api_key(self, client_api_key):
    """Validate a client's Anthropic API key against the configured value.

    Returns:
        bool: True when validation is disabled (no ``ANTHROPIC_API_KEY`` set)
        or when the supplied key matches the configured one; False otherwise.
    """
    # If no ANTHROPIC_API_KEY is set in the environment, skip validation.
    if not self.anthropic_api_key:
        return True
    # A missing or non-string key can never match the configured secret.
    if not isinstance(client_api_key, str):
        return False
    # Constant-time comparison: plain == would leak information about the
    # expected key through response-timing differences.
    import hmac
    return hmac.compare_digest(
        client_api_key.encode("utf-8"),
        self.anthropic_api_key.encode("utf-8"),
    )
try:
config = Config()

View File

@@ -19,6 +19,8 @@ def main():
print(" OPENAI_API_KEY - Your OpenAI API key")
print("")
print("Optional environment variables:")
print(" ANTHROPIC_API_KEY - Expected Anthropic API key for client validation")
print(" If set, clients must provide this exact API key")
print(
f" OPENAI_BASE_URL - OpenAI API base URL (default: https://api.openai.com/v1)"
)
@@ -47,6 +49,7 @@ def main():
print(f" Max Tokens Limit: {config.max_tokens_limit}")
print(f" Request Timeout: {config.request_timeout}s")
print(f" Server: {config.host}:{config.port}")
print(f" Client API Key Validation: {'Enabled' if config.anthropic_api_key else 'Disabled'}")
print("")
# Parse log level - extract just the first word to handle comments