From 0e47f65bf795012362663433df201393390022db Mon Sep 17 00:00:00 2001 From: TCUDIKEL Date: Sat, 10 May 2025 17:38:30 +0300 Subject: [PATCH] first commit --- .gitignore | 8 + .idea/inspectionProfiles/Project_Default.xml | 31 + .../inspectionProfiles/profiles_settings.xml | 6 + .idea/mcphost-api.iml | 10 + .idea/modules.xml | 8 + .idea/vcs.xml | 6 + .idea/workspace.xml | 102 ++++ helpers/__init__.py | 0 helpers/local_logger.py | 15 + helpers/models.py | 15 + mcphost_openai_api.log | 548 ++++++++++++++++++ requirements.txt | 20 + serve_openai_compatible.py | 411 +++++++++++++ test.sh | 10 + 14 files changed, 1190 insertions(+) create mode 100644 .gitignore create mode 100644 .idea/inspectionProfiles/Project_Default.xml create mode 100644 .idea/inspectionProfiles/profiles_settings.xml create mode 100644 .idea/mcphost-api.iml create mode 100644 .idea/modules.xml create mode 100644 .idea/vcs.xml create mode 100644 .idea/workspace.xml create mode 100644 helpers/__init__.py create mode 100644 helpers/local_logger.py create mode 100644 helpers/models.py create mode 100644 mcphost_openai_api.log create mode 100644 requirements.txt create mode 100644 serve_openai_compatible.py create mode 100755 test.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..002b455 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +.venv +venv +config.json +mcphost +mcphost_openai_api.log.* +helpers/settings.py +.idea +__pycache__ \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 0000000..89e2c5b --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,31 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 0000000..105ce2d --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/mcphost-api.iml b/.idea/mcphost-api.iml new file mode 100644 index 0000000..99b1a4e --- /dev/null +++ b/.idea/mcphost-api.iml @@ -0,0 +1,10 @@ + + + + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..b64ffa2 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 0000000..94a25f7 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,6 @@ + + + + + + \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml new file mode 100644 index 0000000..31f89b8 --- /dev/null +++ b/.idea/workspace.xml @@ -0,0 +1,102 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1746886781301 + + + + \ No newline at end of file diff --git a/helpers/__init__.py b/helpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/helpers/local_logger.py b/helpers/local_logger.py new file mode 100644 index 0000000..a2bd7de --- /dev/null +++ b/helpers/local_logger.py @@ -0,0 +1,15 @@ + + + +class LoggerWriter: + def __init__(self, logger_func): + self.logger_func = logger_func + + def write(self, message): + # Remove trailing newlines for cleaner logs + if message.strip(): + self.logger_func(message.rstrip()) + + def flush(self): + # Required for file-like objects + pass diff --git a/helpers/models.py b/helpers/models.py new file mode 100644 index 
0000000..da6730d --- /dev/null +++ b/helpers/models.py @@ -0,0 +1,15 @@ +from typing import Optional, List +from pydantic import BaseModel + + +# OpenAI-compatible models +class ChatMessage(BaseModel): + role: str + content: str + +class ChatCompletionRequest(BaseModel): + model: str = "mcphost-model" + messages: List[ChatMessage] + max_tokens: Optional[int] = 512 + temperature: Optional[float] = 0.1 + stream: Optional[bool] = False diff --git a/mcphost_openai_api.log b/mcphost_openai_api.log new file mode 100644 index 0000000..d6b201b --- /dev/null +++ b/mcphost_openai_api.log @@ -0,0 +1,548 @@ +2025-05-10 17:10:23.482 | INFO | __main__::378 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:10:23.482 | INFO | __main__::379 - Configuration: +2025-05-10 17:10:23.483 | INFO | __main__::380 - MCPhost Path: ./mcphost +2025-05-10 17:10:23.483 | INFO | __main__::381 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:10:23.483 | INFO | __main__::382 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:10:23.483 | INFO | __main__::383 - Debug: True +2025-05-10 17:10:25.677 | ERROR | __main__:chat_completions:216 - MCPhost process object is None +2025-05-10 17:11:54.622 | INFO | __main__::367 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:11:54.622 | INFO | __main__::368 - Configuration: +2025-05-10 17:11:54.622 | INFO | __main__::369 - MCPhost Path: ./mcphost +2025-05-10 17:11:54.622 | INFO | __main__::370 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:11:54.622 | INFO | __main__::371 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:11:54.622 | INFO | __main__::372 - Debug: True +2025-05-10 17:11:56.959 | ERROR | __main__:chat_completions:205 - MCPhost process object is None +2025-05-10 17:14:38.976 | INFO | __main__::367 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:14:38.976 | INFO | __main__::368 - Configuration: +2025-05-10 17:14:38.976 | INFO | __main__::369 - MCPhost Path: ./mcphost +2025-05-10 17:14:38.976 | INFO | __main__::370 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:14:38.976 | INFO | __main__::371 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:14:38.976 | INFO | __main__::372 - Debug: True +2025-05-10 17:14:38.984 | INFO | __main__:lifespan:74 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:14:38.984 | INFO | __main__:lifespan:75 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:14:39.002 | INFO | __main__:lifespan:80 - MCPhost process spawned with PID: 565922 +2025-05-10 17:14:39.003 | INFO | __main__:lifespan:89 - Waiting for model to load... +2025-05-10 17:14:39.019 | ERROR | __main__:lifespan:142 - Failed to start MCPhost: 'function' object has no attribute 'write' +2025-05-10 17:14:39.019 | ERROR | __main__:lifespan:144 - MCPhost final output: +2025-05-10 17:14:39.019 | ERROR | __main__:lifespan:145 - Process alive: True +2025-05-10 17:14:39.019 | INFO | __main__:lifespan:151 - Shutting down MCPhost... +2025-05-10 17:14:39.019 | INFO | __main__:lifespan:159 - Terminating MCPhost process... +2025-05-10 17:14:39.120 | INFO | __main__:lifespan:162 - MCPhost process stopped +2025-05-10 17:16:06.172 | INFO | __main__::367 - Starting OpenAI-compatible MCPhost API server... 
+2025-05-10 17:16:06.172 | INFO | __main__::368 - Configuration: +2025-05-10 17:16:06.172 | INFO | __main__::369 - MCPhost Path: ./mcphost +2025-05-10 17:16:06.172 | INFO | __main__::370 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:16:06.172 | INFO | __main__::371 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:16:06.173 | INFO | __main__::372 - Debug: True +2025-05-10 17:16:06.180 | INFO | __main__:lifespan:74 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:16:06.180 | INFO | __main__:lifespan:75 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:16:06.199 | INFO | __main__:lifespan:80 - MCPhost process spawned with PID: 565949 +2025-05-10 17:16:06.199 | ERROR | __main__:lifespan:142 - Failed to start MCPhost: name 'LoggerWriter' is not defined +2025-05-10 17:16:06.199 | ERROR | __main__:lifespan:144 - MCPhost final output: None +2025-05-10 17:16:06.199 | ERROR | __main__:lifespan:145 - Process alive: True +2025-05-10 17:16:06.199 | INFO | __main__:lifespan:151 - Shutting down MCPhost... +2025-05-10 17:16:06.200 | INFO | __main__:lifespan:159 - Terminating MCPhost process... +2025-05-10 17:16:06.300 | INFO | __main__:lifespan:162 - MCPhost process stopped +2025-05-10 17:16:52.374 | INFO | __main__::368 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:16:52.374 | INFO | __main__::369 - Configuration: +2025-05-10 17:16:52.374 | INFO | __main__::370 - MCPhost Path: ./mcphost +2025-05-10 17:16:52.374 | INFO | __main__::371 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:16:52.374 | INFO | __main__::372 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:16:52.374 | INFO | __main__::373 - Debug: True +2025-05-10 17:16:52.382 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:16:52.382 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:16:52.403 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 565987 +2025-05-10 17:16:52.403 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:16:52.420 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:16:57.428 | DEBUG | local_logger:write:11 - ]10;?\ +2025-05-10 17:17:02.434 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:17:07.440 | DEBUG | local_logger:write:11 - 2025/05/10 17:17:07 INFO  Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:17:07.440 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:17:07.440 | INFO | __main__:lifespan:109 - Waiting for prompt... 
+2025-05-10 17:17:07.441 | DEBUG | local_logger:write:11 - 2025/05/10 17:17:07 INFO  Created default config file path=./config.json +[?25l[?2004h +2025-05-10 17:17:07.458 | DEBUG | local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃    +┃   +┃   +┃   +┃   +┃   +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:17:07.974 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:08.508 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:09.041 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:09.574 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:10.108 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:10.641 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:11.158 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:11.692 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:12.224 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:12.758 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:13.291 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:13.824 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:14.341 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:14.875 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:15.408 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:15.941 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:16.475 | DEBUG | local_logger:write:11 - ┃  + +2025-05-10 17:17:17.008 | DEBUG | local_logger:write:11 - ┃    + +2025-05-10 17:17:17.441 | ERROR | __main__:lifespan:119 - Timeout waiting for prompt +2025-05-10 17:17:17.441 | ERROR | __main__:lifespan:120 - Output so far: provider=openai model=qwen3:8b-q8_0 +2025/05/10 17:17:07 INFO  Created default config file path=./config.json +[?25l[?2004h ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃    +┃   +┃   +┃   +┃   +┃   +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    + +2025-05-10 17:17:17.442 | ERROR | __main__:lifespan:143 - Failed to start MCPhost: Timeout waiting for prompt +2025-05-10 17:17:17.442 | ERROR | __main__:lifespan:145 - MCPhost final output: provider=openai model=qwen3:8b-q8_0 +2025/05/10 17:17:07 INFO  Created default config file path=./config.json +[?25l[?2004h ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃    +┃   +┃   +┃   +┃   +┃   +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    +┃  +┃    + +2025-05-10 17:17:17.442 | ERROR | __main__:lifespan:146 - Process alive: True +2025-05-10 17:17:17.442 | INFO | __main__:lifespan:152 - Shutting down MCPhost... +2025-05-10 17:17:17.442 | INFO | __main__:lifespan:160 - Terminating MCPhost process... +2025-05-10 17:17:17.543 | INFO | __main__:lifespan:163 - MCPhost process stopped +2025-05-10 17:22:30.034 | INFO | __main__::368 - Starting OpenAI-compatible MCPhost API server... 
+2025-05-10 17:22:30.034 | INFO | __main__::369 - Configuration: +2025-05-10 17:22:30.034 | INFO | __main__::370 - MCPhost Path: ./mcphost +2025-05-10 17:22:30.035 | INFO | __main__::371 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:22:30.035 | INFO | __main__::372 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:22:30.035 | INFO | __main__::373 - Debug: True +2025-05-10 17:22:30.207 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:22:30.207 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:22:30.227 | ERROR | __main__:lifespan:83 - Failed to spawn MCPhost process: [Errno 8] Exec format error: b'./mcphost' +2025-05-10 17:22:30.228 | ERROR | __main__:lifespan:143 - Failed to start MCPhost: [Errno 8] Exec format error: b'./mcphost' +2025-05-10 17:22:30.228 | INFO | __main__:lifespan:152 - Shutting down MCPhost... +2025-05-10 17:23:22.399 | INFO | __main__::368 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:23:22.400 | INFO | __main__::369 - Configuration: +2025-05-10 17:23:22.400 | INFO | __main__::370 - MCPhost Path: ./mcphost +2025-05-10 17:23:22.400 | INFO | __main__::371 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:23:22.400 | INFO | __main__::372 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:23:22.400 | INFO | __main__::373 - Debug: True +2025-05-10 17:23:22.545 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:23:22.546 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:23:22.571 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 4385 +2025-05-10 17:23:22.572 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:23:22.574 | DEBUG | local_logger:write:11 - pydev debugger: bytes arguments were passed to a new process creation function. Breakpoints may not work correctly. +2025-05-10 17:23:23.188 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:23:28.196 | DEBUG | local_logger:write:11 - ]10;?\ +2025-05-10 17:23:33.199 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:23:38.203 | DEBUG | local_logger:write:11 - 2025/05/10 17:23:38 INFO Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:23:38.204 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:23:38.205 | INFO | __main__:lifespan:109 - Waiting for prompt... 
+2025-05-10 17:23:38.206 | DEBUG | local_logger:write:11 - [?25l[?2004h +2025-05-10 17:23:38.222 | DEBUG | local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:23:48.211 | ERROR | __main__:lifespan:119 - Timeout waiting for prompt +2025-05-10 17:23:48.211 | ERROR | __main__:lifespan:120 - Output so far: provider=openai model=qwen3:8b-q8_0 +[?25l[?2004h ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:23:48.212 | ERROR | __main__:lifespan:143 - Failed to start MCPhost: Timeout waiting for prompt +2025-05-10 17:23:48.212 | ERROR | __main__:lifespan:145 - MCPhost final output: provider=openai model=qwen3:8b-q8_0 +[?25l[?2004h ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:23:48.212 | ERROR | __main__:lifespan:146 - Process alive: True +2025-05-10 17:23:48.213 | INFO | __main__:lifespan:152 - Shutting down MCPhost... +2025-05-10 17:23:48.213 | INFO | __main__:lifespan:160 - Terminating MCPhost process... +2025-05-10 17:23:48.315 | INFO | __main__:lifespan:163 - MCPhost process stopped +2025-05-10 17:27:44.919 | INFO | __main__::375 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:27:44.920 | INFO | __main__::376 - Configuration: +2025-05-10 17:27:44.921 | INFO | __main__::377 - MCPhost Path: ./mcphost +2025-05-10 17:27:44.921 | INFO | __main__::378 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:27:44.921 | INFO | __main__::379 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:27:44.921 | INFO | __main__::380 - Debug: True +2025-05-10 17:27:45.074 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:27:45.074 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:27:45.094 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 4500 +2025-05-10 17:27:45.095 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:27:45.096 | DEBUG | local_logger:write:11 - pydev debugger: bytes arguments were passed to a new process creation function. Breakpoints may not work correctly. +2025-05-10 17:27:45.185 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:27:50.203 | DEBUG | local_logger:write:11 - ]10;?\ +2025-05-10 17:27:55.206 | DEBUG | local_logger:write:11 - ]11;?\ +2025-05-10 17:28:00.209 | DEBUG | local_logger:write:11 - 2025/05/10 17:28:00 INFO Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:28:00.210 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:28:00.211 | INFO | __main__:lifespan:109 - Waiting for prompt... 
+2025-05-10 17:28:00.213 | DEBUG | local_logger:write:11 - [?25l[?2004h +2025-05-10 17:28:00.230 | DEBUG | local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:28:00.231 | INFO | __main__:lifespan:128 - MCPhost process started and ready +2025-05-10 17:28:00.232 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:00.232 | INFO | __main__:lifespan:137 - Started process monitoring thread +2025-05-10 17:28:00.234 | INFO | __main__:lifespan:141 - MCPhost process is alive and ready to accept requests +2025-05-10 17:28:05.248 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:10.266 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:15.281 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:20.298 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:23.421 | DEBUG | __main__:chat_completions:231 - Sending to MCPhost: Hello, how are you? +2025-05-10 17:28:23.580 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.581 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.582 | DEBUG | local_logger:write:11 - 8 +2025-05-10 17:28:23.583 | DEBUG | local_logger:write:11 - A +2025-05-10 17:28:23.584 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.585 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.585 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.586 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.587 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.587 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.588 | DEBUG | local_logger:write:11 - ┃ +2025-05-10 17:28:23.589 | DEBUG | local_logger:write:11 - H +2025-05-10 17:28:23.589 | DEBUG | local_logger:write:11 - e +2025-05-10 17:28:23.590 | DEBUG | local_logger:write:11 - l +2025-05-10 17:28:23.590 | DEBUG | local_logger:write:11 - l +2025-05-10 17:28:23.591 | DEBUG | local_logger:write:11 - o +2025-05-10 17:28:23.592 | DEBUG | local_logger:write:11 - , +2025-05-10 17:28:23.593 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.594 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.595 | DEBUG | local_logger:write:11 - 0 +2025-05-10 17:28:23.595 | DEBUG | local_logger:write:11 - K +2025-05-10 17:28:23.596 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.596 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.597 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.598 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.598 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.599 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.600 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.600 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.601 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.602 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.602 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.603 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.603 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.604 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.604 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.604 | DEBUG | local_logger:write:11 -  +2025-05-10 
17:28:23.605 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.605 | DEBUG | local_logger:write:11 - 8 +2025-05-10 17:28:23.605 | DEBUG | local_logger:write:11 - 0 +2025-05-10 17:28:23.606 | DEBUG | local_logger:write:11 - D +2025-05-10 17:28:23.606 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.606 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.607 | DEBUG | local_logger:write:11 - 8 +2025-05-10 17:28:23.607 | DEBUG | local_logger:write:11 - A +2025-05-10 17:28:23.608 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.608 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.609 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.609 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.610 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.610 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.611 | DEBUG | local_logger:write:11 - ┃ +2025-05-10 17:28:23.611 | DEBUG | local_logger:write:11 - H +2025-05-10 17:28:23.612 | DEBUG | local_logger:write:11 - e +2025-05-10 17:28:23.612 | DEBUG | local_logger:write:11 - l +2025-05-10 17:28:23.612 | DEBUG | local_logger:write:11 - l +2025-05-10 17:28:23.613 | DEBUG | local_logger:write:11 - o +2025-05-10 17:28:23.613 | DEBUG | local_logger:write:11 - , +2025-05-10 17:28:23.613 | DEBUG | local_logger:write:11 - h +2025-05-10 17:28:23.614 | DEBUG | local_logger:write:11 - o +2025-05-10 17:28:23.614 | DEBUG | local_logger:write:11 - w +2025-05-10 17:28:23.615 | DEBUG | local_logger:write:11 - a +2025-05-10 17:28:23.615 | DEBUG | local_logger:write:11 - r +2025-05-10 17:28:23.616 | DEBUG | local_logger:write:11 - e +2025-05-10 17:28:23.616 | DEBUG | local_logger:write:11 - y +2025-05-10 17:28:23.617 | DEBUG | local_logger:write:11 - o +2025-05-10 17:28:23.617 | DEBUG | local_logger:write:11 - u +2025-05-10 17:28:23.617 | DEBUG | local_logger:write:11 - ? 
+2025-05-10 17:28:23.619 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.619 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.619 | DEBUG | local_logger:write:11 - 0 +2025-05-10 17:28:23.620 | DEBUG | local_logger:write:11 - K +2025-05-10 17:28:23.620 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.620 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.621 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.621 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.621 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.622 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.622 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.622 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.623 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.623 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.623 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.624 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.624 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.624 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.625 | DEBUG | local_logger:write:11 - B +2025-05-10 17:28:23.625 | DEBUG | local_logger:write:11 -  +2025-05-10 17:28:23.625 | DEBUG | local_logger:write:11 - [ +2025-05-10 17:28:23.626 | DEBUG | local_logger:write:11 - 8 +2025-05-10 17:28:23.626 | DEBUG | local_logger:write:11 - 0 +2025-05-10 17:28:23.626 | DEBUG | local_logger:write:11 - D +2025-05-10 17:28:25.314 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:30.327 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:35.340 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:40.356 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:45.373 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:50.390 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:28:53.604 | DEBUG | __main__:chat_completions:299 - MCPhost response: ┃ Hello,  ┃ Hello, how are you?   
+2025-05-10 17:28:55.406 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:00.422 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:05.443 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:10.464 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:15.481 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:20.499 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:25.519 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:30.539 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:35.558 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:40.577 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:45.597 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:50.617 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:29:55.634 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:30:00.653 | INFO | __main__:monitor_process:39 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:30:03.910 | INFO | __main__:lifespan:158 - Shutting down MCPhost... +2025-05-10 17:30:04.668 | INFO | __main__:lifespan:166 - Terminating MCPhost process... +2025-05-10 17:30:05.273 | INFO | __main__:lifespan:169 - MCPhost process stopped +2025-05-10 17:31:20.410 | INFO | __main__::404 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:31:20.411 | INFO | __main__::405 - Configuration: +2025-05-10 17:31:20.411 | INFO | __main__::406 - MCPhost Path: ./mcphost +2025-05-10 17:31:20.411 | INFO | __main__::407 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:31:20.412 | INFO | __main__::408 - OpenAI URL: http://0.0.0.0:33740/v1 +2025-05-10 17:31:20.412 | INFO | __main__::409 - Debug: True +2025-05-10 17:31:20.550 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://0.0.0.0:33740/v1 --openai-api-key anything +2025-05-10 17:31:20.551 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:31:20.574 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 4603 +2025-05-10 17:31:20.574 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:31:20.575 | DEBUG | helpers.local_logger:write:11 - pydev debugger: bytes arguments were passed to a new process creation function. Breakpoints may not work correctly. 
+2025-05-10 17:31:20.678 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:31:25.705 | DEBUG | helpers.local_logger:write:11 - ]10;?\ +2025-05-10 17:31:30.709 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:31:35.713 | DEBUG | helpers.local_logger:write:11 - 2025/05/10 17:31:35 INFO Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:31:35.714 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:31:35.715 | INFO | __main__:lifespan:109 - Waiting for prompt... +2025-05-10 17:31:35.718 | DEBUG | helpers.local_logger:write:11 - [?25l[?2004h +2025-05-10 17:31:35.735 | DEBUG | helpers.local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:31:35.735 | INFO | __main__:lifespan:128 - MCPhost process started and ready +2025-05-10 17:31:35.736 | INFO | __main__:lifespan:137 - Started process monitoring thread +2025-05-10 17:31:35.736 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:31:35.737 | INFO | __main__:lifespan:141 - MCPhost process is alive and ready to accept requests +2025-05-10 17:31:40.752 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:31:45.770 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:31:50.789 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:31:55.321 | DEBUG | __main__:chat_completions:233 - Sending to MCPhost: Hello, how are you? +2025-05-10 17:31:55.485 | DEBUG | helpers.local_logger:write:11 - ┃ Hello, how are you?  + +2025-05-10 17:31:55.808 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:31:56.641 | DEBUG | __main__:chat_completions:325 - MCPhost response: Hello, how are you? +2025-05-10 17:32:00.824 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:05.837 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:10.849 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:15.857 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:20.875 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:25.893 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:27.241 | INFO | __main__:lifespan:158 - Shutting down MCPhost... +2025-05-10 17:32:27.900 | INFO | __main__:lifespan:166 - Terminating MCPhost process... +2025-05-10 17:32:28.505 | INFO | __main__:lifespan:169 - MCPhost process stopped +2025-05-10 17:32:32.580 | INFO | __main__::404 - Starting OpenAI-compatible MCPhost API server... 
+2025-05-10 17:32:32.580 | INFO | __main__::405 - Configuration: +2025-05-10 17:32:32.580 | INFO | __main__::406 - MCPhost Path: ../mcphost +2025-05-10 17:32:32.580 | INFO | __main__::407 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:32:32.581 | INFO | __main__::408 - OpenAI URL: http://10.8.0.10:33740/v1 +2025-05-10 17:32:32.581 | INFO | __main__::409 - Debug: True +2025-05-10 17:32:32.745 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ../mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://10.8.0.10:33740/v1 --openai-api-key anything +2025-05-10 17:32:32.745 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:32:32.751 | ERROR | __main__:lifespan:83 - Failed to spawn MCPhost process: The command was not found or was not executable: ../mcphost. +2025-05-10 17:32:32.751 | ERROR | __main__:lifespan:149 - Failed to start MCPhost: The command was not found or was not executable: ../mcphost. +2025-05-10 17:32:32.752 | INFO | __main__:lifespan:158 - Shutting down MCPhost... +2025-05-10 17:32:43.168 | INFO | __main__::404 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:32:43.168 | INFO | __main__::405 - Configuration: +2025-05-10 17:32:43.169 | INFO | __main__::406 - MCPhost Path: ./mcphost +2025-05-10 17:32:43.169 | INFO | __main__::407 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:32:43.169 | INFO | __main__::408 - OpenAI URL: http://10.8.0.10:33740/v1 +2025-05-10 17:32:43.169 | INFO | __main__::409 - Debug: True +2025-05-10 17:32:43.293 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://10.8.0.10:33740/v1 --openai-api-key anything +2025-05-10 17:32:43.293 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:32:43.313 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 4681 +2025-05-10 17:32:43.314 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:32:43.315 | DEBUG | helpers.local_logger:write:11 - pydev debugger: bytes arguments were passed to a new process creation function. Breakpoints may not work correctly. +2025-05-10 17:32:43.349 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:32:48.357 | DEBUG | helpers.local_logger:write:11 - ]10;?\ +2025-05-10 17:32:53.358 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:32:58.364 | DEBUG | helpers.local_logger:write:11 - 2025/05/10 17:32:58 INFO Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:32:58.364 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:32:58.365 | INFO | __main__:lifespan:109 - Waiting for prompt... 
+2025-05-10 17:32:58.366 | DEBUG | helpers.local_logger:write:11 - [?25l[?2004h +2025-05-10 17:32:58.383 | DEBUG | helpers.local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:32:58.384 | INFO | __main__:lifespan:128 - MCPhost process started and ready +2025-05-10 17:32:58.385 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:32:58.386 | INFO | __main__:lifespan:137 - Started process monitoring thread +2025-05-10 17:32:58.386 | INFO | __main__:lifespan:141 - MCPhost process is alive and ready to accept requests +2025-05-10 17:33:03.402 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:08.050 | DEBUG | __main__:chat_completions:233 - Sending to MCPhost: Hello, how are you? +2025-05-10 17:33:08.216 | DEBUG | helpers.local_logger:write:11 - ┃ Hello, how are you?  + +2025-05-10 17:33:08.420 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:09.367 | DEBUG | __main__:chat_completions:325 - MCPhost response: Hello, how are you? +2025-05-10 17:33:13.435 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:18.455 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:23.476 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:28.487 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:33.502 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:36.958 | DEBUG | __main__:chat_completions:233 - Sending to MCPhost: Tell me a joke. +2025-05-10 17:33:37.116 | DEBUG | helpers.local_logger:write:11 - ┃ Tell me a joke.  + +2025-05-10 17:33:38.258 | DEBUG | __main__:chat_completions:325 - MCPhost response: Tell me a joke. +2025-05-10 17:33:38.518 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:43.534 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:48.550 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:50.324 | DEBUG | __main__:chat_completions:233 - Sending to MCPhost: Tell me a joke. +2025-05-10 17:33:50.483 | DEBUG | helpers.local_logger:write:11 - ┃ Tell me a joke.  + +2025-05-10 17:33:51.627 | DEBUG | __main__:chat_completions:325 - MCPhost response: Tell me a joke. 
+2025-05-10 17:33:53.570 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:33:58.586 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:03.604 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:08.610 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:13.621 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:18.644 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:23.665 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:28.681 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:33.700 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:38.717 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:34:42.098 | INFO | __main__:lifespan:158 - Shutting down MCPhost... +2025-05-10 17:34:42.734 | INFO | __main__:lifespan:166 - Terminating MCPhost process... +2025-05-10 17:34:43.337 | INFO | __main__:lifespan:169 - MCPhost process stopped +2025-05-10 17:34:50.679 | INFO | __main__::404 - Starting OpenAI-compatible MCPhost API server... +2025-05-10 17:34:50.679 | INFO | __main__::405 - Configuration: +2025-05-10 17:34:50.679 | INFO | __main__::406 - MCPhost Path: ./mcphost +2025-05-10 17:34:50.679 | INFO | __main__::407 - Model: openai:qwen3:8b-q8_0 +2025-05-10 17:34:50.679 | INFO | __main__::408 - OpenAI URL: http://10.8.0.10:33740/v1 +2025-05-10 17:34:50.680 | INFO | __main__::409 - Debug: True +2025-05-10 17:34:50.814 | INFO | __main__:lifespan:75 - Starting MCPhost with command: ./mcphost --debug --config ./config.json --model openai:qwen3:8b-q8_0 --openai-url http://10.8.0.10:33740/v1 --openai-api-key anything +2025-05-10 17:34:50.814 | INFO | __main__:lifespan:76 - Environment: OLLAMA_NUM_CTX=40960 +2025-05-10 17:34:50.834 | INFO | __main__:lifespan:81 - MCPhost process spawned with PID: 4756 +2025-05-10 17:34:50.834 | INFO | __main__:lifespan:90 - Waiting for model to load... +2025-05-10 17:34:50.835 | DEBUG | helpers.local_logger:write:11 - pydev debugger: bytes arguments were passed to a new process creation function. Breakpoints may not work correctly. +2025-05-10 17:34:50.870 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:34:55.879 | DEBUG | helpers.local_logger:write:11 - ]10;?\ +2025-05-10 17:35:00.884 | DEBUG | helpers.local_logger:write:11 - ]11;?\ +2025-05-10 17:35:05.887 | DEBUG | helpers.local_logger:write:11 - 2025/05/10 17:35:05 INFO Model loaded provider=openai model=qwen3:8b-q8_0 +2025-05-10 17:35:05.887 | INFO | __main__:lifespan:98 - MCPhost model loaded successfully +2025-05-10 17:35:05.888 | INFO | __main__:lifespan:109 - Waiting for prompt... 
+2025-05-10 17:35:05.888 | DEBUG | helpers.local_logger:write:11 - [?25l[?2004h +2025-05-10 17:35:05.905 | DEBUG | helpers.local_logger:write:11 - ┃ Enter your prompt (Type /help for commands, Ctrl+C to  +quit)  +┃  +┃  +┃  +┃  +┃  +┃  +alt+enter / ctrl+j new line • ctrl+e open editor • enter submit +2025-05-10 17:35:05.906 | INFO | __main__:lifespan:128 - MCPhost process started and ready +2025-05-10 17:35:05.907 | INFO | __main__:lifespan:137 - Started process monitoring thread +2025-05-10 17:35:05.907 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:35:05.907 | INFO | __main__:lifespan:141 - MCPhost process is alive and ready to accept requests +2025-05-10 17:35:10.911 | INFO | __main__:monitor_process:38 - MCPhost process status - Alive: True, Exit status: None +2025-05-10 17:35:11.348 | INFO | __main__:lifespan:158 - Shutting down MCPhost... +2025-05-10 17:35:11.917 | INFO | __main__:lifespan:166 - Terminating MCPhost process... +2025-05-10 17:35:12.521 | INFO | __main__:lifespan:169 - MCPhost process stopped diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..7b4f287 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,21 @@ +annotated-types==0.7.0 +anyio==4.9.0 +click==8.1.8 +fastapi==0.115.12 +fsspec==2024.6.1 +h11==0.16.0 +idna==3.10 +loguru==0.7.3 +pexpect==4.9.0 +ptyprocess==0.7.0 +pydantic==2.11.4 +pydantic-settings==2.9.1 +pydantic_core==2.33.2 +pyte==0.8.2 +python-dotenv==1.1.0 +sniffio==1.3.1 +starlette==0.46.2 +tqdm==4.66.4 +typing-inspection==0.4.0 +typing_extensions==4.12.2 +uvicorn==0.34.2 \ No newline at end of file diff --git a/serve_openai_compatible.py b/serve_openai_compatible.py new file mode 100644 index 0000000..e9e74fa --- /dev/null +++ b/serve_openai_compatible.py @@ -0,0 +1,411 @@ +#!/usr/bin/env python3 +""" +OpenAI-compatible MCPhost API wrapper with enhanced debugging +Usage: python serve_openai_compatible.py +""" + +import json +import time +import asyncio +import os +from typing import Optional +from contextlib import asynccontextmanager +import threading + +from fastapi import FastAPI, HTTPException +from starlette.responses import StreamingResponse +import uvicorn +from loguru import logger +import pexpect +import pyte + +from helpers.settings import settings +from helpers.models import ChatCompletionRequest +from helpers.local_logger import LoggerWriter + +# Global process variable +mcp_process: Optional[pexpect.spawn] = None +process_monitor_thread = None +stop_monitoring = threading.Event() + + +def monitor_process(): + """Background thread to monitor MCPhost process status""" + while not stop_monitoring.is_set(): + if mcp_process: + is_alive = mcp_process.isalive() + exit_status = mcp_process.exitstatus if not is_alive else None + logger.info(f"MCPhost process status - Alive: {is_alive}, Exit status: {exit_status}") + + if not is_alive and exit_status is not None: + logger.error(f"MCPhost process died with exit code: {exit_status}") + else: + logger.warning("MCPhost process is None") + + # Wait 5 seconds before next check + for _ in range(5): + if stop_monitoring.is_set(): + break + time.sleep(1) + + +@asynccontextmanager +async def lifespan(app: FastAPI): + # Startup: Start MCPhost subprocess + global mcp_process, process_monitor_thread + + try: + # Build command + cmd = [settings.mcphost_path] + + if settings.debug: + cmd.append("--debug") + + cmd.extend([ + "--config", settings.mcphost_config, + "--model", settings.mcphost_model, + "--openai-url", settings.openai_url, +
"--openai-api-key", settings.openai_api_key + ]) + + # Set environment + env = os.environ.copy() + env["OLLAMA_NUM_CTX"] = str(settings.ollama_num_ctx) + + logger.info(f"Starting MCPhost with command: {' '.join(cmd)}") + logger.info(f"Environment: OLLAMA_NUM_CTX={settings.ollama_num_ctx}") + + # Use pexpect to spawn MCPhost in interactive mode + try: + mcp_process = pexpect.spawn(" ".join(cmd), timeout=30, encoding='utf-8', env=env) + logger.info(f"MCPhost process spawned with PID: {mcp_process.pid}") + except Exception as spawn_error: + logger.error(f"Failed to spawn MCPhost process: {spawn_error}") + raise + + # Enable detailed logging + mcp_process.logfile_read = LoggerWriter(logger.debug) + + # Wait for model to load + logger.info("Waiting for model to load...") + index = mcp_process.expect([ + "Model loaded", + pexpect.TIMEOUT, + pexpect.EOF + ], timeout=30) + + if index == 0: + logger.info("MCPhost model loaded successfully") + elif index == 1: + logger.error("Timeout waiting for model to load") + logger.error(f"Output so far: {mcp_process.before}") + raise RuntimeError("Timeout waiting for model to load") + elif index == 2: + logger.error("MCPhost process ended unexpectedly") + logger.error(f"Output: {mcp_process.before}") + raise RuntimeError("MCPhost process ended unexpectedly") + + # Wait for prompt interface + logger.info("Waiting for prompt...") + + # Read output until we see the prompt interface + seen_prompt = False + start_time = time.time() + + while time.time() - start_time < 10: # 10 second timeout + try: + output = mcp_process.read_nonblocking(size=1000, timeout=0.5) + if "Enter your prompt" in output or "┃" in output: + seen_prompt = True + break + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + logger.error("MCPhost process ended unexpectedly") + raise RuntimeError("MCPhost process ended unexpectedly") + + if seen_prompt: + logger.info("MCPhost process started and ready") + else: + logger.error("Timeout waiting for prompt") + raise RuntimeError("Timeout waiting for prompt") + + # Start process monitoring thread + stop_monitoring.clear() + process_monitor_thread = threading.Thread(target=monitor_process, daemon=True) + process_monitor_thread.start() + logger.info("Started process monitoring thread") + + # Final check + if mcp_process.isalive(): + logger.info("MCPhost process is alive and ready to accept requests") + else: + logger.error("MCPhost process is not alive after startup") + raise RuntimeError("MCPhost process died during startup") + + yield + + except Exception as e: + logger.error(f"Failed to start MCPhost: {e}") + if mcp_process: + logger.error(f"MCPhost final output: {mcp_process.before}") + logger.error(f"Process alive: {mcp_process.isalive()}") + if not mcp_process.isalive(): + logger.error(f"Exit status: {mcp_process.exitstatus}") + raise + finally: + # Shutdown: Clean up subprocess + logger.info("Shutting down MCPhost...") + stop_monitoring.set() + + if process_monitor_thread: + process_monitor_thread.join(timeout=2) + + if mcp_process: + if mcp_process.isalive(): + logger.info("Terminating MCPhost process...") + mcp_process.terminate() + mcp_process.wait() + logger.info("MCPhost process stopped") + + +# Create FastAPI app +app = FastAPI(title="OpenAI-compatible MCPhost API", lifespan=lifespan) + + +async def _resp_async_generator(response_text: str, model: str): + """Generator for streaming responses in OpenAI format""" + tokens = response_text.split(" ") + + for i, token in enumerate(tokens): + chunk = { + "id": 
f"chatcmpl-{int(time.time())}-{i}", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [{ + "index": 0, + "delta": {"content": token + " "}, + "finish_reason": None + }] + } + yield f"data: {json.dumps(chunk)}\n\n" + await asyncio.sleep(0.01) + + final_chunk = { + "id": f"chatcmpl-{int(time.time())}-final", + "object": "chat.completion.chunk", + "created": int(time.time()), + "model": model, + "choices": [{ + "index": 0, + "delta": {}, + "finish_reason": "stop" + }] + } + yield f"data: {json.dumps(final_chunk)}\n\n" + yield "data: [DONE]\n\n" + + +@app.post("/v1/chat/completions") +@app.post("/chat/completions") +async def chat_completions(request: ChatCompletionRequest): + """OpenAI-compatible chat completions endpoint""" + if not mcp_process: + logger.error("MCPhost process object is None") + raise HTTPException(status_code=500, detail="MCPhost process not initialized") + + if not mcp_process.isalive(): + logger.error(f"MCPhost process not running. Exit status: {mcp_process.exitstatus}") + raise HTTPException(status_code=500, detail="MCPhost process not running") + + try: + # Extract the last user message + user_message = "" + for message in reversed(request.messages): + if message.role == "user": + user_message = message.content + break + + if not user_message: + user_message = request.messages[-1].content if request.messages else "" + + logger.debug(f"Sending to MCPhost: {user_message}") + + # Clear any pending output (non-blocking read) + try: + mcp_process.read_nonblocking(size=1000, timeout=0.1) + except: + pass + + # Send message to MCPhost + mcp_process.sendline(user_message) + + # Create virtual terminal to parse output + screen = pyte.Screen(80, 24) + stream = pyte.ByteStream(screen) + + # Wait for the response + response_text = "" + last_screen_content = "" + + try: + start_time = time.time() + no_change_count = 0 + + while time.time() - start_time < 30: # 30 second timeout + try: + # Read available data + data = mcp_process.read_nonblocking(size=1024, timeout=0.1) + + # Feed data to virtual terminal + stream.feed(data.encode('utf-8')) + + # Get current screen content + current_screen = '\n'.join(screen.display).strip() + + # Check if screen content has changed + if current_screen == last_screen_content: + no_change_count += 1 + if no_change_count > 10: # No change for 1 second + break + else: + no_change_count = 0 + last_screen_content = current_screen + + # Extract response text from screen + lines = current_screen.split('\n') + response_lines = [] + + for line in lines: + # Skip empty lines and prompt-related lines + if line.strip() and not "Enter your prompt" in line and not line.strip().startswith( + "alt+enter"): + # Remove the prompt box character if present + if line.startswith("┃"): + clean_line = line[1:].strip() + if clean_line and not "Enter your prompt" in clean_line: + response_lines.append(clean_line) + else: + response_lines.append(line.strip()) + + # Update response text + response_text = ' '.join(response_lines) + + except pexpect.TIMEOUT: + # If we haven't received anything yet, continue waiting + if not response_text: + continue + else: + no_change_count += 1 + if no_change_count > 10: + break + except pexpect.EOF: + logger.error("MCPhost process ended unexpectedly") + break + + except Exception as e: + logger.warning(f"Error reading response: {e}") + + response_text = response_text.strip() + + # Clean up the response text - remove any duplicates or artifacts + if response_text: + # Sometimes the terminal might 
show duplicated content + words = response_text.split() + clean_words = [] + for i, word in enumerate(words): + if i == 0 or word != words[i - 1]: + clean_words.append(word) + response_text = ' '.join(clean_words) + + if not response_text: + response_text = "No response received from MCPhost" + + logger.debug(f"MCPhost response: {response_text}") + + # Handle streaming response + if request.stream: + return StreamingResponse( + _resp_async_generator(response_text, request.model), + media_type="text/event-stream" + ) + + # Non-streaming response + return { + "id": f"chatcmpl-{int(time.time())}", + "object": "chat.completion", + "created": int(time.time()), + "model": request.model, + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": response_text + }, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": len(user_message.split()), + "completion_tokens": len(response_text.split()), + "total_tokens": len(user_message.split()) + len(response_text.split()) + } + } + + except Exception as e: + logger.exception("Error in chat completion") + raise HTTPException(status_code=500, detail=str(e)) + + +@app.get("/v1/models") +@app.get("/models") +async def list_models(): + """List available models (OpenAI-compatible endpoint)""" + return { + "object": "list", + "data": [{ + "id": settings.mcphost_model, + "object": "model", + "created": int(time.time()), + "owned_by": "mcphost", + "permission": [], + "root": settings.mcphost_model, + "parent": None + }] + } + + +@app.get("/health") +async def health_check(): + """Health check endpoint""" + if not mcp_process: + logger.warning("Health check: MCPhost process is None") + return {"status": "unhealthy", "detail": "Process not initialized"} + + is_healthy = mcp_process.isalive() + status = "healthy" if is_healthy else "unhealthy" + detail = {"pid": mcp_process.pid if mcp_process else None} + + if not is_healthy and mcp_process: + detail["exit_status"] = mcp_process.exitstatus + + logger.info(f"Health check: {status}, details: {detail}") + return {"status": status, "detail": detail} + + +if __name__ == "__main__": + logger.add( + "mcphost_openai_api.log", + rotation="10 MB", + retention="10 days", + level="DEBUG" + ) + + logger.info("Starting OpenAI-compatible MCPhost API server...") + logger.info("Configuration:") + logger.info(f" MCPhost Path: {settings.mcphost_path}") + logger.info(f" Model: {settings.mcphost_model}") + logger.info(f" OpenAI URL: {settings.openai_url}") + logger.info(f" Debug: {settings.debug}") + + uvicorn.run(app, host=settings.host, port=settings.port, log_config=None) \ No newline at end of file diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..bae48aa --- /dev/null +++ b/test.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +curl -X POST http://0.0.0.0:8000/v1/chat/completions -H "Content-Type: application/json" -H "Authorization: Bearer fake-api-key" -d '{ + "model": "mcphost-model", + "messages": [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Tell me a joke."} + ], + "temperature": 0.7 + }'
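
Note for reviewers: serve_openai_compatible.py does `from helpers.settings import settings`, but helpers/settings.py is listed in .gitignore and is not part of this patch, so a fresh clone cannot start the server. The sketch below shows one plausible shape for that module, inferred from the fields the server reads and the values visible in mcphost_openai_api.log and test.sh; every name and default here is an assumption, not the author's actual file.

# helpers/settings.py -- hypothetical reconstruction (the real file is untracked)
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    # Fields referenced by serve_openai_compatible.py; defaults are guesses
    # taken from mcphost_openai_api.log and test.sh.
    mcphost_path: str = "./mcphost"
    mcphost_config: str = "./config.json"
    mcphost_model: str = "openai:qwen3:8b-q8_0"
    openai_url: str = "http://0.0.0.0:33740/v1"
    openai_api_key: str = "anything"
    ollama_num_ctx: int = 40960
    debug: bool = True
    host: str = "0.0.0.0"
    port: int = 8000


settings = Settings()

With pydantic-settings, each field can also be overridden through an environment variable of the same name, which lets deployments keep real values out of the repository.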
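The response extraction in chat_completions drives MCPhost's interactive TUI through pexpect and renders the raw ANSI output onto a pyte virtual screen before scraping the text, which is why the committed log above is full of escape sequences. Here is a minimal, self-contained sketch of that technique; "./some_tui_app" is a placeholder command, not part of this repository.

# Sketch of the pexpect + pyte scraping pattern used in chat_completions.
# "./some_tui_app" is a hypothetical interactive program.
import time

import pexpect
import pyte


def read_screen(child: pexpect.spawn, seconds: float = 2.0) -> str:
    """Feed the child's terminal output into a virtual 80x24 screen and return its text."""
    screen = pyte.Screen(80, 24)      # emulated terminal grid
    stream = pyte.ByteStream(screen)  # interprets ANSI escape codes while updating the screen
    deadline = time.time() + seconds
    while time.time() < deadline:
        try:
            data = child.read_nonblocking(size=1024, timeout=0.1)
        except pexpect.TIMEOUT:
            continue                  # nothing new yet; keep polling until the deadline
        except pexpect.EOF:
            break                     # child exited; render what we have
        stream.feed(data.encode("utf-8"))  # child was spawned with encoding='utf-8'
    return "\n".join(screen.display).strip()


child = pexpect.spawn("./some_tui_app", encoding="utf-8", timeout=30)
child.sendline("Hello, how are you?")
print(read_screen(child))

Rendering through pyte instead of regex-stripping escape codes means cursor movements and line redraws are applied the way a real terminal would apply them, so the scraped text matches what a user would actually see.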
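Besides test.sh, the endpoint can be exercised with the official openai Python client (not listed in requirements.txt; it would need to be installed separately). A usage sketch, assuming the server is running on the same host and port that test.sh targets:

# Hypothetical client-side usage; requires the `openai` package.
from openai import OpenAI

client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="fake-api-key")

# Non-streaming request, mirroring test.sh
resp = client.chat.completions.create(
    model="mcphost-model",
    messages=[{"role": "user", "content": "Tell me a joke."}],
)
print(resp.choices[0].message.content)

# Streaming request: the server replies with OpenAI-style "data:" chunks
stream = client.chat.completions.create(
    model="mcphost-model",
    messages=[{"role": "user", "content": "Tell me a joke."}],
    stream=True,
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
print()

Note that the server computes the full reply first and only simulates token streaming by splitting it on spaces in _resp_async_generator, so stream=True does not reduce time-to-first-token.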