# RoleMesh-Gateway/tests/test_config_loading.py
from __future__ import annotations

from pathlib import Path

import yaml

from rolemesh_gateway.config import load_config as load_gateway_config
from rolemesh_node_agent.config import load_config as load_node_config


def test_gateway_config_override_merges_nested_mappings(tmp_path: Path):
    """The local override deep-merges into the base config: nested mappings
    combine key-by-key, and keys present in the override win."""
    base = tmp_path / "models.yaml"
    override = tmp_path / "models.local.yaml"
    base.write_text(
        yaml.safe_dump(
            {
                "version": 1,
                "default_model": "writer",
                "auth": {
                    "client_api_keys": ["base-client-key"],
                    "node_api_keys": ["base-node-key"],
                },
                "models": {
                    "writer": {
                        "type": "proxy",
                        "openai_model_name": "writer",
                        "proxy_url": "http://127.0.0.1:8012",
                        "defaults": {"temperature": 0.6, "max_tokens": 256},
                    }
                },
            }
        )
    )
    override.write_text(
        yaml.safe_dump(
            {
                "auth": {
                    "client_api_keys": ["local-client-key"],
                },
                "models": {
                    "writer": {
                        "proxy_url": "http://192.168.1.50:8012",
                        "defaults": {"temperature": 0.2},
                    }
                },
            }
        )
    )
    cfg = load_gateway_config(base, override_path=override)
    # Overridden leaves win; untouched sibling keys survive the merge.
    assert cfg.auth.client_api_keys == ["local-client-key"]
    assert cfg.auth.node_api_keys == ["base-node-key"]
    # The config model normalizes URLs (note the trailing slash).
    assert str(cfg.models["writer"].proxy_url) == "http://192.168.1.50:8012/"
    assert cfg.models["writer"].defaults == {"temperature": 0.2, "max_tokens": 256}
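

# The assertions above pin down standard recursive-mapping merge semantics:
# dicts merge key-by-key, and any non-dict value in the override (scalar or
# list) replaces the base value outright. The sketch below is a minimal
# reference implementation of that rule, for illustration only; it is an
# assumption about, not a copy of, the actual rolemesh_gateway merge code.
def _reference_deep_merge(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            # Both sides are mappings: recurse so sibling keys survive.
            merged[key] = _reference_deep_merge(merged[key], value)
        else:
            # Scalars and lists are replaced wholesale, never concatenated.
            merged[key] = value
    return merged


def test_reference_deep_merge_illustrates_expected_semantics():
    base = {"auth": {"a": 1, "b": 2}, "keys": ["x"]}
    override = {"auth": {"a": 9}, "keys": ["y"]}
    assert _reference_deep_merge(base, override) == {
        "auth": {"a": 9, "b": 2},
        "keys": ["y"],
    }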


def test_node_agent_config_override_replaces_local_machine_paths(tmp_path: Path):
    """Machine-local settings (listen host, llama-server binary, model roots
    and weight paths) come from the override; lists are replaced wholesale."""
    base = tmp_path / "node_agent.yaml"
    override = tmp_path / "node_agent.local.yaml"
    base.write_text(
        yaml.safe_dump(
            {
                "node_id": "node-generic",
                "listen_host": "0.0.0.0",
                "listen_port": 8091,
                "dispatcher_base_url": "http://10.0.0.10:8080",
                "llama_server_bin": "/path/to/llama-server",
                "model_roots": ["/path/to/model-weights"],
                "models": [
                    {
                        "model_id": "qwen3-8b",
                        "path": "/path/to/model-weights/Qwen3-8B-Q5_K_M.gguf",
                        "roles": ["tutor"],
                    }
                ],
            }
        )
    )
    override.write_text(
        yaml.safe_dump(
            {
                "listen_host": "192.168.1.101",
                "llama_server_bin": "/home/netuser/bin/llama.cpp/build/bin/llama-server",
                "model_roots": ["/home/netuser/bin/models/llm"],
                "models": [
                    {
                        "model_id": "qwen3-8b",
                        "path": "/home/netuser/bin/models/llm/Qwen3-8B-Q5_K_M.gguf",
                        "roles": ["tutor", "mentor"],
                    }
                ],
            }
        )
    )
    cfg = load_node_config(base, override_path=override)
    assert cfg.listen_host == "192.168.1.101"
    assert cfg.llama_server_bin == "/home/netuser/bin/llama.cpp/build/bin/llama-server"
    assert [str(path) for path in cfg.model_roots] == ["/home/netuser/bin/models/llm"]
    assert str(cfg.models[0].path) == "/home/netuser/bin/models/llm/Qwen3-8B-Q5_K_M.gguf"
    assert cfg.models[0].roles == ["tutor", "mentor"]
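

# Note the contrast with the gateway test above: "model_roots" and "models"
# are YAML lists, so the override replaces them wholesale. If lists were
# merged element-wise, the base entry's lone "tutor" role could leak into
# the result; the final assertion guards against exactly that regression.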