Clarifying roles for use with RoleMesh Gateway.

This commit is contained in:
welsberr 2026-03-16 17:17:39 -04:00
parent 0755890faf
commit 51dd2b29ff
8 changed files with 249 additions and 25 deletions

View File

@ -17,9 +17,10 @@ That means Didactopus can keep a simple provider abstraction while delegating mo
## Recommended architecture
1. Run RoleMesh Gateway as the OpenAI-compatible front door.
2. Point RoleMesh roles at local backends or discovered node agents.
2. Expose whatever model aliases or upstream routes you want from RoleMesh Gateway.
3. Configure Didactopus to use the `rolemesh` model provider.
4. Let Didactopus send mentor/practice/project-advisor/evaluator requests by role.
4. Map Didactopus roles to RoleMesh model aliases in `role_to_model`.
5. Let Didactopus send role-specific requests while RoleMesh handles the actual model routing.
## Didactopus-side config
@ -33,26 +34,86 @@ The important fields are:
- `model_provider.rolemesh.default_model`
- `model_provider.rolemesh.role_to_model`
## Suggested role mapping
## Canonical Didactopus roles
With the sample RoleMesh gateway config, this is a good default mapping:
Didactopus now defines its own role set in code. RoleMesh is expected to serve those roles by alias mapping rather than by imposing its own role vocabulary.
Current canonical roles:
- `mentor -> planner`
- `learner -> writer`
- `practice -> writer`
- `project_advisor -> planner`
- `evaluator -> reviewer`
This keeps Didactopus prompts aligned with the role semantics RoleMesh already exposes.
These are the default RoleMesh alias values in the example config, not required gateway role names.
The Didactopus role meanings are:
- `mentor`: sequencing, hints, conceptual framing, and prerequisite guidance
- `learner`: learner-side reflection or transcript voice
- `practice`: exercise generation without answer offloading
- `project_advisor`: synthesis work and capstone-style guidance
- `evaluator`: critique, limitation checks, and mastery-oriented feedback
## Default alias mapping
The example config maps those Didactopus roles to these RoleMesh aliases:
- `mentor -> planner`
- `learner -> writer`
- `practice -> writer`
- `project_advisor -> planner`
- `evaluator -> reviewer`
That mapping is only a starting point. If your RoleMesh deployment uses aliases like `didactopus-mentor`, `study-writer`, or `local-critic`, change only the right-hand side values in `role_to_model`.
## How to customize it
`role_to_model` is the main integration seam.
Example:
```yaml
model_provider:
provider: rolemesh
rolemesh:
base_url: "http://127.0.0.1:8000"
api_key: "change-me-client-key-1"
default_model: "didactopus-mentor"
role_to_model:
mentor: "didactopus-mentor"
learner: "didactopus-learner"
practice: "didactopus-practice"
project_advisor: "didactopus-projects"
evaluator: "didactopus-evaluator"
```
Recommended rules for changes:
- Keep the left-hand side role ids unchanged unless you are also changing Didactopus code.
- Change the right-hand side values freely to match your local RoleMesh aliases.
- If two Didactopus roles can share one model, map them to the same alias.
- If one role needs a stronger or more cautious model, give it a dedicated alias in RoleMesh and map it here.
If you want to add a brand-new Didactopus role, update:
- `src/didactopus/roles.py`
- `src/didactopus/role_prompts.py`
- any feature module that calls `provider.generate(..., role=...)`
- `configs/config.rolemesh.example.yaml`
## Prompt layer
Didactopus now keeps its default RoleMesh-oriented prompts in:
Didactopus keeps its role prompts in:
- `didactopus.role_prompts`
- `didactopus.roles`
These prompts are intentionally anti-offloading:
- mentor mode prefers Socratic questions and hints
- learner mode preserves an earnest learner voice rather than a solver voice
- practice mode prefers reasoning-heavy tasks
- project-advisor mode prefers synthesis work
- evaluator mode prefers critique and explicit limitations

View File

@ -6,6 +6,7 @@ from typing import Any
import yaml
from pydantic import BaseModel, Field
from .roles import default_role_to_model
class Settings(BaseModel):
@ -54,15 +55,7 @@ class RoleMeshProviderConfig(BaseModel):
base_url: str = os.getenv("DIDACTOPUS_ROLEMESH_BASE_URL", "http://127.0.0.1:8000")
api_key: str = os.getenv("DIDACTOPUS_ROLEMESH_API_KEY", "")
default_model: str = "planner"
role_to_model: dict[str, str] = Field(
default_factory=lambda: {
"mentor": "planner",
"learner": "writer",
"practice": "writer",
"project_advisor": "planner",
"evaluator": "reviewer",
}
)
role_to_model: dict[str, str] = Field(default_factory=default_role_to_model)
timeout_seconds: float = 30.0

View File

@ -0,0 +1,90 @@
from __future__ import annotations
import os
from pathlib import Path
from typing import Any
import yaml
from pydantic import BaseModel, Field
from .roles import default_role_to_model
class Settings(BaseModel):
    """Process-level runtime settings sourced from environment variables.

    NOTE(review): the ``os.getenv`` defaults are evaluated once, at import
    time — changing the environment afterwards does not affect them.
    """

    # SQLAlchemy-style database URL; defaults to an in-memory SQLite database.
    database_url: str = os.getenv("DIDACTOPUS_DATABASE_URL", "sqlite+pysqlite:///:memory:")
    # Bind address and port for the application server.
    host: str = os.getenv("DIDACTOPUS_HOST", "127.0.0.1")
    port: int = int(os.getenv("DIDACTOPUS_PORT", "8011"))
    # JWT signing secret and algorithm; the secret default is a placeholder
    # and should be overridden outside of development.
    jwt_secret: str = os.getenv("DIDACTOPUS_JWT_SECRET", "change-me")
    jwt_algorithm: str = "HS256"
class ReviewConfig(BaseModel):
    """Settings for the review workflow."""

    # Reviewer name recorded when none is supplied.
    default_reviewer: str = "Unknown Reviewer"
    # Whether a promoted pack is written after review — presumably consumed
    # by the review pipeline; confirm against its caller.
    write_promoted_pack: bool = True
class BridgeConfig(BaseModel):
    """Network and filesystem settings for the workspace bridge."""

    host: str = "127.0.0.1"
    port: int = 8765
    # Path to the workspace registry file (JSON, judging by the extension).
    registry_path: str = "workspace_registry.json"
    # Directory under which workspaces are created by default.
    default_workspace_root: str = "workspaces"
class PlatformConfig(BaseModel):
    """Mastery-threshold settings for the learning platform."""

    # Per-dimension promotion thresholds; keys are evaluation dimensions.
    dimension_thresholds: dict[str, float] = Field(
        default_factory=lambda: dict(
            correctness=0.8,
            explanation=0.75,
            transfer=0.7,
            project_execution=0.75,
            critique=0.7,
        )
    )
    # Overall confidence threshold used alongside the per-dimension values
    # — confirm exact usage in the evaluation code.
    confidence_threshold: float = 0.8

    @property
    def default_dimension_thresholds(self) -> dict[str, float]:
        """Convenience alias that returns ``dimension_thresholds``."""
        return self.dimension_thresholds
class LocalProviderConfig(BaseModel):
    """Settings for the local model provider."""

    # Backend implementation selector; defaults to the stub backend.
    backend: str = "stub"
    # Display/model name reported for the local backend.
    model_name: str = "local-demo"
class RoleMeshProviderConfig(BaseModel):
    """Connection and routing settings for the RoleMesh Gateway provider.

    NOTE(review): environment defaults are resolved once, at import time.
    """

    # OpenAI-compatible gateway endpoint.
    base_url: str = os.getenv("DIDACTOPUS_ROLEMESH_BASE_URL", "http://127.0.0.1:8000")
    # Client API key for the gateway; empty by default.
    api_key: str = os.getenv("DIDACTOPUS_ROLEMESH_API_KEY", "")
    # Presumably the fallback alias when a role has no role_to_model entry
    # — confirm in the provider implementation.
    default_model: str = "planner"
    # Canonical Didactopus role id -> RoleMesh model alias.
    role_to_model: dict[str, str] = Field(default_factory=default_role_to_model)
    timeout_seconds: float = 30.0
class ModelProviderConfig(BaseModel):
    """Selects and configures the active model provider."""

    # Provider id; "stub" by default, with "rolemesh" used in the example
    # config — confirm the full accepted set against the provider factory.
    provider: str = "stub"
    local: LocalProviderConfig = Field(default_factory=LocalProviderConfig)
    rolemesh: RoleMeshProviderConfig = Field(default_factory=RoleMeshProviderConfig)
class AppConfig(BaseModel):
    """Top-level application configuration aggregating all sections."""

    review: ReviewConfig = Field(default_factory=ReviewConfig)
    bridge: BridgeConfig = Field(default_factory=BridgeConfig)
    platform: PlatformConfig = Field(default_factory=PlatformConfig)
    model_provider: ModelProviderConfig = Field(default_factory=ModelProviderConfig)
def load_settings() -> Settings:
    """Return a Settings instance built from the environment-derived defaults."""
    return Settings()
def load_config(path: str | Path) -> AppConfig:
    """Load and validate an AppConfig from a YAML file.

    An empty document is treated as an empty mapping.

    Raises:
        TypeError: if the YAML document's root is not a mapping (e.g. a
            list or scalar), which would otherwise fail obscurely inside
            ``_with_platform_defaults``.
    """
    data = yaml.safe_load(Path(path).read_text(encoding="utf-8")) or {}
    # safe_load returns list/str/int for valid non-mapping documents;
    # reject those explicitly instead of letting dict(data) misbehave.
    if not isinstance(data, dict):
        raise TypeError(
            f"Config root must be a mapping, got {type(data).__name__}"
        )
    return AppConfig.model_validate(_with_platform_defaults(data))
def _with_platform_defaults(data: dict[str, Any]) -> dict[str, Any]:
normalized = dict(data)
if "platform" not in normalized:
normalized["platform"] = {}
if "model_provider" not in normalized:
normalized["model_provider"] = {}
return normalized

View File

@ -6,6 +6,7 @@ from typing import Callable
from urllib import request
from .config import ModelProviderConfig
from .roles import get_role
@dataclass
@ -20,15 +21,8 @@ class ModelProvider:
self.config = config
def pending_notice(self, role: str | None, model_name: str | None = None) -> str:
label = role or "assistant"
notices = {
"mentor": "Didactopus is reviewing the next learning step before answering.",
"learner": "Didactopus is drafting the learner-side reflection now.",
"practice": "Didactopus is designing a practice task for you now.",
"project_advisor": "Didactopus is sketching a project direction now.",
"evaluator": "Didactopus is evaluating the work before replying.",
}
notice = notices.get(label, "Didactopus is preparing the next response.")
spec = get_role(role or "")
notice = spec.pending_notice if spec is not None else "Didactopus is preparing the next response."
if model_name:
return f"{notice} Model: {model_name}."
return notice

View File

@ -39,3 +39,17 @@ def evaluator_system_prompt() -> str:
"If the learner stated a caveat, limitation, or nuance, quote or paraphrase that part and evaluate its quality rather than pretending it is absent. "
"Do not invent omissions that are contradicted by the learner's actual text."
)
def system_prompt_for_role(role: str) -> str:
    """Return the system prompt text for a canonical Didactopus role.

    Raises:
        KeyError: if *role* is not one of the known role ids.
    """
    try:
        factory = {
            "mentor": mentor_system_prompt,
            "learner": learner_system_prompt,
            "practice": practice_system_prompt,
            "project_advisor": project_advisor_system_prompt,
            "evaluator": evaluator_system_prompt,
        }[role]
    except KeyError:
        raise KeyError(f"Unknown Didactopus role: {role}") from None
    return factory()

65
src/didactopus/roles.py Normal file
View File

@ -0,0 +1,65 @@
from __future__ import annotations
from dataclasses import dataclass
@dataclass(frozen=True)
class DidactopusRole:
    """Immutable specification of one canonical Didactopus role."""

    # Stable identifier used as the key in role_to_model mappings.
    role_id: str
    # Human-readable name for the role.
    display_name: str
    # One-sentence description of what the role is for.
    purpose: str
    # RoleMesh model alias this role maps to by default.
    default_model_alias: str
    # User-facing message shown while a response for this role is pending.
    pending_notice: str
# Canonical role set for Didactopus. Declaration order is preserved by
# role_ids() and default_role_to_model(), which are built from this tuple.
DIDACTOPUS_ROLES: tuple[DidactopusRole, ...] = (
    DidactopusRole(
        role_id="mentor",
        display_name="Mentor",
        purpose="Guide the learner with sequencing, hints, and conceptual framing.",
        default_model_alias="planner",
        pending_notice="Didactopus is reviewing the next learning step before answering.",
    ),
    DidactopusRole(
        role_id="learner",
        display_name="Learner",
        purpose="Simulate or support the learner-side voice during transcript and study demos.",
        default_model_alias="writer",
        pending_notice="Didactopus is drafting the learner-side reflection now.",
    ),
    DidactopusRole(
        role_id="practice",
        display_name="Practice Designer",
        purpose="Create reasoning-heavy exercises and checks without giving away full solutions.",
        default_model_alias="writer",
        pending_notice="Didactopus is designing a practice task for you now.",
    ),
    DidactopusRole(
        role_id="project_advisor",
        display_name="Project Advisor",
        purpose="Suggest capstones and synthesis work that require independent execution.",
        default_model_alias="planner",
        pending_notice="Didactopus is sketching a project direction now.",
    ),
    DidactopusRole(
        role_id="evaluator",
        display_name="Evaluator",
        purpose="Critique learner work, identify weaknesses, and assess evidence of mastery.",
        default_model_alias="reviewer",
        pending_notice="Didactopus is evaluating the work before replying.",
    ),
)
# Lookup table keyed by role_id; insertion order follows DIDACTOPUS_ROLES.
ROLE_INDEX = {spec.role_id: spec for spec in DIDACTOPUS_ROLES}
def default_role_to_model() -> dict[str, str]:
    """Return the default mapping of each role id to its RoleMesh model alias."""
    return {spec.role_id: spec.default_model_alias for spec in DIDACTOPUS_ROLES}
def role_ids() -> list[str]:
    """Return the canonical role ids in declaration order."""
    # ROLE_INDEX is built from DIDACTOPUS_ROLES, so dict insertion order
    # matches the canonical declaration order.
    return list(ROLE_INDEX)
def get_role(role_id: str) -> DidactopusRole | None:
    """Look up a role spec by id, returning None for unknown ids."""
    try:
        return ROLE_INDEX[role_id]
    except KeyError:
        return None

View File

@ -1,5 +1,6 @@
from pathlib import Path
from didactopus.config import load_config
from didactopus.roles import role_ids
def test_load_example_config() -> None:
@ -14,3 +15,4 @@ def test_load_rolemesh_config() -> None:
assert config.model_provider.provider == "rolemesh"
assert config.model_provider.rolemesh.role_to_model["mentor"] == "planner"
assert config.model_provider.rolemesh.role_to_model["learner"] == "writer"
assert set(config.model_provider.rolemesh.role_to_model) == set(role_ids())

View File

@ -1,6 +1,6 @@
from didactopus.config import ModelProviderConfig
from didactopus.model_provider import ModelProvider
from didactopus.role_prompts import evaluator_system_prompt, mentor_system_prompt
from didactopus.role_prompts import evaluator_system_prompt, mentor_system_prompt, system_prompt_for_role
def test_stub_provider_includes_role_marker() -> None:
@ -80,3 +80,8 @@ def test_mentor_prompt_requires_acknowledging_existing_caveats() -> None:
prompt = mentor_system_prompt().lower()
assert "acknowledge what the learner already did correctly" in prompt
assert "do not claim a caveat" in prompt
def test_system_prompt_for_role_covers_defined_roles() -> None:
    """The role dispatcher returns the matching mode-specific prompt text."""
    mentor_prompt = system_prompt_for_role("mentor").lower()
    practice_prompt = system_prompt_for_role("practice").lower()
    assert "mentor mode" in mentor_prompt
    assert "practice-design mode" in practice_prompt