Initial ChatGPT sources

This commit is contained in:
welsberr 2026-03-12 19:59:59 -04:00
parent 50ee843b32
commit c99eea4793
34 changed files with 1074 additions and 228 deletions

22
.github/workflows/ci.yml vendored Normal file
View File

@ -0,0 +1,22 @@
name: CI
on:
push:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install
run: |
python -m pip install --upgrade pip
pip install -e .[dev]
- name: Lint
run: ruff check src tests
- name: Test
run: pytest

230
.gitignore vendored
View File

@ -1,229 +1,13 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.venv/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# ---> Emacs
# -*- mode: gitignore; -*-
*~
\#*\#
/.emacs.desktop
/.emacs.desktop.lock
*.elc
auto-save-list
tramp
.\#*
# Org-mode
.org-id-locations
*_archive
# flymake-mode
*_flymake.*
# eshell files
/eshell/history
/eshell/lastdir
# elpa packages
/elpa/
# reftex files
*.rel
# AUCTeX auto folder
/auto/
# cask packages
.cask/
.coverage
htmlcov/
dist/
# Flycheck
flycheck_*.el
# server auth directory
/server/
# projectiles files
.projectile
# directory configuration
.dir-locals.el
# network security
/network-security.data
# ---> Rust
# Generated by Cargo
# will have compiled files and executables
debug/
target/
# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
Cargo.lock
# These are backup files generated by rustfmt
**/*.rs.bk
# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb
build/
*.egg-info/
.env
configs/config.yaml

9
Dockerfile Normal file
View File

@ -0,0 +1,9 @@
FROM python:3.11-slim
WORKDIR /app
COPY pyproject.toml README.md /app/
COPY src /app/src
COPY configs /app/configs
COPY domain-packs /app/domain-packs
RUN pip install --no-cache-dir -e .
CMD ["python", "-m", "didactopus.main", "--domain", "statistics", "--goal", "practical mastery"]

20
LICENSE
View File

@ -1,9 +1,21 @@
MIT License
Copyright (c) 2026 welsberr
Copyright (c) 2026 Wesley R. Elsberry
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

13
Makefile Normal file
View File

@ -0,0 +1,13 @@
.PHONY: install test lint run
install:
python -m pip install -e .[dev]
test:
pytest
lint:
ruff check src tests
run:
python -m didactopus.main --domain "programming" --goal "build real projects"

184
README.md
View File

@ -1,3 +1,185 @@
# Didactopus
Didactopus is a multi-talented AI system to assist autodidacts in gaining mastery of a chosen topic. Want to learn and get an assist doing it? Didactopus fits the bill.
**Didactopus** is a local-first AI-assisted autodidactic mastery platform designed to help motivated learners achieve genuine mastery through Socratic mentoring, structured practice, project work, verification, and competency-based evaluation.
**Tagline:** *Many arms, one goal — mastery.*
## Vision
Didactopus treats AI as a **mentor, curriculum planner, critic, evaluator, and project guide** rather than an answer vending machine. The design goal is to produce capable practitioners who can explain, apply, test, and extend knowledge in real settings.
The platform is meant to support **AI-assisted autodidacts**: learners who pursue real expertise outside, alongside, or beyond traditional institutions.
## Core principles
- Active learning over passive consumption
- Socratic questioning over direct answer dumping
- Verification culture over uncritical acceptance
- Competency gates over time-based progression
- Project-based evidence of mastery
- Local-first model use when available
- Portable, shareable domain plans and learning artifacts
## Initial architecture
The initial prototype is organized around six core services:
1. **Domain Mapping Engine**
Builds a concept graph for a target field, including prerequisites, competencies, canonical problem types, and artifact templates.
2. **Curriculum Generator**
Produces a staged learning roadmap adapted to learner goals and prior knowledge.
3. **Mentor Agent**
Conducts Socratic dialogue, reviews reasoning, and offers targeted critique.
4. **Practice Generator**
Produces exercises aimed at specific concepts and skill gaps.
5. **Project Advisor**
Proposes and scaffolds real projects that demonstrate competence.
6. **Evaluation System**
Scores explanations, problem solutions, project outputs, and transfer tasks against explicit rubrics.
## Distribution model for contributed learning content
Didactopus is designed to support distribution of contributed artifacts, including:
- domain plans
- concept maps
- curriculum templates
- exercise sets
- project blueprints
- evaluation rubrics
- benchmark packs
- exemplar portfolios
These should be shareable as versioned packages or repositories so that contributors can publish reusable mastery paths for particular domains.
See:
- `docs/artifact-distribution.md`
- `docs/domain-pack-format.md`
## Local model strategy
The codebase is designed to support a provider abstraction:
- **Local-first**: Ollama, llama.cpp server, vLLM, LM Studio, or other on-prem inference endpoints
- **Remote optional**: API-backed models only when configured
- **Hybrid mode**: local models for routine mentoring, remote models only for heavier synthesis or evaluation if explicitly allowed
## Repository layout
```text
didactopus/
├── README.md
├── LICENSE
├── pyproject.toml
├── Makefile
├── docker-compose.yml
├── Dockerfile
├── .gitignore
├── .github/workflows/ci.yml
├── configs/
│ └── config.example.yaml
├── docs/
│ ├── architecture.md
│ ├── repository-plan.md
│ ├── component-specs.md
│ ├── prototype-roadmap.md
│ ├── artifact-distribution.md
│ └── domain-pack-format.md
├── domain-packs/
│ └── example-statistics/
│ ├── pack.yaml
│ ├── concepts.yaml
│ ├── roadmap.yaml
│ ├── projects.yaml
│ └── rubrics.yaml
├── src/didactopus/
│ ├── __init__.py
│ ├── main.py
│ ├── config.py
│ ├── model_provider.py
│ ├── domain_map.py
│ ├── curriculum.py
│ ├── mentor.py
│ ├── practice.py
│ ├── project_advisor.py
│ ├── evaluation.py
│ └── artifact_registry.py
└── tests/
├── test_config.py
├── test_domain_map.py
└── test_artifact_registry.py
```
## Quick start
```bash
python -m venv .venv
source .venv/bin/activate
pip install -e .[dev]
cp configs/config.example.yaml configs/config.yaml
python -m didactopus.main --domain "statistics" --goal "reach practical mastery"
pytest
```
## Prototype capabilities in this scaffold
The current scaffold provides:
- a configuration model for local/remote provider selection
- a concept graph data structure for domain maps
- stubs for curriculum, mentor, practice, project, and evaluation services
- a simple artifact registry for local domain-pack discovery
- an example domain pack layout
- a CLI entry point to demonstrate end-to-end flow
- tests to validate configuration and artifact behavior
## Suggested first implementation milestones
### Milestone 1: Learner and domain modeling
- learner profile schema
- concept graph generation
- prerequisite traversal
- domain-pack schema validation
- local artifact discovery
### Milestone 2: Guided study loop
- Socratic mentor prompts
- explanation checking
- exercise generation by competency target
- evidence capture for learner work
### Milestone 3: Project-centered learning
- capstone generator
- milestone planning
- artifact review rubrics
- distributed project pack ingestion
### Milestone 4: Mastery evidence
- explanation scoring
- transfer tasks
- benchmark alignment
- progress dashboard
- artifact publication workflow
## Notes on evaluation design
A key design choice is that the assessment layer should look for:
- correct explanations in the learner's own words
- ability to solve novel problems
- detection of flawed reasoning
- evidence of successful project execution
- transfer across adjacent contexts
## Naming rationale
**Didactopus** combines *didactic* / *didact* with *octopus*: a central intelligence coordinating many arms of learning support.
## License
MIT

View File

@ -0,0 +1,22 @@
model_provider:
mode: local_first
local:
backend: ollama
endpoint: http://localhost:11434
model_name: llama3.1:8b
remote:
enabled: false
provider_name: none
endpoint: ""
model_name: ""
platform:
verification_required: true
require_learner_explanations: true
permit_direct_answers: false
mastery_threshold: 0.8
artifacts:
local_pack_dirs:
- domain-packs
allow_third_party_packs: true

21
docker-compose.yml Normal file
View File

@ -0,0 +1,21 @@
services:
didactopus:
build: .
image: didactopus:dev
volumes:
- ./:/app
working_dir: /app
command: python -m didactopus.main --domain "statistics" --goal "practical mastery"
environment:
DIDACTOPUS_CONFIG: /app/configs/config.yaml
ollama:
image: ollama/ollama:latest
profiles: ["local-llm"]
ports:
- "11434:11434"
volumes:
- ollama-data:/root/.ollama
volumes:
ollama-data:

70
docs/architecture.md Normal file
View File

@ -0,0 +1,70 @@
# Architecture Overview
## System aim
Didactopus supports mastery-oriented autodidactic learning across many domains while reducing the risk that AI becomes a crutch for superficial performance.
## Top-level architecture
```text
Learner Interface
|
v
Orchestration Layer
|- learner profile
|- session state
|- competency tracker
|- artifact registry
|
+--> Domain Mapping Engine
+--> Curriculum Generator
+--> Mentor Agent
+--> Practice Generator
+--> Project Advisor
+--> Evaluation System
|
v
Model Provider Abstraction
|- local model backends
|- optional remote backends
```
## Core data objects
- **LearnerProfile**: goals, prior knowledge, pacing, artifacts, assessment history
- **ConceptNode**: concept, prerequisites, representative tasks, mastery criteria
- **RoadmapStage**: stage goals, concepts, practice forms, project milestones
- **EvidenceItem**: explanations, solved problems, project artifacts, benchmark scores
- **EvaluationReport**: rubric scores, weaknesses, suggested remediation
- **ArtifactManifest**: metadata for a domain pack or other contributed artifact
## Critical design constraint
The platform should optimize for **competence evidence** rather than conversational fluency. A learner should not advance based solely on sounding knowledgeable.
## Local-first inference
The provider abstraction should support:
- Ollama
- llama.cpp HTTP servers
- LM Studio local server
- vLLM or comparable self-hosted inference
- optional remote APIs only by explicit configuration
## Artifact ecosystem
The architecture should support:
- first-party curated packs
- third-party domain packs
- version validation
- compatibility checks
- offline local discovery
## Safety against shallow learning
The orchestration layer should support policies such as:
- forcing first-attempt learner answers
- hiding worked solutions until after effort is shown
- requiring self-explanation
- issuing counterexamples and adversarial probes
- cross-checking claims against references and experiments where applicable

View File

@ -0,0 +1,83 @@
# Artifact Distribution Plan
## Goal
Didactopus should support community-contributed, versioned learning artifacts that can be reused, extended, and distributed independently of the core codebase.
## Artifact classes
The platform should support distribution of:
- domain plans
- concept graphs
- roadmap templates
- exercise banks
- project blueprints
- rubrics
- benchmark tasks
- reference reading lists
- exemplar work products
## Distribution models
### 1. In-repository packs
Simple packs stored under `domain-packs/` for development, examples, and curated first-party artifacts.
### 2. External Git repositories
A contributor can publish a domain pack as its own Git repository and users can clone or vendor it.
Example patterns:
- `didactopus-pack-statistics`
- `didactopus-pack-electronics`
- `didactopus-pack-evolutionary-biology`
### 3. Package index model
Eventually, packs could be distributed through a registry or package index. A manifest should identify:
- pack name
- version
- author
- license
- compatible Didactopus versions
- dependencies on other packs or shared competencies
## Design requirements
Artifacts should be:
- human-readable
- diff-friendly
- versionable with Git
- independently licensed
- schema-validated
- mergeable and composable
- usable offline
## Recommended file formats
Use YAML or JSON for:
- metadata
- concept graphs
- roadmap stages
- projects
- rubrics
Use Markdown for:
- explanatory notes
- contributor guidance
- reading guides
- learner-facing instructions
## Versioning
Each pack should declare:
- semantic version
- minimum core version
- maximum tested core version, if needed
- schema version
## Future extension
Later, Didactopus should support:
- signed packs
- dependency resolution
- artifact provenance metadata
- import/export CLI commands
- trust policies for third-party packs

63
docs/component-specs.md Normal file
View File

@ -0,0 +1,63 @@
# Component Specifications
## Domain Mapping Engine
Inputs:
- selected domain
- learner goal
- optional prior knowledge notes
- optional domain pack
Outputs:
- concept graph
- prerequisite chains
- competency clusters
- recommended sequence
## Curriculum Generator
Inputs:
- concept graph
- learner profile
- assessment results
- roadmap templates from pack
Outputs:
- roadmap stages
- study sessions
- mastery checkpoints
- recommended projects
## Mentor Agent
Responsibilities:
- ask probing questions
- request justification
- detect vague understanding
- encourage verification
- avoid over-answering
## Practice Generator
Responsibilities:
- generate exercises by concept
- vary difficulty and modality
- create transfer tasks
- produce reflection prompts
## Project Advisor
Responsibilities:
- suggest authentic projects
- decompose milestones
- help define success criteria
- review artifacts
## Evaluation System
Responsibilities:
- assess correctness
- assess explanation quality
- assess transfer and robustness
- recommend remediation
## Artifact Registry
Responsibilities:
- discover local domain packs
- validate manifests
- check compatibility
- expose installed artifacts to the orchestration layer

View File

@ -0,0 +1,78 @@
# Domain Pack Format
## Purpose
A domain pack is the primary distributable artifact for Didactopus. It contains structured material describing a field, learning sequence, projects, and evaluation artifacts.
## Directory layout
```text
example-pack/
├── pack.yaml
├── concepts.yaml
├── roadmap.yaml
├── projects.yaml
├── rubrics.yaml
├── resources.md
└── LICENSE
```
## pack.yaml
The manifest should contain:
- pack name
- display name
- version
- schema version
- description
- author
- license
- tags
- didactopus compatibility
- optional dependencies
## concepts.yaml
Contains:
- concept identifiers
- descriptions
- prerequisites
- representative tasks
- mastery signals
## roadmap.yaml
Contains:
- stages
- stage goals
- concept clusters
- checkpoint criteria
- recommended pacing notes
## projects.yaml
Contains:
- project identifiers
- difficulty
- prerequisites
- deliverables
- milestone suggestions
- verification expectations
## rubrics.yaml
Contains:
- assessment criteria
- score bands
- explanation requirements
- transfer-task criteria
## Example philosophy
The pack format should separate:
- domain structure
- learning sequence
- authentic practice
- evidence standards
That separation makes contributed packs easier to compare, merge, and improve.

34
docs/prototype-roadmap.md Normal file
View File

@ -0,0 +1,34 @@
# Initial Prototype Roadmap
## Prototype 0.1
Goal: prove the orchestration model.
Capabilities:
- accept domain and goal
- generate a toy concept graph
- produce a staged roadmap
- generate one Socratic prompt
- generate one practice task
- generate one capstone suggestion
- produce one evaluation rubric
- list locally installed domain packs
## Prototype 0.2
Goal: learner-state adaptation.
Capabilities:
- accept prior knowledge signals
- adjust roadmap depth
- store evidence items
- identify weakest competencies
- load a selected domain pack
## Prototype 0.3
Goal: local-model integration.
Capabilities:
- connect to local Ollama endpoint
- select configured model
- use deterministic prompts for component outputs
- retain remote provider fallback but disable by default
- validate artifact compatibility

50
docs/repository-plan.md Normal file
View File

@ -0,0 +1,50 @@
# Repository Plan
## Phase 0: Scaffold
Create the repository skeleton, interfaces, tests, naming, artifact registry, and configuration system.
## Phase 1: Domain representation
Implement:
- concept graph schema
- prerequisite traversal
- learner profile schema
- domain-pack import/export formats
- schema validation
## Phase 2: Roadmap generation
Implement:
- initial assessment intake
- competency gap analysis
- staged learning plan generation
- pack-aware roadmap assembly
## Phase 3: Mentoring loop
Implement:
- Socratic questioning templates
- explanation critique
- misconception detection
- reflective prompts
- evidence capture
## Phase 4: Practice and projects
Implement:
- competency-targeted exercise generation
- capstone recommendations
- milestone decomposition
- project review rubrics
- external pack loading
## Phase 5: Evaluation
Implement:
- concept mastery scoring
- transfer task generation
- portfolio review
- external benchmark mapping
## Phase 6: UI and persistence
Implement:
- API layer
- web frontend
- learner history store
- dashboard and evidence views
- artifact browser and installer

View File

@ -0,0 +1,14 @@
concepts:
- id: descriptive-statistics
title: Descriptive Statistics
prerequisites: []
mastery_signals:
- explain mean, median, variance, and standard deviation
- choose an appropriate descriptive summary for a dataset
- id: probability
title: Probability
prerequisites:
- descriptive-statistics
mastery_signals:
- explain conditional probability
- compute simple event probabilities

View File

@ -0,0 +1,7 @@
name: example-statistics
display_name: Example Statistics Pack
version: 0.1.0
schema_version: "1"
description: Example domain pack for a statistics mastery pathway.
author: Wesley R. Elsberry
license: MIT

View File

@ -0,0 +1,11 @@
projects:
- id: local-dataset-analysis
title: Analyze a Real Dataset
difficulty: introductory
prerequisites:
- descriptive-statistics
- probability
deliverables:
- written analysis
- plots
- interpretation of uncertainty

View File

@ -0,0 +1,13 @@
stages:
- id: stage-1
title: Foundations
concepts:
- descriptive-statistics
checkpoint:
- summarize a dataset correctly
- id: stage-2
title: Probabilistic Reasoning
concepts:
- probability
checkpoint:
- solve introductory probability tasks with explanation

View File

@ -0,0 +1,8 @@
rubrics:
- id: explanation-rubric
title: Explanation Quality
criteria:
- correctness
- clarity
- justification
- transfer

37
pyproject.toml Normal file
View File

@ -0,0 +1,37 @@
[build-system]
requires = ["setuptools>=68", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "didactopus"
version = "0.1.0"
description = "Didactopus: local-first AI-assisted autodidactic mastery platform"
readme = "README.md"
requires-python = ">=3.10"
license = {text = "MIT"}
authors = [
{name = "Wesley R. Elsberry"}
]
dependencies = [
"pydantic>=2.7",
"pyyaml>=6.0",
"networkx>=3.2"
]
[project.optional-dependencies]
dev = [
"pytest>=8.0",
"ruff>=0.6"
]
[project.scripts]
didactopus = "didactopus.main:main"
[tool.setuptools.packages.find]
where = ["src"]
[tool.pytest.ini_options]
testpaths = ["tests"]
[tool.ruff]
line-length = 100

View File

@ -0,0 +1,11 @@
__all__ = [
"config",
"model_provider",
"domain_map",
"curriculum",
"mentor",
"practice",
"project_advisor",
"evaluation",
"artifact_registry",
]

View File

@ -0,0 +1,28 @@
from pathlib import Path
from pydantic import BaseModel
import yaml
class ArtifactManifest(BaseModel):
    """Validated contents of a domain pack's ``pack.yaml`` manifest."""

    # Machine-readable pack identifier, e.g. "example-statistics".
    name: str
    # Human-readable pack title shown in listings.
    display_name: str
    # Pack version string (semantic versioning per docs/domain-pack-format.md).
    version: str
    # Version of the manifest schema this pack conforms to.
    schema_version: str
    description: str = ""
    author: str = ""
    # Free-form license text; "unspecified" flags packs that omitted one.
    license: str = "unspecified"
def discover_domain_packs(base_dirs: list[str | Path]) -> list[tuple[Path, ArtifactManifest]]:
    """Scan *base_dirs* for pack directories that contain a pack.yaml manifest.

    Returns (pack_directory, validated_manifest) pairs in sorted directory
    order. Directories without a pack.yaml are skipped silently; base
    directories that do not exist are tolerated so default configuration
    works on a fresh checkout.
    """
    found: list[tuple[Path, ArtifactManifest]] = []
    for candidate in base_dirs:
        root = Path(candidate)
        if not root.exists():
            continue
        subdirs = sorted(entry for entry in root.iterdir() if entry.is_dir())
        for pack_dir in subdirs:
            manifest_file = pack_dir / "pack.yaml"
            if not manifest_file.exists():
                continue
            # An empty manifest file parses to None; normalize to {} so
            # pydantic reports missing fields rather than a type error.
            raw = yaml.safe_load(manifest_file.read_text(encoding="utf-8")) or {}
            found.append((pack_dir, ArtifactManifest.model_validate(raw)))
    return found

46
src/didactopus/config.py Normal file
View File

@ -0,0 +1,46 @@
from pathlib import Path
from pydantic import BaseModel, Field
import yaml
class ProviderEndpoint(BaseModel):
    """Connection settings for a local inference backend."""

    # Backend identifier; defaults to Ollama.
    backend: str = "ollama"
    # Base URL of the local inference HTTP endpoint (Ollama's default port).
    endpoint: str = "http://localhost:11434"
    # Model tag requested from the backend.
    model_name: str = "llama3.1:8b"
class RemoteProvider(BaseModel):
    """Optional remote API provider; disabled unless explicitly configured."""

    # Remote calls are opt-in: off by default.
    enabled: bool = False
    provider_name: str = "none"
    endpoint: str = ""
    model_name: str = ""
class ModelProviderConfig(BaseModel):
    """Provider selection: local endpoint plus an optional remote fallback."""

    # Selection strategy; "local_first" prefers the local endpoint.
    mode: str = Field(default="local_first")
    local: ProviderEndpoint = Field(default_factory=ProviderEndpoint)
    remote: RemoteProvider = Field(default_factory=RemoteProvider)
class PlatformConfig(BaseModel):
    """Pedagogy policy knobs consumed by the orchestration layer."""

    verification_required: bool = True
    require_learner_explanations: bool = True
    # When False, the mentor should withhold direct answers.
    permit_direct_answers: bool = False
    # Score threshold for mastery gates (presumably on a 0-1 scale; confirm
    # against the evaluation code once scoring is implemented).
    mastery_threshold: float = 0.8
class ArtifactConfig(BaseModel):
    """Where to look for domain packs and whether third-party packs are allowed."""

    # Directories scanned for pack subdirectories (see artifact_registry).
    local_pack_dirs: list[str] = Field(default_factory=lambda: ["domain-packs"])
    allow_third_party_packs: bool = True
class AppConfig(BaseModel):
    """Root configuration object mirroring configs/config.example.yaml."""

    model_provider: ModelProviderConfig = Field(default_factory=ModelProviderConfig)
    platform: PlatformConfig = Field(default_factory=PlatformConfig)
    artifacts: ArtifactConfig = Field(default_factory=ArtifactConfig)
def load_config(path: str | Path) -> AppConfig:
    """Read a YAML configuration file and validate it into an AppConfig.

    An empty file yields a config built entirely from model defaults.
    """
    raw_text = Path(path).read_text(encoding="utf-8")
    data = yaml.safe_load(raw_text) or {}
    return AppConfig.model_validate(data)

View File

@ -0,0 +1,21 @@
from dataclasses import dataclass
from .domain_map import DomainMap
@dataclass
class RoadmapStage:
    """One stage of a generated learning roadmap."""

    # Human-readable stage heading, e.g. "Stage 1: Foundations".
    title: str
    # Concept identifiers covered in this stage.
    concepts: list[str]
    # Narrative mastery target tying the concepts to the learner's goal.
    mastery_goal: str
def generate_initial_roadmap(domain_map: DomainMap, goal: str) -> list[RoadmapStage]:
    """Produce one roadmap stage per concept, in prerequisite order.

    Stage numbering starts at 1 and stage titles use the title-cased
    concept name from the domain map's topological ordering.
    """
    stages: list[RoadmapStage] = []
    for position, concept in enumerate(domain_map.topological_sequence(), start=1):
        stage = RoadmapStage(
            title=f"Stage {position}: {concept.title()}",
            concepts=[concept],
            mastery_goal=f"Demonstrate applied understanding of {concept} toward goal: {goal}",
        )
        stages.append(stage)
    return stages

View File

@ -0,0 +1,39 @@
from dataclasses import dataclass, field
import networkx as nx
@dataclass
class ConceptNode:
    """A single concept in a domain map's prerequisite graph."""

    # Graph node identifier.
    name: str
    description: str = ""
    # Names of concepts that must precede this one (become incoming edges).
    prerequisites: list[str] = field(default_factory=list)
    # Example tasks exercising the concept.
    representative_tasks: list[str] = field(default_factory=list)
class DomainMap:
    """Directed prerequisite graph of concepts for one study domain.

    Edges point from a prerequisite concept to the concept that requires it,
    so a topological sort yields a valid study order.
    """

    def __init__(self, domain_name: str) -> None:
        self.domain_name = domain_name
        self.graph = nx.DiGraph()

    def add_concept(self, node: ConceptNode) -> None:
        """Register *node* and draw prerequisite -> concept edges."""
        self.graph.add_node(node.name, data=node)
        for required in node.prerequisites:
            # Adding the edge implicitly creates the prerequisite node if
            # it has not been registered yet.
            self.graph.add_edge(required, node.name)

    def concepts(self) -> list[str]:
        """Return all concept names currently in the graph."""
        return [name for name in self.graph.nodes]

    def prerequisites_for(self, concept: str) -> list[str]:
        """Return every transitive prerequisite of *concept* (unordered)."""
        ancestors = nx.ancestors(self.graph, concept)
        return list(ancestors)

    def topological_sequence(self) -> list[str]:
        """Return concepts in an order where prerequisites come first."""
        ordered = nx.topological_sort(self.graph)
        return list(ordered)
def build_demo_domain_map(domain_name: str) -> DomainMap:
    """Construct the four-concept demo chain used by the CLI scaffold."""
    demo = DomainMap(domain_name)
    # Linear chain: each concept depends on the previous one.
    chain = [
        ("foundations", "Core assumptions and terminology", []),
        ("methods", "Basic methods", ["foundations"]),
        ("analysis", "Applying methods", ["methods"]),
        ("projects", "Real-world capstones", ["analysis"]),
    ]
    for name, description, prereqs in chain:
        demo.add_concept(ConceptNode(name, description, prereqs))
    return demo

View File

@ -0,0 +1,9 @@
from .model_provider import ModelProvider
def generate_rubric(provider: ModelProvider, concept: str) -> str:
    """Ask the provider for a short mastery rubric covering *concept*."""
    request = (
        "Create a concise evaluation rubric for learner mastery of "
        f"'{concept}'. Assess explanation quality, problem solving, and transfer."
    )
    response = provider.generate(request)
    return response.text

73
src/didactopus/main.py Normal file
View File

@ -0,0 +1,73 @@
import argparse
import os
from pathlib import Path
from .config import load_config
from .model_provider import ModelProvider
from .artifact_registry import discover_domain_packs
from .domain_map import build_demo_domain_map
from .curriculum import generate_initial_roadmap
from .mentor import generate_socratic_prompt
from .practice import generate_practice_task
from .project_advisor import suggest_capstone
from .evaluation import generate_rubric
def build_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for the scaffold entry point.

    ``--config`` falls back to the DIDACTOPUS_CONFIG environment variable,
    then to the checked-in example configuration file.
    """
    parser = argparse.ArgumentParser(description="Didactopus mastery scaffold")
    config_default = os.environ.get("DIDACTOPUS_CONFIG", "configs/config.example.yaml")
    parser.add_argument("--domain", required=True, help="Target domain of study")
    parser.add_argument("--goal", required=True, help="Learning goal")
    parser.add_argument("--config", default=config_default, help="Path to configuration YAML")
    return parser
def main() -> None:
    """CLI entry point: wire config, packs, domain map, and stub services.

    Prints a demonstration of the end-to-end flow: provider description,
    installed domain packs, the demo concept sequence, a generated roadmap,
    and one sample output from each stubbed service.
    """
    args = build_parser().parse_args()
    config = load_config(Path(args.config))
    provider = ModelProvider(config.model_provider)
    packs = discover_domain_packs(config.artifacts.local_pack_dirs)
    dmap = build_demo_domain_map(args.domain)
    roadmap = generate_initial_roadmap(dmap, args.goal)
    # Compute the concept ordering once; the original called
    # topological_sequence() three times for the same result.
    sequence = dmap.topological_sequence()
    print("== Didactopus ==")
    print("Many arms, one goal — mastery.")
    print()
    print("== Provider ==")
    print(provider.describe())
    print()
    print("== Installed Domain Packs ==")
    if packs:
        for pack_dir, manifest in packs:
            print(f"- {manifest.display_name} ({manifest.name} {manifest.version}) @ {pack_dir}")
    else:
        print("- none found")
    print()
    print("== Domain Map Sequence ==")
    for concept in sequence:
        print(f"- {concept}")
    print()
    print("== Roadmap ==")
    for stage in roadmap:
        print(f"- {stage.title}: {stage.mastery_goal}")
    print()
    # Prefer the second concept as the demo focus, falling back to the first
    # so a single-concept map no longer raises IndexError.
    focus_concept = sequence[1] if len(sequence) > 1 else sequence[0]
    print("== Mentor Prompt ==")
    print(generate_socratic_prompt(provider, focus_concept))
    print()
    print("== Practice Task ==")
    print(generate_practice_task(provider, focus_concept))
    print()
    print("== Capstone Suggestion ==")
    print(suggest_capstone(provider, args.domain))
    print()
    print("== Evaluation Rubric ==")
    print(generate_rubric(provider, focus_concept))
if __name__ == "__main__":
    main()

9
src/didactopus/mentor.py Normal file
View File

@ -0,0 +1,9 @@
from .model_provider import ModelProvider
def generate_socratic_prompt(provider: ModelProvider, concept: str) -> str:
    """Ask the provider for one probing question about *concept*."""
    question_request = (
        "You are a Socratic mentor. Ask one probing question that tests whether "
        f"a learner truly understands the concept '{concept}' and can explain "
        "it in their own words."
    )
    response = provider.generate(question_request)
    return response.text

View File

@ -0,0 +1,27 @@
from dataclasses import dataclass
from .config import ModelProviderConfig
@dataclass
class ModelResponse:
    """Result of one generation call, tagged with the provider that made it."""

    # Generated (or stubbed) output text.
    text: str
    # Backend identifier, e.g. "ollama".
    provider: str
    # Model tag that produced the response.
    model_name: str
class ModelProvider:
    """Facade over the configured inference backend.

    Currently a stub: ``generate`` never contacts a real model and instead
    echoes a truncated preview of the prompt.
    """

    def __init__(self, config: ModelProviderConfig) -> None:
        self.config = config

    def describe(self) -> str:
        """Return a one-line summary of the active mode and local model."""
        local_cfg = self.config.local
        return f"mode={self.config.mode}, local={local_cfg.backend}:{local_cfg.model_name}"

    def generate(self, prompt: str) -> ModelResponse:
        """Return a stubbed response echoing up to 120 chars of the prompt."""
        local_cfg = self.config.local
        flattened = prompt.strip().replace("\n", " ")
        preview = flattened[:120]
        return ModelResponse(
            text=f"[stubbed-response] {preview}",
            provider=local_cfg.backend,
            model_name=local_cfg.model_name,
        )

View File

@ -0,0 +1,9 @@
from .model_provider import ModelProvider
def generate_practice_task(provider: ModelProvider, concept: str) -> str:
    """Ask the provider for one reasoning-focused exercise on *concept*."""
    task_request = (
        f"Generate one practice task for the concept '{concept}'. Require "
        "reasoning, not mere recall, and avoid giving the answer."
    )
    response = provider.generate(task_request)
    return response.text

View File

@ -0,0 +1,9 @@
from .model_provider import ModelProvider
def suggest_capstone(provider: ModelProvider, domain: str) -> str:
    """Ask the provider for one realistic capstone project in *domain*."""
    capstone_request = (
        "Suggest one realistic capstone project for a learner pursuing mastery "
        f"in {domain}. The project must require synthesis, verification, and "
        "original work."
    )
    response = provider.generate(capstone_request)
    return response.text

View File

@ -0,0 +1,9 @@
from didactopus.artifact_registry import discover_domain_packs
def test_discover_example_pack() -> None:
    """The bundled example pack should be discoverable under domain-packs/."""
    discovered = discover_domain_packs(["domain-packs"])
    assert len(discovered) >= 1
    _, first_manifest = discovered[0]
    assert first_manifest.name == "example-statistics"
    assert first_manifest.display_name == "Example Statistics Pack"

10
tests/test_config.py Normal file
View File

@ -0,0 +1,10 @@
from pathlib import Path
from didactopus.config import load_config
def test_load_example_config() -> None:
    """Loading the checked-in example YAML should yield documented defaults."""
    loaded = load_config(Path("configs/config.example.yaml"))
    assert loaded.model_provider.mode == "local_first"
    assert loaded.platform.verification_required is True
    assert loaded.platform.mastery_threshold == 0.8
    assert "domain-packs" in loaded.artifacts.local_pack_dirs

13
tests/test_domain_map.py Normal file
View File

@ -0,0 +1,13 @@
from didactopus.domain_map import build_demo_domain_map
def test_demo_domain_map_order() -> None:
    """Demo map concepts must come out in prerequisite order."""
    ordered = build_demo_domain_map("statistics").topological_sequence()
    assert ordered == ["foundations", "methods", "analysis", "projects"]
def test_prerequisites() -> None:
    """Every earlier concept is a transitive prerequisite of 'projects'."""
    demo = build_demo_domain_map("statistics")
    found = set(demo.prerequisites_for("projects"))
    assert found == {"foundations", "methods", "analysis"}