feat: Calculer automatiquement les moyennes après chaque saisie de notes
Some checks failed
CI / Backend Tests (push) Has been cancelled
CI / Frontend Tests (push) Has been cancelled
CI / E2E Tests (push) Has been cancelled
CI / Naming Conventions (push) Has been cancelled
CI / Build Check (push) Has been cancelled

Les enseignants ont besoin de moyennes à jour immédiatement après la
publication ou modification des notes, sans attendre un batch nocturne.

Le système recalcule via Domain Events synchrones : statistiques
d'évaluation (min/max/moyenne/médiane), moyennes matières pondérées
(normalisation /20), et moyenne générale par élève. Les résultats sont
stockés dans des tables dénormalisées avec cache Redis (TTL 5 min).

Trois endpoints API exposent les données avec contrôle d'accès par rôle.
Une commande console permet le backfill des données historiques au
déploiement.
This commit is contained in:
2026-03-30 06:22:03 +02:00
parent b70d5ec2ad
commit b7dc27f2a5
786 changed files with 118783 additions and 316 deletions

View File

@@ -0,0 +1,202 @@
#!/usr/bin/env python3
# /// script
# requires-python = ">=3.10"
# ///
"""Tests for validate-module.py"""
import json
import subprocess
import sys
import tempfile
from pathlib import Path
# Path to the validator under test, resolved relative to this test file
# (it lives two levels up from here).
SCRIPT = Path(__file__).resolve().parent.parent / "validate-module.py"
# Header row written at the top of every module-help.csv fixture built below.
CSV_HEADER = "module,skill,display-name,menu-code,description,action,args,phase,after,before,required,output-location,outputs\n"
def create_module(tmp: Path, skills: list[str] | None = None, csv_rows: str = "",
                  yaml_content: str = "", setup_name: str = "bmad-tst-setup") -> Path:
    """Build a throwaway module tree under *tmp* and return its root.

    The tree always contains the setup skill (with module.yaml and
    module-help.csv in its assets folder); *skills* adds extra skill
    directories, each holding a minimal SKILL.md frontmatter file.
    """
    root = tmp / "module"
    root.mkdir()
    # The setup skill carries the module metadata in its assets folder.
    setup_dir = root / setup_name
    setup_dir.mkdir()
    (setup_dir / "SKILL.md").write_text(f"---\nname: {setup_name}\n---\n# Setup\n")
    assets = setup_dir / "assets"
    assets.mkdir()
    default_yaml = 'code: tst\nname: "Test Module"\ndescription: "A test module"\n'
    (assets / "module.yaml").write_text(yaml_content or default_yaml)
    (assets / "module-help.csv").write_text(CSV_HEADER + csv_rows)
    # Additional (non-setup) skills: one directory with a SKILL.md each.
    for name in skills or []:
        target = root / name
        target.mkdir()
        (target / "SKILL.md").write_text(f"---\nname: {name}\n---\n# {name}\n")
    return root
def run_validate(module_dir: Path) -> tuple[int, dict]:
    """Invoke validate-module.py on *module_dir*; return (exit_code, payload).

    The payload is the JSON parsed from stdout, or a dict holding the raw
    stdout/stderr when stdout is not valid JSON.
    """
    proc = subprocess.run(
        [sys.executable, str(SCRIPT), str(module_dir)],
        capture_output=True, text=True,
    )
    try:
        payload = json.loads(proc.stdout)
    except json.JSONDecodeError:
        payload = {"raw_stdout": proc.stdout, "raw_stderr": proc.stderr}
    return proc.returncode, payload
def test_valid_module():
    """A fully consistent module must validate cleanly with zero findings."""
    with tempfile.TemporaryDirectory() as raw:
        row = 'Test Module,bmad-tst-foo,Do Foo,DF,Does the foo thing,run,,anytime,,,false,output_folder,report\n'
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo"], csv_rows=row)
        code, data = run_validate(module_dir)
        assert code == 0, f"Expected pass: {data}"
        assert data["status"] == "pass"
        assert data["summary"]["total_findings"] == 0
def test_missing_setup_skill():
    """A module lacking its setup skill fails with a structure finding."""
    with tempfile.TemporaryDirectory() as raw:
        # Hand-build a module that has a regular skill but no setup skill.
        module_dir = Path(raw) / "module"
        module_dir.mkdir()
        only_skill = module_dir / "bmad-tst-foo"
        only_skill.mkdir()
        (only_skill / "SKILL.md").write_text("---\nname: bmad-tst-foo\n---\n")
        code, data = run_validate(module_dir)
        assert code == 1
        assert any(f["category"] == "structure" for f in data["findings"])
def test_missing_csv_entry():
    """Every skill needs a CSV row; the uncovered skill is reported by name."""
    with tempfile.TemporaryDirectory() as raw:
        row = 'Test Module,bmad-tst-foo,Do Foo,DF,Does foo,run,,anytime,,,false,output_folder,report\n'
        # Two skills on disk, but only bmad-tst-foo appears in the CSV.
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo", "bmad-tst-bar"],
                                   csv_rows=row)
        code, data = run_validate(module_dir)
        assert code == 1
        hits = [f for f in data["findings"] if f["category"] == "missing-entry"]
        assert len(hits) == 1
        assert "bmad-tst-bar" in hits[0]["message"]
def test_orphan_csv_entry():
    """A CSV row naming a skill that has no directory is reported."""
    # NOTE(review): unlike the other failure tests, this one never asserts
    # the exit code — confirm whether orphan entries are meant to fail the run.
    with tempfile.TemporaryDirectory() as raw:
        row = 'Test Module,bmad-tst-ghost,Ghost,GH,Does not exist,run,,anytime,,,false,output_folder,report\n'
        module_dir = create_module(Path(raw), skills=[], csv_rows=row)
        code, data = run_validate(module_dir)
        hits = [f for f in data["findings"] if f["category"] == "orphan-entry"]
        assert len(hits) == 1
        assert "bmad-tst-ghost" in hits[0]["message"]
def test_duplicate_menu_codes():
    """Two capabilities sharing one menu code trigger a duplicate finding."""
    with tempfile.TemporaryDirectory() as raw:
        # Both rows claim the "DF" menu code.
        rows = (
            'Test Module,bmad-tst-foo,Do Foo,DF,Does foo,run,,anytime,,,false,output_folder,report\n'
            'Test Module,bmad-tst-foo,Also Foo,DF,Also does foo,other,,anytime,,,false,output_folder,report\n'
        )
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo"], csv_rows=rows)
        code, data = run_validate(module_dir)
        hits = [f for f in data["findings"] if f["category"] == "duplicate-menu-code"]
        assert len(hits) == 1
        assert "DF" in hits[0]["message"]
def test_invalid_before_after_ref():
    """An after/before pointer at a nonexistent capability is reported."""
    with tempfile.TemporaryDirectory() as raw:
        # The "after" column references bmad-tst-ghost:phantom, which never exists.
        row = 'Test Module,bmad-tst-foo,Do Foo,DF,Does foo,run,,anytime,bmad-tst-ghost:phantom,,false,output_folder,report\n'
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo"], csv_rows=row)
        code, data = run_validate(module_dir)
        hits = [f for f in data["findings"] if f["category"] == "invalid-ref"]
        assert len(hits) == 1
        assert "bmad-tst-ghost:phantom" in hits[0]["message"]
def test_missing_yaml_fields():
    """A module.yaml with only a code field yields yaml-category findings."""
    with tempfile.TemporaryDirectory() as raw:
        row = 'Test Module,bmad-tst-foo,Do Foo,DF,Does foo,run,,anytime,,,false,output_folder,report\n'
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo"], csv_rows=row,
                                   yaml_content='code: tst\n')
        code, data = run_validate(module_dir)
        hits = [f for f in data["findings"] if f["category"] == "yaml"]
        # At least one of the name/description fields should be flagged missing.
        assert len(hits) >= 1
def test_empty_csv():
    """A help CSV containing only the header row is a failure."""
    with tempfile.TemporaryDirectory() as raw:
        module_dir = create_module(Path(raw), skills=["bmad-tst-foo"], csv_rows="")
        code, data = run_validate(module_dir)
        assert code == 1
        hits = [f for f in data["findings"] if f["category"] == "csv-empty"]
        assert len(hits) == 1
def test_nonexistent_directory():
    """Pointing the validator at a missing path gives exit 2 and error JSON."""
    proc = subprocess.run(
        [sys.executable, str(SCRIPT), "/nonexistent/path"],
        capture_output=True, text=True,
    )
    assert proc.returncode == 2
    payload = json.loads(proc.stdout)
    assert payload["status"] == "error"
if __name__ == "__main__":
tests = [
test_valid_module,
test_missing_setup_skill,
test_missing_csv_entry,
test_orphan_csv_entry,
test_duplicate_menu_codes,
test_invalid_before_after_ref,
test_missing_yaml_fields,
test_empty_csv,
test_nonexistent_directory,
]
passed = 0
failed = 0
for test in tests:
try:
test()
print(f" PASS: {test.__name__}")
passed += 1
except AssertionError as e:
print(f" FAIL: {test.__name__}: {e}")
failed += 1
except Exception as e:
print(f" ERROR: {test.__name__}: {e}")
failed += 1
print(f"\n{passed} passed, {failed} failed")
sys.exit(1 if failed else 0)