12 Commits

Author SHA1 Message Date
Abhimanyu Saharan
40fa559b02 docs: fill in development + testing guides 2026-02-13 11:34:41 +00:00
Abhimanyu Saharan
02204cdf4e docs: define semver tag convention in release checklist 2026-02-13 11:30:03 +00:00
Abhimanyu Saharan
4b0dbbec83 docs: add release checklist + draft PR policy 2026-02-13 11:23:12 +00:00
Abhimanyu Saharan
90a7da47d0 tests: cover error payload json-safe bytes/bytearray/object branches 2026-02-12 15:09:59 +00:00
Abhimanyu Saharan
8e5fcd9243 feat(markdown): add mention rendering support in Markdown components 2026-02-12 20:18:19 +05:30
Abhimanyu Saharan
abfa25e464 Merge pull request #109 from abhi1693/ci/docs-quality-gates
CI: docs quality gates (broken link check)
2026-02-12 20:17:07 +05:30
Abhimanyu Saharan
a51446113b docs: fix extra blank line for markdownlint (MD012) 2026-02-12 14:44:43 +00:00
Abhimanyu Saharan
d6db63480e CI: add markdownlint + docs-check wrapper; extend link check scope 2026-02-12 14:44:43 +00:00
Abhimanyu Saharan
879b50199f ci(docs): scope link check to docs/ only (avoid legacy README links) 2026-02-12 14:44:43 +00:00
Abhimanyu Saharan
d830918fd1 ci(docs): add markdown relative link check gate 2026-02-12 14:44:43 +00:00
Abhimanyu Saharan
8e72b7e0bc feat(error_handling): improve error payload serialization for JSON compatibility 2026-02-12 20:10:30 +05:30
Abhimanyu Saharan
032b77afb8 feat(approvals): enhance approval model with task titles and confidence as float 2026-02-12 19:57:04 +05:30
33 changed files with 983 additions and 63 deletions

View File

@@ -87,6 +87,11 @@ jobs:
make frontend-test
make frontend-build
- name: Docs quality gates (lint + relative link check)
run: |
make docs-check
- name: Upload coverage artifacts
if: always()
uses: actions/upload-artifact@v4

23
.markdownlint-cli2.yaml Normal file
View File

@@ -0,0 +1,23 @@
# markdownlint-cli2 config
# Keep the ruleset intentionally tiny to avoid noisy churn.
config:
default: false
MD009: true # no trailing spaces
MD010: true # no hard tabs
MD012: true # no multiple consecutive blank lines
MD047: true # single trailing newline
globs:
- "**/*.md"
ignores:
- "**/node_modules/**"
- "**/.next/**"
- "**/dist/**"
- "**/build/**"
- "**/.venv/**"
- "**/__pycache__/**"
- "**/.pytest_cache/**"
- "**/.mypy_cache/**"
- "**/coverage/**"

View File

@@ -38,14 +38,24 @@ git checkout -b <branch-name>
If you accidentally based your branch off another feature branch, fix it by cherry-picking the intended commits onto a clean branch and force-pushing the corrected branch (or opening a new PR).
### Expectations
- Open a PR as **Draft** early (especially for multi-step work) so progress is visible and local changes aren't lost.
- Keep PRs **small and focused** when possible.
- Include a clear description of the change and why it's needed.
- Add/adjust tests when behavior changes.
- Update docs when contributor-facing or operator-facing behavior changes.
### Draft PR policy
- Start as **Draft** when:
- the PR is incomplete,
- CI is expected to fail,
- or you need early feedback on direction.
- Mark **Ready for review** when:
- `make check` is green locally (or you've explained why it can't be), and
- the description includes repro/verification notes.
### Local checks
From repo root, the closest “CI parity” command is:

View File

@@ -122,3 +122,15 @@ backend-templates-sync: ## Sync templates to existing gateway agents (usage: mak
.PHONY: check
check: lint typecheck backend-coverage frontend-test build ## Run lint + typecheck + tests + coverage + build
.PHONY: docs-lint
docs-lint: frontend-tooling ## Lint markdown files (tiny ruleset; avoids noisy churn)
$(NODE_WRAP) npx markdownlint-cli2@0.15.0 --config .markdownlint-cli2.yaml "**/*.md"
.PHONY: docs-link-check
docs-link-check: ## Check for broken relative links in markdown docs
python scripts/check_markdown_links.py
.PHONY: docs-check
docs-check: docs-lint docs-link-check ## Run all docs quality gates

View File

@@ -112,6 +112,15 @@ make setup
make check
```
## Contributing / PR workflow
- Contribution guide: [`CONTRIBUTING.md`](./CONTRIBUTING.md)
- Preferred flow: open a **Draft PR early**, then iterate until `make check` is green.
## Releasing
- Maintainer checklist: [Release checklist](./docs/release/README.md)
## License
This project is licensed under the MIT License. See [`LICENSE`](./LICENSE).

View File

@@ -26,6 +26,7 @@ from app.db.pagination import paginate
from app.db.session import async_session_maker, get_session
from app.models.agents import Agent
from app.models.approvals import Approval
from app.models.tasks import Task
from app.schemas.approvals import ApprovalCreate, ApprovalRead, ApprovalStatus, ApprovalUpdate
from app.schemas.pagination import DefaultLimitOffsetPage
from app.services.activity_log import record_activity
@@ -96,10 +97,36 @@ async def _approval_task_ids_map(
return mapping
def _approval_to_read(approval: Approval, *, task_ids: list[UUID]) -> ApprovalRead:
async def _task_titles_by_id(
    session: AsyncSession,
    *,
    task_ids: set[UUID],
) -> dict[UUID, str]:
    """Load the titles of the given tasks, keyed by task id."""
    # Avoid issuing a query (and an empty IN clause) when nothing is linked.
    if not task_ids:
        return {}
    statement = select(col(Task.id), col(Task.title)).where(col(Task.id).in_(task_ids))
    result = await session.exec(statement)
    title_by_id: dict[UUID, str] = {}
    for task_id, title in result:
        title_by_id[task_id] = title
    return title_by_id
def _approval_to_read(
approval: Approval,
*,
task_ids: list[UUID],
task_titles: list[str],
) -> ApprovalRead:
primary_task_id = task_ids[0] if task_ids else None
model = ApprovalRead.model_validate(approval, from_attributes=True)
return model.model_copy(update={"task_id": primary_task_id, "task_ids": task_ids})
return model.model_copy(
update={
"task_id": primary_task_id,
"task_ids": task_ids,
"task_titles": task_titles,
},
)
async def _approval_reads(
@@ -107,8 +134,17 @@ async def _approval_reads(
approvals: Sequence[Approval],
) -> list[ApprovalRead]:
mapping = await _approval_task_ids_map(session, approvals)
title_by_id = await _task_titles_by_id(
session,
task_ids={task_id for task_ids in mapping.values() for task_id in task_ids},
)
return [
_approval_to_read(approval, task_ids=mapping.get(approval.id, [])) for approval in approvals
_approval_to_read(
approval,
task_ids=(task_ids := mapping.get(approval.id, [])),
task_titles=[title_by_id[task_id] for task_id in task_ids if task_id in title_by_id],
)
for approval in approvals
]
@@ -389,7 +425,12 @@ async def create_approval(
)
await session.commit()
await session.refresh(approval)
return _approval_to_read(approval, task_ids=task_ids)
title_by_id = await _task_titles_by_id(session, task_ids=set(task_ids))
return _approval_to_read(
approval,
task_ids=task_ids,
task_titles=[title_by_id[task_id] for task_id in task_ids if task_id in title_by_id],
)
@router.patch("/{approval_id}", response_model=ApprovalRead)

View File

@@ -250,9 +250,7 @@ async def _query_wip(
if not board_ids:
return _wip_series_from_mapping(range_spec, {})
inbox_bucket_col = func.date_trunc(range_spec.bucket, Task.created_at).label(
"inbox_bucket"
)
inbox_bucket_col = func.date_trunc(range_spec.bucket, Task.created_at).label("inbox_bucket")
inbox_statement = (
select(inbox_bucket_col, func.count())
.where(col(Task.status) == "inbox")
@@ -264,9 +262,7 @@ async def _query_wip(
)
inbox_results = (await session.exec(inbox_statement)).all()
status_bucket_col = func.date_trunc(range_spec.bucket, Task.updated_at).label(
"status_bucket"
)
status_bucket_col = func.date_trunc(range_spec.bucket, Task.updated_at).label("status_bucket")
progress_case = case((col(Task.status) == "in_progress", 1), else_=0)
review_case = case((col(Task.status) == "review", 1), else_=0)
done_case = case((col(Task.status) == "done", 1), else_=0)

View File

@@ -224,12 +224,27 @@ def _get_request_id(request: Request) -> str | None:
def _error_payload(*, detail: object, request_id: str | None) -> dict[str, object]:
payload: dict[str, Any] = {"detail": detail}
payload: dict[str, Any] = {"detail": _json_safe(detail)}
if request_id:
payload["request_id"] = request_id
return payload
def _json_safe(value: object) -> object:
"""Return a JSON-serializable representation for error payloads."""
if isinstance(value, bytes):
return value.decode("utf-8", errors="replace")
if isinstance(value, (bytearray, memoryview)):
return bytes(value).decode("utf-8", errors="replace")
if isinstance(value, dict):
return {str(key): _json_safe(item) for key, item in value.items()}
if isinstance(value, (list, tuple, set)):
return [_json_safe(item) for item in value]
if value is None or isinstance(value, (str, int, float, bool)):
return value
return str(value)
async def _request_validation_handler(
request: Request,
exc: RequestValidationError,

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
from datetime import datetime
from uuid import UUID, uuid4
from sqlalchemy import JSON, Column
from sqlalchemy import JSON, Column, Float
from sqlmodel import Field
from app.core.time import utcnow
@@ -25,7 +25,7 @@ class Approval(QueryModel, table=True):
agent_id: UUID | None = Field(default=None, foreign_key="agents.id", index=True)
action_type: str
payload: dict[str, object] | None = Field(default=None, sa_column=Column(JSON))
confidence: int
confidence: float = Field(sa_column=Column(Float, nullable=False))
rubric_scores: dict[str, int] | None = Field(default=None, sa_column=Column(JSON))
status: str = Field(default="pending", index=True)
created_at: datetime = Field(default_factory=utcnow)

View File

@@ -11,6 +11,7 @@ from sqlmodel import Field, SQLModel
ApprovalStatus = Literal["pending", "approved", "rejected"]
STATUS_REQUIRED_ERROR = "status is required"
LEAD_REASONING_REQUIRED_ERROR = "lead reasoning is required"
RUNTIME_ANNOTATION_TYPES = (datetime, UUID)
@@ -21,7 +22,7 @@ class ApprovalBase(SQLModel):
task_id: UUID | None = None
task_ids: list[UUID] = Field(default_factory=list)
payload: dict[str, object] | None = None
confidence: int
confidence: float = Field(ge=0, le=100)
rubric_scores: dict[str, int] | None = None
status: ApprovalStatus = "pending"
@@ -48,6 +49,21 @@ class ApprovalCreate(ApprovalBase):
agent_id: UUID | None = None
@model_validator(mode="after")
def validate_lead_reasoning(self) -> Self:
    """Ensure each approval request includes explicit lead reasoning."""
    payload = self.payload
    if isinstance(payload, dict):
        # Accept a top-level "reason", or one nested under "decision",
        # in that order; only non-empty strings count.
        candidates: list[object] = [payload.get("reason")]
        decision = payload.get("decision")
        if isinstance(decision, dict):
            candidates.append(decision.get("reason"))
        for candidate in candidates:
            if isinstance(candidate, str) and candidate.strip():
                return self
    raise ValueError(LEAD_REASONING_REQUIRED_ERROR)
class ApprovalUpdate(SQLModel):
"""Payload for mutating approval status."""
@@ -67,6 +83,7 @@ class ApprovalRead(ApprovalBase):
id: UUID
board_id: UUID
task_titles: list[str] = Field(default_factory=list)
agent_id: UUID | None = None
created_at: datetime
resolved_at: datetime | None = None

View File

@@ -36,10 +36,21 @@ def _memory_to_read(memory: BoardMemory) -> BoardMemoryRead:
return BoardMemoryRead.model_validate(memory, from_attributes=True)
def _approval_to_read(approval: Approval, *, task_ids: list[UUID]) -> ApprovalRead:
def _approval_to_read(
approval: Approval,
*,
task_ids: list[UUID],
task_titles: list[str],
) -> ApprovalRead:
model = ApprovalRead.model_validate(approval, from_attributes=True)
primary_task_id = task_ids[0] if task_ids else None
return model.model_copy(update={"task_id": primary_task_id, "task_ids": task_ids})
return model.model_copy(
update={
"task_id": primary_task_id,
"task_ids": task_ids,
"task_titles": task_titles,
},
)
def _task_to_card(
@@ -137,13 +148,21 @@ async def build_board_snapshot(session: AsyncSession, board: Board) -> BoardSnap
session,
approval_ids=approval_ids,
)
task_title_by_id = {task.id: task.title for task in tasks}
approval_reads = [
_approval_to_read(
approval,
task_ids=task_ids_by_approval.get(
approval.id,
[approval.task_id] if approval.task_id is not None else [],
task_ids=(
linked_task_ids := task_ids_by_approval.get(
approval.id,
[approval.task_id] if approval.task_id is not None else [],
)
),
task_titles=[
task_title_by_id[task_id]
for task_id in linked_task_ids
if task_id in task_title_by_id
],
)
for approval in approvals
]

View File

@@ -5,16 +5,16 @@ from __future__ import annotations
import hashlib
from typing import Mapping
CONFIDENCE_THRESHOLD = 80
CONFIDENCE_THRESHOLD = 80.0
MIN_PLANNING_SIGNALS = 2
def compute_confidence(rubric_scores: Mapping[str, int]) -> int:
def compute_confidence(rubric_scores: Mapping[str, int]) -> float:
"""Compute aggregate confidence from rubric score components."""
return int(sum(rubric_scores.values()))
return float(sum(rubric_scores.values()))
def approval_required(*, confidence: int, is_external: bool, is_risky: bool) -> bool:
def approval_required(*, confidence: float, is_external: bool, is_risky: bool) -> bool:
    """Return whether an action must go through explicit approval."""
    # External or risky actions are always gated; otherwise only actions
    # below the module-level confidence threshold require sign-off.
    if is_external or is_risky:
        return True
    return confidence < CONFIDENCE_THRESHOLD

View File

@@ -0,0 +1,39 @@
"""make approval confidence float
Revision ID: e2f9c6b4a1d3
Revises: d8c1e5a4f7b2
Create Date: 2026-02-12 20:00:00.000000
"""
from __future__ import annotations
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "e2f9c6b4a1d3"
down_revision = "d8c1e5a4f7b2"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Widen ``approvals.confidence`` from INTEGER to FLOAT."""
    op.alter_column(
        "approvals",
        "confidence",
        existing_type=sa.Integer(),
        type_=sa.Float(),
        existing_nullable=False,
    )
def downgrade() -> None:
    """Revert ``approvals.confidence`` back to INTEGER.

    NOTE(review): any fractional confidence values stored while on FLOAT
    will be truncated/rounded by the database cast — confirm acceptable
    before downgrading.
    """
    op.alter_column(
        "approvals",
        "confidence",
        existing_type=sa.Float(),
        type_=sa.Integer(),
        existing_nullable=False,
    )

View File

@@ -31,4 +31,3 @@ This file defines how you decide when to act vs when to ask.
## Collaboration defaults
- If you are idle/unassigned: pick 1 in-progress/review task owned by someone else and leave a concrete, helpful comment (context gaps, quality risks, validation ideas, edge cases, handoff clarity).
- If you notice duplicate work: flag it and propose a merge/split so there is one clear DRI per deliverable.

View File

@@ -66,4 +66,3 @@ Notes:
| Date | Change |
|------|--------|
| | |

View File

@@ -51,22 +51,25 @@ async def test_create_approval_rejects_duplicate_pending_for_same_task() -> None
async with await _make_session(engine) as session:
board, task_ids = await _seed_board_with_tasks(session, task_count=1)
task_id = task_ids[0]
await approvals_api.create_approval(
created = await approvals_api.create_approval(
payload=ApprovalCreate(
action_type="task.execute",
task_id=task_id,
payload={"reason": "Initial execution needs confirmation."},
confidence=80,
status="pending",
),
board=board,
session=session,
)
assert created.task_titles == [f"task-{task_id}"]
with pytest.raises(HTTPException) as exc:
await approvals_api.create_approval(
payload=ApprovalCreate(
action_type="task.retry",
task_id=task_id,
payload={"reason": "Retry should still be gated."},
confidence=77,
status="pending",
),
@@ -91,22 +94,25 @@ async def test_create_approval_rejects_pending_conflict_from_linked_task_ids() -
async with await _make_session(engine) as session:
board, task_ids = await _seed_board_with_tasks(session, task_count=2)
task_a, task_b = task_ids
await approvals_api.create_approval(
created = await approvals_api.create_approval(
payload=ApprovalCreate(
action_type="task.batch_execute",
task_ids=[task_a, task_b],
payload={"reason": "Batch operation requires sign-off."},
confidence=85,
status="pending",
),
board=board,
session=session,
)
assert created.task_titles == [f"task-{task_a}", f"task-{task_b}"]
with pytest.raises(HTTPException) as exc:
await approvals_api.create_approval(
payload=ApprovalCreate(
action_type="task.execute",
task_id=task_b,
payload={"reason": "Single task overlaps with pending batch."},
confidence=70,
status="pending",
),
@@ -135,6 +141,7 @@ async def test_update_approval_rejects_reopening_to_pending_with_existing_pendin
payload=ApprovalCreate(
action_type="task.execute",
task_id=task_id,
payload={"reason": "Primary pending approval is active."},
confidence=83,
status="pending",
),
@@ -145,6 +152,7 @@ async def test_update_approval_rejects_reopening_to_pending_with_existing_pendin
payload=ApprovalCreate(
action_type="task.review",
task_id=task_id,
payload={"reason": "Review decision completed earlier."},
confidence=90,
status="approved",
),

View File

@@ -0,0 +1,60 @@
from __future__ import annotations
import pytest
from pydantic import ValidationError
from app.schemas.approvals import ApprovalCreate
def test_approval_create_requires_confidence_score() -> None:
    """A payload without ``confidence`` is rejected by validation."""
    with pytest.raises(ValidationError, match="confidence"):
        ApprovalCreate.model_validate(
            {
                "action_type": "task.update",
                "payload": {"reason": "Missing confidence should fail."},
            },
        )
@pytest.mark.parametrize("confidence", [-1.0, 101.0])
def test_approval_create_rejects_out_of_range_confidence(confidence: float) -> None:
    """Confidence outside the inclusive 0..100 range fails validation."""
    with pytest.raises(ValidationError, match="confidence"):
        ApprovalCreate.model_validate(
            {
                "action_type": "task.update",
                "payload": {"reason": "Confidence must be in range."},
                "confidence": confidence,
            },
        )
def test_approval_create_requires_lead_reasoning() -> None:
    """A payload without any lead ``reason`` is rejected with a clear error."""
    with pytest.raises(ValidationError, match="lead reasoning is required"):
        ApprovalCreate.model_validate(
            {
                "action_type": "task.update",
                "confidence": 80,
            },
        )
def test_approval_create_accepts_nested_decision_reason() -> None:
    """A reason nested under ``payload.decision`` satisfies the validator."""
    model = ApprovalCreate.model_validate(
        {
            "action_type": "task.update",
            "confidence": 80,
            "payload": {"decision": {"reason": "Needs manual approval."}},
        },
    )
    assert model.payload == {"decision": {"reason": "Needs manual approval."}}
def test_approval_create_accepts_float_confidence() -> None:
    """Fractional confidence values are preserved (no coercion to int)."""
    model = ApprovalCreate.model_validate(
        {
            "action_type": "task.update",
            "confidence": 88.75,
            "payload": {"reason": "Fractional confidence should be preserved."},
        },
    )
    assert model.confidence == 88.75

View File

@@ -14,6 +14,7 @@ from app.core.error_handling import (
_error_payload,
_get_request_id,
_http_exception_exception_handler,
_json_safe,
_request_validation_exception_handler,
_response_validation_exception_handler,
install_error_handling,
@@ -38,6 +39,31 @@ def test_request_validation_error_includes_request_id():
assert resp.headers.get(REQUEST_ID_HEADER) == body["request_id"]
def test_request_validation_error_handles_bytes_input_without_500():
    """A non-JSON bytes body yields a 422 with a JSON-safe error payload.

    Regression test: serializing raw request bytes in the validation error
    detail must not crash the error handler (no 500).
    """

    class Payload(BaseModel):
        content: str

    app = FastAPI()
    install_error_handling(app)

    @app.put("/needs-object")
    def needs_object(payload: Payload) -> dict[str, str]:
        return {"content": payload.content}

    # raise_server_exceptions=False so an unhandled 500 surfaces as a
    # response (and fails the status assertion) instead of re-raising.
    client = TestClient(app, raise_server_exceptions=False)
    resp = client.put(
        "/needs-object",
        content=b"plain-text-body",
        headers={"content-type": "text/plain"},
    )
    assert resp.status_code == 422
    body = resp.json()
    assert isinstance(body.get("detail"), list)
    assert isinstance(body.get("request_id"), str) and body["request_id"]
    assert resp.headers.get(REQUEST_ID_HEADER) == body["request_id"]
def test_http_exception_includes_request_id():
app = FastAPI()
install_error_handling(app)
@@ -184,6 +210,23 @@ def test_error_payload_omits_request_id_when_none() -> None:
assert _error_payload(detail="x", request_id=None) == {"detail": "x"}
def test_json_safe_decodes_bytes() -> None:
    """Raw bytes are decoded to text for JSON serialization."""
    assert _json_safe(b"abc") == "abc"
def test_json_safe_decodes_bytearray_and_memoryview() -> None:
    """Byte-like views are decoded the same way as raw bytes."""
    assert _json_safe(bytearray(b"abc")) == "abc"
    assert _json_safe(memoryview(b"abc")) == "abc"
def test_json_safe_falls_back_to_str() -> None:
    """Objects with no JSON mapping fall back to their ``str`` form."""

    class Thing:
        def __str__(self) -> str:
            return "thing"

    assert _json_safe(Thing()) == "thing"
@pytest.mark.asyncio
async def test_request_validation_exception_wrapper_rejects_wrong_exception() -> None:
req = Request({"type": "http", "headers": [], "state": {}})

58
docs/03-development.md Normal file
View File

@@ -0,0 +1,58 @@
# Development workflow
This page describes a contributor-friendly local dev loop.
## Prereqs
- Python 3.12+
- Node 22+
- Docker + Docker Compose v2 (`docker compose`)
- `uv` (installed automatically by `make backend-sync` in CI; for local install see https://docs.astral.sh/uv/)
## Fast local loop (DB in Docker, apps on host)
1) Start Postgres (via compose)
```bash
cp .env.example .env
# Configure .env as needed (see root README for auth-mode notes)
docker compose -f compose.yml --env-file .env up -d db
```
2) Install deps
```bash
make setup
```
3) Apply DB migrations
```bash
make backend-migrate
```
4) Run backend (dev)
```bash
cd backend
uv run uvicorn app.main:app --reload --host 0.0.0.0 --port 8000
```
5) Run frontend (dev)
In another terminal:
```bash
cd frontend
npm run dev -- --hostname 0.0.0.0 --port 3000
```
Open:
- Frontend: http://localhost:3000
- Backend health: http://localhost:8000/healthz
## Notes
- If you want the fully-containerized stack instead, see the root README's compose instructions.
- If you add new env vars, prefer documenting them in `.env.example` and linking from docs rather than pasting secrets.

19
docs/README.md Normal file
View File

@@ -0,0 +1,19 @@
# Mission Control docs
This folder is the starting point for Mission Control documentation.
## Sections
- [Development workflow](./03-development.md)
- [Testing guide](./testing/README.md)
- [Coverage policy](./coverage-policy.md)
- [Deployment](./deployment/README.md)
- [Production notes](./production/README.md)
- [Troubleshooting](./troubleshooting/README.md)
- [Release checklist](./release/README.md)
- [Gateway WebSocket protocol](./openclaw_gateway_ws.md)
## Status
These pages are minimal placeholders so repo-relative links stay healthy. The actual docs
information architecture will be defined in the Docs overhaul tasks.

3
docs/coverage-policy.md Normal file
View File

@@ -0,0 +1,3 @@
# Coverage policy
Placeholder: coverage policy is currently documented in the root `Makefile` (`backend-coverage`).

View File

@@ -0,0 +1,3 @@
# Deployment guide
Placeholder.

View File

@@ -0,0 +1,3 @@
# Gateway WebSocket protocol
Placeholder.

View File

@@ -0,0 +1,3 @@
# Production notes
Placeholder.

104
docs/release/README.md Normal file
View File

@@ -0,0 +1,104 @@
# Release checklist (maintainers)
This project does not yet have a fully automated release pipeline.
Use this checklist for **manual releases** and as a place to evolve the process over time.
## Before you start
- [ ] You have maintainer permissions on `abhi1693/openclaw-mission-control`
- [ ] You know which **scope** you're releasing:
- [ ] backend-only change
- [ ] frontend-only change
- [ ] infra/docs-only change
- [ ] full release
## 1) Pick the release commit
- [ ] Ensure you're on the latest `master`:
```bash
git fetch origin
git checkout master
git reset --hard origin/master
```
- [ ] Identify the commit SHA you want to release (usually `origin/master`).
## 2) Verify CI + local checks
- [ ] CI is green on the target commit.
- [ ] Run the local CI-parity checks:
```bash
make setup
make check
make docs-check
```
## 3) Smoke test (recommended)
If the change affects runtime behavior (API/UI/auth), do a quick smoke test using the compose stack.
```bash
cp .env.example .env
# Configure .env (see repo README for auth mode notes)
docker compose -f compose.yml --env-file .env up -d --build
after_up() {
echo "Frontend: http://localhost:3000"
echo "Backend health: http://localhost:8000/healthz"
}
after_up
```
- [ ] Validate basic flows (at minimum):
- [ ] frontend loads
- [ ] backend `/healthz` returns 200
- [ ] auth mode behaves as expected (local or clerk)
## 4) Prepare release notes
There is no enforced changelog format yet.
Choose one:
- [ ] GitHub Release notes (recommended for now)
- [ ] `CHANGELOG.md` (TODO: adopt Keep a Changelog)
Minimum release notes:
- [ ] Summary of key changes
- [ ] Any config changes / env var changes
- [ ] Any DB migration notes
- [ ] Known issues
## 5) Tag + publish
Tagging convention:
- Use semver tags: `vMAJOR.MINOR.PATCH` (e.g. `v1.4.0`).
- Create a GitHub Release from the tag (release notes can be manual for now).
- If the repo already has existing tags/releases, mirror the established convention exactly.
Suggested manual flow:
```bash
# Example (adjust once tag conventions are decided)
git tag -a v0.1.0 -m "Release v0.1.0"
git push origin v0.1.0
```
- [ ] Create a GitHub Release from the tag
- [ ] Paste the release notes
## 6) Post-release
- [ ] Monitor new issues for regressions
- [ ] If you changed public behavior, ensure docs are updated (README + docs pages)
## Notes / follow-ups
- Consider adding:
- automated versioning (changesets / semantic-release)
- release workflow in `.github/workflows/release.yml`
- a `CHANGELOG.md`

57
docs/testing/README.md Normal file
View File

@@ -0,0 +1,57 @@
# Testing guide
This repo's CI parity entrypoint is:
```bash
make check
```
That target runs lint + typecheck + unit tests + coverage gate + frontend build.
## Quick commands
From repo root:
```bash
make setup
# Format + lint + typecheck + tests + build (CI parity)
make check
```
Docs-only quality gates:
```bash
make docs-check
```
## Backend
```bash
make backend-lint
make backend-typecheck
make backend-test
make backend-coverage
```
## Frontend
```bash
make frontend-lint
make frontend-typecheck
make frontend-test
make frontend-build
```
## E2E (Cypress)
CI runs Cypress E2E in `.github/workflows/ci.yml`.
Locally, the frontend package.json provides the E2E script:
```bash
cd frontend
npm run e2e
```
If your E2E tests require auth, ensure you have the necessary env configured (see root README for auth mode). Do **not** paste tokens into docs.

View File

@@ -0,0 +1,3 @@
# Troubleshooting
Placeholder.

View File

@@ -252,7 +252,8 @@ export default function DashboardPage() {
const searchParams = useSearchParams();
const selectedRangeParam = searchParams.get("range");
const selectedRange: RangeKey =
selectedRangeParam && DASHBOARD_RANGE_SET.has(selectedRangeParam as RangeKey)
selectedRangeParam &&
DASHBOARD_RANGE_SET.has(selectedRangeParam as RangeKey)
? (selectedRangeParam as RangeKey)
: DEFAULT_RANGE;
const metricsQuery = useDashboardMetricsApiV1MetricsDashboardGet<
@@ -401,10 +402,7 @@ export default function DashboardPage() {
</div>
<div className="mt-8 grid grid-cols-1 gap-6 lg:grid-cols-2">
<ChartCard
title="Completed Tasks"
subtitle="Throughput"
>
<ChartCard title="Completed Tasks" subtitle="Throughput">
<ResponsiveContainer width="100%" height="100%">
<BarChart
data={throughputSeries}
@@ -449,10 +447,7 @@ export default function DashboardPage() {
</ResponsiveContainer>
</ChartCard>
<ChartCard
title="Avg Hours to Review"
subtitle="Cycle time"
>
<ChartCard title="Avg Hours to Review" subtitle="Cycle time">
<ResponsiveContainer width="100%" height="100%">
<LineChart
data={cycleSeries}
@@ -501,10 +496,7 @@ export default function DashboardPage() {
</ResponsiveContainer>
</ChartCard>
<ChartCard
title="Failed Events"
subtitle="Error rate"
>
<ChartCard title="Failed Events" subtitle="Error rate">
<ResponsiveContainer width="100%" height="100%">
<LineChart
data={errorSeries}

View File

@@ -52,9 +52,14 @@ describe("BoardApprovalsPanel", () => {
linked_request: {
tasks: [
{
task_id: "task-1",
title: "Launch onboarding checklist",
description: "Create and validate the v1 onboarding checklist.",
},
{
task_id: "task-2",
title: "Publish onboarding checklist",
},
],
task_ids: ["task-1", "task-2"],
},
@@ -84,7 +89,46 @@ describe("BoardApprovalsPanel", () => {
expect(
screen.getByText("Needs explicit sign-off before rollout."),
).toBeInTheDocument();
expect(screen.getByText("62% score")).toBeInTheDocument();
expect(screen.getByText(/related tasks/i)).toBeInTheDocument();
expect(
screen.getByRole("link", { name: "Launch onboarding checklist" }),
).toHaveAttribute("href", "/boards/board-1?taskId=task-1");
expect(
screen.getByRole("link", { name: "Publish onboarding checklist" }),
).toHaveAttribute("href", "/boards/board-1?taskId=task-2");
expect(screen.getByText(/rubric scores/i)).toBeInTheDocument();
expect(screen.getByText("Clarity")).toBeInTheDocument();
});
it("uses schema task_titles for related task links when payload titles are missing", () => {
const approval = {
id: "approval-2",
board_id: "board-1",
action_type: "task.update",
confidence: 88,
status: "pending",
task_id: "task-a",
task_ids: ["task-a", "task-b"],
task_titles: ["Prepare release notes", "Publish release notes"],
created_at: "2026-02-12T11:00:00Z",
resolved_at: null,
payload: {
task_ids: ["task-a", "task-b"],
reason: "Needs sign-off before publishing.",
},
rubric_scores: null,
} as ApprovalRead;
renderWithQueryClient(
<BoardApprovalsPanel boardId="board-1" approvals={[approval]} />,
);
expect(
screen.getByRole("link", { name: "Prepare release notes" }),
).toHaveAttribute("href", "/boards/board-1?taskId=task-a");
expect(
screen.getByRole("link", { name: "Publish release notes" }),
).toHaveAttribute("href", "/boards/board-1?taskId=task-b");
});
});

View File

@@ -1,6 +1,7 @@
"use client";
import { useCallback, useMemo, useState } from "react";
import Link from "next/link";
import { useAuth } from "@/auth/clerk";
import { useQueryClient } from "@tanstack/react-query";
@@ -28,9 +29,16 @@ import { apiDatetimeToMs, parseApiDatetime } from "@/lib/datetime";
import { cn } from "@/lib/utils";
type Approval = ApprovalRead & { status: string };
// Coerce an unknown confidence value to a finite number; anything that is
// not a finite number (null, NaN, Infinity, strings) becomes 0.
const normalizeScore = (value: unknown): number => {
  if (typeof value !== "number" || !Number.isFinite(value)) return 0;
  return value;
};
// Fill in fields the API may omit: default the status to "pending" and
// make confidence a safe finite number for display.
const normalizeApproval = (approval: ApprovalRead): Approval => ({
  ...approval,
  status: approval.status ?? "pending",
  confidence: normalizeScore(approval.confidence),
});
type BoardApprovalsPanelProps = {
@@ -237,6 +245,79 @@ const approvalTaskIds = (approval: Approval) => {
return [...new Set(merged)];
};
type RelatedTaskSummary = {
id: string;
title: string;
};
// Resolve the tasks linked to an approval into {id, title} pairs for display.
// Title resolution order for each task id:
//   1. a payload task entry whose id matches (linked_request/linkedRequest.tasks),
//   2. a positional title (schema task_titles first, then payload title lists),
//   3. the payload's single-task title when exactly one task is linked,
//   4. the "Untitled task" fallback.
const approvalRelatedTasks = (approval: Approval): RelatedTaskSummary[] => {
  const payload = approval.payload ?? {};
  const taskIds = approvalTaskIds(approval);
  if (taskIds.length === 0) return [];
  // task_titles is on the API schema but may be absent on older responses,
  // hence the widened cast.
  const apiTaskTitles = (
    approval as Approval & { task_titles?: string[] | null }
  ).task_titles;
  const titleByTaskId = new Map<string, string>();
  const orderedTitles: string[] = [];
  // Harvest titles from task objects embedded in the payload at `path`.
  const collectTaskTitles = (path: string[]) => {
    const tasks = payloadAtPath(payload, path);
    if (!Array.isArray(tasks)) return;
    for (const task of tasks) {
      if (!isRecord(task)) continue;
      const rawTitle = task["title"];
      const title = typeof rawTitle === "string" ? rawTitle.trim() : "";
      if (!title) continue;
      orderedTitles.push(title);
      // Task ids appear under several key spellings across payload versions.
      const taskId =
        typeof task["task_id"] === "string"
          ? task["task_id"]
          : typeof task["taskId"] === "string"
            ? task["taskId"]
            : typeof task["id"] === "string"
              ? task["id"]
              : null;
      if (taskId && taskId.trim()) {
        titleByTaskId.set(taskId, title);
      }
    }
  };
  collectTaskTitles(["linked_request", "tasks"]);
  collectTaskTitles(["linkedRequest", "tasks"]);
  // Positional fallbacks: schema titles first, then payload-derived lists.
  const indexedTitles = [
    ...(Array.isArray(apiTaskTitles) ? apiTaskTitles : []),
    ...orderedTitles,
    ...payloadValues(payload, "task_titles"),
    ...payloadValues(payload, "taskTitles"),
    ...payloadNestedValues(payload, ["linked_request", "task_titles"]),
    ...payloadNestedValues(payload, ["linked_request", "taskTitles"]),
    ...payloadNestedValues(payload, ["linkedRequest", "task_titles"]),
    ...payloadNestedValues(payload, ["linkedRequest", "taskTitles"]),
  ]
    .map((value) => value.trim())
    .filter((value) => value.length > 0);
  // Used only when a single task is linked and no better title was found.
  const singleTitle =
    payloadValue(payload, "title") ??
    payloadNestedValue(payload, ["task", "title"]) ??
    payloadFirstLinkedTaskValue(payload, "title");
  return taskIds.map((taskId, index) => {
    const resolvedTitle =
      titleByTaskId.get(taskId) ??
      indexedTitles[index] ??
      (taskIds.length === 1 ? singleTitle : null) ??
      "Untitled task";
    return { id: taskId, title: resolvedTitle };
  });
};
// Build a board URL that deep-links to one task via the taskId query param.
const taskHref = (boardId: string, taskId: string) =>
  `/boards/${encodeURIComponent(boardId)}?taskId=${encodeURIComponent(taskId)}`;
const approvalSummary = (approval: Approval, boardLabel?: string | null) => {
const payload = approval.payload ?? {};
const taskIds = approvalTaskIds(approval);
@@ -544,6 +625,9 @@ export function BoardApprovalsPanel({
</p>
) : null}
<div className="mt-2 flex items-center gap-2 text-xs text-slate-500">
<span className="rounded bg-slate-100 px-1.5 py-0.5 font-semibold text-slate-700">
{approval.confidence}% score
</span>
<Clock className="h-3.5 w-3.5 opacity-60" />
<span>{formatTimestamp(approval.created_at)}</span>
</div>
@@ -582,10 +666,12 @@ export function BoardApprovalsPanel({
const titleText = titleRow?.value?.trim() ?? "";
const descriptionText = summary.description?.trim() ?? "";
const reasoningText = summary.reason?.trim() ?? "";
const relatedTasks = approvalRelatedTasks(selectedApproval);
const extraRows = summary.rows.filter((row) => {
const normalized = row.label.toLowerCase();
if (normalized === "title") return false;
if (normalized === "task") return false;
if (normalized === "tasks") return false;
if (normalized === "assignee") return false;
return true;
});
@@ -733,6 +819,28 @@ export function BoardApprovalsPanel({
</div>
) : null}
{relatedTasks.length > 0 ? (
<div className="space-y-2">
<p className="text-[11px] font-semibold uppercase tracking-[0.2em] text-slate-500">
Related tasks
</p>
<div className="flex flex-wrap gap-2">
{relatedTasks.map((task) => (
<Link
key={`${selectedApproval.id}-task-${task.id}`}
href={taskHref(
selectedApproval.board_id,
task.id,
)}
className="rounded-md border border-slate-200 bg-white px-2 py-1 text-xs text-slate-700 underline-offset-2 transition hover:border-slate-300 hover:bg-slate-50 hover:text-slate-900 hover:underline"
>
{task.title}
</Link>
))}
</div>
</div>
) : null}
{extraRows.length > 0 ? (
<div className="space-y-2">
<p className="text-[11px] font-semibold uppercase tracking-[0.2em] text-slate-500">

View File

@@ -1,6 +1,14 @@
"use client";
import { memo, type HTMLAttributes } from "react";
import {
Children,
cloneElement,
isValidElement,
memo,
type HTMLAttributes,
type ReactElement,
type ReactNode,
} from "react";
import ReactMarkdown, { type Components } from "react-markdown";
import remarkBreaks from "remark-breaks";
@@ -13,6 +21,86 @@ type MarkdownCodeProps = HTMLAttributes<HTMLElement> & {
inline?: boolean;
};
// Matches @handles made of letters/digits/underscore (with internal "." or "-",
// but not at the edges). Group 1 captures the character before the mention
// (start-of-string or a non-word char) so callers can re-emit it unstyled;
// group 2 is the mention itself, including the leading "@".
const MENTION_PATTERN =
  /(^|[^A-Za-z0-9_])(@[A-Za-z0-9_](?:[A-Za-z0-9_.-]*[A-Za-z0-9_])?)/g;
// Split a plain-text node into segments, wrapping each @mention in a
// highlighted <span>. Returns the original string unchanged when the text
// contains no mention at all.
const renderMentionsInText = (text: string, keyPrefix: string): ReactNode => {
  const segments: ReactNode[] = [];
  let cursor = 0;
  let mentionIndex = 0;
  for (const hit of text.matchAll(MENTION_PATTERN)) {
    const start = hit.index ?? 0;
    const leading = hit[1] ?? "";
    const handle = hit[2] ?? "";
    // Plain text between the previous match and this one.
    if (start > cursor) {
      segments.push(text.slice(cursor, start));
    }
    // The captured character preceding the mention stays unstyled.
    if (leading) {
      segments.push(leading);
    }
    segments.push(
      <span
        key={`${keyPrefix}-${mentionIndex}`}
        className="font-semibold text-cyan-700"
      >
        {handle}
      </span>,
    );
    cursor = start + leading.length + handle.length;
    mentionIndex += 1;
  }
  if (segments.length === 0) {
    return text;
  }
  // Trailing text after the final mention.
  if (cursor < text.length) {
    segments.push(text.slice(cursor));
  }
  return segments;
};
// Recursively walk rendered markdown children and highlight @mentions inside
// any plain string content. `<code>` elements are left untouched so literal
// snippets (decorators, emails, handles in code) keep their exact text.
const renderMentions = (content: ReactNode, keyPrefix = "mention"): ReactNode => {
  if (typeof content === "string") {
    return renderMentionsInText(content, keyPrefix);
  }
  // Nothing to highlight in null/undefined/boolean/number nodes.
  if (
    content === null ||
    content === undefined ||
    typeof content === "boolean" ||
    typeof content === "number"
  ) {
    return content;
  }
  if (Array.isArray(content)) {
    return Children.map(content, (child, index) =>
      renderMentions(child, `${keyPrefix}-${index}`),
    );
  }
  if (isValidElement(content)) {
    // `type === "code"` already implies a string host-element tag, so the
    // previous `typeof content.type === "string"` guard was redundant.
    if (content.type === "code") {
      return content;
    }
    const childProps = content.props as { children?: ReactNode };
    if (childProps.children === undefined) {
      return content;
    }
    // Passing `undefined` props keeps the element's existing props intact;
    // only the children are replaced with their mention-highlighted form.
    return cloneElement(
      content as ReactElement<{ children?: ReactNode }>,
      undefined,
      renderMentions(childProps.children, keyPrefix),
    );
  }
  return content;
};
const MARKDOWN_CODE_COMPONENTS: Components = {
pre: ({ node: _node, className, ...props }) => (
<pre
@@ -77,28 +165,47 @@ const MARKDOWN_TABLE_COMPONENTS: Components = {
tr: ({ node: _node, className, ...props }) => (
<tr className={cn("align-top", className)} {...props} />
),
th: ({ node: _node, className, ...props }) => (
th: ({ node: _node, className, children, ...props }) => (
<th
className={cn(
"border border-slate-200 px-3 py-2 text-left text-xs font-semibold",
className,
)}
{...props}
/>
>
{renderMentions(children)}
</th>
),
td: ({ node: _node, className, ...props }) => (
td: ({ node: _node, className, children, ...props }) => (
<td
className={cn("border border-slate-200 px-3 py-2 align-top", className)}
{...props}
/>
>
{renderMentions(children)}
</td>
),
};
const MARKDOWN_COMPONENTS_BASIC: Components = {
...MARKDOWN_TABLE_COMPONENTS,
...MARKDOWN_CODE_COMPONENTS,
p: ({ node: _node, className, ...props }) => (
<p className={cn("mb-2 last:mb-0", className)} {...props} />
a: ({ node: _node, className, children, ...props }) => (
<a
className={cn(
"font-medium text-sky-700 underline decoration-sky-400 underline-offset-2 transition-colors hover:text-sky-800 hover:decoration-sky-600",
className,
)}
target="_blank"
rel="noopener noreferrer"
{...props}
>
{renderMentions(children)}
</a>
),
p: ({ node: _node, className, children, ...props }) => (
<p className={cn("mb-2 last:mb-0", className)} {...props}>
{renderMentions(children)}
</p>
),
ul: ({ node: _node, className, ...props }) => (
<ul className={cn("mb-2 list-disc pl-5", className)} {...props} />
@@ -106,27 +213,39 @@ const MARKDOWN_COMPONENTS_BASIC: Components = {
ol: ({ node: _node, className, ...props }) => (
<ol className={cn("mb-2 list-decimal pl-5", className)} {...props} />
),
li: ({ node: _node, className, ...props }) => (
<li className={cn("mb-1", className)} {...props} />
li: ({ node: _node, className, children, ...props }) => (
<li className={cn("mb-1", className)} {...props}>
{renderMentions(children)}
</li>
),
strong: ({ node: _node, className, ...props }) => (
<strong className={cn("font-semibold", className)} {...props} />
strong: ({ node: _node, className, children, ...props }) => (
<strong className={cn("font-semibold", className)} {...props}>
{renderMentions(children)}
</strong>
),
};
const MARKDOWN_COMPONENTS_DESCRIPTION: Components = {
...MARKDOWN_COMPONENTS_BASIC,
p: ({ node: _node, className, ...props }) => (
<p className={cn("mb-3 last:mb-0", className)} {...props} />
p: ({ node: _node, className, children, ...props }) => (
<p className={cn("mb-3 last:mb-0", className)} {...props}>
{renderMentions(children)}
</p>
),
h1: ({ node: _node, className, ...props }) => (
<h1 className={cn("mb-2 text-base font-semibold", className)} {...props} />
h1: ({ node: _node, className, children, ...props }) => (
<h1 className={cn("mb-2 text-base font-semibold", className)} {...props}>
{renderMentions(children)}
</h1>
),
h2: ({ node: _node, className, ...props }) => (
<h2 className={cn("mb-2 text-sm font-semibold", className)} {...props} />
h2: ({ node: _node, className, children, ...props }) => (
<h2 className={cn("mb-2 text-sm font-semibold", className)} {...props}>
{renderMentions(children)}
</h2>
),
h3: ({ node: _node, className, ...props }) => (
<h3 className={cn("mb-2 text-sm font-semibold", className)} {...props} />
h3: ({ node: _node, className, children, ...props }) => (
<h3 className={cn("mb-2 text-sm font-semibold", className)} {...props}>
{renderMentions(children)}
</h3>
),
};

View File

@@ -57,10 +57,14 @@ export function DashboardSidebar() {
return (
<aside className="flex h-full w-64 flex-col border-r border-slate-200 bg-white">
<div className="flex-1 px-3 py-4">
<p className="px-3 text-xs font-semibold uppercase tracking-wider text-slate-500">Navigation</p>
<p className="px-3 text-xs font-semibold uppercase tracking-wider text-slate-500">
Navigation
</p>
<nav className="mt-3 space-y-4 text-sm">
<div>
<p className="px-3 text-[11px] font-semibold uppercase tracking-wider text-slate-400">Overview</p>
<p className="px-3 text-[11px] font-semibold uppercase tracking-wider text-slate-400">
Overview
</p>
<div className="mt-1 space-y-1">
<Link
href="/dashboard"
@@ -90,7 +94,9 @@ export function DashboardSidebar() {
</div>
<div>
<p className="px-3 text-[11px] font-semibold uppercase tracking-wider text-slate-400">Boards</p>
<p className="px-3 text-[11px] font-semibold uppercase tracking-wider text-slate-400">
Boards
</p>
<div className="mt-1 space-y-1">
<Link
href="/board-groups"

103
scripts/check_markdown_links.py Executable file
View File

@@ -0,0 +1,103 @@
#!/usr/bin/env python3
"""Lightweight markdown link checker for repo docs.
Checks *relative* links inside markdown files and fails CI if any targets are missing.
Design goals:
- No external deps.
- Ignore http(s)/mailto links.
- Ignore pure anchors (#foo).
- Support links with anchors (./path.md#section) by checking only the path part.
Limitations:
- Does not validate that anchors exist inside target files.
- Does not validate links generated dynamically or via HTML.
"""
from __future__ import annotations
import re
import sys
from pathlib import Path
LINK_RE = re.compile(r"\[[^\]]+\]\(([^)]+)\)")
def iter_md_files(root: Path) -> list[Path]:
    """Return markdown files to check.

    Policy:
    - Always check root contributor-facing markdown (`README.md`, `CONTRIBUTING.md`).
    - If `docs/` exists, also check `docs/**/*.md`.

    Rationale:
    - We want fast feedback on broken *relative* links in the most important entrypoints.
    - We intentionally do **not** crawl external URLs.
    """
    # Root-level entrypoints that exist on disk.
    candidates = (root / "README.md", root / "CONTRIBUTING.md")
    found = [path for path in candidates if path.exists()]
    docs_dir = root / "docs"
    if docs_dir.exists():
        found.extend(sorted(docs_dir.rglob("*.md")))
    # Resolve + de-dupe, then return in a stable sorted order.
    return sorted({path.resolve() for path in found})
def normalize_target(raw: str) -> str | None:
raw = raw.strip()
if not raw:
return None
if raw.startswith("http://") or raw.startswith("https://") or raw.startswith("mailto:"):
return None
if raw.startswith("#"):
return None
# strip query/fragment
raw = raw.split("#", 1)[0].split("?", 1)[0]
if not raw:
return None
return raw
def main() -> int:
    """Scan repo markdown for broken relative links; return a process exit code.

    Exit status: 0 when every relative link resolves, 1 when any target is
    missing (the broken links are printed for CI logs).
    """
    # Script lives in scripts/, so the repo root is one level up.
    root = Path(__file__).resolve().parents[1]
    markdown_files = iter_md_files(root)
    broken: list[tuple[Path, str]] = []
    for md_path in markdown_files:
        content = md_path.read_text(encoding="utf-8")
        for link_match in LINK_RE.finditer(content):
            raw_target = link_match.group(1)
            cleaned = normalize_target(raw_target)
            if cleaned is None:
                continue
            # Skip common markdown reference-style quirks.
            if cleaned.startswith("<") and cleaned.endswith(">"):
                continue
            # Resolve relative to current file.
            if not (md_path.parent / cleaned).resolve().exists():
                broken.append((md_path, raw_target))
    if not broken:
        print(f"OK: checked {len(markdown_files)} markdown files")
        return 0
    print("Broken relative links detected:\n")
    for md_path, raw_target in broken:
        print(f"- {md_path.relative_to(root)} -> {raw_target}")
    print(f"\nTotal: {len(broken)}")
    return 1
if __name__ == "__main__":
    # sys.exit raises SystemExit with main()'s return code, same as before.
    sys.exit(main())