Sub-slice C — Annotated change log

20 commits. Each commit on feat/pipeline-orchestrator since the spec/plan landed (d6b4ca0), with a one-paragraph annotation of why, the file-stat block, and the full unified diff. Click Show diff to expand. Commits are listed chronologically (oldest first).
0c9357d
feat(backend): add hubspot_deal_id + queue-friendly indexes to deals
Why
Schema foundation. Migration 0007 adds hubspot_deal_id (TEXT NOT NULL UNIQUE) and hubspot_checked_in_at (TIMESTAMPTZ nullable) to deals, with seed-row backfill, plus an updated_at DESC index for the queue endpoint. Adds make_deal(**overrides) to conftest so existing tests don't churn on every Deal(...) call site. A sketch of the read pattern the new index serves follows the diff.
Files changed
| .../0007_add_hubspot_deal_id_and_queue_index.py | 52 ++++++++++++++++++++++ |
| backend/src/vcc_backend/features/deals/models.py | 6 ++- |
| backend/tests/conftest.py | 10 +++++ |
| backend/tests/test_admin_deals_route.py | 5 ++- |
| backend/tests/test_deal_model.py | 26 ++++++++++- |
| backend/tests/test_deal_service.py | 13 +++--- |
| backend/tests/test_deals_route.py | 5 ++- |
| backend/tests/test_pipeline_orchestrator.py | 7 +-- |
| backend/tests/test_pipeline_run_model.py | 5 ++- |
| backend/tests/test_pipeline_run_service.py | 5 ++- |
| backend/tests/test_process_deal_task.py | 5 ++- |
| backend/tests/test_workers_demo_task.py | 3 +- |
Show diff
diff --git a/backend/alembic/versions/0007_add_hubspot_deal_id_and_queue_index.py b/backend/alembic/versions/0007_add_hubspot_deal_id_and_queue_index.py
new file mode 100644
@@ -0,0 +1,52 @@
+"""add hubspot_deal_id, hubspot_checked_in_at, queue indexes
+
+Revision ID: 0007
+Revises: 0006
+Create Date: 2026-05-13
+"""
+from __future__ import annotations
+
+import sqlalchemy as sa
+from alembic import op
+
+revision = "0007"
+down_revision = "0006"
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ op.add_column("deals", sa.Column("hubspot_deal_id", sa.Text(), nullable=True))
+ op.add_column(
+ "deals",
+ sa.Column("hubspot_checked_in_at", sa.DateTime(timezone=True), nullable=True),
+ )
+ op.execute(
+ "UPDATE deals SET hubspot_deal_id = 'seed-' || id::text "
+ "WHERE hubspot_deal_id IS NULL"
+ )
+ op.execute(
+ "UPDATE deals SET hubspot_checked_in_at = created_at "
+ "WHERE hubspot_checked_in_at IS NULL"
+ )
+ op.alter_column("deals", "hubspot_deal_id", nullable=False)
+
+ op.create_index(
+ "ix_deals_hubspot_deal_id", "deals", ["hubspot_deal_id"], unique=True
+ )
+ op.create_index(
+ "ix_deals_updated_at_desc", "deals", [sa.text("updated_at DESC")]
+ )
+ op.create_index(
+ "ix_deals_hubspot_checked_in_at",
+ "deals",
+ [sa.text("hubspot_checked_in_at DESC")],
+ )
+
+
+def downgrade() -> None:
+ op.drop_index("ix_deals_hubspot_checked_in_at", table_name="deals")
+ op.drop_index("ix_deals_updated_at_desc", table_name="deals")
+ op.drop_index("ix_deals_hubspot_deal_id", table_name="deals")
+ op.drop_column("deals", "hubspot_checked_in_at")
+ op.drop_column("deals", "hubspot_deal_id")
diff --git a/backend/src/vcc_backend/features/deals/models.py b/backend/src/vcc_backend/features/deals/models.py
@@ -4,7 +4,7 @@ import enum
from datetime import UTC, datetime
from uuid import UUID, uuid4
-from sqlalchemy import DateTime, Enum, func
+from sqlalchemy import DateTime, Enum, Text, func
from sqlalchemy.dialects.postgresql import UUID as PgUUID
from sqlalchemy.orm import Mapped, mapped_column
@@ -50,6 +50,10 @@ class Deal(Base):
nullable=False,
default=PipelineStage.INTAKE,
)
+ hubspot_deal_id: Mapped[str] = mapped_column(Text, nullable=False, unique=True)
+ hubspot_checked_in_at: Mapped[datetime | None] = mapped_column(
+ DateTime(timezone=True), nullable=True
+ )
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py
@@ -4,6 +4,7 @@ import os
os.environ.setdefault("TESTCONTAINERS_RYUK_DISABLED", "true")
import subprocess
+import uuid as _uuid
from collections.abc import AsyncIterator, Iterator
from pathlib import Path
@@ -18,8 +19,17 @@ from sqlalchemy.ext.asyncio import (
)
from testcontainers.postgres import PostgresContainer
+from vcc_backend.features.deals.models import Deal
+
BACKEND_ROOT = Path(__file__).parent.parent
+
+def make_deal(**overrides) -> Deal:
+ """Construct a Deal with sane test defaults. Override any field via kwargs."""
+ defaults = {"hubspot_deal_id": f"test-{_uuid.uuid4()}"}
+ defaults.update(overrides)
+ return Deal(**defaults)
+
# Tables that get TRUNCATE'd between tests. Append as new tables arrive in later plans.
TRUNCATE_TABLES = ["pipeline_runs", "deals", "procrastinate_jobs", "procrastinate_events"]
diff --git a/backend/tests/test_admin_deals_route.py b/backend/tests/test_admin_deals_route.py
@@ -8,8 +8,9 @@ import pytest
from httpx import ASGITransport, AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
+from tests.conftest import make_deal
from vcc_backend.api.app import create_app
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
@pytest.mark.asyncio
@@ -21,7 +22,7 @@ async def test_post_process_returns_200_and_job_id_for_existing_deal(
"""Happy path: existing deal → 200 with job_id."""
monkeypatch.setenv("DATABASE_URL", database_url)
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
diff --git a/backend/tests/test_deal_model.py b/backend/tests/test_deal_model.py
@@ -1,7 +1,9 @@
import pytest
from sqlalchemy import select
+from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
+from tests.conftest import make_deal
from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
@@ -9,7 +11,7 @@ from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
async def test_deal_can_be_persisted_and_loaded(session: AsyncSession) -> None:
# Isolation is via TRUNCATE at fixture setup; flushed-but-uncommitted data
# is discarded on session close.
- deal = Deal(
+ deal = make_deal(
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
)
@@ -22,3 +24,25 @@ async def test_deal_can_be_persisted_and_loaded(session: AsyncSession) -> None:
loaded = (await session.execute(select(Deal).where(Deal.id == deal.id))).scalar_one()
assert loaded.status == DealStatus.PENDING
assert loaded.current_stage == PipelineStage.INTAKE
+
+
+@pytest.mark.asyncio
+async def test_hubspot_deal_id_unique_constraint(session: AsyncSession) -> None:
+ """Inserting two deals with the same hubspot_deal_id must violate the unique constraint."""
+ session.add(Deal(hubspot_deal_id="dup-id"))
+ await session.commit()
+
+ session.add(Deal(hubspot_deal_id="dup-id"))
+ with pytest.raises(IntegrityError):
+ await session.commit()
+ await session.rollback()
+
+
+@pytest.mark.asyncio
+async def test_hubspot_checked_in_at_nullable(session: AsyncSession) -> None:
+ """hubspot_checked_in_at is nullable for admin-created deals."""
+ deal = Deal(hubspot_deal_id="no-upstream-timestamp")
+ session.add(deal)
+ await session.commit()
+ await session.refresh(deal)
+ assert deal.hubspot_checked_in_at is None
diff --git a/backend/tests/test_deal_service.py b/backend/tests/test_deal_service.py
@@ -1,7 +1,8 @@
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from tests.conftest import make_deal
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
from vcc_backend.features.deals.service import DealService
@@ -9,9 +10,9 @@ from vcc_backend.features.deals.service import DealService
async def test_list_deals_returns_persisted_deals(session: AsyncSession) -> None:
session.add_all(
[
- Deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE),
- Deal(status=DealStatus.IN_PROGRESS, current_stage=PipelineStage.SEGMENTATION),
- Deal(status=DealStatus.COMPLETED, current_stage=PipelineStage.DONE),
+ make_deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE),
+ make_deal(status=DealStatus.IN_PROGRESS, current_stage=PipelineStage.SEGMENTATION),
+ make_deal(status=DealStatus.COMPLETED, current_stage=PipelineStage.DONE),
]
)
await session.flush()
@@ -25,8 +26,8 @@ async def test_list_deals_returns_persisted_deals(session: AsyncSession) -> None
@pytest.mark.asyncio
async def test_list_deals_orders_by_created_at_desc(session: AsyncSession) -> None:
- older = Deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE)
- newer = Deal(status=DealStatus.IN_PROGRESS, current_stage=PipelineStage.SEGMENTATION)
+ older = make_deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE)
+ newer = make_deal(status=DealStatus.IN_PROGRESS, current_stage=PipelineStage.SEGMENTATION)
session.add(older)
await session.flush()
session.add(newer)
diff --git a/backend/tests/test_deals_route.py b/backend/tests/test_deals_route.py
@@ -2,8 +2,9 @@ import pytest
from httpx import ASGITransport, AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
+from tests.conftest import make_deal
from vcc_backend.api.app import create_app
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
@pytest.mark.asyncio
@@ -25,7 +26,7 @@ async def test_get_deals_returns_persisted_deals(
database_url: str, monkeypatch: pytest.MonkeyPatch, session: AsyncSession
) -> None:
monkeypatch.setenv("DATABASE_URL", database_url)
- deal = Deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE)
+ deal = make_deal(status=DealStatus.PENDING, current_stage=PipelineStage.INTAKE)
session.add(deal)
await session.commit()
diff --git a/backend/tests/test_pipeline_orchestrator.py b/backend/tests/test_pipeline_orchestrator.py
@@ -8,7 +8,8 @@ import pytest
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from tests.conftest import make_deal
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
from vcc_backend.features.pipeline.models import PipelineRun, RunStatus
from vcc_backend.features.pipeline.orchestrator import run_pipeline
@@ -19,7 +20,7 @@ async def test_run_pipeline_walks_all_six_stages_for_pending_deal(
) -> None:
"""Happy path: a pending deal at intake walks through all 6 stages."""
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
@@ -62,7 +63,7 @@ async def test_run_pipeline_walks_all_six_stages_for_pending_deal(
async def test_run_pipeline_is_idempotent_for_done_deal(session: AsyncSession) -> None:
"""A deal already at DONE is a no-op — no new rows, no state change."""
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.COMPLETED,
current_stage=PipelineStage.DONE,
diff --git a/backend/tests/test_pipeline_run_model.py b/backend/tests/test_pipeline_run_model.py
@@ -8,7 +8,8 @@ import pytest
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from tests.conftest import make_deal
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
from vcc_backend.features.pipeline.models import PipelineRun, RunStatus
@@ -36,7 +37,7 @@ def test_pipeline_run_defaults() -> None:
async def test_pipeline_run_can_be_persisted_and_loaded(session: AsyncSession) -> None:
"""End-to-end: insert a Deal, insert a PipelineRun referring to it, fetch back."""
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
diff --git a/backend/tests/test_pipeline_run_service.py b/backend/tests/test_pipeline_run_service.py
@@ -7,7 +7,8 @@ from uuid import uuid4
import pytest
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from tests.conftest import make_deal
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
from vcc_backend.features.pipeline.models import PipelineRun, RunStatus
from vcc_backend.features.pipeline.service import PipelineRunService
@@ -18,7 +19,7 @@ async def test_list_runs_for_deal_returns_in_started_at_desc_order(
) -> None:
"""Newer runs come first — matches the index order."""
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
diff --git a/backend/tests/test_process_deal_task.py b/backend/tests/test_process_deal_task.py
@@ -17,7 +17,8 @@ import pytest
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from tests.conftest import make_deal
+from vcc_backend.features.deals.models import DealStatus, PipelineStage
from vcc_backend.features.pipeline.models import PipelineRun, RunStatus
@@ -31,7 +32,7 @@ async def test_process_deal_runs_via_worker(
monkeypatch.setenv("DATABASE_URL", database_url)
now = datetime.now(UTC)
- deal = Deal(
+ deal = make_deal(
id=uuid4(),
status=DealStatus.PENDING,
current_stage=PipelineStage.INTAKE,
diff --git a/backend/tests/test_workers_demo_task.py b/backend/tests/test_workers_demo_task.py
@@ -78,8 +78,9 @@ async def test_count_deals_runs_via_worker(
"current_stage": PipelineStage.INTAKE,
"created_at": now,
"updated_at": now,
+ "hubspot_deal_id": f"test-seed-{i}",
}
- for _ in range(3)
+ for i in range(3)
],
)
await session.commit()
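Below, a minimal sketch of the read pattern ix_deals_updated_at_desc is meant to serve; the queue endpoint itself lands in a later commit (329a838), so the query shape here is an illustration, not code from this commit.

```python
# Illustrative only: the kind of query ix_deals_updated_at_desc accelerates.
# Deal is the model touched above; the real queue service arrives in 329a838.
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from vcc_backend.features.deals.models import Deal


async def newest_deals(session: AsyncSession, limit: int = 20) -> list[Deal]:
    # ORDER BY updated_at DESC LIMIT n can walk the index instead of sorting the table.
    stmt = select(Deal).order_by(Deal.updated_at.desc()).limit(limit)
    return list((await session.execute(stmt)).scalars().all())
```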
6d50232
feat(backend): add HubSpot poll settings
Why
Pure settings additions: HUBSPOT_ACCESS_TOKEN (SecretStr | None), HUBSPOT_POLL_INTERVAL_MINUTES (default 5), HUBSPOT_BOOTSTRAP_DAYS (default 30). SecretStr keeps the token out of repr/dump output. A short usage sketch follows the diff.
Files changed
| backend/src/vcc_backend/core/settings.py | 6 ++++++ |
| backend/tests/test_settings.py | 27 +++++++++++++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/core/settings.py b/backend/src/vcc_backend/core/settings.py
@@ -1,5 +1,6 @@
from functools import lru_cache
+from pydantic import SecretStr
from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -15,6 +16,11 @@ class Settings(BaseSettings):
log_level: str = "INFO"
env: str = "dev"
+ # HubSpot poller (see workers/app.py for worker-side validation)
+ hubspot_access_token: SecretStr | None = None
+ hubspot_poll_interval_minutes: int = 5
+ hubspot_bootstrap_days: int = 30
+
@property
def procrastinate_database_url(self) -> str:
"""Procrastinate's psycopg3 connector wants a plain postgresql:// URL
diff --git a/backend/tests/test_settings.py b/backend/tests/test_settings.py
@@ -52,3 +52,30 @@ def test_procrastinate_database_url_passes_through_when_no_driver_suffix(monkeyp
settings = get_settings()
assert settings.procrastinate_database_url == "postgresql://vcc:vcc@localhost:55432/vcc"
+
+
+def test_hubspot_settings_defaults(monkeypatch: pytest.MonkeyPatch) -> None:
+ """HubSpot poll settings have sensible defaults when env is unset."""
+ monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
+ monkeypatch.delenv("HUBSPOT_ACCESS_TOKEN", raising=False)
+ monkeypatch.delenv("HUBSPOT_POLL_INTERVAL_MINUTES", raising=False)
+ monkeypatch.delenv("HUBSPOT_BOOTSTRAP_DAYS", raising=False)
+ from vcc_backend.core.settings import Settings
+
+ s = Settings() # type: ignore[call-arg]
+ assert s.hubspot_access_token is None
+ assert s.hubspot_poll_interval_minutes == 5
+ assert s.hubspot_bootstrap_days == 30
+
+
+def test_hubspot_access_token_is_secret(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Token is SecretStr so it doesn't leak into repr/dumps."""
+ monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
+ monkeypatch.setenv("HUBSPOT_ACCESS_TOKEN", "should-not-leak")
+ from pydantic import SecretStr
+ from vcc_backend.core.settings import Settings
+
+ s = Settings() # type: ignore[call-arg]
+ assert isinstance(s.hubspot_access_token, SecretStr)
+ assert "should-not-leak" not in repr(s)
+ assert s.hubspot_access_token.get_secret_value() == "should-not-leak"
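A small usage sketch, illustrative only and mirroring what test_settings.py above exercises; the env var values are hypothetical.

```python
# Illustrative only; env var values here are hypothetical.
import os

from vcc_backend.core.settings import Settings

os.environ.setdefault("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
os.environ["HUBSPOT_ACCESS_TOKEN"] = "pat-example"

s = Settings()  # type: ignore[call-arg]
assert s.hubspot_poll_interval_minutes == 5       # default
assert s.hubspot_bootstrap_days == 30             # default
assert "pat-example" not in repr(s)               # SecretStr masks the token
assert s.hubspot_access_token is not None
assert s.hubspot_access_token.get_secret_value() == "pat-example"
```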
efc0e0d
feat(backend): fail-fast worker startup when HUBSPOT_ACCESS_TOKEN missing
Why
Fail-fast worker boot. _validate_worker_settings() raises RuntimeError at startup (not silently every 5 minutes) when HUBSPOT_ACCESS_TOKEN is missing. The check is gated behind VCC_WORKER=1 so the API service, which lazy-imports workers.app for the re-enqueue endpoint, doesn't need HubSpot credentials. The docker-compose worker service gets VCC_WORKER=1 and a dev-placeholder token so the container boots cleanly in local dev.
Files changed
| backend/src/vcc_backend/workers/app.py | 27 +++++++++++++---- |
| backend/tests/test_worker_settings_validation.py | 37 ++++++++++++++++++++++++ |
| docker-compose.yml | 2 ++ |
Show diff
diff --git a/backend/src/vcc_backend/workers/app.py b/backend/src/vcc_backend/workers/app.py
@@ -2,15 +2,31 @@
The `app` module-level variable is what `procrastinate --app vcc_backend.workers.app worker`
binds to. Tasks register themselves via the `@app.task` decorator in `tasks.py`,
-which is imported at the end of this module so `app` exists when decorators
-fire.
+which is imported at the end of this module so `app` exists when decorators fire.
"""
from __future__ import annotations
+import os
+
from procrastinate import App, PsycopgConnector
-from vcc_backend.core.settings import get_settings
+from vcc_backend.core.settings import Settings, get_settings
+
+
+def _validate_worker_settings(settings: Settings) -> None:
+ """Fail at worker startup, not silently every 5 min, when prod config is wrong.
+
+ The check is opt-in via VCC_WORKER=1 so the API service can import workers.app
+ lazily (for the admin re-enqueue endpoint) without paying for HubSpot config.
+ """
+ missing: list[str] = []
+ if settings.hubspot_access_token is None:
+ missing.append(
+ "HUBSPOT_ACCESS_TOKEN (required because poll_hubspot is registered as a periodic task)"
+ )
+ if missing:
+ raise RuntimeError("Worker cannot start: " + "; ".join(missing))
def _build_app() -> App:
@@ -18,13 +34,12 @@ def _build_app() -> App:
and inside tests (to construct a fresh App pointed at a test database).
"""
settings = get_settings()
+ if os.environ.get("VCC_WORKER") == "1":
+ _validate_worker_settings(settings)
connector = PsycopgConnector(conninfo=settings.procrastinate_database_url)
return App(connector=connector)
app = _build_app()
-# Registering tasks must happen AFTER `app` is bound so the @app.task
-# decorators in tasks.py can reference it. The noqa silences ruff's late-import
-# and unused-import warnings — both are intentional here.
from vcc_backend.workers import tasks # noqa: E402, F401
diff --git a/backend/tests/test_worker_settings_validation.py b/backend/tests/test_worker_settings_validation.py
new file mode 100644
@@ -0,0 +1,37 @@
+"""Worker startup validation.
+
+_validate_worker_settings raises immediately when HUBSPOT_ACCESS_TOKEN is unset
+so missing prod config surfaces at boot, not silently every 5 min.
+"""
+import pytest
+
+from vcc_backend.core.settings import Settings
+
+
+def _settings(**overrides) -> Settings:
+ base = {
+ "database_url": "postgresql+asyncpg://u:p@h:5432/d",
+ "hubspot_access_token": None,
+ }
+ base.update(overrides)
+ return Settings(**base) # type: ignore[arg-type]
+
+
+def test_validate_worker_settings_raises_when_token_missing(monkeypatch) -> None:
+ monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
+ from vcc_backend.workers.app import _validate_worker_settings
+
+ s = _settings(hubspot_access_token=None)
+ with pytest.raises(RuntimeError) as excinfo:
+ _validate_worker_settings(s)
+ msg = str(excinfo.value)
+ assert "HUBSPOT_ACCESS_TOKEN" in msg
+ assert "poll_hubspot" in msg
+
+
+def test_validate_worker_settings_passes_when_token_present(monkeypatch) -> None:
+ monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
+ from vcc_backend.workers.app import _validate_worker_settings
+
+ s = _settings(hubspot_access_token="anything")
+ _validate_worker_settings(s) # no raise
diff --git a/docker-compose.yml b/docker-compose.yml
@@ -47,6 +47,8 @@ services:
DATABASE_URL: postgresql+asyncpg://vcc:vcc@postgres:5432/vcc
LOG_LEVEL: INFO
ENV: dev
+ VCC_WORKER: "1"
+ HUBSPOT_ACCESS_TOKEN: dev-placeholder
depends_on:
backend:
condition: service_healthy
63234bd
feat(backend): HubSpot client with day-batched search + pagination
Why
New integrations/hubspot/ package: HubspotClient (async HTTPX, day-batched search to stay under HubSpot's 10K-per-query limit, follows pagination cursors), HubspotDealSummary (Pydantic model), HubspotError (typed exception), and 7 respx-mocked tests. A calling sketch follows the diff.
Files changed
| backend/pyproject.toml | 5 + |
| backend/src/vcc_backend/integrations/__init__.py | 0 |
| .../vcc_backend/integrations/hubspot/__init__.py | 7 + |
| .../src/vcc_backend/integrations/hubspot/client.py | 165 +++++++++++++++++++++ |
| backend/tests/test_hubspot_client.py | 139 +++++++++++++++++ |
| backend/uv.lock | 20 +++ |
Show diff
diff --git a/backend/pyproject.toml b/backend/pyproject.toml
@@ -54,3 +54,8 @@ ignore = ["B008"] # FastAPI uses Depends() in argument defaults by design
[tool.mypy]
python_version = "3.12"
strict = true
+
+[dependency-groups]
+dev = [
+ "respx>=0.21",
+]
diff --git a/backend/src/vcc_backend/integrations/__init__.py b/backend/src/vcc_backend/integrations/__init__.py
new file mode 100644
diff --git a/backend/src/vcc_backend/integrations/hubspot/__init__.py b/backend/src/vcc_backend/integrations/hubspot/__init__.py
new file mode 100644
@@ -0,0 +1,7 @@
+from vcc_backend.integrations.hubspot.client import (
+ HubspotClient,
+ HubspotDealSummary,
+ HubspotError,
+)
+
+__all__ = ["HubspotClient", "HubspotDealSummary", "HubspotError"]
diff --git a/backend/src/vcc_backend/integrations/hubspot/client.py b/backend/src/vcc_backend/integrations/hubspot/client.py
new file mode 100644
@@ -0,0 +1,165 @@
+"""HubSpot client — search deals that entered the 'Checked-In' lifecycle stage.
+
+Returns Pydantic models. No internal retry; periodic ticks are the retry strategy.
+"""
+from __future__ import annotations
+
+from datetime import UTC, datetime, timedelta
+from typing import Any
+
+import httpx
+from pydantic import BaseModel, Field
+
+CHECKED_IN_DATE_PROP = "hs_v2_date_entered_2746058991"
+DEFAULT_BASE_URL = "https://api.hubapi.com"
+SEARCH_PATH = "/crm/v3/objects/deals/search"
+
+
+class HubspotError(Exception):
+ """Wraps HTTPX errors and non-2xx responses; carries response body when available."""
+
+ def __init__(self, message: str, *, status: int | None = None, body: str | None = None) -> None:
+ super().__init__(message)
+ self.status = status
+ self.body = body
+
+
+class HubspotDealSummary(BaseModel):
+ """A single deal returned by the HubSpot search."""
+
+ hubspot_deal_id: str
+ name: str | None = None
+ country: str | None = None
+ created_at: datetime | None = None
+ deal_stage: str | None = None
+ amount: float | None = None
+ checked_in_at: datetime
+
+ @classmethod
+ def from_hubspot(cls, payload: dict[str, Any]) -> HubspotDealSummary:
+ """Adapt HubSpot's raw search-result shape to the summary model."""
+ props = payload.get("properties", {}) or {}
+ checked_in_raw = props.get(CHECKED_IN_DATE_PROP)
+ if not checked_in_raw:
+ raise HubspotError(f"deal {payload.get('id')!r} missing {CHECKED_IN_DATE_PROP}")
+ amount_raw = props.get("amount")
+ return cls(
+ hubspot_deal_id=str(payload["id"]),
+ name=props.get("dealname") or None,
+ country=props.get("country_for_deal") or None,
+ created_at=_parse_iso(props.get("createdate")),
+ deal_stage=props.get("dealstage") or None,
+ amount=float(amount_raw) if amount_raw not in (None, "") else None,
+ checked_in_at=_parse_iso(checked_in_raw) or _now_utc(),
+ )
+
+
+class HubspotClient:
+ """Async HTTPX client for HubSpot's deals/search endpoint."""
+
+ def __init__(self, access_token: str | None, base_url: str = DEFAULT_BASE_URL) -> None:
+ if not access_token:
+ raise HubspotError("HubspotClient requires a non-empty access_token")
+ self._token = access_token
+ self._base_url = base_url.rstrip("/")
+
+ async def search_checked_in_deals(
+ self,
+ since: datetime,
+ until: datetime,
+ ) -> list[HubspotDealSummary]:
+ """Search HubSpot for deals that entered Checked-In between since and until.
+
+ Iterates by day to stay under the 10K-per-query limit; follows pagination
+ cursors within each day. Returns deduplicated results.
+ """
+ if until <= since:
+ return []
+ headers = {"Authorization": f"Bearer {self._token}", "Content-Type": "application/json"}
+ seen: set[str] = set()
+ out: list[HubspotDealSummary] = []
+ async with httpx.AsyncClient(timeout=30) as client:
+ for day_start, day_end in _iter_day_windows(since, until):
+ out.extend(await self._fetch_day(client, headers, day_start, day_end, seen))
+ return out
+
+ async def _fetch_day(
+ self,
+ client: httpx.AsyncClient,
+ headers: dict[str, str],
+ since: datetime,
+ until: datetime,
+ seen: set[str],
+ ) -> list[HubspotDealSummary]:
+ base_body = {
+ "filterGroups": [
+ {
+ "filters": [
+ {"propertyName": CHECKED_IN_DATE_PROP, "operator": "GTE", "value": _iso(since)},
+ {"propertyName": CHECKED_IN_DATE_PROP, "operator": "LT", "value": _iso(until)},
+ ]
+ }
+ ],
+ "properties": [
+ "dealname",
+ "country_for_deal",
+ "createdate",
+ "dealstage",
+ "amount",
+ CHECKED_IN_DATE_PROP,
+ ],
+ "sorts": [{"propertyName": CHECKED_IN_DATE_PROP, "direction": "DESCENDING"}],
+ "limit": 100,
+ }
+ out: list[HubspotDealSummary] = []
+ after: str | None = None
+ while True:
+ body = {**base_body, "after": after} if after else base_body
+ try:
+ resp = await client.post(f"{self._base_url}{SEARCH_PATH}", headers=headers, json=body)
+ except httpx.RequestError as exc:
+ raise HubspotError(f"HubSpot request failed: {exc}") from exc
+ if resp.status_code >= 400:
+ raise HubspotError(
+ f"HubSpot {resp.status_code}",
+ status=resp.status_code,
+ body=resp.text[:500],
+ )
+ data = resp.json()
+ for raw in data.get("results", []):
+ deal_id = str(raw.get("id"))
+ if deal_id in seen:
+ continue
+ seen.add(deal_id)
+ out.append(HubspotDealSummary.from_hubspot(raw))
+ after = (data.get("paging") or {}).get("next", {}).get("after")
+ if not after:
+ break
+ return out
+
+
+def _iter_day_windows(since: datetime, until: datetime):
+ """Yield (day_start, day_end) UTC tuples covering [since, until)."""
+ cur = since
+ while cur < until:
+ step = cur + timedelta(days=1)
+ yield (cur, min(step, until))
+ cur = step
+
+
+def _parse_iso(value: str | None) -> datetime | None:
+ if not value:
+ return None
+ v = value.replace("Z", "+00:00") if isinstance(value, str) else value
+ try:
+ return datetime.fromisoformat(v)
+ except (TypeError, ValueError):
+ return None
+
+
+def _iso(value: datetime) -> str:
+ return value.astimezone(UTC).isoformat().replace("+00:00", "Z")
+
+
+def _now_utc() -> datetime:
+ return datetime.now(UTC)
diff --git a/backend/tests/test_hubspot_client.py b/backend/tests/test_hubspot_client.py
new file mode 100644
@@ -0,0 +1,139 @@
+"""HubSpot client unit tests — respx-mocked HTTPX."""
+from datetime import UTC, datetime, timedelta
+
+import httpx
+import pytest
+import respx
+
+from vcc_backend.integrations.hubspot.client import (
+ HubspotClient,
+ HubspotDealSummary,
+ HubspotError,
+)
+
+TOKEN = "fake-token"
+
+
+def test_summary_parses_hubspot_response_shape() -> None:
+ payload = {
+ "id": "12345",
+ "properties": {
+ "dealname": "A box",
+ "country_for_deal": "NL",
+ "createdate": "2026-05-01T10:00:00Z",
+ "dealstage": "checked-in-stage-id",
+ "amount": "200",
+ "hs_v2_date_entered_2746058991": "2026-05-12T09:15:00Z",
+ },
+ }
+ summary = HubspotDealSummary.from_hubspot(payload)
+ assert summary.hubspot_deal_id == "12345"
+ assert summary.name == "A box"
+ assert summary.country == "NL"
+ assert summary.deal_stage == "checked-in-stage-id"
+ assert summary.amount == 200.0
+ assert summary.checked_in_at == datetime(2026, 5, 12, 9, 15, tzinfo=UTC)
+
+
+def test_client_construction_requires_token() -> None:
+ with pytest.raises(HubspotError):
+ HubspotClient(access_token="")
+ with pytest.raises(HubspotError):
+ HubspotClient(access_token=None) # type: ignore[arg-type]
+
+
+@pytest.mark.asyncio
+async def test_search_returns_deals_for_single_day(respx_mock: respx.MockRouter) -> None:
+ respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ return_value=httpx.Response(
+ 200,
+ json={
+ "results": [
+ {
+ "id": "1",
+ "properties": {
+ "dealname": "Box A",
+ "hs_v2_date_entered_2746058991": "2026-05-12T10:00:00Z",
+ },
+ }
+ ],
+ "paging": {},
+ },
+ )
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, tzinfo=UTC)
+ until = since + timedelta(days=1)
+ results = await client.search_checked_in_deals(since=since, until=until)
+ assert len(results) == 1
+ assert results[0].hubspot_deal_id == "1"
+ assert respx_mock.calls.call_count == 1
+
+
+@pytest.mark.asyncio
+async def test_search_batches_by_day(respx_mock: respx.MockRouter) -> None:
+ respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ return_value=httpx.Response(200, json={"results": [], "paging": {}})
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 10, tzinfo=UTC)
+ until = datetime(2026, 5, 13, tzinfo=UTC)
+ await client.search_checked_in_deals(since=since, until=until)
+ assert respx_mock.calls.call_count == 3
+
+
+@pytest.mark.asyncio
+async def test_search_follows_pagination(respx_mock: respx.MockRouter) -> None:
+ page_1 = {
+ "results": [
+ {
+ "id": "1",
+ "properties": {"hs_v2_date_entered_2746058991": "2026-05-12T10:00:00Z"},
+ }
+ ],
+ "paging": {"next": {"after": "cursor-2"}},
+ }
+ page_2 = {
+ "results": [
+ {
+ "id": "2",
+ "properties": {"hs_v2_date_entered_2746058991": "2026-05-12T11:00:00Z"},
+ }
+ ],
+ "paging": {},
+ }
+ route = respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ side_effect=[httpx.Response(200, json=page_1), httpx.Response(200, json=page_2)]
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, tzinfo=UTC)
+ until = since + timedelta(days=1)
+ results = await client.search_checked_in_deals(since=since, until=until)
+ ids = {r.hubspot_deal_id for r in results}
+ assert ids == {"1", "2"}
+ assert route.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_4xx_raises_hubspot_error(respx_mock: respx.MockRouter) -> None:
+ respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ return_value=httpx.Response(401, text="unauthorized")
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, tzinfo=UTC)
+ until = since + timedelta(days=1)
+ with pytest.raises(HubspotError) as excinfo:
+ await client.search_checked_in_deals(since=since, until=until)
+ assert excinfo.value.status == 401
+
+
+@pytest.mark.asyncio
+async def test_network_error_raises_hubspot_error(respx_mock: respx.MockRouter) -> None:
+ respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ side_effect=httpx.ConnectError("boom")
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, tzinfo=UTC)
+ until = since + timedelta(days=1)
+ with pytest.raises(HubspotError):
+ await client.search_checked_in_deals(since=since, until=until)
diff --git a/backend/uv.lock b/backend/uv.lock
@@ -990,6 +990,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" },
]
+[[package]]
+name = "respx"
+version = "0.23.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "httpx" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/43/98/4e55c9c486404ec12373708d015ebce157966965a5ebe7f28ff2c784d41b/respx-0.23.1.tar.gz", hash = "sha256:242dcc6ce6b5b9bf621f5870c82a63997e8e82bc7c947f9ffe272b8f3dd5a780", size = 29243, upload-time = "2026-04-08T14:37:16.008Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1d/4a/221da6ca167db45693d8d26c7dc79ccfc978a440251bf6721c9aaf251ac0/respx-0.23.1-py2.py3-none-any.whl", hash = "sha256:b18004b029935384bccfa6d7d9d74b4ec9af73a081cc28600fffc0447f4b8c1a", size = 25557, upload-time = "2026-04-08T14:37:14.613Z" },
+]
+
[[package]]
name = "rich"
version = "15.0.0"
@@ -1284,6 +1296,11 @@ dev = [
{ name = "types-pyyaml" },
]
+[package.dev-dependencies]
+dev = [
+ { name = "respx" },
+]
+
[package.metadata]
requires-dist = [
{ name = "alembic", specifier = ">=1.13" },
@@ -1308,6 +1325,9 @@ requires-dist = [
]
provides-extras = ["dev"]
+[package.metadata.requires-dev]
+dev = [{ name = "respx", specifier = ">=0.21" }]
+
[[package]]
name = "watchfiles"
version = "1.1.1"
972a0ba
fix(backend): tighten HubSpot client error handling + add edge-case tests
Why
Hardening pass after code review. Bare KeyError on missing payload['id'] → HubspotError. Silent _now_utc() fallback for a malformed timestamp → HubspotError (the fallback was a real data-integrity hazard). float(amount_raw) is now wrapped. Adds return-type annotations and 5 new edge-case tests, including request-body inspection.
Files changed
| .../src/vcc_backend/integrations/hubspot/client.py | 23 +++++-- |
| backend/tests/test_hubspot_client.py | 76 ++++++++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/integrations/hubspot/client.py b/backend/src/vcc_backend/integrations/hubspot/client.py
@@ -4,6 +4,7 @@ Returns Pydantic models. No internal retry; periodic ticks are the retry strateg
"""
from __future__ import annotations
+from collections.abc import Iterator
from datetime import UTC, datetime, timedelta
from typing import Any
@@ -38,19 +39,31 @@ class HubspotDealSummary(BaseModel):
@classmethod
def from_hubspot(cls, payload: dict[str, Any]) -> HubspotDealSummary:
"""Adapt HubSpot's raw search-result shape to the summary model."""
+ deal_id = payload.get("id")
+ if deal_id is None:
+ raise HubspotError("HubSpot deal payload is missing required field 'id'")
props = payload.get("properties", {}) or {}
checked_in_raw = props.get(CHECKED_IN_DATE_PROP)
if not checked_in_raw:
- raise HubspotError(f"deal {payload.get('id')!r} missing {CHECKED_IN_DATE_PROP}")
+ raise HubspotError(f"deal {deal_id!r} missing {CHECKED_IN_DATE_PROP}")
+ checked_in_at = _parse_iso(checked_in_raw)
+ if checked_in_at is None:
+ raise HubspotError(f"deal {deal_id!r} has invalid checked_in date: {checked_in_raw!r}")
amount_raw = props.get("amount")
+ amount: float | None = None
+ if amount_raw not in (None, ""):
+ try:
+ amount = float(str(amount_raw))
+ except (ValueError, TypeError):
+ amount = None
return cls(
- hubspot_deal_id=str(payload["id"]),
+ hubspot_deal_id=str(deal_id),
name=props.get("dealname") or None,
country=props.get("country_for_deal") or None,
created_at=_parse_iso(props.get("createdate")),
deal_stage=props.get("dealstage") or None,
- amount=float(amount_raw) if amount_raw not in (None, "") else None,
- checked_in_at=_parse_iso(checked_in_raw) or _now_utc(),
+ amount=amount,
+ checked_in_at=checked_in_at,
)
@@ -138,7 +151,7 @@ class HubspotClient:
return out
-def _iter_day_windows(since: datetime, until: datetime):
+def _iter_day_windows(since: datetime, until: datetime) -> Iterator[tuple[datetime, datetime]]:
"""Yield (day_start, day_end) UTC tuples covering [since, until)."""
cur = since
while cur < until:
diff --git a/backend/tests/test_hubspot_client.py b/backend/tests/test_hubspot_client.py
@@ -1,4 +1,5 @@
"""HubSpot client unit tests — respx-mocked HTTPX."""
+import json
from datetime import UTC, datetime, timedelta
import httpx
@@ -6,6 +7,7 @@ import pytest
import respx
from vcc_backend.integrations.hubspot.client import (
+ CHECKED_IN_DATE_PROP,
HubspotClient,
HubspotDealSummary,
HubspotError,
@@ -137,3 +139,77 @@ async def test_network_error_raises_hubspot_error(respx_mock: respx.MockRouter)
until = since + timedelta(days=1)
with pytest.raises(HubspotError):
await client.search_checked_in_deals(since=since, until=until)
+
+
+# ---------------------------------------------------------------------------
+# Edge-case / error-path tests added during code review
+# ---------------------------------------------------------------------------
+
+
+@pytest.mark.asyncio
+async def test_since_gte_until_returns_empty_no_http_calls(respx_mock: respx.MockRouter) -> None:
+ """When since >= until, return [] immediately without making any HTTP calls."""
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, tzinfo=UTC)
+
+ # since == until
+ result = await client.search_checked_in_deals(since=since, until=since)
+ assert result == []
+ assert respx_mock.calls.call_count == 0
+
+ # since > until
+ result = await client.search_checked_in_deals(since=since, until=since - timedelta(hours=1))
+ assert result == []
+ assert respx_mock.calls.call_count == 0
+
+
+def test_from_hubspot_raises_on_missing_id() -> None:
+ """from_hubspot must raise HubspotError, not KeyError, when 'id' is absent."""
+ payload: dict = {
+ "properties": {
+ CHECKED_IN_DATE_PROP: "2026-05-12T10:00:00Z",
+ }
+ }
+ with pytest.raises(HubspotError, match="missing required field 'id'"):
+ HubspotDealSummary.from_hubspot(payload)
+
+
+def test_from_hubspot_raises_on_missing_checked_in_date_prop() -> None:
+ """from_hubspot must raise HubspotError when the checked-in date property is absent."""
+ payload = {"id": "99", "properties": {}}
+ with pytest.raises(HubspotError, match="missing"):
+ HubspotDealSummary.from_hubspot(payload)
+
+
+def test_from_hubspot_raises_on_malformed_checked_in_date() -> None:
+ """from_hubspot must raise HubspotError for a truthy but unparseable date string."""
+ payload = {
+ "id": "99",
+ "properties": {
+ CHECKED_IN_DATE_PROP: "not-a-date",
+ },
+ }
+ with pytest.raises(HubspotError, match="invalid checked_in date"):
+ HubspotDealSummary.from_hubspot(payload)
+
+
+@pytest.mark.asyncio
+async def test_search_request_body_uses_gte_lt_and_iso_z(respx_mock: respx.MockRouter) -> None:
+ """The first search call must send GTE/LT operators with ISO-Z formatted timestamps."""
+ respx_mock.post("https://api.hubapi.com/crm/v3/objects/deals/search").mock(
+ return_value=httpx.Response(200, json={"results": [], "paging": {}})
+ )
+ client = HubspotClient(access_token=TOKEN)
+ since = datetime(2026, 5, 12, 0, 0, 0, tzinfo=UTC)
+ until = since + timedelta(days=1)
+ await client.search_checked_in_deals(since=since, until=until)
+
+ assert respx_mock.calls.call_count == 1
+ body = json.loads(respx_mock.calls[0].request.content)
+ filters = body["filterGroups"][0]["filters"]
+ operators = {f["operator"]: f["value"] for f in filters}
+
+ assert "GTE" in operators
+ assert "LT" in operators
+ assert operators["GTE"] == "2026-05-12T00:00:00Z"
+ assert operators["LT"] == "2026-05-13T00:00:00Z"
4ab445f
feat(backend): DealSyncService with cursor + idempotent upsert
Why
DealSyncService owns the DB side: _compute_window() derives the cursor from max(deals.hubspot_checked_in_at) − 2d (or a 30-day bootstrap window if the table is empty), upserts by hubspot_deal_id via select-then-write, and returns a SyncResult carrying the list of newly inserted ids. It does NOT call defer_async — it stays Procrastinate-free. A standalone restatement of the cursor rule follows the diff.
Files changed
| backend/src/vcc_backend/features/deals/service.py | 113 +++++++++++++++++++++- |
| backend/tests/test_deal_sync_service.py | 85 ++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/features/deals/service.py b/backend/src/vcc_backend/features/deals/service.py
@@ -1,7 +1,19 @@
-from sqlalchemy import select
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from datetime import UTC, datetime, timedelta
+from typing import Protocol
+from uuid import UUID
+
+import structlog
+from sqlalchemy import func, select, update
from sqlalchemy.ext.asyncio import AsyncSession
-from vcc_backend.features.deals.models import Deal
+from vcc_backend.core.settings import Settings
+from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
+from vcc_backend.integrations.hubspot.client import HubspotClient, HubspotDealSummary
+
+_logger = structlog.get_logger()
class DealService:
@@ -13,3 +25,100 @@ class DealService:
async def list_deals(self) -> list[Deal]:
result = await self._session.execute(select(Deal).order_by(Deal.created_at.desc()))
return list(result.scalars().all())
+
+
+# ---------------------------------------------------------------------------
+# DealSyncService
+# ---------------------------------------------------------------------------
+
+
+@dataclass
+class SyncResult:
+ fetched: int = 0
+ new_deals: int = 0
+ skipped_existing: int = 0
+ cursor_since: datetime = field(default_factory=lambda: datetime.now(UTC))
+ enqueued_deal_ids: list[UUID] = field(default_factory=list)
+
+
+class _HubspotProtocol(Protocol):
+ async def search_checked_in_deals(
+ self, since: datetime, until: datetime
+ ) -> list[HubspotDealSummary]: ...
+
+
+class DealSyncService:
+ """Polls HubSpot, upserts deals by hubspot_deal_id, returns IDs that need enqueueing.
+
+ Does NOT call defer_async — the periodic task (workers/tasks.py) is responsible for that
+ so this service stays free of Procrastinate coupling and is easy to unit-test.
+ """
+
+ def __init__(
+ self,
+ session: AsyncSession,
+ settings: Settings,
+ hubspot_client: _HubspotProtocol | None = None,
+ ) -> None:
+ self._session = session
+ self._settings = settings
+ if hubspot_client is None:
+ token = settings.hubspot_access_token
+ assert token is not None, "Worker startup validation should have rejected this"
+ self._hubspot: _HubspotProtocol = HubspotClient(token.get_secret_value())
+ else:
+ self._hubspot = hubspot_client
+
+ async def sync_once(self) -> SyncResult:
+ since, until = await self._compute_window()
+ summaries = await self._hubspot.search_checked_in_deals(since=since, until=until)
+ new_ids: list[UUID] = []
+ for summary in summaries:
+ deal_id, created = await self._upsert(summary)
+ if created:
+ new_ids.append(deal_id)
+ await self._session.commit()
+ return SyncResult(
+ fetched=len(summaries),
+ new_deals=len(new_ids),
+ skipped_existing=len(summaries) - len(new_ids),
+ cursor_since=since,
+ enqueued_deal_ids=new_ids,
+ )
+
+ async def _compute_window(self) -> tuple[datetime, datetime]:
+ until = datetime.now(UTC)
+ latest = await self._session.scalar(select(func.max(Deal.hubspot_checked_in_at)))
+ if latest is None:
+ since = until - timedelta(days=self._settings.hubspot_bootstrap_days)
+ else:
+ if latest.tzinfo is None:
+ latest = latest.replace(tzinfo=UTC)
+ since = latest - timedelta(days=2)
+ return since, until
+
+ async def _upsert(self, summary: HubspotDealSummary) -> tuple[UUID, bool]:
+ """Insert or update by hubspot_deal_id. Returns (deal_id, created).
+
+ Select-then-write keyed by hubspot_deal_id; the unique index is the safety net.
+ Procrastinate's periodic-task locking prevents two ticks from racing here.
+ """
+ existing_id: UUID | None = await self._session.scalar(
+ select(Deal.id).where(Deal.hubspot_deal_id == summary.hubspot_deal_id)
+ )
+ if existing_id is not None:
+ await self._session.execute(
+ update(Deal)
+ .where(Deal.id == existing_id)
+ .values(hubspot_checked_in_at=summary.checked_in_at, updated_at=func.now())
+ )
+ return existing_id, False
+ deal = Deal(
+ hubspot_deal_id=summary.hubspot_deal_id,
+ hubspot_checked_in_at=summary.checked_in_at,
+ status=DealStatus.PENDING,
+ current_stage=PipelineStage.INTAKE,
+ )
+ self._session.add(deal)
+ await self._session.flush()
+ return deal.id, True
diff --git a/backend/tests/test_deal_sync_service.py b/backend/tests/test_deal_sync_service.py
new file mode 100644
@@ -0,0 +1,85 @@
+"""DealSyncService — sync_once, upsert, cursor."""
+from __future__ import annotations
+
+from collections.abc import Iterable
+from datetime import UTC, datetime, timedelta
+
+import pytest
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from vcc_backend.core.settings import Settings
+from vcc_backend.features.deals.models import Deal
+from vcc_backend.features.deals.service import DealSyncService
+from vcc_backend.integrations.hubspot.client import HubspotDealSummary
+
+pytestmark = pytest.mark.asyncio
+
+
+def _settings() -> Settings:
+ return Settings( # type: ignore[arg-type]
+ database_url="postgresql+asyncpg://u:p@h:5432/d",
+ hubspot_access_token="placeholder",
+ hubspot_bootstrap_days=30,
+ )
+
+
+def _summary(deal_id: str, when: datetime) -> HubspotDealSummary:
+ return HubspotDealSummary(hubspot_deal_id=deal_id, checked_in_at=when)
+
+
+class FakeHubspot:
+ def __init__(self, batches: Iterable[list[HubspotDealSummary]]):
+ self._batches = list(batches)
+
+ async def search_checked_in_deals(self, since, until) -> list[HubspotDealSummary]:
+ return self._batches.pop(0) if self._batches else []
+
+
+async def test_sync_once_inserts_new_deals_and_collects_ids(session: AsyncSession) -> None:
+ when = datetime(2026, 5, 12, 10, tzinfo=UTC)
+ hubspot = FakeHubspot([[_summary("1", when), _summary("2", when)]])
+ service = DealSyncService(session, _settings(), hubspot_client=hubspot)
+
+ result = await service.sync_once()
+
+ assert result.fetched == 2
+ assert result.new_deals == 2
+ assert result.skipped_existing == 0
+ rows = (await session.execute(select(Deal))).scalars().all()
+ assert {r.hubspot_deal_id for r in rows} == {"1", "2"}
+ # The enqueue side-effect (defer_async) is verified in the periodic-task test;
+ # here we just confirm sync_once collected ids for everything it inserted.
+ assert {str(uuid) for uuid in result.enqueued_deal_ids} == {str(r.id) for r in rows}
+
+
+async def test_sync_once_is_idempotent(session: AsyncSession) -> None:
+ """Re-running with the same summaries upserts only updated_at and skips enqueue."""
+ when = datetime(2026, 5, 12, 10, tzinfo=UTC)
+ hubspot = FakeHubspot([[_summary("1", when)], [_summary("1", when)]])
+ service = DealSyncService(session, _settings(), hubspot_client=hubspot)
+
+ first = await service.sync_once()
+ second = await service.sync_once()
+
+ assert first.new_deals == 1
+ assert second.new_deals == 0
+ assert second.skipped_existing == 1
+ assert second.enqueued_deal_ids == []
+
+
+async def test_compute_window_uses_bootstrap_days_when_empty(session: AsyncSession) -> None:
+ service = DealSyncService(session, _settings(), hubspot_client=FakeHubspot([]))
+ since, until = await service._compute_window()
+ delta = until - since
+ assert timedelta(days=29) < delta < timedelta(days=31)
+
+
+async def test_compute_window_uses_max_minus_2d_when_populated(session: AsyncSession) -> None:
+ latest = datetime(2026, 5, 10, 12, tzinfo=UTC)
+ session.add(Deal(hubspot_deal_id="pre-existing", hubspot_checked_in_at=latest))
+ await session.commit()
+ service = DealSyncService(session, _settings(), hubspot_client=FakeHubspot([]))
+ since, _until = await service._compute_window()
+ expected = latest - timedelta(days=2)
+ assert abs((since - expected).total_seconds()) < 1
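For clarity, a standalone restatement of the cursor rule. This is illustrative; the real logic is DealSyncService._compute_window above, which reads max(hubspot_checked_in_at) from the database.

```python
# Illustrative restatement of the cursor window; the 2-day overlap is the
# re-scan margin the commit uses, and the unique upsert keeps re-scans idempotent.
from datetime import UTC, datetime, timedelta

BOOTSTRAP_DAYS = 30            # mirrors HUBSPOT_BOOTSTRAP_DAYS
OVERLAP = timedelta(days=2)    # re-scan margin applied to the latest known check-in


def compute_window(latest_checked_in: datetime | None) -> tuple[datetime, datetime]:
    until = datetime.now(UTC)
    if latest_checked_in is None:
        return until - timedelta(days=BOOTSTRAP_DAYS), until   # empty table: bootstrap
    return latest_checked_in - OVERLAP, until                   # normal tick


print(compute_window(None))
print(compute_window(datetime(2026, 5, 10, 12, tzinfo=UTC)))
```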
6855a07
fix(tests): TRUNCATE before test_get_deals_returns_empty_when_none via session fixture
Why
Test isolation fix. test_get_deals_returns_empty_when_none didn't use the session fixture (which TRUNCATEs at setup), so commits from neighbouring tests leaked in. Adding the fixture as a parameter forces the TRUNCATE.
Files changed
| backend/tests/test_deals_route.py | 2 +- |
Show diff
diff --git a/backend/tests/test_deals_route.py b/backend/tests/test_deals_route.py
@@ -9,7 +9,7 @@ from vcc_backend.features.deals.models import DealStatus, PipelineStage
@pytest.mark.asyncio
async def test_get_deals_returns_empty_when_none(
- database_url: str, monkeypatch: pytest.MonkeyPatch
+ database_url: str, monkeypatch: pytest.MonkeyPatch, session: AsyncSession
) -> None:
monkeypatch.setenv("DATABASE_URL", database_url)
app = create_app()
de5cbb9
feat(backend): poll_hubspot Procrastinate periodic task
Why
poll_hubspot Procrastinate periodic task: @app.periodic(cron="*/5 * * * *") stacked on @app.task. It builds a session, calls DealSyncService.sync_once(), then calls process_deal.defer_async(deal_id=str(uuid)) for each new deal. Commit-then-enqueue ordering means jobs never reference uncommitted rows.
Files changed
| backend/src/vcc_backend/workers/tasks.py | 23 ++++++++++++ |
| backend/tests/test_poll_hubspot_task.py | 62 ++++++++++++++++++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/workers/tasks.py b/backend/src/vcc_backend/workers/tasks.py
@@ -15,6 +15,7 @@ from sqlalchemy import func, select
from vcc_backend.core.db import get_session_factory
from vcc_backend.core.settings import get_settings
from vcc_backend.features.deals.models import Deal
+from vcc_backend.features.deals.service import DealSyncService
from vcc_backend.features.pipeline.orchestrator import run_pipeline
from vcc_backend.workers.app import app
@@ -48,3 +49,25 @@ async def process_deal(deal_id: str) -> None:
logger.warning("process_deal_missing_deal", deal_id=deal_id)
return
await run_pipeline(session, deal)
+
+
+@app.periodic(cron="*/5 * * * *")
+@app.task(queue="default", name="poll_hubspot")
+async def poll_hubspot(timestamp: int) -> None:
+ """Periodic HubSpot poll: search → upsert deals → enqueue process_deal for new ones."""
+ settings = get_settings()
+ sm = get_session_factory(settings.database_url)
+ async with sm() as session:
+ service = DealSyncService(session, settings)
+ result = await service.sync_once()
+
+ for deal_id in result.enqueued_deal_ids:
+ await process_deal.defer_async(deal_id=str(deal_id))
+
+ logger.info(
+ "hubspot_poll_completed",
+ fetched=result.fetched,
+ new_deals=result.new_deals,
+ skipped_existing=result.skipped_existing,
+ cursor_since=result.cursor_since.isoformat(),
+ )
diff --git a/backend/tests/test_poll_hubspot_task.py b/backend/tests/test_poll_hubspot_task.py
new file mode 100644
@@ -0,0 +1,62 @@
+"""poll_hubspot periodic task — Procrastinate InMemoryConnector."""
+from __future__ import annotations
+
+from unittest.mock import AsyncMock
+
+import pytest
+
+
+def test_poll_hubspot_is_registered_as_periodic() -> None:
+ from vcc_backend.workers import app as worker_app
+
+ registered = {pt.task.name for pt in worker_app.app.periodic_registry.periodic_tasks.values()}
+ assert "poll_hubspot" in registered
+
+
+@pytest.mark.asyncio
+async def test_poll_hubspot_delegates_to_sync_service(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Task wires settings → service → defer_async per returned deal id."""
+ from contextlib import asynccontextmanager
+ from datetime import UTC, datetime
+ from unittest.mock import MagicMock
+ from uuid import uuid4
+
+ from vcc_backend.features.deals.service import SyncResult
+ from vcc_backend.workers.tasks import poll_hubspot
+
+ monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
+ monkeypatch.setenv("HUBSPOT_ACCESS_TOKEN", "test-token")
+
+ deal_id = uuid4()
+ result = SyncResult(
+ fetched=1,
+ new_deals=1,
+ skipped_existing=0,
+ cursor_since=datetime.now(UTC),
+ enqueued_deal_ids=[deal_id],
+ )
+
+ sync_mock = AsyncMock(return_value=result)
+ defer_mock = AsyncMock()
+
+ fake_session = MagicMock()
+
+ @asynccontextmanager
+ async def fake_session_factory():
+ yield fake_session
+
+ monkeypatch.setattr(
+ "vcc_backend.workers.tasks.get_session_factory",
+ lambda _url: fake_session_factory,
+ )
+ monkeypatch.setattr(
+ "vcc_backend.workers.tasks.DealSyncService.sync_once", sync_mock
+ )
+ monkeypatch.setattr(
+ "vcc_backend.workers.tasks.process_deal.defer_async", defer_mock
+ )
+
+ await poll_hubspot.func(timestamp=1234567890)
+
+ sync_mock.assert_awaited_once()
+ defer_mock.assert_awaited_once_with(deal_id=str(deal_id))
a81e54b
fix(tests): set DATABASE_URL before importing workers.app in poll_hubspot tests
Why
Second test isolation fix. workers.app runs _build_app() at module-load time, which requires DATABASE_URL; when the test ran in isolation the env var wasn't set before the import. Moving the monkeypatch.setenv calls before the import fixes it.
Files changed
| backend/tests/test_poll_hubspot_task.py | 15 ++++++++++----- |
Show diff
diff --git a/backend/tests/test_poll_hubspot_task.py b/backend/tests/test_poll_hubspot_task.py
@@ -6,7 +6,10 @@ from unittest.mock import AsyncMock
import pytest
-def test_poll_hubspot_is_registered_as_periodic() -> None:
+def test_poll_hubspot_is_registered_as_periodic(
+ monkeypatch: pytest.MonkeyPatch, database_url: str
+) -> None:
+ monkeypatch.setenv("DATABASE_URL", database_url)
from vcc_backend.workers import app as worker_app
registered = {pt.task.name for pt in worker_app.app.periodic_registry.periodic_tasks.values()}
@@ -14,19 +17,21 @@ def test_poll_hubspot_is_registered_as_periodic() -> None:
@pytest.mark.asyncio
-async def test_poll_hubspot_delegates_to_sync_service(monkeypatch: pytest.MonkeyPatch) -> None:
+async def test_poll_hubspot_delegates_to_sync_service(
+ monkeypatch: pytest.MonkeyPatch, database_url: str
+) -> None:
"""Task wires settings → service → defer_async per returned deal id."""
from contextlib import asynccontextmanager
from datetime import UTC, datetime
from unittest.mock import MagicMock
from uuid import uuid4
+ monkeypatch.setenv("DATABASE_URL", database_url)
+ monkeypatch.setenv("HUBSPOT_ACCESS_TOKEN", "test-placeholder")
+
from vcc_backend.features.deals.service import SyncResult
from vcc_backend.workers.tasks import poll_hubspot
- monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
- monkeypatch.setenv("HUBSPOT_ACCESS_TOKEN", "test-token")
-
deal_id = uuid4()
result = SyncResult(
fetched=1,
329a838
feat(backend): DealQueueService + queue schemas with stage synthesis
Why
Queue read-side. Adds a pipeline_runs relationship to Deal, new Pydantic schemas (StageProgress, DealQueueRow, DealQueuePage), and DealQueueService.list_page: selectinload keeps it to exactly 2 SQL queries (anti-N+1), keyset pagination via before=updated_at, and the limit+1 trick for next_before. _build_steps synthesizes pending for the 4 UI stages with no run row, the latest run wins on duplicates, and REVIEW/DONE are excluded.
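A hypothetical consumer loop for the keyset-pagination contract, not part of this commit: pass next_before back as before until it comes back None.

```python
# Hypothetical consumer of DealQueueService.list_page; names mirror this commit.
from datetime import datetime

from sqlalchemy.ext.asyncio import AsyncSession

from vcc_backend.features.deals.schemas import DealQueueRow
from vcc_backend.features.deals.service import DealQueueService


async def fetch_all_queue_rows(session: AsyncSession, page_size: int = 50) -> list[DealQueueRow]:
    service = DealQueueService(session)
    rows: list[DealQueueRow] = []
    before: datetime | None = None
    while True:
        page = await service.list_page(limit=page_size, before=before)
        rows.extend(page.items)
        if page.next_before is None:   # fewer than limit+1 rows matched: last page
            break
        before = page.next_before      # keyset cursor: strictly older updated_at next time
    return rows
```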
Files changed
| backend/src/vcc_backend/features/deals/models.py | 13 ++- |
| backend/src/vcc_backend/features/deals/schemas.py | 25 +++++ |
| backend/src/vcc_backend/features/deals/service.py | 90 +++++++++++++++++ |
| backend/tests/test_deal_queue_service.py | 117 ++++++++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/features/deals/models.py b/backend/src/vcc_backend/features/deals/models.py
@@ -2,14 +2,18 @@ from __future__ import annotations
import enum
from datetime import UTC, datetime
+from typing import TYPE_CHECKING
from uuid import UUID, uuid4
from sqlalchemy import DateTime, Enum, Text, func
from sqlalchemy.dialects.postgresql import UUID as PgUUID
-from sqlalchemy.orm import Mapped, mapped_column
+from sqlalchemy.orm import Mapped, mapped_column, relationship
from vcc_backend.core.db import Base
+if TYPE_CHECKING:
+ from vcc_backend.features.pipeline.models import PipelineRun
+
class DealStatus(enum.StrEnum):
PENDING = "pending"
@@ -66,3 +70,10 @@ class Deal(Base):
server_default=func.now(),
onupdate=func.now(),
)
+
+ pipeline_runs: Mapped[list[PipelineRun]] = relationship(
+ "PipelineRun",
+ backref="deal",
+ cascade="all, delete-orphan",
+ lazy="noload", # always load explicitly via selectinload
+ )
diff --git a/backend/src/vcc_backend/features/deals/schemas.py b/backend/src/vcc_backend/features/deals/schemas.py
@@ -1,4 +1,5 @@
from datetime import datetime
+from typing import Literal
from uuid import UUID
from pydantic import BaseModel, ConfigDict
@@ -16,3 +17,27 @@ class DealRead(BaseModel):
current_stage: PipelineStage
created_at: datetime
updated_at: datetime
+
+
+class StageProgress(BaseModel):
+ stage: Literal["intake", "segmentation", "categorization", "appraisal"]
+ status: Literal["pending", "running", "succeeded", "failed"]
+ started_at: datetime | None = None
+ ended_at: datetime | None = None
+ pipeline_run_id: UUID | None = None
+ error: str | None = None
+
+
+class DealQueueRow(BaseModel):
+ id: UUID
+ hubspot_deal_id: str
+ status: DealStatus
+ current_stage: PipelineStage
+ created_at: datetime
+ updated_at: datetime
+ steps: list[StageProgress]
+
+
+class DealQueuePage(BaseModel):
+ items: list[DealQueueRow]
+ next_before: datetime | None = None
diff --git a/backend/src/vcc_backend/features/deals/service.py b/backend/src/vcc_backend/features/deals/service.py
@@ -8,6 +8,7 @@ from uuid import UUID
import structlog
from sqlalchemy import func, select, update
from sqlalchemy.ext.asyncio import AsyncSession
+from sqlalchemy.orm import selectinload
from vcc_backend.core.settings import Settings
from vcc_backend.features.deals.models import Deal, DealStatus, PipelineStage
@@ -122,3 +123,92 @@ class DealSyncService:
self._session.add(deal)
await self._session.flush()
return deal.id, True
+
+
+# ---------------------------------------------------------------------------
+# DealQueueService
+# ---------------------------------------------------------------------------
+
+from vcc_backend.features.deals.schemas import ( # noqa: E402
+ DealQueuePage,
+ DealQueueRow,
+ StageProgress,
+)
+from vcc_backend.features.pipeline.models import PipelineRun, RunStatus # noqa: E402
+
+_UI_STAGES: tuple[PipelineStage, ...] = (
+ PipelineStage.INTAKE,
+ PipelineStage.SEGMENTATION,
+ PipelineStage.CATEGORIZATION,
+ PipelineStage.APPRAISAL,
+)
+
+
+class DealQueueService:
+ """Read-side service for /admin/queue/deals."""
+
+ def __init__(self, session: AsyncSession) -> None:
+ self._session = session
+
+ async def list_page(
+ self,
+ limit: int,
+ before: datetime | None,
+ ) -> DealQueuePage:
+ stmt = (
+ select(Deal)
+ .options(selectinload(Deal.pipeline_runs))
+ .order_by(Deal.updated_at.desc(), Deal.id.desc())
+ .limit(limit + 1)
+ .execution_options(populate_existing=True)
+ )
+ if before is not None:
+ stmt = stmt.where(Deal.updated_at < before)
+
+ rows = list((await self._session.execute(stmt)).scalars().unique().all())
+ has_next = len(rows) > limit
+ rows = rows[:limit]
+
+ items = [self._to_row(d) for d in rows]
+ next_before = rows[-1].updated_at if has_next and rows else None
+ return DealQueuePage(items=items, next_before=next_before)
+
+ def _to_row(self, deal: Deal) -> DealQueueRow:
+ return DealQueueRow(
+ id=deal.id,
+ hubspot_deal_id=deal.hubspot_deal_id,
+ status=deal.status,
+ current_stage=deal.current_stage,
+ created_at=deal.created_at,
+ updated_at=deal.updated_at,
+ steps=_build_steps(deal),
+ )
+
+
+def _build_steps(deal: Deal) -> list[StageProgress]:
+ """One StageProgress per UI stage, latest-by-started_at wins for duplicates."""
+ from typing import Literal, cast
+
+ by_stage: dict[PipelineStage, PipelineRun] = {}
+ for _r in sorted(deal.pipeline_runs, key=lambda r: r.started_at):
+ by_stage[_r.stage] = _r
+ result: list[StageProgress] = []
+ for stage in _UI_STAGES:
+ stage_lit = cast(
+ Literal["intake", "segmentation", "categorization", "appraisal"], stage.value
+ )
+ run: PipelineRun | None = by_stage.get(stage)
+ if run is None:
+ result.append(StageProgress(stage=stage_lit, status="pending"))
+ else:
+ result.append(
+ StageProgress(
+ stage=stage_lit,
+ status=run.status.value,
+ started_at=run.started_at,
+ ended_at=run.ended_at,
+ pipeline_run_id=run.id,
+ error=run.error if run.status == RunStatus.FAILED else None,
+ )
+ )
+ return result
diff --git a/backend/tests/test_deal_queue_service.py b/backend/tests/test_deal_queue_service.py
new file mode 100644
@@ -0,0 +1,117 @@
+"""DealQueueService — pagination + _build_steps synthesis."""
+from __future__ import annotations
+
+from datetime import UTC, datetime
+
+import pytest
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from vcc_backend.features.deals.models import Deal, PipelineStage
+from vcc_backend.features.deals.service import DealQueueService
+from vcc_backend.features.pipeline.models import PipelineRun, RunStatus
+
+pytestmark = pytest.mark.asyncio
+
+
+def _utc(year: int, month: int, day: int, hour: int = 0) -> datetime:
+ return datetime(year, month, day, hour, tzinfo=UTC)
+
+
+async def _make_deal(session: AsyncSession, hub_id: str, updated_at: datetime) -> Deal:
+ deal = Deal(hubspot_deal_id=hub_id)
+ session.add(deal)
+ await session.flush()
+ # Force updated_at to a deterministic value for keyset pagination ordering tests
+ await session.execute(
+ Deal.__table__.update().where(Deal.id == deal.id).values(updated_at=updated_at)
+ )
+ await session.commit()
+ await session.refresh(deal)
+ return deal
+
+
+async def test_list_page_returns_empty_when_no_deals(session: AsyncSession) -> None:
+ service = DealQueueService(session)
+ page = await service.list_page(limit=50, before=None)
+ assert page.items == []
+ assert page.next_before is None
+
+
+async def test_list_page_paginates_with_before_cursor(session: AsyncSession) -> None:
+ await _make_deal(session, "d1", _utc(2026, 5, 13, 10))
+ await _make_deal(session, "d2", _utc(2026, 5, 13, 11))
+ await _make_deal(session, "d3", _utc(2026, 5, 13, 12))
+
+ service = DealQueueService(session)
+ page1 = await service.list_page(limit=2, before=None)
+ assert [r.hubspot_deal_id for r in page1.items] == ["d3", "d2"]
+ assert page1.next_before == _utc(2026, 5, 13, 11)
+
+ page2 = await service.list_page(limit=2, before=page1.next_before)
+ assert [r.hubspot_deal_id for r in page2.items] == ["d1"]
+ assert page2.next_before is None
+
+
+async def test_build_steps_marks_missing_stages_pending(session: AsyncSession) -> None:
+ await _make_deal(session, "no-runs", _utc(2026, 5, 13))
+ service = DealQueueService(session)
+ page = await service.list_page(limit=10, before=None)
+ assert len(page.items) == 1
+ row = page.items[0]
+ assert len(row.steps) == 4
+ assert {s.stage for s in row.steps} == {"intake", "segmentation", "categorization", "appraisal"}
+ assert all(s.status == "pending" for s in row.steps)
+
+
+async def test_build_steps_excludes_review_and_done(session: AsyncSession) -> None:
+ deal = await _make_deal(session, "with-review-run", _utc(2026, 5, 13))
+ session.add(
+ PipelineRun(
+ deal_id=deal.id, stage=PipelineStage.REVIEW, status=RunStatus.SUCCEEDED,
+ started_at=_utc(2026, 5, 13), ended_at=_utc(2026, 5, 13),
+ )
+ )
+ await session.commit()
+ service = DealQueueService(session)
+ page = await service.list_page(limit=10, before=None)
+ stages = [s.stage for s in page.items[0].steps]
+ assert "review" not in stages
+ assert "done" not in stages
+
+
+async def test_build_steps_latest_run_wins(session: AsyncSession) -> None:
+ deal = await _make_deal(session, "twice", _utc(2026, 5, 13))
+ session.add_all([
+ PipelineRun(
+ deal_id=deal.id, stage=PipelineStage.INTAKE, status=RunStatus.FAILED,
+ started_at=_utc(2026, 5, 13, 10), ended_at=_utc(2026, 5, 13, 10),
+ error="first try failed",
+ ),
+ PipelineRun(
+ deal_id=deal.id, stage=PipelineStage.INTAKE, status=RunStatus.SUCCEEDED,
+ started_at=_utc(2026, 5, 13, 11), ended_at=_utc(2026, 5, 13, 11),
+ ),
+ ])
+ await session.commit()
+ service = DealQueueService(session)
+ page = await service.list_page(limit=10, before=None)
+ intake = next(s for s in page.items[0].steps if s.stage == "intake")
+ assert intake.status == "succeeded"
+ assert intake.error is None
+
+
+async def test_failed_run_surfaces_error(session: AsyncSession) -> None:
+ deal = await _make_deal(session, "failed-run", _utc(2026, 5, 13))
+ session.add(
+ PipelineRun(
+ deal_id=deal.id, stage=PipelineStage.CATEGORIZATION, status=RunStatus.FAILED,
+ started_at=_utc(2026, 5, 13), ended_at=_utc(2026, 5, 13),
+ error="downstream 503",
+ )
+ )
+ await session.commit()
+ service = DealQueueService(session)
+ page = await service.list_page(limit=10, before=None)
+ cat = next(s for s in page.items[0].steps if s.stage == "categorization")
+ assert cat.status == "failed"
+ assert cat.error == "downstream 503"
eb1d076
feat(backend): GET /admin/queue/deals — keyset-paginated queue rows
Why: GET /admin/queue/deals?limit&before route. Thin shell over the service — limit bounded 1..200 (422 outside), before parses ISO. 4 route tests. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
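For reference, the keyset contract can be consumed with a plain fetch loop. A hedged sketch (assumes a backend on localhost:8000; field names follow DealQueuePage):

// Sketch: page through /admin/queue/deals until next_before is null.
async function fetchAllQueueDeals(limit = 50): Promise<unknown[]> {
  const rows: unknown[] = [];
  let before: string | null = null;
  do {
    const qs = new URLSearchParams({ limit: String(limit) });
    if (before) qs.set("before", before);
    const resp = await fetch(`http://localhost:8000/admin/queue/deals?${qs}`);
    if (!resp.ok) throw new Error(`queue fetch failed: ${resp.status}`); // 422 when limit is outside 1..200
    const page = (await resp.json()) as { items: unknown[]; next_before: string | null };
    rows.push(...page.items);
    before = page.next_before; // null on the last page
  } while (before !== null);
  return rows;
}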
Files changed
| backend/src/vcc_backend/api/routers/admin.py | 15 +++++- |
| backend/tests/test_admin_queue_route.py | 70 ++++++++++++++++++++++++++++ |
Show diff
diff --git a/backend/src/vcc_backend/api/routers/admin.py b/backend/src/vcc_backend/api/routers/admin.py
@@ -1,14 +1,17 @@
"""Admin endpoints. Single /admin router; sub-slice C adds GET /admin/jobs here."""
from __future__ import annotations
+from datetime import datetime
from uuid import UUID
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, HTTPException, Query
from pydantic import BaseModel
from sqlalchemy.ext.asyncio import AsyncSession
from vcc_backend.api.deps import db_session
from vcc_backend.features.deals.models import Deal
+from vcc_backend.features.deals.schemas import DealQueuePage
+from vcc_backend.features.deals.service import DealQueueService
router = APIRouter(prefix="/admin", tags=["admin"])
@@ -36,3 +39,13 @@ async def enqueue_process_deal(
async with procrastinate_app.open_async():
job_id = await process_deal.defer_async(deal_id=str(deal_id))
return EnqueueProcessResponse(job_id=job_id, deal_id=deal_id)
+
+
+@router.get("/queue/deals", response_model=DealQueuePage)
+async def list_queue_deals(
+ limit: int = Query(default=50, ge=1, le=200),
+ before: datetime | None = Query(default=None),
+ session: AsyncSession = Depends(db_session),
+) -> DealQueuePage:
+ """Paginated per-deal queue for the admin UI; FE filters client-side."""
+ return await DealQueueService(session).list_page(limit=limit, before=before)
diff --git a/backend/tests/test_admin_queue_route.py b/backend/tests/test_admin_queue_route.py
new file mode 100644
@@ -0,0 +1,70 @@
+"""GET /admin/queue/deals — TestClient + real DB."""
+from __future__ import annotations
+
+from datetime import UTC, datetime
+from urllib.parse import urlencode
+
+import pytest
+from httpx import ASGITransport, AsyncClient
+
+from vcc_backend.api.app import create_app
+from vcc_backend.features.deals.models import Deal
+
+pytestmark = pytest.mark.asyncio
+
+
+def _client() -> AsyncClient:
+ app = create_app()
+ return AsyncClient(transport=ASGITransport(app=app), base_url="http://test")
+
+
+async def test_empty_db_returns_empty_page(
+ database_url: str, monkeypatch: pytest.MonkeyPatch, session
+) -> None:
+ monkeypatch.setenv("DATABASE_URL", database_url)
+ async with _client() as ac:
+ resp = await ac.get("/admin/queue/deals")
+ assert resp.status_code == 200
+ assert resp.json() == {"items": [], "next_before": None}
+
+
+async def test_lists_deal_with_synthesized_pending_stages(
+ database_url: str, monkeypatch: pytest.MonkeyPatch, session
+) -> None:
+ monkeypatch.setenv("DATABASE_URL", database_url)
+ d = Deal(hubspot_deal_id="abc")
+ session.add(d)
+ await session.commit()
+ async with _client() as ac:
+ resp = await ac.get("/admin/queue/deals")
+ assert resp.status_code == 200
+ body = resp.json()
+ assert len(body["items"]) == 1
+ row = body["items"][0]
+ assert row["hubspot_deal_id"] == "abc"
+ assert len(row["steps"]) == 4
+ assert all(s["status"] == "pending" for s in row["steps"])
+
+
+async def test_limit_bounds_enforced(
+ database_url: str, monkeypatch: pytest.MonkeyPatch, session
+) -> None:
+ monkeypatch.setenv("DATABASE_URL", database_url)
+ async with _client() as ac:
+ assert (await ac.get("/admin/queue/deals?limit=0")).status_code == 422
+ assert (await ac.get("/admin/queue/deals?limit=201")).status_code == 422
+
+
+async def test_before_parameter_parses_iso(
+ database_url: str, monkeypatch: pytest.MonkeyPatch, session
+) -> None:
+ monkeypatch.setenv("DATABASE_URL", database_url)
+ d = Deal(hubspot_deal_id="abc")
+ session.add(d)
+ await session.commit()
+ async with _client() as ac:
+ future = datetime(2099, 1, 1, tzinfo=UTC).isoformat()
+ qs = urlencode({"before": future})
+ resp = await ac.get(f"/admin/queue/deals?{qs}")
+ assert resp.status_code == 200
+ assert len(resp.json()["items"]) == 1
a4da6d0
chore: regenerate openapi + FE types for /admin/queue/deals
Why: Codegen update. openapi.yaml regenerated to include the new endpoint + schemas (DealQueuePage, DealQueueRow, StageProgress). web/src/lib/api/generated/ is gitignored — regenerated locally.
Files changed
| openapi.yaml | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ |
Show diff
diff --git a/openapi.yaml b/openapi.yaml
@@ -63,8 +63,100 @@ paths:
application/json:
schema:
$ref: '#/components/schemas/HTTPValidationError'
+ /admin/queue/deals:
+ get:
+ tags:
+ - admin
+ summary: List Queue Deals
+ description: Paginated per-deal queue for the admin UI; FE filters client-side.
+ operationId: list_queue_deals_admin_queue_deals_get
+ parameters:
+ - name: limit
+ in: query
+ required: false
+ schema:
+ type: integer
+ maximum: 200
+ minimum: 1
+ default: 50
+ title: Limit
+ - name: before
+ in: query
+ required: false
+ schema:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Before
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/DealQueuePage'
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
components:
schemas:
+ DealQueuePage:
+ properties:
+ items:
+ items:
+ $ref: '#/components/schemas/DealQueueRow'
+ type: array
+ title: Items
+ next_before:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Next Before
+ type: object
+ required:
+ - items
+ title: DealQueuePage
+ DealQueueRow:
+ properties:
+ id:
+ type: string
+ format: uuid
+ title: Id
+ hubspot_deal_id:
+ type: string
+ title: Hubspot Deal Id
+ status:
+ $ref: '#/components/schemas/DealStatus'
+ current_stage:
+ $ref: '#/components/schemas/PipelineStage'
+ created_at:
+ type: string
+ format: date-time
+ title: Created At
+ updated_at:
+ type: string
+ format: date-time
+ title: Updated At
+ steps:
+ items:
+ $ref: '#/components/schemas/StageProgress'
+ type: array
+ title: Steps
+ type: object
+ required:
+ - id
+ - hubspot_deal_id
+ - status
+ - current_stage
+ - created_at
+ - updated_at
+ - steps
+ title: DealQueueRow
DealRead:
properties:
id:
@@ -133,6 +225,52 @@ components:
- review
- done
title: PipelineStage
+ StageProgress:
+ properties:
+ stage:
+ type: string
+ enum:
+ - intake
+ - segmentation
+ - categorization
+ - appraisal
+ title: Stage
+ status:
+ type: string
+ enum:
+ - pending
+ - running
+ - succeeded
+ - failed
+ title: Status
+ started_at:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Started At
+ ended_at:
+ anyOf:
+ - type: string
+ format: date-time
+ - type: 'null'
+ title: Ended At
+ pipeline_run_id:
+ anyOf:
+ - type: string
+ format: uuid
+ - type: 'null'
+ title: Pipeline Run Id
+ error:
+ anyOf:
+ - type: string
+ - type: 'null'
+ title: Error
+ type: object
+ required:
+ - stage
+ - status
+ title: StageProgress
ValidationError:
properties:
loc:
995e6c1
feat(web): queue adapter API→UI shape with status + duration synthesis
Why: FE adapter queue.ts, the only place the API ↔ UI shape diverges. succeeded → done, ISO strings → ms epoch, row status synthesized from steps, durationMs from min-start..max-end-or-now. 6 vitest cases. (vitest was added as a dev dep here.) Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Files changed
| web/bun.lockb | Bin 240002 -> 249839 bytes |
| web/package.json | 3 +- |
| web/src/lib/api/adapters/queue.test.ts | 127 +++++++++++++++++++++++++++++++++ |
| web/src/lib/api/adapters/queue.ts | 90 +++++++++++++++++++++++ |
Show diff
diff --git a/web/bun.lockb b/web/bun.lockb
diff --git a/web/package.json b/web/package.json
@@ -88,7 +88,8 @@
"prettier": "^3.7.3",
"typescript": "^5.8.3",
"typescript-eslint": "^8.56.1",
- "vite": "^7.3.1"
+ "vite": "^7.3.1",
+ "vitest": "^4.1.6"
},
"msw": {
"workerDirectory": [
diff --git a/web/src/lib/api/adapters/queue.test.ts b/web/src/lib/api/adapters/queue.test.ts
new file mode 100644
@@ -0,0 +1,127 @@
+import { describe, it, expect } from "vitest";
+import { adaptQueuePage } from "./queue";
+import type { DealQueuePage } from "@/lib/api/generated/types.gen";
+
+const baseRow = {
+ id: "11111111-1111-1111-1111-111111111111",
+ hubspot_deal_id: "10293",
+ status: "pending" as const,
+ current_stage: "intake" as const,
+ created_at: "2026-05-13T10:00:00Z",
+ updated_at: "2026-05-13T10:05:00Z",
+};
+
+function step(stage: "intake" | "segmentation" | "categorization" | "appraisal",
+ status: "pending" | "running" | "succeeded" | "failed",
+ opts: { startedAt?: string; endedAt?: string; error?: string } = {}) {
+ return {
+ stage,
+ status,
+ started_at: opts.startedAt ?? null,
+ ended_at: opts.endedAt ?? null,
+ pipeline_run_id: status === "pending" ? null : "22222222-2222-2222-2222-222222222222",
+ error: opts.error ?? null,
+ };
+}
+
+describe("adaptQueuePage", () => {
+ it("maps hubspot id to display and UUID to action id", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "pending"),
+ step("segmentation", "pending"),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ }],
+ next_before: null,
+ };
+ const { rows } = adaptQueuePage(page);
+ expect(rows[0].dealId).toBe("10293");
+ expect(rows[0].dealUuid).toBe(baseRow.id);
+ });
+
+ it("synthesizes row.status = pending when every step is pending", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "pending"),
+ step("segmentation", "pending"),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ }],
+ next_before: null,
+ };
+ expect(adaptQueuePage(page).rows[0].status).toBe("pending");
+ });
+
+ it("synthesizes row.status = running when any step is running", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
+ step("segmentation", "running", { startedAt: "2026-05-13T10:01:00Z" }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ }],
+ next_before: null,
+ };
+ expect(adaptQueuePage(page).rows[0].status).toBe("running");
+ });
+
+ it("synthesizes row.status = failed when any step is failed", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
+ step("segmentation", "failed", { startedAt: "2026-05-13T10:01:00Z", endedAt: "2026-05-13T10:02:00Z", error: "boom" }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ }],
+ next_before: null,
+ };
+ expect(adaptQueuePage(page).rows[0].status).toBe("failed");
+ });
+
+ it("synthesizes row.status = done when all steps succeeded", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
+ step("segmentation", "succeeded", { startedAt: "2026-05-13T10:01:00Z", endedAt: "2026-05-13T10:02:00Z" }),
+ step("categorization", "succeeded", { startedAt: "2026-05-13T10:02:00Z", endedAt: "2026-05-13T10:03:00Z" }),
+ step("appraisal", "succeeded", { startedAt: "2026-05-13T10:03:00Z", endedAt: "2026-05-13T10:04:00Z" }),
+ ],
+ }],
+ next_before: null,
+ };
+ expect(adaptQueuePage(page).rows[0].status).toBe("done");
+ });
+
+ it("converts ISO timestamps to epoch ms and computes durationMs", () => {
+ const page: DealQueuePage = {
+ items: [{
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:00:30Z" }),
+ step("segmentation", "succeeded", { startedAt: "2026-05-13T10:00:30Z", endedAt: "2026-05-13T10:01:00Z" }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ }],
+ next_before: null,
+ };
+ const { rows } = adaptQueuePage(page);
+ expect(rows[0].steps[0].startedAt).toBe(Date.parse("2026-05-13T10:00:00Z"));
+ expect(rows[0].durationMs).toBe(60_000);
+ });
+});
diff --git a/web/src/lib/api/adapters/queue.ts b/web/src/lib/api/adapters/queue.ts
new file mode 100644
@@ -0,0 +1,90 @@
+import type { DealQueuePage, DealQueueRow, StageProgress } from "@/lib/api/generated/types.gen";
+
+export type StageId = "intake" | "segmentation" | "categorization" | "appraisal";
+export type RunStatus = "pending" | "running" | "done" | "failed";
+export type StepStatus = "pending" | "running" | "done" | "failed";
+
+export interface QueueStep {
+ stage: StageId;
+ status: StepStatus;
+ startedAt?: number;
+ endedAt?: number;
+ pipelineRunId?: string;
+ errorMessage?: string;
+}
+
+export interface QueueRow {
+ dealId: string; // hubspot_deal_id, display
+ dealUuid: string; // backend UUID, for actions
+ createdAt: number;
+ updatedAt: number;
+ steps: QueueStep[];
+ status: RunStatus;
+ durationMs: number;
+}
+
+export const STAGES: { id: StageId; label: string }[] = [
+ { id: "intake", label: "Intake" },
+ { id: "segmentation", label: "Segmentation" },
+ { id: "categorization", label: "Categorization" },
+ { id: "appraisal", label: "Appraisal" },
+];
+
+export function adaptQueuePage(page: DealQueuePage): {
+ rows: QueueRow[];
+ nextBefore: string | null;
+} {
+ return {
+ rows: page.items.map(adaptRow),
+ nextBefore: page.next_before ?? null,
+ };
+}
+
+function adaptRow(row: DealQueueRow): QueueRow {
+ const steps = row.steps.map(adaptStep);
+ return {
+ dealId: row.hubspot_deal_id,
+ dealUuid: row.id,
+ createdAt: Date.parse(row.created_at),
+ updatedAt: Date.parse(row.updated_at),
+ steps,
+ status: deriveStatus(steps),
+ durationMs: deriveDuration(steps),
+ };
+}
+
+function adaptStep(s: StageProgress): QueueStep {
+ return {
+ stage: s.stage,
+ status: s.status === "succeeded" ? "done" : (s.status as StepStatus),
+ startedAt: s.started_at ? Date.parse(s.started_at) : undefined,
+ endedAt: s.ended_at ? Date.parse(s.ended_at) : undefined,
+ pipelineRunId: s.pipeline_run_id ?? undefined,
+ errorMessage: s.error ?? undefined,
+ };
+}
+
+function deriveStatus(steps: QueueStep[]): RunStatus {
+ if (steps.some((s) => s.status === "failed")) return "failed";
+ if (steps.some((s) => s.status === "running")) return "running";
+ if (steps.every((s) => s.status === "done")) return "done";
+ return "pending";
+}
+
+function deriveDuration(steps: QueueStep[]): number {
+ const starts = steps.map((s) => s.startedAt).filter((v): v is number => v !== undefined);
+ if (!starts.length) return 0;
+ const start = Math.min(...starts);
+ const ends = steps.map((s) => s.endedAt).filter((v): v is number => v !== undefined);
+ if (ends.length) {
+ // Use span from first start to furthest recorded end.
+ // When a step is actively running (no endedAt), extend to now.
+ const hasRunningStep = steps.some((s) => s.status === "running" && !s.endedAt);
+ const rightEdge = hasRunningStep ? Math.max(Date.now(), ...ends) : Math.max(...ends);
+ return rightEdge - start;
+ }
+ // No ended timestamps yet.
+ const hasRunningStep = steps.some((s) => s.status === "running");
+ if (!hasRunningStep) return 0;
+ return Date.now() - start;
+}
63e371d
feat(web): MSW handler + fixture for GET /admin/queue/deals
Why: MSW handler + fixture for design-time data. 5 representative rows (mid-progress, failed-categorization, completed, pending, running-segmentation) covering each visual state of the queue table.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
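Individual tests can also shadow the default handler per case. A sketch assuming a setupServer-based test setup (the server wiring below is hypothetical; only the handler/fixture paths come from this commit):

import { http, HttpResponse } from "msw";
import { setupServer } from "msw/node";
import queueDeals from "@/lib/api/mocks/fixtures/queue-deals.json";

// Default: serve the 5-row fixture, mirroring the handler added in this commit.
// In a real test suite, setupServer/listen would live in shared test setup.
const server = setupServer(
  http.get("http://localhost:8000/admin/queue/deals", () => HttpResponse.json(queueDeals)),
);
server.listen();

// Per-test override, e.g. to exercise the empty-queue state:
server.use(
  http.get("http://localhost:8000/admin/queue/deals", () =>
    HttpResponse.json({ items: [], next_before: null }),
  ),
);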
Files changed
| web/src/lib/api/mocks/fixtures/queue-deals.json | 75 +++++++++++++++++++++++++ |
| web/src/lib/api/mocks/handlers.ts | 2 + |
Show diff
diff --git a/web/src/lib/api/mocks/fixtures/queue-deals.json b/web/src/lib/api/mocks/fixtures/queue-deals.json
new file mode 100644
@@ -0,0 +1,75 @@
+{
+ "items": [
+ {
+ "id": "11111111-1111-1111-1111-111111111111",
+ "hubspot_deal_id": "10293",
+ "status": "in_progress",
+ "current_stage": "categorization",
+ "created_at": "2026-05-13T09:55:00Z",
+ "updated_at": "2026-05-13T10:02:30Z",
+ "steps": [
+ { "stage": "intake", "status": "succeeded", "started_at": "2026-05-13T09:55:00Z", "ended_at": "2026-05-13T09:55:30Z", "pipeline_run_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa1", "error": null },
+ { "stage": "segmentation", "status": "succeeded", "started_at": "2026-05-13T09:55:30Z", "ended_at": "2026-05-13T10:01:00Z", "pipeline_run_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa2", "error": null },
+ { "stage": "categorization", "status": "running", "started_at": "2026-05-13T10:01:00Z", "ended_at": null, "pipeline_run_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa3", "error": null },
+ { "stage": "appraisal", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null }
+ ]
+ },
+ {
+ "id": "22222222-2222-2222-2222-222222222222",
+ "hubspot_deal_id": "10290",
+ "status": "in_progress",
+ "current_stage": "categorization",
+ "created_at": "2026-05-13T09:50:00Z",
+ "updated_at": "2026-05-13T10:01:10Z",
+ "steps": [
+ { "stage": "intake", "status": "succeeded", "started_at": "2026-05-13T09:50:00Z", "ended_at": "2026-05-13T09:50:30Z", "pipeline_run_id": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbb1", "error": null },
+ { "stage": "segmentation", "status": "succeeded", "started_at": "2026-05-13T09:50:30Z", "ended_at": "2026-05-13T09:58:00Z", "pipeline_run_id": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbb2", "error": null },
+ { "stage": "categorization", "status": "failed", "started_at": "2026-05-13T09:58:00Z", "ended_at": "2026-05-13T10:01:10Z", "pipeline_run_id": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbb3", "error": "Upstream model returned 503 after 3 retries" },
+ { "stage": "appraisal", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null }
+ ]
+ },
+ {
+ "id": "33333333-3333-3333-3333-333333333333",
+ "hubspot_deal_id": "10289",
+ "status": "completed",
+ "current_stage": "done",
+ "created_at": "2026-05-13T09:30:00Z",
+ "updated_at": "2026-05-13T09:42:00Z",
+ "steps": [
+ { "stage": "intake", "status": "succeeded", "started_at": "2026-05-13T09:30:00Z", "ended_at": "2026-05-13T09:30:30Z", "pipeline_run_id": "cccccccc-cccc-cccc-cccc-ccccccccccc1", "error": null },
+ { "stage": "segmentation", "status": "succeeded", "started_at": "2026-05-13T09:30:30Z", "ended_at": "2026-05-13T09:35:00Z", "pipeline_run_id": "cccccccc-cccc-cccc-cccc-ccccccccccc2", "error": null },
+ { "stage": "categorization", "status": "succeeded", "started_at": "2026-05-13T09:35:00Z", "ended_at": "2026-05-13T09:38:00Z", "pipeline_run_id": "cccccccc-cccc-cccc-cccc-ccccccccccc3", "error": null },
+ { "stage": "appraisal", "status": "succeeded", "started_at": "2026-05-13T09:38:00Z", "ended_at": "2026-05-13T09:42:00Z", "pipeline_run_id": "cccccccc-cccc-cccc-cccc-ccccccccccc4", "error": null }
+ ]
+ },
+ {
+ "id": "44444444-4444-4444-4444-444444444444",
+ "hubspot_deal_id": "10288",
+ "status": "pending",
+ "current_stage": "intake",
+ "created_at": "2026-05-13T10:03:00Z",
+ "updated_at": "2026-05-13T10:03:00Z",
+ "steps": [
+ { "stage": "intake", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null },
+ { "stage": "segmentation", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null },
+ { "stage": "categorization", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null },
+ { "stage": "appraisal", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null }
+ ]
+ },
+ {
+ "id": "55555555-5555-5555-5555-555555555555",
+ "hubspot_deal_id": "10287",
+ "status": "in_progress",
+ "current_stage": "segmentation",
+ "created_at": "2026-05-13T10:00:00Z",
+ "updated_at": "2026-05-13T10:00:45Z",
+ "steps": [
+ { "stage": "intake", "status": "succeeded", "started_at": "2026-05-13T10:00:00Z", "ended_at": "2026-05-13T10:00:30Z", "pipeline_run_id": "dddddddd-dddd-dddd-dddd-ddddddddddd1", "error": null },
+ { "stage": "segmentation", "status": "running", "started_at": "2026-05-13T10:00:30Z", "ended_at": null, "pipeline_run_id": "dddddddd-dddd-dddd-dddd-ddddddddddd2", "error": null },
+ { "stage": "categorization", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null },
+ { "stage": "appraisal", "status": "pending", "started_at": null, "ended_at": null, "pipeline_run_id": null, "error": null }
+ ]
+ }
+ ],
+ "next_before": null
+}
diff --git a/web/src/lib/api/mocks/handlers.ts b/web/src/lib/api/mocks/handlers.ts
@@ -6,10 +6,12 @@
import { http, HttpResponse } from "msw";
import { BOX_FIXTURES } from "./fixtures/boxes";
+import queueDeals from "./fixtures/queue-deals.json";
const baseUrl = import.meta.env.VITE_API_BASE_URL ?? "http://localhost:8000";
export const handlers = [
http.get(`${baseUrl}/boxes`, () => HttpResponse.json(BOX_FIXTURES)),
http.get(`${baseUrl}/health`, () => HttpResponse.json({ status: "ok" })),
+ http.get(`${baseUrl}/admin/queue/deals`, () => HttpResponse.json(queueDeals)),
];
cbe8fcf
feat(web): switch queue page to TanStack Query against /admin/queue/deals
Why: Queue page rewrite. useQueueStore (zustand) → useQuery with 2s refetchInterval; isLive + filters become local state; URL search-param (?run=<uuid>) declared via zod on the route. QueueTable takes rows+filters as props. PipelineTrail types migrated. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Files changed
| web/src/components/queue/PipelineTrail.tsx | 17 +++---- |
| web/src/components/queue/QueueTable.tsx | 76 ++++++++++++++--------------- |
| web/src/components/queue/RunDetailSheet.tsx | 3 +- |
| web/src/routes/admin.production.queue.tsx | 50 +++++++++++-------- |
Show diff
diff --git a/web/src/components/queue/PipelineTrail.tsx b/web/src/components/queue/PipelineTrail.tsx
@@ -1,10 +1,10 @@
-import { Check, Loader2, X, Minus, Circle } from "lucide-react";
+import { Check, Loader2, X, Circle } from "lucide-react";
import { cn } from "@/lib/utils";
-import type { PipelineStep, StepStatus } from "@/lib/queue-mock";
-import { STAGES } from "@/lib/queue-mock";
+import type { QueueStep, StepStatus } from "@/lib/api/adapters/queue";
+import { STAGES } from "@/lib/api/adapters/queue";
interface Props {
- steps: PipelineStep[];
+ steps: QueueStep[];
size?: "sm" | "md";
showLabels?: boolean;
selectedStepId?: string | null;
@@ -16,7 +16,6 @@ const STATUS_STYLES: Record<StepStatus, string> = {
running: "bg-primary/10 text-primary border-primary",
done: "bg-success/15 text-success border-success/40",
failed: "bg-destructive/10 text-destructive border-destructive/40",
- skipped: "bg-muted text-muted-foreground border-border opacity-60",
};
function StatusIcon({ status, className }: { status: StepStatus; className?: string }) {
@@ -27,8 +26,6 @@ function StatusIcon({ status, className }: { status: StepStatus; className?: str
return <Check className={className} />;
case "failed":
return <X className={className} />;
- case "skipped":
- return <Minus className={className} />;
default:
return <Circle className={className} />;
}
@@ -48,12 +45,12 @@ export function PipelineTrail({
<div className={cn("flex items-center", showLabels ? "gap-0" : "gap-1")}>
{steps.map((step, idx) => {
const stage = STAGES.find((s) => s.id === step.stage);
- const selected = selectedStepId === step.id;
+ const selected = selectedStepId === step.stage;
return (
- <div key={step.id} className={cn("flex items-center", showLabels && "flex-1")}>
+ <div key={step.stage} className={cn("flex items-center", showLabels && "flex-1")}>
<button
type="button"
- onClick={onSelect ? () => onSelect(step.id) : undefined}
+ onClick={onSelect ? () => onSelect(step.stage) : undefined}
disabled={!onSelect}
title={`${stage?.label ?? step.stage} — ${step.status}`}
className={cn(
diff --git a/web/src/components/queue/QueueTable.tsx b/web/src/components/queue/QueueTable.tsx
@@ -1,6 +1,6 @@
import { useMemo } from "react";
-import { useQueueStore } from "@/lib/queue-store";
-import { getRunStatus, getRunDuration, type RunStatus } from "@/lib/queue-mock";
+import { useNavigate, useSearch } from "@tanstack/react-router";
+import type { QueueRow, RunStatus, StageId } from "@/lib/api/adapters/queue";
import { PipelineTrail } from "./PipelineTrail";
import {
Table,
@@ -20,6 +20,12 @@ const STATUS_BADGE: Record<RunStatus, string> = {
failed: "bg-destructive/10 text-destructive border-destructive/40",
};
+export interface QueueFilters {
+ search: string;
+ status: "all" | RunStatus;
+ stage: "all" | StageId;
+}
+
function fmtDuration(ms: number) {
if (!ms) return "—";
const s = Math.floor(ms / 1000);
@@ -39,25 +45,23 @@ function fmtRelative(ts: number) {
return `${h}h ago`;
}
-export function QueueTable() {
- const runs = useQueueStore((s) => s.runs);
- const filters = useQueueStore((s) => s.filters);
- const selectedRunId = useQueueStore((s) => s.selectedRunId);
- const selectRun = useQueueStore((s) => s.selectRun);
+export function QueueTable({ rows, filters }: { rows: QueueRow[]; filters: QueueFilters }) {
+ const navigate = useNavigate({ from: "/admin/production/queue" });
+ const search = useSearch({ from: "/admin/production/queue" }) as { run?: string };
const filtered = useMemo(() => {
- return runs
+ return rows
.filter((r) => {
if (filters.search && !r.dealId.includes(filters.search.trim())) return false;
- if (filters.status !== "all" && getRunStatus(r) !== filters.status) return false;
+ if (filters.status !== "all" && r.status !== filters.status) return false;
if (filters.stage !== "all") {
const step = r.steps.find((s) => s.stage === filters.stage);
- if (!step || step.status === "pending" || step.status === "skipped") return false;
+ if (!step || step.status === "pending") return false;
}
return true;
})
.sort((a, b) => b.updatedAt - a.updatedAt);
- }, [runs, filters]);
+ }, [rows, filters]);
return (
<div className="rounded-lg border border-border bg-card">
@@ -79,33 +83,29 @@ export function QueueTable() {
</TableCell>
</TableRow>
)}
- {filtered.map((run) => {
- const status = getRunStatus(run);
- const dur = getRunDuration(run);
- return (
- <TableRow
- key={run.id}
- onClick={() => selectRun(run.id)}
- className={cn("cursor-pointer", selectedRunId === run.id && "bg-accent/40")}
- >
- <TableCell className="font-mono text-sm">#{run.dealId}</TableCell>
- <TableCell>
- <PipelineTrail steps={run.steps} size="sm" />
- </TableCell>
- <TableCell>
- <Badge variant="outline" className={cn("capitalize", STATUS_BADGE[status])}>
- {status}
- </Badge>
- </TableCell>
- <TableCell className="font-mono text-xs text-muted-foreground">
- {fmtDuration(dur)}
- </TableCell>
- <TableCell className="text-right pr-4 text-xs text-muted-foreground">
- {fmtRelative(run.updatedAt)}
- </TableCell>
- </TableRow>
- );
- })}
+ {filtered.map((run) => (
+ <TableRow
+ key={run.dealUuid}
+ onClick={() => navigate({ search: { run: run.dealUuid } })}
+ className={cn("cursor-pointer", search.run === run.dealUuid && "bg-accent/40")}
+ >
+ <TableCell className="font-mono text-sm">#{run.dealId}</TableCell>
+ <TableCell>
+ <PipelineTrail steps={run.steps} size="sm" />
+ </TableCell>
+ <TableCell>
+ <Badge variant="outline" className={cn("capitalize", STATUS_BADGE[run.status])}>
+ {run.status}
+ </Badge>
+ </TableCell>
+ <TableCell className="font-mono text-xs text-muted-foreground">
+ {fmtDuration(run.durationMs)}
+ </TableCell>
+ <TableCell className="text-right pr-4 text-xs text-muted-foreground">
+ {fmtRelative(run.updatedAt)}
+ </TableCell>
+ </TableRow>
+ ))}
</TableBody>
</Table>
</div>
diff --git a/web/src/components/queue/RunDetailSheet.tsx b/web/src/components/queue/RunDetailSheet.tsx
@@ -32,7 +32,8 @@ function fmtDuration(ms: number) {
return `${String(Math.floor(s / 60)).padStart(2, "0")}:${String(s % 60).padStart(2, "0")}`;
}
-export function RunDetailSheet() {
+// rows prop is accepted here but not yet consumed — Task 14 will wire it up.
+export function RunDetailSheet(_props: { rows?: unknown[] }) {
const runs = useQueueStore((s) => s.runs);
const selectedRunId = useQueueStore((s) => s.selectedRunId);
const selectedStepId = useQueueStore((s) => s.selectedStepId);
diff --git a/web/src/routes/admin.production.queue.tsx b/web/src/routes/admin.production.queue.tsx
@@ -1,8 +1,10 @@
-import { useEffect } from "react";
+import { useMemo, useState } from "react";
import { createFileRoute } from "@tanstack/react-router";
-import { useQueueStore } from "@/lib/queue-store";
-import { STAGES, type RunStatus, type StageId } from "@/lib/queue-mock";
-import { QueueTable } from "@/components/queue/QueueTable";
+import { useQuery } from "@tanstack/react-query";
+import { z } from "zod";
+import { listQueueDealsAdminQueueDealsGetOptions } from "@/lib/api/generated/@tanstack/react-query.gen";
+import { adaptQueuePage, STAGES, type RunStatus, type StageId } from "@/lib/api/adapters/queue";
+import { QueueTable, type QueueFilters } from "@/components/queue/QueueTable";
import { RunDetailSheet } from "@/components/queue/RunDetailSheet";
import { Input } from "@/components/ui/input";
import {
@@ -16,9 +18,12 @@ import { Button } from "@/components/ui/button";
import { Pause, Play, Search } from "lucide-react";
import { cn } from "@/lib/utils";
+const searchSchema = z.object({ run: z.string().uuid().optional() });
+
export const Route = createFileRoute("/admin/production/queue")({
head: () => ({ meta: [{ title: "System Queue — VCC Admin" }] }),
component: SystemQueuePage,
+ validateSearch: searchSchema,
});
const STATUSES: { id: "all" | RunStatus; label: string }[] = [
@@ -30,17 +35,20 @@ const STATUSES: { id: "all" | RunStatus; label: string }[] = [
];
function SystemQueuePage() {
- const isLive = useQueueStore((s) => s.isLive);
- const setLive = useQueueStore((s) => s.setLive);
- const tick = useQueueStore((s) => s.tick);
- const filters = useQueueStore((s) => s.filters);
- const setFilters = useQueueStore((s) => s.setFilters);
+ const [isLive, setIsLive] = useState(true);
+ const [filters, setFilters] = useState<QueueFilters>({
+ search: "",
+ status: "all",
+ stage: "all",
+ });
+
+ const { data, isFetching } = useQuery({
+ ...listQueueDealsAdminQueueDealsGetOptions({ query: { limit: 50 } }),
+ refetchInterval: isLive ? 2000 : false,
+ select: adaptQueuePage,
+ });
- useEffect(() => {
- if (!isLive) return;
- const id = setInterval(() => tick(), 2000);
- return () => clearInterval(id);
- }, [isLive, tick]);
+ const rows = data?.rows ?? [];
return (
<div className="p-6 space-y-4">
@@ -59,9 +67,9 @@ function SystemQueuePage() {
isLive ? "bg-success animate-pulse" : "bg-muted-foreground/40",
)}
/>
- {isLive ? "Live · 2s" : "Paused"}
+ {isLive ? `Live · 2s${isFetching ? " · …" : ""}` : "Paused"}
</div>
- <Button size="sm" variant="outline" onClick={() => setLive(!isLive)}>
+ <Button size="sm" variant="outline" onClick={() => setIsLive((v) => !v)}>
{isLive ? (
<Pause className="h-3.5 w-3.5 mr-1.5" />
) : (
@@ -77,14 +85,14 @@ function SystemQueuePage() {
<Search className="absolute left-2.5 top-1/2 -translate-y-1/2 h-3.5 w-3.5 text-muted-foreground" />
<Input
value={filters.search}
- onChange={(e) => setFilters({ search: e.target.value })}
+ onChange={(e) => setFilters((f) => ({ ...f, search: e.target.value }))}
placeholder="Search deal id…"
className="h-9 pl-8 w-48"
/>
</div>
<Select
value={filters.status}
- onValueChange={(v) => setFilters({ status: v as "all" | RunStatus })}
+ onValueChange={(v) => setFilters((f) => ({ ...f, status: v as "all" | RunStatus }))}
>
<SelectTrigger className="h-9 w-[150px]">
<SelectValue />
@@ -99,7 +107,7 @@ function SystemQueuePage() {
</Select>
<Select
value={filters.stage}
- onValueChange={(v) => setFilters({ stage: v as "all" | StageId })}
+ onValueChange={(v) => setFilters((f) => ({ ...f, stage: v as "all" | StageId }))}
>
<SelectTrigger className="h-9 w-[170px]">
<SelectValue />
@@ -115,8 +123,8 @@ function SystemQueuePage() {
</Select>
</div>
- <QueueTable />
- <RunDetailSheet />
+ <QueueTable rows={rows} filters={filters} />
+ <RunDetailSheet rows={rows} />
</div>
);
}
ac6ded1
feat(web): URL-driven detail sheet, single Timeline tab, drop IO/Logs/Error tabs
Why: Detail sheet rewrite. Opens/closes by URL search-param, not zustand. Re-run-from-scratch wires to the generated mutation hook. Only the Timeline tab survives (IOTab / LogsTab / ErrorTab deleted — fields they rendered aren't on the backend yet and are tracked as deferred UI capabilities).
RunDetailSheet now reads/writes ?run=<uuid> search param via TanStack Router, uses real QueueRow data, and wires Re-run to the generated enqueueProcessDeal mutation. TimelineTab rewritten against QueueStep/StageId adapter types. Dead IOTab, LogsTab, ErrorTab components deleted. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Files changed
| web/src/components/queue/RunDetailSheet.tsx | 107 +++++++++++--------------- |
| web/src/components/queue/tabs/ErrorTab.tsx | 43 ----------- |
| web/src/components/queue/tabs/IOTab.tsx | 54 ------------- |
| web/src/components/queue/tabs/LogsTab.tsx | 82 -------------------- |
| web/src/components/queue/tabs/TimelineTab.tsx | 87 +++++++++++---------- |
Show diff
diff --git a/web/src/components/queue/RunDetailSheet.tsx b/web/src/components/queue/RunDetailSheet.tsx
@@ -1,6 +1,8 @@
-import { useMemo } from "react";
-import { useQueueStore } from "@/lib/queue-store";
-import { getRunStatus, getRunDuration, type RunStatus } from "@/lib/queue-mock";
+import { useMemo, useState } from "react";
+import { useNavigate, useSearch } from "@tanstack/react-router";
+import { useMutation, useQueryClient } from "@tanstack/react-query";
+import { enqueueProcessDealAdminDealsDealIdProcessPostMutation } from "@/lib/api/generated/@tanstack/react-query.gen";
+import type { QueueRow, QueueStep, RunStatus, StageId } from "@/lib/api/adapters/queue";
import {
Sheet,
SheetContent,
@@ -8,15 +10,11 @@ import {
SheetTitle,
SheetDescription,
} from "@/components/ui/sheet";
-import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
-import { RotateCcw, RefreshCw } from "lucide-react";
+import { RefreshCw } from "lucide-react";
import { PipelineTrail } from "./PipelineTrail";
import { TimelineTab } from "./tabs/TimelineTab";
-import { IOTab } from "./tabs/IOTab";
-import { LogsTab } from "./tabs/LogsTab";
-import { ErrorTab } from "./tabs/ErrorTab";
import { cn } from "@/lib/utils";
const STATUS_BADGE: Record<RunStatus, string> = {
@@ -32,30 +30,36 @@ function fmtDuration(ms: number) {
return `${String(Math.floor(s / 60)).padStart(2, "0")}:${String(s % 60).padStart(2, "0")}`;
}
-// rows prop is accepted here but not yet consumed — Task 14 will wire it up.
-export function RunDetailSheet(_props: { rows?: unknown[] }) {
- const runs = useQueueStore((s) => s.runs);
- const selectedRunId = useQueueStore((s) => s.selectedRunId);
- const selectedStepId = useQueueStore((s) => s.selectedStepId);
- const selectRun = useQueueStore((s) => s.selectRun);
- const selectStep = useQueueStore((s) => s.selectStep);
- const retryStep = useQueueStore((s) => s.retryStep);
- const rerunFromScratch = useQueueStore((s) => s.rerunFromScratch);
+export function RunDetailSheet({ rows }: { rows: QueueRow[] }) {
+ const navigate = useNavigate({ from: "/admin/production/queue" });
+ const search = useSearch({ from: "/admin/production/queue" });
+ const queryClient = useQueryClient();
const run = useMemo(
- () => runs.find((r) => r.id === selectedRunId) ?? null,
- [runs, selectedRunId],
- );
- const step = useMemo(
- () => run?.steps.find((s) => s.id === selectedStepId) ?? run?.steps[0] ?? null,
- [run, selectedStepId],
+ () => rows.find((r) => r.dealUuid === search.run) ?? null,
+ [rows, search.run],
);
+ const [selectedStepStage, setSelectedStepStage] = useState<StageId | null>(null);
+ const step: QueueStep | null = run
+ ? (run.steps.find((s) => s.stage === selectedStepStage) ?? run.steps[0] ?? null)
+ : null;
+
+ const reRun = useMutation({
+ ...enqueueProcessDealAdminDealsDealIdProcessPostMutation(),
+ onSuccess: () => {
+ queryClient.invalidateQueries({ queryKey: ["listQueueDeals"] });
+ },
+ });
const open = !!run;
- const status = run ? getRunStatus(run) : "pending";
return (
- <Sheet open={open} onOpenChange={(o) => !o && selectRun(null)}>
+ <Sheet
+ open={open}
+ onOpenChange={(o) => {
+ if (!o) navigate({ search: { run: undefined } });
+ }}
+ >
<SheetContent
side="right"
className="w-full sm:max-w-xl flex flex-col gap-0 p-0 sm:max-w-[560px]"
@@ -67,11 +71,11 @@ export function RunDetailSheet(_props: { rows?: unknown[] }) {
<div className="min-w-0">
<SheetTitle className="font-mono text-base">Deal #{run.dealId}</SheetTitle>
<SheetDescription className="flex items-center gap-2 mt-1">
- <Badge variant="outline" className={cn("capitalize", STATUS_BADGE[status])}>
- {status}
+ <Badge variant="outline" className={cn("capitalize", STATUS_BADGE[run.status])}>
+ {run.status}
</Badge>
<span className="text-xs text-muted-foreground font-mono">
- duration {fmtDuration(getRunDuration(run))}
+ duration {fmtDuration(run.durationMs)}
</span>
</SheetDescription>
</div>
@@ -82,8 +86,8 @@ export function RunDetailSheet(_props: { rows?: unknown[] }) {
steps={run.steps}
size="md"
showLabels
- selectedStepId={step.id}
- onSelect={(id) => selectStep(id)}
+ selectedStepId={step.stage}
+ onSelect={(id) => setSelectedStepStage(id as StageId)}
/>
</div>
@@ -91,45 +95,22 @@ export function RunDetailSheet(_props: { rows?: unknown[] }) {
<Button
size="sm"
variant="outline"
- onClick={() => retryStep(run.id, step.id)}
- disabled={step.status === "running"}
+ disabled={reRun.isPending}
+ onClick={() => reRun.mutate({ path: { deal_id: run.dealUuid } })}
>
- <RotateCcw className="h-3.5 w-3.5 mr-1.5" />
- Retry step
- </Button>
- <Button size="sm" variant="outline" onClick={() => rerunFromScratch(run.id)}>
<RefreshCw className="h-3.5 w-3.5 mr-1.5" />
- Re-run from scratch
+ {reRun.isPending ? "Enqueuing…" : "Re-run from scratch"}
</Button>
</div>
</SheetHeader>
- <Tabs defaultValue="timeline" className="flex-1 flex flex-col min-h-0">
- <div className="px-6 pt-3 border-b border-border">
- <TabsList>
- <TabsTrigger value="timeline">Timeline</TabsTrigger>
- <TabsTrigger value="io">Inputs & Outputs</TabsTrigger>
- <TabsTrigger value="logs">Logs</TabsTrigger>
- <TabsTrigger value="error" disabled={step.status !== "failed"}>
- Error
- </TabsTrigger>
- </TabsList>
- </div>
- <div className="flex-1 overflow-hidden">
- <TabsContent value="timeline" className="h-full overflow-y-auto px-6 py-4 m-0">
- <TimelineTab steps={run.steps} selectedStepId={step.id} onSelect={selectStep} />
- </TabsContent>
- <TabsContent value="io" className="h-full overflow-y-auto px-6 py-4 m-0">
- <IOTab run={run} step={step} />
- </TabsContent>
- <TabsContent value="logs" className="h-full px-6 py-4 m-0 flex flex-col">
- <LogsTab step={step} />
- </TabsContent>
- <TabsContent value="error" className="h-full overflow-y-auto px-6 py-4 m-0">
- <ErrorTab step={step} />
- </TabsContent>
- </div>
- </Tabs>
+ <div className="flex-1 overflow-y-auto px-6 py-4">
+ <TimelineTab
+ steps={run.steps}
+ selectedStepId={step.stage}
+ onSelect={setSelectedStepStage}
+ />
+ </div>
</>
)}
</SheetContent>
diff --git a/web/src/components/queue/tabs/ErrorTab.tsx b/web/src/components/queue/tabs/ErrorTab.tsx
deleted file mode 100644
@@ -1,43 +0,0 @@
-import type { PipelineStep } from "@/lib/queue-mock";
-import { AlertCircle } from "lucide-react";
-
-interface Props {
- step: PipelineStep;
-}
-
-export function ErrorTab({ step }: Props) {
- if (step.status !== "failed") {
- return (
- <div className="rounded-md border border-dashed border-border bg-card p-8 text-center text-sm text-muted-foreground">
- This step has no error.
- </div>
- );
- }
- return (
- <div className="space-y-4">
- <div className="flex items-start gap-3 rounded-md border border-destructive/30 bg-destructive/5 p-3">
- <AlertCircle className="h-5 w-5 shrink-0 text-destructive" />
- <div className="min-w-0">
- <div className="text-sm font-medium text-destructive">{step.errorMessage}</div>
- <div className="mt-1 flex flex-wrap gap-x-3 gap-y-0.5 text-[11px] font-mono text-muted-foreground">
- <span>
- model: {step.model}@{step.modelVersion}
- </span>
- <span>retries: {step.retries}</span>
- {step.endedAt && <span>at {new Date(step.endedAt).toLocaleString()}</span>}
- </div>
- </div>
- </div>
- {step.errorStack && (
- <section>
- <h4 className="text-xs font-semibold text-muted-foreground uppercase tracking-wide mb-2">
- Stack
- </h4>
- <pre className="rounded-md border border-border bg-card p-3 overflow-x-auto text-[11px] leading-snug font-mono">
- {step.errorStack}
- </pre>
- </section>
- )}
- </div>
- );
-}
diff --git a/web/src/components/queue/tabs/IOTab.tsx b/web/src/components/queue/tabs/IOTab.tsx
deleted file mode 100644
@@ -1,54 +0,0 @@
-import type { PipelineStep, PipelineRun } from "@/lib/queue-mock";
-
-interface Props {
- run: PipelineRun;
- step: PipelineStep;
-}
-
-export function IOTab({ run, step }: Props) {
- return (
- <div className="space-y-4">
- <section>
- <h4 className="text-xs font-semibold text-muted-foreground uppercase tracking-wide mb-2">
- Input
- </h4>
- <div className="rounded-md border border-border bg-card p-3">
- <div className="flex gap-3">
- <img
- src={run.imageUrl}
- alt={`Deal ${run.dealId} input`}
- className="h-20 w-20 rounded object-cover bg-muted"
- loading="lazy"
- />
- <div className="flex-1 min-w-0 text-xs">
- <div className="font-mono text-muted-foreground">deal #{run.dealId}</div>
- <div className="mt-1 font-mono text-muted-foreground">
- model: {step.model}@{step.modelVersion}
- </div>
- {step.params && (
- <pre className="mt-2 overflow-x-auto rounded bg-muted/60 p-2 text-[11px] leading-snug">
- {JSON.stringify(step.params, null, 2)}
- </pre>
- )}
- </div>
- </div>
- </div>
- </section>
-
- <section>
- <h4 className="text-xs font-semibold text-muted-foreground uppercase tracking-wide mb-2">
- Output
- </h4>
- {step.output ? (
- <pre className="rounded-md border border-border bg-card p-3 overflow-x-auto text-[11px] leading-snug font-mono">
- {JSON.stringify(step.output, null, 2)}
- </pre>
- ) : (
- <div className="rounded-md border border-dashed border-border bg-card p-6 text-center text-xs text-muted-foreground">
- {step.status === "running" ? "Step is still running…" : "No output yet."}
- </div>
- )}
- </section>
- </div>
- );
-}
diff --git a/web/src/components/queue/tabs/LogsTab.tsx b/web/src/components/queue/tabs/LogsTab.tsx
deleted file mode 100644
@@ -1,82 +0,0 @@
-import { useEffect, useMemo, useRef, useState } from "react";
-import type { LogSeverity, PipelineStep } from "@/lib/queue-mock";
-import { ScrollArea } from "@/components/ui/scroll-area";
-import { Button } from "@/components/ui/button";
-import { cn } from "@/lib/utils";
-
-interface Props {
- step: PipelineStep;
-}
-
-const SEVERITIES: { id: LogSeverity | "all"; label: string }[] = [
- { id: "all", label: "All" },
- { id: "info", label: "Info" },
- { id: "warn", label: "Warn" },
- { id: "error", label: "Error" },
-];
-
-const SEV_COLOR: Record<LogSeverity, string> = {
- info: "text-muted-foreground",
- warn: "text-warning",
- error: "text-destructive",
-};
-
-export function LogsTab({ step }: Props) {
- const [filter, setFilter] = useState<LogSeverity | "all">("all");
- const [autoScroll, setAutoScroll] = useState(true);
- const endRef = useRef<HTMLDivElement>(null);
-
- const lines = useMemo(
- () => (filter === "all" ? step.logs : step.logs.filter((l) => l.severity === filter)),
- [step.logs, filter],
- );
-
- useEffect(() => {
- if (autoScroll) endRef.current?.scrollIntoView({ behavior: "smooth", block: "end" });
- }, [lines.length, autoScroll]);
-
- return (
- <div className="flex flex-col gap-3 h-full">
- <div className="flex items-center justify-between gap-2">
- <div className="flex gap-1">
- {SEVERITIES.map((s) => (
- <Button
- key={s.id}
- size="sm"
- variant={filter === s.id ? "secondary" : "ghost"}
- className="h-7 text-xs"
- onClick={() => setFilter(s.id)}
- >
- {s.label}
- </Button>
- ))}
- </div>
- <Button
- size="sm"
- variant={autoScroll ? "secondary" : "ghost"}
- className="h-7 text-xs"
- onClick={() => setAutoScroll((v) => !v)}
- >
- Auto-scroll
- </Button>
- </div>
- <ScrollArea className="flex-1 rounded-md border border-border bg-card">
- <div className="p-3 font-mono text-[11px] leading-relaxed">
- {lines.length === 0 && (
- <div className="text-center text-muted-foreground py-6">No log lines.</div>
- )}
- {lines.map((l) => (
- <div key={l.id} className={cn("flex gap-2", SEV_COLOR[l.severity])}>
- <span className="shrink-0 text-muted-foreground">
- {new Date(l.ts).toLocaleTimeString()}
- </span>
- <span className="shrink-0 uppercase">{l.severity}</span>
- <span className="break-all">{l.message}</span>
- </div>
- ))}
- <div ref={endRef} />
- </div>
- </ScrollArea>
- </div>
- );
-}
diff --git a/web/src/components/queue/tabs/TimelineTab.tsx b/web/src/components/queue/tabs/TimelineTab.tsx
@@ -1,12 +1,12 @@
-import type { PipelineStep } from "@/lib/queue-mock";
-import { STAGES } from "@/lib/queue-mock";
+import type { QueueStep, StageId } from "@/lib/api/adapters/queue";
+import { STAGES } from "@/lib/api/adapters/queue";
import { cn } from "@/lib/utils";
import { Badge } from "@/components/ui/badge";
interface Props {
- steps: PipelineStep[];
+ steps: QueueStep[];
selectedStepId: string | null;
- onSelect: (id: string) => void;
+ onSelect: (stage: StageId) => void;
}
function fmtTime(ts?: number) {
@@ -21,54 +21,57 @@ function fmtDur(start?: number, end?: number) {
return `${s}s`;
}
-const STATUS_DOT: Record<string, string> = {
- pending: "bg-muted-foreground/40",
- running: "bg-primary animate-pulse",
- done: "bg-success",
- failed: "bg-destructive",
- skipped: "bg-muted-foreground/30",
+const STATUS_TONE: Record<QueueStep["status"], string> = {
+ pending: "bg-muted text-muted-foreground",
+ running: "bg-primary/10 text-primary border-primary/30",
+ done: "bg-success/15 text-success border-success/40",
+ failed: "bg-destructive/10 text-destructive border-destructive/40",
};
export function TimelineTab({ steps, selectedStepId, onSelect }: Props) {
+ const ordered = STAGES.map((s) => steps.find((step) => step.stage === s.id)).filter(
+ (x): x is QueueStep => x !== undefined,
+ );
return (
- <div className="space-y-1">
- {steps.map((step) => {
- const stage = STAGES.find((s) => s.id === step.stage);
- const selected = selectedStepId === step.id;
+ <ol className="space-y-2 relative pl-4">
+ <span className="absolute left-1.5 top-1 bottom-1 w-px bg-border" />
+ {ordered.map((step) => {
+ const selected = selectedStepId === step.stage;
return (
- <button
- key={step.id}
- type="button"
- onClick={() => onSelect(step.id)}
+ <li
+ key={step.stage}
+ onClick={() => onSelect(step.stage)}
className={cn(
- "w-full text-left rounded-md border border-transparent p-3 transition-colors",
- "hover:bg-accent/40",
- selected && "bg-accent/60 border-border",
+ "relative rounded-md border border-border bg-card p-3 cursor-pointer transition-colors",
+ selected && "border-primary/40 ring-1 ring-primary/30",
)}
>
- <div className="flex items-center gap-3">
- <span className={cn("h-2.5 w-2.5 rounded-full", STATUS_DOT[step.status])} />
- <div className="flex-1 min-w-0">
- <div className="flex items-center justify-between gap-2">
- <span className="text-sm font-medium">{stage?.label}</span>
- <Badge variant="outline" className="text-[10px] capitalize">
- {step.status}
- </Badge>
- </div>
- <div className="mt-0.5 flex flex-wrap items-center gap-x-3 gap-y-0.5 text-[11px] text-muted-foreground font-mono">
- <span>
- {step.model}@{step.modelVersion}
- </span>
- <span>start {fmtTime(step.startedAt)}</span>
- <span>end {fmtTime(step.endedAt)}</span>
- <span>dur {fmtDur(step.startedAt, step.endedAt)}</span>
- {step.retries > 0 && <span className="text-warning">retries {step.retries}</span>}
- </div>
- </div>
+ <span
+ className={cn(
+ "absolute -left-2.5 top-3 w-3 h-3 rounded-full border-2 border-card",
+ step.status === "failed" && "bg-destructive",
+ step.status === "running" && "bg-primary animate-pulse",
+ step.status === "done" && "bg-success",
+ step.status === "pending" && "bg-muted-foreground/40",
+ )}
+ />
+ <div className="flex items-center justify-between gap-2">
+ <div className="font-medium text-sm capitalize">{step.stage}</div>
+ <Badge variant="outline" className={cn("capitalize", STATUS_TONE[step.status])}>
+ {step.status}
+ </Badge>
+ </div>
+ <div className="mt-1 flex flex-wrap gap-x-3 gap-y-0.5 text-[11px] font-mono text-muted-foreground">
+ <span>start: {fmtTime(step.startedAt)}</span>
+ <span>end: {fmtTime(step.endedAt)}</span>
+ <span>dur: {fmtDur(step.startedAt, step.endedAt)}</span>
</div>
- </button>
+ {step.errorMessage && (
+ <p className="mt-2 text-xs text-destructive">{step.errorMessage}</p>
+ )}
+ </li>
);
})}
- </div>
+ </ol>
);
}
b4189c0
chore(web): drop queue-mock.ts + queue-store.ts (replaced by generated types + adapter)
Why: Final cleanup of the pre-codegen types.
queue-store.ts + queue-mock.ts deleted (504 lines gone); prettier-formatted the adapter and its test. The type-source policy is now fully enforced: every type comes from the generated types or the adapter. Also fixes pre-existing prettier formatting nits in queue.ts / queue.test.ts. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
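For illustration, a minimal sketch of what the enforced policy looks like from a consumer's side: all queue types and helpers come from the adapter (which wraps the generated OpenAPI types), never from a hand-written mock module. The summarizeQueue helper below is hypothetical; only the adapter imports mirror the repo.

```ts
// Hypothetical consumer sketch, assuming only the adapter's public exports.
import { adaptQueuePage, STAGES, type QueueRow } from "@/lib/api/adapters/queue";

// Derive the generated payload type from the adapter signature instead of
// hard-coding the generated types' import path.
type DealQueuePage = Parameters<typeof adaptQueuePage>[0];

export function summarizeQueue(page: DealQueuePage): string[] {
  const { rows } = adaptQueuePage(page);
  return rows.map(
    (row: QueueRow) =>
      `#${row.dealId} ${row.status} (${row.steps.length}/${STAGES.length} stages)`,
  );
}
```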
Files changed
| web/src/lib/api/adapters/queue.test.ts | 156 ++++++++++++++-------- |
| web/src/lib/api/adapters/queue.ts | 12 +- |
| web/src/lib/queue-mock.ts | 212 ------------------------------ |
| web/src/lib/queue-store.ts | 229 --------------------------------- |
Show diff
diff --git a/web/src/lib/api/adapters/queue.test.ts b/web/src/lib/api/adapters/queue.test.ts
@@ -11,9 +11,11 @@ const baseRow = {
updated_at: "2026-05-13T10:05:00Z",
};
-function step(stage: "intake" | "segmentation" | "categorization" | "appraisal",
- status: "pending" | "running" | "succeeded" | "failed",
- opts: { startedAt?: string; endedAt?: string; error?: string } = {}) {
+function step(
+ stage: "intake" | "segmentation" | "categorization" | "appraisal",
+ status: "pending" | "running" | "succeeded" | "failed",
+ opts: { startedAt?: string; endedAt?: string; error?: string } = {},
+) {
return {
stage,
status,
@@ -27,15 +29,17 @@ function step(stage: "intake" | "segmentation" | "categorization" | "appraisal",
describe("adaptQueuePage", () => {
it("maps hubspot id to display and UUID to action id", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "pending"),
- step("segmentation", "pending"),
- step("categorization", "pending"),
- step("appraisal", "pending"),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "pending"),
+ step("segmentation", "pending"),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ },
+ ],
next_before: null,
};
const { rows } = adaptQueuePage(page);
@@ -45,15 +49,17 @@ describe("adaptQueuePage", () => {
it("synthesizes row.status = pending when every step is pending", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "pending"),
- step("segmentation", "pending"),
- step("categorization", "pending"),
- step("appraisal", "pending"),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "pending"),
+ step("segmentation", "pending"),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ },
+ ],
next_before: null,
};
expect(adaptQueuePage(page).rows[0].status).toBe("pending");
@@ -61,15 +67,20 @@ describe("adaptQueuePage", () => {
it("synthesizes row.status = running when any step is running", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
- step("segmentation", "running", { startedAt: "2026-05-13T10:01:00Z" }),
- step("categorization", "pending"),
- step("appraisal", "pending"),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", {
+ startedAt: "2026-05-13T10:00:00Z",
+ endedAt: "2026-05-13T10:01:00Z",
+ }),
+ step("segmentation", "running", { startedAt: "2026-05-13T10:01:00Z" }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ },
+ ],
next_before: null,
};
expect(adaptQueuePage(page).rows[0].status).toBe("running");
@@ -77,15 +88,24 @@ describe("adaptQueuePage", () => {
it("synthesizes row.status = failed when any step is failed", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
- step("segmentation", "failed", { startedAt: "2026-05-13T10:01:00Z", endedAt: "2026-05-13T10:02:00Z", error: "boom" }),
- step("categorization", "pending"),
- step("appraisal", "pending"),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", {
+ startedAt: "2026-05-13T10:00:00Z",
+ endedAt: "2026-05-13T10:01:00Z",
+ }),
+ step("segmentation", "failed", {
+ startedAt: "2026-05-13T10:01:00Z",
+ endedAt: "2026-05-13T10:02:00Z",
+ error: "boom",
+ }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ },
+ ],
next_before: null,
};
expect(adaptQueuePage(page).rows[0].status).toBe("failed");
@@ -93,15 +113,29 @@ describe("adaptQueuePage", () => {
it("synthesizes row.status = done when all steps succeeded", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:01:00Z" }),
- step("segmentation", "succeeded", { startedAt: "2026-05-13T10:01:00Z", endedAt: "2026-05-13T10:02:00Z" }),
- step("categorization", "succeeded", { startedAt: "2026-05-13T10:02:00Z", endedAt: "2026-05-13T10:03:00Z" }),
- step("appraisal", "succeeded", { startedAt: "2026-05-13T10:03:00Z", endedAt: "2026-05-13T10:04:00Z" }),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", {
+ startedAt: "2026-05-13T10:00:00Z",
+ endedAt: "2026-05-13T10:01:00Z",
+ }),
+ step("segmentation", "succeeded", {
+ startedAt: "2026-05-13T10:01:00Z",
+ endedAt: "2026-05-13T10:02:00Z",
+ }),
+ step("categorization", "succeeded", {
+ startedAt: "2026-05-13T10:02:00Z",
+ endedAt: "2026-05-13T10:03:00Z",
+ }),
+ step("appraisal", "succeeded", {
+ startedAt: "2026-05-13T10:03:00Z",
+ endedAt: "2026-05-13T10:04:00Z",
+ }),
+ ],
+ },
+ ],
next_before: null,
};
expect(adaptQueuePage(page).rows[0].status).toBe("done");
@@ -109,15 +143,23 @@ describe("adaptQueuePage", () => {
it("converts ISO timestamps to epoch ms and computes durationMs", () => {
const page: DealQueuePage = {
- items: [{
- ...baseRow,
- steps: [
- step("intake", "succeeded", { startedAt: "2026-05-13T10:00:00Z", endedAt: "2026-05-13T10:00:30Z" }),
- step("segmentation", "succeeded", { startedAt: "2026-05-13T10:00:30Z", endedAt: "2026-05-13T10:01:00Z" }),
- step("categorization", "pending"),
- step("appraisal", "pending"),
- ],
- }],
+ items: [
+ {
+ ...baseRow,
+ steps: [
+ step("intake", "succeeded", {
+ startedAt: "2026-05-13T10:00:00Z",
+ endedAt: "2026-05-13T10:00:30Z",
+ }),
+ step("segmentation", "succeeded", {
+ startedAt: "2026-05-13T10:00:30Z",
+ endedAt: "2026-05-13T10:01:00Z",
+ }),
+ step("categorization", "pending"),
+ step("appraisal", "pending"),
+ ],
+ },
+ ],
next_before: null,
};
const { rows } = adaptQueuePage(page);
diff --git a/web/src/lib/api/adapters/queue.ts b/web/src/lib/api/adapters/queue.ts
@@ -14,8 +14,8 @@ export interface QueueStep {
}
export interface QueueRow {
- dealId: string; // hubspot_deal_id, display
- dealUuid: string; // backend UUID, for actions
+ dealId: string; // hubspot_deal_id, display
+ dealUuid: string; // backend UUID, for actions
createdAt: number;
updatedAt: number;
steps: QueueStep[];
@@ -24,10 +24,10 @@ export interface QueueRow {
}
export const STAGES: { id: StageId; label: string }[] = [
- { id: "intake", label: "Intake" },
- { id: "segmentation", label: "Segmentation" },
- { id: "categorization", label: "Categorization" },
- { id: "appraisal", label: "Appraisal" },
+ { id: "intake", label: "Intake" },
+ { id: "segmentation", label: "Segmentation" },
+ { id: "categorization", label: "Categorization" },
+ { id: "appraisal", label: "Appraisal" },
];
export function adaptQueuePage(page: DealQueuePage): {
diff --git a/web/src/lib/queue-mock.ts b/web/src/lib/queue-mock.ts
deleted file mode 100644
@@ -1,212 +0,0 @@
-export type StageId = "intake" | "segmentation" | "categorization" | "appraisal";
-
-export const STAGES: { id: StageId; label: string }[] = [
- { id: "intake", label: "Intake" },
- { id: "segmentation", label: "Segmentation" },
- { id: "categorization", label: "Categorization" },
- { id: "appraisal", label: "Appraisal" },
-];
-
-export type StepStatus = "pending" | "running" | "done" | "failed" | "skipped";
-export type LogSeverity = "info" | "warn" | "error";
-
-export interface LogLine {
- id: string;
- ts: number;
- severity: LogSeverity;
- message: string;
-}
-
-export interface PipelineStep {
- id: string;
- stage: StageId;
- status: StepStatus;
- model?: string;
- modelVersion?: string;
- startedAt?: number;
- endedAt?: number;
- retries: number;
- params?: Record<string, unknown>;
- output?: unknown;
- errorMessage?: string;
- errorStack?: string;
- logs: LogLine[];
-}
-
-export interface PipelineRun {
- id: string;
- dealId: string;
- imageUrl: string;
- createdAt: number;
- updatedAt: number;
- steps: PipelineStep[];
-}
-
-export type RunStatus = "pending" | "running" | "done" | "failed";
-
-export function getRunStatus(run: PipelineRun): RunStatus {
- if (run.steps.some((s) => s.status === "failed")) return "failed";
- if (run.steps.some((s) => s.status === "running")) return "running";
- if (run.steps.every((s) => s.status === "done" || s.status === "skipped")) return "done";
- return "pending";
-}
-
-export function getRunDuration(run: PipelineRun): number {
- const starts = run.steps.map((s) => s.startedAt).filter((v): v is number => !!v);
- const ends = run.steps.map((s) => s.endedAt).filter((v): v is number => !!v);
- if (!starts.length) return 0;
- const start = Math.min(...starts);
- const end = ends.length === run.steps.length ? Math.max(...ends) : Date.now();
- return end - start;
-}
-
-const MODEL_BY_STAGE: Record<StageId, { model: string; version: string }> = {
- intake: { model: "intake-router", version: "1.2.0" },
- segmentation: { model: "SAM3", version: "0.4.1" },
- categorization: { model: "vcc-classifier", version: "2.1.0" },
- appraisal: { model: "vcc-appraiser", version: "1.5.3" },
-};
-
-const SAMPLE_LOGS: Record<StageId, string[]> = {
- intake: [
- "Received deal payload",
- "Validated 6 images",
- "Stored to /deals bucket",
- "Enqueued segmentation job",
- ],
- segmentation: [
- "Loading SAM3 weights",
- "Running mask generation (mode=auto)",
- "Detected 12 candidate masks",
- "Filtering by min-area=2400px",
- "Emitting 8 object crops",
- ],
- categorization: [
- "Embedding 8 crops",
- "Top-1 = jewelry/watch (0.91)",
- "Top-1 = ceramics/figurine (0.78)",
- "Persisted category assignments",
- ],
- appraisal: [
- "Pricing 8 items",
- "Comp lookup hit-rate 75%",
- "Applied minimum-price guideline to 1 item",
- "Generated valuation report",
- ],
-};
-
-let _id = 0;
-const uid = (p: string) => `${p}_${++_id}_${Math.random().toString(36).slice(2, 7)}`;
-
-function makeLogs(stage: StageId, count: number, baseTs: number): LogLine[] {
- const pool = SAMPLE_LOGS[stage];
- return Array.from({ length: count }).map((_, i) => ({
- id: uid("log"),
- ts: baseTs + i * 800,
- severity: "info" as LogSeverity,
- message: pool[i % pool.length],
- }));
-}
-
-function makeStep(stage: StageId, status: StepStatus, baseTs: number): PipelineStep {
- const meta = MODEL_BY_STAGE[stage];
- const dur = 4000 + Math.floor(Math.random() * 12000);
- const startedAt = status === "pending" ? undefined : baseTs;
- const endedAt =
- status === "done" || status === "failed" || status === "skipped" ? baseTs + dur : undefined;
-
- const step: PipelineStep = {
- id: uid("step"),
- stage,
- status,
- model: meta.model,
- modelVersion: meta.version,
- startedAt,
- endedAt,
- retries: status === "failed" ? 1 : 0,
- params: stage === "segmentation" ? { mode: "auto", minArea: 2400 } : undefined,
- output:
- status === "done"
- ? stage === "segmentation"
- ? { masks: 8, durationMs: dur }
- : stage === "categorization"
- ? { categories: ["jewelry/watch", "ceramics/figurine"] }
- : stage === "appraisal"
- ? { totalValueGbp: 184.5, items: 8 }
- : { ok: true }
- : undefined,
- logs: status === "pending" ? [] : makeLogs(stage, status === "running" ? 2 : 4, baseTs),
- };
-
- if (status === "failed") {
- step.errorMessage = "Upstream model returned 503 after 3 retries";
- step.errorStack =
- "Error: Upstream 503\n at fetchModel (segmenter.ts:142)\n at runStep (pipeline.ts:88)\n at processRun (worker.ts:31)";
- step.logs.push({
- id: uid("log"),
- ts: (endedAt ?? baseTs) - 100,
- severity: "error",
- message: "Upstream model returned 503 after 3 retries",
- });
- }
-
- return step;
-}
-
-const SEED_IMAGES = [
- "https://images.unsplash.com/photo-1513519245088-0e12902e5a38?w=400&q=70",
- "https://images.unsplash.com/photo-1519748771451-a94c596fad67?w=400&q=70",
- "https://images.unsplash.com/photo-1483985988355-763728e1935b?w=400&q=70",
- "https://images.unsplash.com/photo-1519681393784-d120267933ba?w=400&q=70",
- "https://images.unsplash.com/photo-1452860606245-08befc0ff44b?w=400&q=70",
-];
-
-interface RunBlueprint {
- dealId: string;
- ageMin: number; // minutes ago started
- stageStatuses: StepStatus[]; // length 4
-}
-
-const BLUEPRINTS: RunBlueprint[] = [
- { dealId: "10293", ageMin: 0, stageStatuses: ["done", "done", "running", "pending"] },
- { dealId: "10292", ageMin: 1, stageStatuses: ["done", "running", "pending", "pending"] },
- { dealId: "10291", ageMin: 2, stageStatuses: ["done", "done", "done", "running"] },
- { dealId: "10290", ageMin: 4, stageStatuses: ["done", "done", "failed", "pending"] },
- { dealId: "10289", ageMin: 6, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10288", ageMin: 8, stageStatuses: ["done", "failed", "pending", "pending"] },
- { dealId: "10287", ageMin: 12, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10286", ageMin: 14, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10285", ageMin: 18, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10284", ageMin: 22, stageStatuses: ["done", "done", "done", "failed"] },
- { dealId: "10283", ageMin: 26, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10282", ageMin: 30, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10281", ageMin: 35, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10280", ageMin: 40, stageStatuses: ["done", "done", "done", "done"] },
- { dealId: "10279", ageMin: 45, stageStatuses: ["done", "done", "done", "done"] },
-];
-
-export function createSeedRuns(): PipelineRun[] {
- const now = Date.now();
- return BLUEPRINTS.map((bp, i) => {
- const startBase = now - bp.ageMin * 60_000;
- let cursor = startBase;
- const steps: PipelineStep[] = STAGES.map((s, idx) => {
- const status = bp.stageStatuses[idx];
- const step = makeStep(s.id, status, cursor);
- if (step.endedAt) cursor = step.endedAt + 200;
- else if (step.startedAt) cursor = step.startedAt;
- return step;
- });
-
- return {
- id: uid("run"),
- dealId: bp.dealId,
- imageUrl: SEED_IMAGES[i % SEED_IMAGES.length],
- createdAt: startBase,
- updatedAt: now - Math.floor(Math.random() * 30_000),
- steps,
- };
- });
-}
-
-export { uid };
diff --git a/web/src/lib/queue-store.ts b/web/src/lib/queue-store.ts
deleted file mode 100644
@@ -1,229 +0,0 @@
-import { create } from "zustand";
-import {
- createSeedRuns,
- getRunStatus,
- STAGES,
- uid,
- type LogSeverity,
- type PipelineRun,
- type PipelineStep,
- type RunStatus,
- type StageId,
- type StepStatus,
-} from "./queue-mock";
-
-const SAMPLE_RUNNING_LOGS = [
- "Heartbeat ok",
- "Processing batch 3/8",
- "Embedding crop #5",
- "Comp lookup: 4 hits",
- "Streaming partial result",
- "Checkpoint saved",
-];
-
-interface Filters {
- status: "all" | RunStatus;
- stage: "all" | StageId;
- search: string;
-}
-
-interface QueueState {
- runs: PipelineRun[];
- selectedRunId: string | null;
- selectedStepId: string | null;
- isLive: boolean;
- filters: Filters;
-
- tick: () => void;
- selectRun: (runId: string | null) => void;
- selectStep: (stepId: string | null) => void;
- setLive: (v: boolean) => void;
- setFilters: (patch: Partial<Filters>) => void;
- retryStep: (runId: string, stepId: string) => void;
- rerunFromScratch: (runId: string) => void;
-}
-
-function appendLog(step: PipelineStep, severity: LogSeverity, message: string): PipelineStep {
- return {
- ...step,
- logs: [...step.logs, { id: uid("log"), ts: Date.now(), severity, message }],
- };
-}
-
-function advanceRun(run: PipelineRun): PipelineRun {
- // Stop if any failed
- if (run.steps.some((s) => s.status === "failed")) return run;
-
- const runningIdx = run.steps.findIndex((s) => s.status === "running");
- let changed = false;
- const steps = [...run.steps];
-
- if (runningIdx >= 0) {
- const step = steps[runningIdx];
- // ~20% chance to complete each tick, 4% to fail
- const r = Math.random();
- if (r < 0.04) {
- steps[runningIdx] = {
- ...appendLog(step, "error", "Model returned 503 — giving up after 3 retries"),
- status: "failed",
- endedAt: Date.now(),
- retries: step.retries + 1,
- errorMessage: "Upstream model returned 503 after 3 retries",
- errorStack:
- "Error: Upstream 503\n at fetchModel (segmenter.ts:142)\n at runStep (pipeline.ts:88)",
- };
- changed = true;
- } else if (r < 0.24) {
- steps[runningIdx] = {
- ...appendLog(step, "info", "Step complete"),
- status: "done",
- endedAt: Date.now(),
- output:
- step.stage === "segmentation"
- ? { masks: 6 + Math.floor(Math.random() * 6) }
- : step.stage === "categorization"
- ? { categories: ["jewelry/watch", "ceramics/figurine"] }
- : step.stage === "appraisal"
- ? { totalValueGbp: Math.round(Math.random() * 300 * 100) / 100 }
- : { ok: true },
- };
- // Start next pending
- const nextIdx = steps.findIndex((s, i) => i > runningIdx && s.status === "pending");
- if (nextIdx >= 0) {
- steps[nextIdx] = {
- ...steps[nextIdx],
- status: "running",
- startedAt: Date.now(),
- logs: [{ id: uid("log"), ts: Date.now(), severity: "info", message: "Starting step" }],
- };
- }
- changed = true;
- } else {
- // Append a heartbeat log
- const msg = SAMPLE_RUNNING_LOGS[Math.floor(Math.random() * SAMPLE_RUNNING_LOGS.length)];
- steps[runningIdx] = appendLog(step, "info", msg);
- changed = true;
- }
- } else {
- // No running step — start the first pending one
- const pendingIdx = steps.findIndex((s) => s.status === "pending");
- if (pendingIdx >= 0) {
- steps[pendingIdx] = {
- ...steps[pendingIdx],
- status: "running",
- startedAt: Date.now(),
- logs: [{ id: uid("log"), ts: Date.now(), severity: "info", message: "Starting step" }],
- };
- changed = true;
- }
- }
-
- if (!changed) return run;
- return { ...run, steps, updatedAt: Date.now() };
-}
-
-export const useQueueStore = create<QueueState>((set, get) => ({
- runs: createSeedRuns(),
- selectedRunId: null,
- selectedStepId: null,
- isLive: true,
- filters: { status: "all", stage: "all", search: "" },
-
- tick: () => {
- set((state) => ({ runs: state.runs.map(advanceRun) }));
- },
-
- selectRun: (runId) => {
- const run = runId ? get().runs.find((r) => r.id === runId) : null;
- const focus =
- run?.steps.find((s) => s.status === "failed") ??
- run?.steps.find((s) => s.status === "running") ??
- run?.steps[run.steps.length - 1] ??
- null;
- set({ selectedRunId: runId, selectedStepId: focus?.id ?? null });
- },
-
- selectStep: (stepId) => set({ selectedStepId: stepId }),
- setLive: (v) => set({ isLive: v }),
- setFilters: (patch) => set((s) => ({ filters: { ...s.filters, ...patch } })),
-
- retryStep: (runId, stepId) => {
- set((state) => ({
- runs: state.runs.map((run) => {
- if (run.id !== runId) return run;
- const idx = run.steps.findIndex((s) => s.id === stepId);
- if (idx < 0) return run;
- const steps = run.steps.map((s, i) => {
- if (i < idx) return s;
- if (i === idx) {
- return {
- ...s,
- status: "running" as StepStatus,
- startedAt: Date.now(),
- endedAt: undefined,
- retries: s.retries + 1,
- errorMessage: undefined,
- errorStack: undefined,
- output: undefined,
- logs: [
- {
- id: uid("log"),
- ts: Date.now(),
- severity: "info" as LogSeverity,
- message: "Retrying step",
- },
- ],
- };
- }
- return {
- ...s,
- status: "pending" as StepStatus,
- startedAt: undefined,
- endedAt: undefined,
- output: undefined,
- errorMessage: undefined,
- errorStack: undefined,
- logs: [],
- };
- });
- return { ...run, steps, updatedAt: Date.now() };
- }),
- }));
- },
-
- rerunFromScratch: (runId) => {
- set((state) => ({
- runs: state.runs.map((run) => {
- if (run.id !== runId) return run;
- const steps = STAGES.map((stageDef, idx) => {
- const existing = run.steps[idx];
- const isFirst = idx === 0;
- return {
- ...existing,
- status: (isFirst ? "running" : "pending") as StepStatus,
- startedAt: isFirst ? Date.now() : undefined,
- endedAt: undefined,
- output: undefined,
- errorMessage: undefined,
- errorStack: undefined,
- retries: 0,
- logs: isFirst
- ? [
- {
- id: uid("log"),
- ts: Date.now(),
- severity: "info" as LogSeverity,
- message: "Starting step",
- },
- ]
- : [],
- stage: stageDef.id,
- };
- });
- return { ...run, steps, updatedAt: Date.now(), createdAt: Date.now() };
- }),
- }));
- },
-}));
-
-export { getRunStatus };
4e9f8c7
test(web): QueueTable filter behavior (status, search, stage)
Why: Vitest component tests for
QueueTable filter behavior (3 cases: status, search substring, stage-started). Added @testing-library/*, jest-dom matchers, and jsdom, plus a separate vitest.config.ts with environment: 'jsdom' (kept apart from vite.config.ts so tests don't pull in TanStack Start's SSR plugin), with @tanstack/react-router mocked for isolation. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
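To make the behavior these tests pin down concrete, here is a minimal sketch of the filter predicate they exercise. QueueTable's actual implementation is not part of this commit's diff, so the applyFilters helper and its exact logic are assumptions inferred from the three test cases in the diff below; the imported types are real.

```ts
// Hypothetical sketch (not the shipped QueueTable code): the predicate implied
// by the status, search-substring, and stage-started test cases.
import type { QueueFilters } from "@/components/queue/QueueTable";
import type { QueueRow } from "@/lib/api/adapters/queue";

export function applyFilters(rows: QueueRow[], filters: QueueFilters): QueueRow[] {
  return rows.filter((row) => {
    // Status filter: exact match on the synthesized row status.
    if (filters.status !== "all" && row.status !== filters.status) return false;
    // Search filter: substring match on the displayed HubSpot deal id.
    if (filters.search && !row.dealId.includes(filters.search)) return false;
    // Stage filter: keep only rows where that stage has actually started.
    if (filters.stage !== "all") {
      const step = row.steps.find((s) => s.stage === filters.stage);
      if (!step || step.status === "pending") return false;
    }
    return true;
  });
}
```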
Files changed
| web/bun.lockb | Bin 249839 -> 270498 bytes |
| web/package.json | 4 ++ |
| web/src/components/queue/QueueTable.test.tsx | 101 +++++++++++++++++++++++++++ |
| web/vitest.config.ts | 16 +++++ |
| web/vitest.setup.ts | 1 + |
Show diff
diff --git a/web/bun.lockb b/web/bun.lockb
diff --git a/web/package.json b/web/package.json
@@ -74,6 +74,9 @@
"devDependencies": {
"@eslint/js": "^9.32.0",
"@hey-api/openapi-ts": "^0.97.1",
+ "@testing-library/dom": "^10.4.1",
+ "@testing-library/jest-dom": "^6.9.1",
+ "@testing-library/react": "^16.3.2",
"@types/node": "^22.16.5",
"@types/react": "^19.2.0",
"@types/react-dom": "^19.2.0",
@@ -84,6 +87,7 @@
"eslint-plugin-react-hooks": "^5.2.0",
"eslint-plugin-react-refresh": "^0.4.20",
"globals": "^15.15.0",
+ "jsdom": "^29.1.1",
"msw": "^2.14.6",
"prettier": "^3.7.3",
"typescript": "^5.8.3",
diff --git a/web/src/components/queue/QueueTable.test.tsx b/web/src/components/queue/QueueTable.test.tsx
new file mode 100644
@@ -0,0 +1,101 @@
+import { describe, it, expect, vi } from "vitest";
+import { render, screen } from "@testing-library/react";
+import { QueueTable, type QueueFilters } from "./QueueTable";
+import type { QueueRow } from "@/lib/api/adapters/queue";
+
+// TanStack Router hooks need a router context. For unit tests we shim them
+// via vi.mock so the component can be rendered in isolation.
+vi.mock("@tanstack/react-router", () => ({
+ useNavigate: () => () => {},
+ useSearch: () => ({ run: undefined }),
+}));
+
+function row(overrides: Partial<QueueRow> = {}): QueueRow {
+ return {
+ dealId: "10000",
+ dealUuid: "00000000-0000-0000-0000-000000000000",
+ createdAt: 0,
+ updatedAt: 0,
+ steps: [
+ { stage: "intake", status: "done" },
+ { stage: "segmentation", status: "done" },
+ { stage: "categorization", status: "done" },
+ { stage: "appraisal", status: "done" },
+ ],
+ status: "done",
+ durationMs: 0,
+ ...overrides,
+ };
+}
+
+const empty: QueueFilters = { search: "", status: "all", stage: "all" };
+
+describe("QueueTable filtering", () => {
+ it("filters by status", () => {
+ render(
+ <QueueTable
+ rows={[
+ row({ dealUuid: "1", dealId: "1", status: "done" }),
+ row({
+ dealUuid: "2",
+ dealId: "2",
+ status: "failed",
+ steps: [
+ { stage: "intake", status: "done" },
+ { stage: "segmentation", status: "failed" },
+ { stage: "categorization", status: "pending" },
+ { stage: "appraisal", status: "pending" },
+ ],
+ }),
+ ]}
+ filters={{ ...empty, status: "failed" }}
+ />,
+ );
+ expect(screen.queryByText("#1")).not.toBeInTheDocument();
+ expect(screen.getByText("#2")).toBeInTheDocument();
+ });
+
+ it("filters by search substring on deal id", () => {
+ render(
+ <QueueTable
+ rows={[row({ dealUuid: "1", dealId: "10001" }), row({ dealUuid: "2", dealId: "20002" })]}
+ filters={{ ...empty, search: "100" }}
+ />,
+ );
+ expect(screen.getByText("#10001")).toBeInTheDocument();
+ expect(screen.queryByText("#20002")).not.toBeInTheDocument();
+ });
+
+ it("filters by stage — only shows rows that have started that stage", () => {
+ render(
+ <QueueTable
+ rows={[
+ row({
+ dealUuid: "1",
+ dealId: "started",
+ steps: [
+ { stage: "intake", status: "done" },
+ { stage: "segmentation", status: "running" },
+ { stage: "categorization", status: "pending" },
+ { stage: "appraisal", status: "pending" },
+ ],
+ }),
+ row({
+ dealUuid: "2",
+ dealId: "pending",
+ steps: [
+ { stage: "intake", status: "pending" },
+ { stage: "segmentation", status: "pending" },
+ { stage: "categorization", status: "pending" },
+ { stage: "appraisal", status: "pending" },
+ ],
+ status: "pending",
+ }),
+ ]}
+ filters={{ ...empty, stage: "segmentation" }}
+ />,
+ );
+ expect(screen.getByText("#started")).toBeInTheDocument();
+ expect(screen.queryByText("#pending")).not.toBeInTheDocument();
+ });
+});
diff --git a/web/vitest.config.ts b/web/vitest.config.ts
new file mode 100644
@@ -0,0 +1,16 @@
+import { defineConfig } from "vitest/config";
+import tsConfigPaths from "vite-tsconfig-paths";
+
+export default defineConfig({
+ plugins: [tsConfigPaths()],
+ test: {
+ environment: "jsdom",
+ globals: true,
+ setupFiles: ["./vitest.setup.ts"],
+ },
+ resolve: {
+ alias: {
+ "@": new URL("./src", import.meta.url).pathname,
+ },
+ },
+});
diff --git a/web/vitest.setup.ts b/web/vitest.setup.ts
new file mode 100644
@@ -0,0 +1 @@
+import "@testing-library/jest-dom";
cce2bad
fix(lint): ruff import order, unused Field, line length
Why: Carry-over lint cleanup from earlier sub-slice C commits, found during the T17 verification gate: I001 import sort on the migration and
test_settings.py, F401 unused Field import, and 3 E501 line-length fixes in the hubspot client. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Files changed
| .../0007_add_hubspot_deal_id_and_queue_index.py | 1 + |
| backend/src/vcc_backend/integrations/hubspot/client.py | 18 ++++++++++++++---- |
| backend/tests/test_settings.py | 1 + |
Show diff
diff --git a/backend/alembic/versions/0007_add_hubspot_deal_id_and_queue_index.py b/backend/alembic/versions/0007_add_hubspot_deal_id_and_queue_index.py
@@ -7,6 +7,7 @@ Create Date: 2026-05-13
from __future__ import annotations
import sqlalchemy as sa
+
from alembic import op
revision = "0007"
diff --git a/backend/src/vcc_backend/integrations/hubspot/client.py b/backend/src/vcc_backend/integrations/hubspot/client.py
@@ -9,7 +9,7 @@ from datetime import UTC, datetime, timedelta
from typing import Any
import httpx
-from pydantic import BaseModel, Field
+from pydantic import BaseModel
CHECKED_IN_DATE_PROP = "hs_v2_date_entered_2746058991"
DEFAULT_BASE_URL = "https://api.hubapi.com"
@@ -108,8 +108,16 @@ class HubspotClient:
"filterGroups": [
{
"filters": [
- {"propertyName": CHECKED_IN_DATE_PROP, "operator": "GTE", "value": _iso(since)},
- {"propertyName": CHECKED_IN_DATE_PROP, "operator": "LT", "value": _iso(until)},
+ {
+ "propertyName": CHECKED_IN_DATE_PROP,
+ "operator": "GTE",
+ "value": _iso(since),
+ },
+ {
+ "propertyName": CHECKED_IN_DATE_PROP,
+ "operator": "LT",
+ "value": _iso(until),
+ },
]
}
],
@@ -129,7 +137,9 @@ class HubspotClient:
while True:
body = {**base_body, "after": after} if after else base_body
try:
- resp = await client.post(f"{self._base_url}{SEARCH_PATH}", headers=headers, json=body)
+ resp = await client.post(
+ f"{self._base_url}{SEARCH_PATH}", headers=headers, json=body
+ )
except httpx.RequestError as exc:
raise HubspotError(f"HubSpot request failed: {exc}") from exc
if resp.status_code >= 400:
diff --git a/backend/tests/test_settings.py b/backend/tests/test_settings.py
@@ -73,6 +73,7 @@ def test_hubspot_access_token_is_secret(monkeypatch: pytest.MonkeyPatch) -> None
monkeypatch.setenv("DATABASE_URL", "postgresql+asyncpg://u:p@h:5432/d")
monkeypatch.setenv("HUBSPOT_ACCESS_TOKEN", "should-not-leak")
from pydantic import SecretStr
+
from vcc_backend.core.settings import Settings
s = Settings() # type: ignore[call-arg]
d02e339
fix: invalidate correct queryKey; raise on missing token (don't assert)
Why: Two issues from the final gestalt review. (1)
RunDetailSheet's invalidateQueries used a stale string key ['listQueueDeals'] that never matched the generated object-shaped queryKey; switched to the codegen key factory. (2) DealSyncService used assert for token presence, which is stripped under python -O; switched to an explicit raise RuntimeError. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
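For context on issue (1), a short sketch of the mismatch and the fix. The exact shape of the generated key is an assumption based on @hey-api/openapi-ts conventions rather than something copied from the repo; the key-factory import and React Query calls match what the diff below uses.

```ts
// Sketch of why the old invalidation never fired, assuming object-shaped
// generated query keys.
import { QueryClient } from "@tanstack/react-query";
import { listQueueDealsAdminQueueDealsGetQueryKey } from "@/lib/api/generated/@tanstack/react-query.gen";

export function invalidateQueueList(queryClient: QueryClient): void {
  // Old: a hand-written string key. invalidateQueries matches by structural
  // prefix, and the generated hooks register object-shaped keys, so
  // ["listQueueDeals"] could never match and the queue list never refetched.
  // queryClient.invalidateQueries({ queryKey: ["listQueueDeals"] });

  // New: reuse the key factory the generated query hook itself uses, so the
  // invalidation target and the cached query share the exact same key.
  queryClient.invalidateQueries({
    queryKey: listQueueDealsAdminQueueDealsGetQueryKey(),
  });
}
```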
Files changed
| backend/src/vcc_backend/features/deals/service.py | 8 +++++++- |
| web/src/components/queue/RunDetailSheet.tsx | 7 +++++-- |
Show diff
diff --git a/backend/src/vcc_backend/features/deals/service.py b/backend/src/vcc_backend/features/deals/service.py
@@ -65,7 +65,13 @@ class DealSyncService:
self._settings = settings
if hubspot_client is None:
token = settings.hubspot_access_token
- assert token is not None, "Worker startup validation should have rejected this"
+ if token is None:
+ raise RuntimeError(
+ "DealSyncService requires HUBSPOT_ACCESS_TOKEN. "
+ "Worker startup validation should have caught this; "
+ "if you are constructing the service in a test, "
+ "pass hubspot_client= explicitly."
+ )
self._hubspot: _HubspotProtocol = HubspotClient(token.get_secret_value())
else:
self._hubspot = hubspot_client
diff --git a/web/src/components/queue/RunDetailSheet.tsx b/web/src/components/queue/RunDetailSheet.tsx
@@ -1,7 +1,10 @@
import { useMemo, useState } from "react";
import { useNavigate, useSearch } from "@tanstack/react-router";
import { useMutation, useQueryClient } from "@tanstack/react-query";
-import { enqueueProcessDealAdminDealsDealIdProcessPostMutation } from "@/lib/api/generated/@tanstack/react-query.gen";
+import {
+ enqueueProcessDealAdminDealsDealIdProcessPostMutation,
+ listQueueDealsAdminQueueDealsGetQueryKey,
+} from "@/lib/api/generated/@tanstack/react-query.gen";
import type { QueueRow, QueueStep, RunStatus, StageId } from "@/lib/api/adapters/queue";
import {
Sheet,
@@ -47,7 +50,7 @@ export function RunDetailSheet({ rows }: { rows: QueueRow[] }) {
const reRun = useMutation({
...enqueueProcessDealAdminDealsDealIdProcessPostMutation(),
onSuccess: () => {
- queryClient.invalidateQueries({ queryKey: ["listQueueDeals"] });
+ queryClient.invalidateQueries({ queryKey: listQueueDealsAdminQueueDealsGetQueryKey() });
},
});