Platform upgrade: semantic search, citations, readiness, tests, Docker
Major features added by 5 parallel agent teams:
- Semantic "Ask" (NL queries via FTS5 + embeddings + Claude synthesis)
- Global search across drafts, ideas, authors, gaps
- REST API expansion (14 endpoints, up from 3) with CSV/JSON export
- Citation graph visualization (D3.js, 440 nodes, 2422 edges)
- Standards readiness scoring (0-100 composite from 6 factors)
- Side-by-side draft comparison view with shared/unique analysis
- Annotation system (notes + tags per draft, DB-persisted)
- Docker deployment (Dockerfile + docker-compose with Ollama)
- Scheduled updates (cron script with log rotation)
- Pipeline health dashboard (stage progress bars, cost tracking)
- Test suite foundation (54 pytest tests covering DB, models, web data)

Fixes: compare_drafts() stubbed→working, get_authors_for_draft() bug, source-aware analysis prompts, config env var overrides + validation, resilient batch error handling with --retry-failed, observatory --dry-run

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
168
tests/conftest.py
Normal file
168
tests/conftest.py
Normal file
@@ -0,0 +1,168 @@
|
||||
"""Shared fixtures for IETF Draft Analyzer tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from ietf_analyzer.config import Config
|
||||
from ietf_analyzer.db import Database, SCHEMA
|
||||
from ietf_analyzer.models import Author, Draft, Rating
|
||||
|
||||
|
||||
@pytest.fixture
def tmp_db(tmp_path):
    """Create a file-backed Database under ``tmp_path`` with all tables initialized.

    Yields a ``Database`` whose SQLite file lives at ``tmp_path / "test.db"``
    (NOT in-memory, contrary to what an earlier docstring claimed — the path
    is a real file so it survives reconnects within a test) and closes the
    connection during fixture teardown.
    """
    cfg = Config(
        data_dir=str(tmp_path),
        db_path=str(tmp_path / "test.db"),
    )
    db = Database(cfg)
    # Touching .conn forces the lazy connection and schema creation up front,
    # so tests can rely on all tables existing before the test body runs.
    _ = db.conn
    yield db
    # pytest runs the code after `yield` as teardown even when the test fails.
    db.close()
|
||||
|
||||
|
||||
@pytest.fixture
def sample_draft():
    """Provide one realistic, fully-populated Draft instance for tests."""
    draft_fields = {
        "name": "draft-test-ai-agent-protocol",
        "rev": "02",
        "title": "AI Agent Communication Protocol",
        "abstract": "This document defines a protocol for autonomous AI agents to communicate with each other in a standardized manner.",
        "time": "2025-06-15T12:00:00+00:00",
        "dt_id": 12345,
        "pages": 28,
        "words": 12000,
        "group": "dispatch",
        "group_uri": "/api/v1/group/group/1234/",
        "expires": "2025-12-15T12:00:00+00:00",
        "ad": None,
        "shepherd": None,
        "states": ["I-D Exists"],
        "full_text": "Internet-Draft: AI Agent Communication Protocol\n\nAbstract\n\nThis document defines...",
        "categories": ["A2A protocols", "Agent discovery/reg"],
        "tags": ["ai", "agent"],
        "fetched_at": "2025-06-20T10:00:00+00:00",
    }
    return Draft(**draft_fields)
|
||||
|
||||
|
||||
@pytest.fixture
def sample_rating():
    """Provide one realistic Rating instance matching ``sample_draft``."""
    rating_fields = {
        "draft_name": "draft-test-ai-agent-protocol",
        "novelty": 4,
        "maturity": 3,
        "overlap": 2,
        "momentum": 3,
        "relevance": 5,
        "summary": "Defines a novel protocol for AI agent communication with discovery and auth mechanisms.",
        "novelty_note": "Unique approach to agent handshake",
        "maturity_note": "Early stage but well-structured",
        "overlap_note": "Partially overlaps with MCP drafts",
        "momentum_note": "Active working group interest",
        "relevance_note": "Directly addresses core AI agent interop",
        "categories": ["A2A protocols", "Agent discovery/reg"],
        "rated_at": "2025-06-20T10:00:00+00:00",
    }
    return Rating(**rating_fields)
|
||||
|
||||
|
||||
def _make_draft(name, title, time, group=None, pages=10, categories=None):
    """Construct a minimal Draft for database seeding (rev fixed at "00")."""
    fetched = datetime.now(timezone.utc).isoformat()
    draft_kwargs = {
        "name": name,
        "rev": "00",
        "title": title,
        "abstract": f"Abstract for {title}.",
        "time": time,
        "dt_id": None,
        "pages": pages,
        # Derive a proportional word count (~400 words per page).
        "words": pages * 400,
        "group": group,
        "categories": categories or [],
        "fetched_at": fetched,
    }
    return Draft(**draft_kwargs)
|
||||
|
||||
|
||||
def _make_rating(draft_name, novelty, maturity, overlap, momentum, relevance, categories=None):
    """Construct a minimal Rating for database seeding."""
    rated = datetime.now(timezone.utc).isoformat()
    rating_kwargs = {
        "draft_name": draft_name,
        "novelty": novelty,
        "maturity": maturity,
        "overlap": overlap,
        "momentum": momentum,
        "relevance": relevance,
        "summary": f"Summary for {draft_name}.",
        "categories": categories or ["A2A protocols"],
        "rated_at": rated,
    }
    return Rating(**rating_kwargs)
|
||||
|
||||
|
||||
@pytest.fixture
def seeded_db(tmp_db):
    """Populate tmp_db with 5 drafts, ratings, ideas, authors, and refs."""
    db = tmp_db

    # Drafts: (name, title, time, group, pages, categories)
    draft_specs = [
        ("draft-alpha-agent-comm", "Alpha Agent Communication", "2025-01-10", "dispatch", 20, ["A2A protocols"]),
        ("draft-beta-ml-traffic", "Beta ML Traffic Optimization", "2025-02-15", "netmod", 15, ["ML traffic mgmt"]),
        ("draft-gamma-agent-id", "Gamma Agent Identity", "2025-03-20", "secdispatch", 12, ["Agent identity/auth"]),
        ("draft-delta-safety", "Delta AI Safety Framework", "2025-04-25", None, 30, ["AI safety/alignment"]),
        ("draft-epsilon-discovery", "Epsilon Agent Discovery", "2025-05-30", "dispatch", 8, ["Agent discovery/reg"]),
    ]
    for spec in draft_specs:
        db.upsert_draft(_make_draft(*spec))

    # Ratings: (draft_name, novelty, maturity, overlap, momentum, relevance, categories)
    rating_specs = [
        ("draft-alpha-agent-comm", 4, 3, 2, 3, 5, ["A2A protocols"]),
        ("draft-beta-ml-traffic", 3, 4, 3, 2, 3, ["ML traffic mgmt"]),
        ("draft-gamma-agent-id", 5, 2, 1, 4, 4, ["Agent identity/auth"]),
        ("draft-delta-safety", 3, 3, 4, 3, 4, ["AI safety/alignment"]),
        ("draft-epsilon-discovery", 4, 2, 2, 5, 5, ["Agent discovery/reg"]),
    ]
    for spec in rating_specs:
        db.upsert_rating(_make_rating(*spec))

    # Ideas keyed by draft name (dict insertion order preserves seeding order).
    ideas_by_draft = {
        "draft-alpha-agent-comm": [
            {"title": "Agent Handshake", "description": "Three-way handshake for agents", "type": "protocol"},
            {"title": "Capability Negotiation", "description": "Agents advertise capabilities", "type": "mechanism"},
        ],
        "draft-beta-ml-traffic": [
            {"title": "ML Traffic Classifier", "description": "Classify traffic using ML", "type": "mechanism"},
        ],
        "draft-gamma-agent-id": [
            {"title": "Agent Certificate", "description": "X.509 extension for agents", "type": "extension"},
        ],
    }
    for draft_name, ideas in ideas_by_draft.items():
        db.insert_ideas(draft_name, ideas)

    # Authors: (person_id, name, affiliation); ascii_name mirrors name.
    author_specs = [
        (1001, "Alice Researcher", "ExampleCorp"),
        (1002, "Bob Engineer", "TestLabs"),
        (1003, "Carol Scientist", "ExampleCorp"),
    ]
    for person_id, full_name, org in author_specs:
        db.upsert_author(Author(
            person_id=person_id,
            name=full_name,
            ascii_name=full_name,
            affiliation=org,
            fetched_at=datetime.now(timezone.utc).isoformat(),
        ))

    # Draft-author links: (draft_name, person_id, order, affiliation).
    author_links = [
        ("draft-alpha-agent-comm", 1001, 1, "ExampleCorp"),
        ("draft-alpha-agent-comm", 1002, 2, "TestLabs"),
        ("draft-beta-ml-traffic", 1002, 1, "TestLabs"),
        ("draft-gamma-agent-id", 1001, 1, "ExampleCorp"),
        ("draft-gamma-agent-id", 1003, 2, "ExampleCorp"),
        ("draft-delta-safety", 1003, 1, "ExampleCorp"),
    ]
    for draft_name, person_id, position, org in author_links:
        db.upsert_draft_author(draft_name, person_id, position, org)

    # References keyed by draft name: lists of (kind, identifier) pairs.
    refs_by_draft = {
        "draft-alpha-agent-comm": [("rfc", "8259"), ("rfc", "9110"), ("draft", "draft-ietf-httpbis")],
        "draft-beta-ml-traffic": [("rfc", "8259"), ("bcp", "BCP14")],
        "draft-gamma-agent-id": [("rfc", "5280"), ("rfc", "8259")],
    }
    for draft_name, refs in refs_by_draft.items():
        db.insert_refs(draft_name, refs)

    yield db
|
||||
Reference in New Issue
Block a user