# pyproject.toml for ietf-draft-analyzer.
# NOTE(review): this file was recovered from a commit-page scrape; the original
# commit message (preserved here as a comment) described: semantic "Ask"
# (FTS5 + embeddings + Claude synthesis), global search, REST API expansion
# (14 endpoints) with CSV/JSON export, D3.js citation graph, standards
# readiness scoring, side-by-side draft comparison, annotation system, Docker
# deployment, scheduled updates, pipeline health dashboard, and a 54-test
# pytest foundation; plus fixes to compare_drafts(), get_authors_for_draft(),
# config env-var overrides, batch error handling (--retry-failed), and --dry-run.
# PEP 517/518 build configuration: build with setuptools.
[build-system]
requires = ["setuptools>=68.0"]
build-backend = "setuptools.build_meta"

# PEP 621 project metadata.
[project]
name = "ietf-draft-analyzer"
version = "0.1.0"
description = "Track, categorize, and rate AI/agent-related IETF Internet-Drafts"
requires-python = ">=3.11"
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "anthropic>=0.40",
    "click>=8.0",
    "flask>=3.0",
    "httpx>=0.27",
    "markdown>=3.5",
    "matplotlib>=3.8",
    "networkx>=3.2",
    "numpy>=1.26",
    "ollama>=0.4",
    "plotly>=5.18",
    "python-dotenv>=1.0",
    "rich>=13.0",
    "scikit-learn>=1.3",
    "scipy>=1.11",
    "seaborn>=0.13",
]

# Extras: install with `pip install .[test]`.
[project.optional-dependencies]
test = ["pytest", "pytest-cov"]

# Console entry point: `ietf` runs ietf_analyzer.cli:main.
[project.scripts]
ietf = "ietf_analyzer.cli:main"

# src-layout: discover packages under src/.
[tool.setuptools.packages.find]
where = ["src"]

# Let pytest import the package from src/ without installation.
[tool.pytest.ini_options]
pythonpath = ["src"]