v0.3.0: Publication-ready release with blog site, paper update, and polish

Release prep:
- Version bump to 0.3.0 (pyproject.toml, cli.py)
- Rewrite README.md with current stats (475 drafts, 713 authors, 501 ideas)
- Add CONTRIBUTING.md with dev setup and code conventions

Blog site:
- Add scripts/build-site.py (markdown → HTML with clean CSS, dark mode, nav)
- Generate static site in docs/blog/ (10 pages)
- Ready for GitHub Pages deployment

Academic paper (paper/main.tex):
- Update all counts: 474→475 drafts, 557→710 authors, 1907→462 ideas, 11→12 gaps
- Add false-positive filtering methodology (113 excluded, 361 relevant)
- Add cross-org convergence analysis (132 ideas, 33% rate)
- Add GDPR compliance gap to gap table
- Add LLM-as-judge caveats to rating methodology and limitations
- Add FIPA, IEEE P3394, W3C WoT to related work with bibliography entries
- Fix safety ratio to show monthly variation (1.5:1 to 21:1)

Pipeline:
- Fetch 1 new draft (475 total), 3 new authors (713 total)
- Fix 16 ruff lint errors across test files
- All 106 tests pass

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-08 17:54:43 +01:00
parent e247bfef8f
commit 1ec1f69bee
34 changed files with 4268 additions and 272 deletions

285
scripts/build-site.py Normal file
View File

@@ -0,0 +1,285 @@
#!/usr/bin/env python3
"""Build static HTML blog site from markdown posts in data/reports/blog-series/."""
import re
from pathlib import Path

import markdown  # third-party: Python-Markdown, does the md -> HTML conversion

# Repo root, resolved relative to this script (scripts/ is one level below root).
ROOT = Path(__file__).resolve().parent.parent
POSTS_DIR = ROOT / "data" / "reports" / "blog-series"  # markdown sources
OUT_DIR = ROOT / "docs" / "blog"  # generated site root (GitHub Pages serves docs/)
CSS_DIR = OUT_DIR / "css"
POSTS_OUT = OUT_DIR / "posts"

# Ordered list of posts to include: (source filename, fallback title used when
# the markdown has no H1). List order defines the prev/next navigation.
POSTS = [
    ("00-series-overview.md", "Series Overview"),
    ("01-gold-rush.md", "The Gold Rush"),
    ("02-who-writes-the-rules.md", "Who Writes the Rules"),
    ("03-oauth-wars.md", "The OAuth Wars"),
    ("04-what-nobody-builds.md", "What Nobody Builds"),
    ("05-1262-ideas.md", "Where Drafts Converge"),
    ("06-big-picture.md", "The Big Picture"),
    ("07-how-we-built-this.md", "How We Built This"),
    ("08-agents-building-the-analysis.md", "Agents Building the Agent Analysis"),
]
# Single shared stylesheet. CSS custom properties plus a prefers-color-scheme
# media query give automatic dark mode with no JavaScript.
CSS = """\
:root {
--bg: #ffffff;
--text: #1a1a1a;
--muted: #6b7280;
--border: #e5e7eb;
--accent: #2563eb;
--code-bg: #f3f4f6;
}
@media (prefers-color-scheme: dark) {
:root {
--bg: #111827;
--text: #e5e7eb;
--muted: #9ca3af;
--border: #374151;
--accent: #60a5fa;
--code-bg: #1f2937;
}
}
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif;
color: var(--text);
background: var(--bg);
line-height: 1.7;
font-size: 17px;
}
.container {
max-width: 720px;
margin: 0 auto;
padding: 2rem 1.5rem;
}
nav {
border-bottom: 1px solid var(--border);
padding: 1rem 0;
margin-bottom: 2rem;
}
nav a {
color: var(--accent);
text-decoration: none;
margin-right: 1.5rem;
font-size: 0.9rem;
}
nav a:hover { text-decoration: underline; }
nav .site-title { font-weight: 700; font-size: 1.1rem; }
h1 { font-size: 2rem; margin: 1.5rem 0 1rem; line-height: 1.2; }
h2 { font-size: 1.5rem; margin: 2rem 0 0.75rem; }
h3 { font-size: 1.2rem; margin: 1.5rem 0 0.5rem; }
p { margin: 0.75rem 0; }
a { color: var(--accent); }
blockquote {
border-left: 3px solid var(--accent);
padding-left: 1rem;
color: var(--muted);
margin: 1rem 0;
}
code {
background: var(--code-bg);
padding: 0.15rem 0.4rem;
border-radius: 3px;
font-size: 0.9em;
}
pre {
background: var(--code-bg);
padding: 1rem;
border-radius: 6px;
overflow-x: auto;
margin: 1rem 0;
}
pre code { background: none; padding: 0; }
table {
width: 100%;
border-collapse: collapse;
margin: 1rem 0;
font-size: 0.95rem;
}
th, td {
padding: 0.5rem 0.75rem;
border: 1px solid var(--border);
text-align: left;
}
th { background: var(--code-bg); font-weight: 600; }
ul, ol { padding-left: 1.5rem; margin: 0.75rem 0; }
li { margin: 0.25rem 0; }
.post-nav {
display: flex;
justify-content: space-between;
margin-top: 3rem;
padding-top: 1rem;
border-top: 1px solid var(--border);
font-size: 0.9rem;
}
.post-list { list-style: none; padding: 0; }
.post-list li { margin: 1rem 0; }
.post-list a { font-size: 1.1rem; font-weight: 500; }
.post-list .desc { color: var(--muted); font-size: 0.9rem; }
footer {
margin-top: 3rem;
padding-top: 1rem;
border-top: 1px solid var(--border);
color: var(--muted);
font-size: 0.85rem;
}
"""
def slug(filename: str) -> str:
    """Map a markdown filename to its output HTML filename.

    Only a trailing ".md" extension is rewritten. The previous
    ``filename.replace(".md", ".html")`` would also rewrite a ".md"
    occurring mid-name (e.g. "a.md.backup.md"); anchoring the pattern
    at the end of the string fixes that while producing identical
    results for every entry in POSTS.
    """
    return re.sub(r"\.md$", ".html", filename)
def build_nav(current: str = "") -> str:
    """Render the top navigation bar.

    The post matching *current* is shown as bold text instead of a link.
    Post 0 (the series overview) is deliberately left out of the bar;
    each entry is abbreviated to the last word of its title.
    """
    def entry(fn: str, title: str) -> str:
        short = title.split()[-1]
        if fn == current:
            return f"<strong>{short}</strong>"
        return f'<a href="/blog/posts/{slug(fn)}">{short}</a>'

    items = ['<a href="/blog/" class="site-title">IETF AI Agent Analysis</a>']
    items.extend(entry(fn, title) for fn, title in POSTS[1:])
    return "<nav>" + "\n".join(items) + "</nav>"
def build_post_nav(idx: int) -> str:
    """Render prev/next footer links for the post at index *idx* in POSTS.

    A missing neighbor (first/last post) is padded with an empty <span>
    so the flexbox layout keeps both slots.
    """
    placeholder = "<span></span>"

    if idx > 0:
        fn, title = POSTS[idx - 1]
        prev_html = f'<a href="/blog/posts/{slug(fn)}">&larr; {title}</a>'
    else:
        prev_html = placeholder

    if idx < len(POSTS) - 1:
        fn, title = POSTS[idx + 1]
        next_html = f'<a href="/blog/posts/{slug(fn)}">{title} &rarr;</a>'
    else:
        next_html = placeholder

    return f'<div class="post-nav">{prev_html}{next_html}</div>'
def wrap_html(title: str, body: str, nav: str, post_nav: str = "") -> str:
    """Wrap rendered content in the shared page chrome (head, nav, footer).

    *post_nav* defaults to empty so the index page can omit prev/next links.
    """
    template = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>{title} — IETF AI Agent Analysis</title>
<link rel="stylesheet" href="/blog/css/style.css">
</head>
<body>
<div class="container">
{nav}
{body}
{post_nav}
<footer>
<p>IETF Draft Analyzer &mdash; Data collected through March 2026.
<a href="https://github.com/cnennemann/ietf-draft-analyzer">Source on GitHub</a></p>
</footer>
</div>
</body>
</html>"""
    return template.format(title=title, nav=nav, body=body, post_nav=post_nav)
def extract_title(md_text: str) -> str:
    """Extract the first H1 heading ("# ...") from markdown text.

    Returns "" (falsy) when no H1 exists, so the caller's
    ``extract_title(md_text) or fallback_title`` idiom actually falls
    back. The previous "Untitled" return was truthy and made the
    per-post fallback titles in POSTS dead code. Trailing whitespace
    on the heading line is trimmed.
    """
    m = re.search(r"^#\s+(.+?)\s*$", md_text, re.MULTILINE)
    return m.group(1) if m else ""
def main():
    """Build the full static site: one HTML page per post plus an index.

    Output layout under docs/blog/: css/style.css, .nojekyll (tells
    GitHub Pages to skip Jekyll), posts/<slug>.html, index.html.

    Fixes vs. the previous version: the final "BUILT" print was an
    f-string with no placeholders (ruff F541), and the closing page
    count assumed every post in POSTS existed on disk even when some
    were skipped.
    """
    md_ext = markdown.Markdown(extensions=["tables", "fenced_code", "toc"])
    # Create output dirs
    CSS_DIR.mkdir(parents=True, exist_ok=True)
    POSTS_OUT.mkdir(parents=True, exist_ok=True)
    # Write CSS
    (CSS_DIR / "style.css").write_text(CSS)
    # Write .nojekyll
    (OUT_DIR / ".nojekyll").write_text("")
    # Build each post
    built = 0
    for idx, (fn, fallback_title) in enumerate(POSTS):
        src = POSTS_DIR / fn
        if not src.exists():
            print(f" SKIP {fn} (not found)")
            continue
        md_text = src.read_text()
        # Markdown instances carry per-document state (e.g. the toc
        # extension); reset() is required between conversions.
        md_ext.reset()
        html_body = md_ext.convert(md_text)
        title = extract_title(md_text) or fallback_title
        nav = build_nav(fn)
        post_nav = build_post_nav(idx)
        full_html = wrap_html(title, html_body, nav, post_nav)
        out_path = POSTS_OUT / slug(fn)
        out_path.write_text(full_html)
        built += 1
        print(f" BUILT {out_path.relative_to(ROOT)}")
    # Build index page. Post 0 (the overview) is linked separately at the
    # bottom rather than listed in the series.
    post_links = [
        f'<li><a href="/blog/posts/{slug(fn)}">Post {i}: {title}</a></li>'
        for i, (fn, title) in enumerate(POSTS)
        if i > 0
    ]
    index_body = f"""
<h1>The AI Agent Standards Gold Rush</h1>
<p><em>A data-driven analysis of 475 IETF Internet-Drafts on AI agents, autonomous systems, and machine learning protocols.</em></p>
<p>The IETF is experiencing an unprecedented surge in AI/agent standardization activity.
We built an automated analysis pipeline to make sense of it: 713 authors, 501 ideas,
132 cross-organizational convergent ideas, and 12 identified gaps.</p>
<h2>The Series</h2>
<ul class="post-list">
{"".join(post_links)}
</ul>
<h2>About</h2>
<p>This analysis was produced using the <a href="https://github.com/cnennemann/ietf-draft-analyzer">IETF Draft Analyzer</a>,
an open-source Python tool that combines Claude for multi-dimensional rating and idea extraction
with Ollama for semantic embeddings. Total API cost: ~$9-15.</p>
<p><a href="/blog/posts/{slug(POSTS[0][0])}">Read the series overview &rarr;</a></p>
"""
    index_html = wrap_html("Home", index_body, build_nav())
    (OUT_DIR / "index.html").write_text(index_html)
    print(" BUILT docs/blog/index.html")
    # +1 accounts for index.html; `built` counts only posts actually rendered.
    print(f"\nDone. {built + 1} pages built in docs/blog/")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""Compare Claude Haiku vs Ollama as pre-classifiers, using Claude Sonnet ratings as ground truth."""
import sqlite3
import hashlib
import json
import sys
import time

# Make the in-repo package importable when running from the repo root.
sys.path.insert(0, "src")

import anthropic
from ietf_analyzer.config import Config

cfg = Config.load()
conn = sqlite3.connect(cfg.db_path)
conn.row_factory = sqlite3.Row  # allow column access by name (r["title"])

# Yes/no relevance prompt for the cheap pre-classifier. The explicit
# include/exclude lists mirror the criteria the Sonnet rater uses.
HAIKU_PROMPT = """\
You are classifying IETF Internet-Drafts for an AI/agent standards tracker.
A draft is RELEVANT if it relates to ANY of these topics:
- AI agents, autonomous agents, multi-agent systems
- Agent identity, authentication, authorization, discovery
- Agent-to-agent (A2A) communication protocols
- Large language models (LLMs), generative AI
- Machine learning in network operations
- AI safety, alignment, trustworthiness
- Model Context Protocol (MCP), agentic workflows
- OAuth/JWT/credentials for agents or AI systems
- Autonomous network operations using AI
- Intelligent network management or traffic handling
A draft is NOT relevant if it only covers:
- Pure cryptography without AI/agent context
- General networking protocols (BGP, DNS, TLS) without AI
- Email, HTTP, or web standards without AI/agent features
- Remote attestation (RATS) unless specifically for AI agents
- Accessibility guidelines for user agents (browsers)
Title: {title}
Abstract: {abstract}
Is this draft relevant to AI agents or related topics? Answer ONLY "yes" or "no"."""

# NOTE(review): presumably picks up ANTHROPIC_API_KEY from the environment — confirm.
client = anthropic.Anthropic()
def haiku_classify(title: str, abstract: str) -> tuple[bool, bool]:
    """Classify with Haiku, using llm_cache to avoid repeat calls.

    Returns:
        (is_relevant, was_cached): is_relevant is True when the model's
        answer starts with "yes"; was_cached is True when the answer came
        from the llm_cache table instead of a fresh API call.
    """
    # Truncate the abstract to bound prompt size (and therefore cost).
    prompt = HAIKU_PROMPT.format(title=title, abstract=abstract[:2000])
    # Key the cache on the full prompt, so any change to the title, the
    # abstract, or the prompt template itself yields a fresh entry.
    cache_key = hashlib.sha256(f"haiku-classify:{prompt}".encode()).hexdigest()
    # Check cache
    cached = conn.execute("SELECT response_json FROM llm_cache WHERE prompt_hash=?", (cache_key,)).fetchone()
    if cached:
        # Cached response_json holds the raw lowercase answer text.
        return cached["response_json"].strip().lower().startswith("yes"), True
    resp = client.messages.create(
        model=cfg.claude_model_cheap,
        max_tokens=10,  # a bare yes/no fits well within 10 tokens
        messages=[{"role": "user", "content": prompt}],
    )
    answer = resp.content[0].text.strip().lower()
    # Cache it (request_json stores only a 500-char prompt prefix to save space)
    conn.execute(
        "INSERT OR REPLACE INTO llm_cache (draft_name, prompt_hash, request_json, response_json, model, input_tokens, output_tokens) VALUES (?,?,?,?,?,?,?)",
        ("_classify_", cache_key, prompt[:500], answer, cfg.claude_model_cheap, resp.usage.input_tokens, resp.usage.output_tokens),
    )
    conn.commit()
    return answer.startswith("yes"), False
# Get all rated drafts (abstract required: the classifier prompt needs one).
rows = conn.execute("""
SELECT d.name, d.title, d.abstract, r.relevance, r.false_positive
FROM drafts d JOIN ratings r ON d.name = r.draft_name
WHERE d.abstract IS NOT NULL AND d.abstract != ''
ORDER BY d.name
""").fetchall()

# Guard the degenerate case: the summary below divides by len(rows).
if not rows:
    print("No rated drafts with abstracts found; nothing to compare.")
    conn.close()
    sys.exit(0)

print(f"Classifying {len(rows)} drafts with Haiku...\n")

haiku_agree = 0
haiku_fp = []  # Haiku=yes, Claude=no
haiku_fn = []  # Haiku=no, Claude=yes
cached_count = 0
api_count = 0
# (Removed total_tokens_in/total_tokens_out: they were initialized but never updated.)

for i, r in enumerate(rows):
    # Sonnet ground truth: relevant = not flagged false-positive AND relevance >= 3.
    claude_relevant = not r["false_positive"] and r["relevance"] >= 3
    haiku_relevant, was_cached = haiku_classify(r["title"], r["abstract"])
    if was_cached:
        cached_count += 1
    else:
        api_count += 1
        if api_count % 20 == 0:
            time.sleep(1)  # rate limit
    if haiku_relevant == claude_relevant:
        haiku_agree += 1
    elif haiku_relevant and not claude_relevant:
        haiku_fp.append({"name": r["name"], "title": r["title"][:60], "rel": r["relevance"], "fp": r["false_positive"]})
    else:
        haiku_fn.append({"name": r["name"], "title": r["title"][:60], "rel": r["relevance"], "fp": r["false_positive"]})
    if (i + 1) % 50 == 0:
        print(f" Processed {i+1}/{len(rows)} ({cached_count} cached, {api_count} API calls)...")

print(f"\n{'='*70}")
print(f"HAIKU AGREEMENT with Claude Sonnet: {haiku_agree}/{len(rows)} ({100*haiku_agree/len(rows):.1f}%)")
print(f"API calls: {api_count}, Cached: {cached_count}")
print(f"{'='*70}")
print(f"\nHaiku=RELEVANT but Sonnet=NOT ({len(haiku_fp)}):")
for d in haiku_fp[:10]:
    fp = " [FP]" if d["fp"] else ""
    print(f" rel={d['rel']}{fp} | {d['name']}: {d['title']}")
print(f"\nHaiku=IRRELEVANT but Sonnet=RELEVANT ({len(haiku_fn)}):")
for d in haiku_fn[:10]:
    print(f" rel={d['rel']} | {d['name']}: {d['title']}")

# Cost estimate
avg_tokens_per_call = 800  # ~800 input tokens per classification
cost_per_draft = (avg_tokens_per_call * 0.80 + 50 * 4.0) / 1_000_000  # Haiku pricing
print(f"\n{'='*70}")
print(f"Cost estimate: ~${cost_per_draft:.5f}/draft = ~${cost_per_draft * len(rows):.3f} for {len(rows)} drafts")
# Fixed: was an f-string with no placeholders (ruff F541).
print("Ollama cost: $0 (but 66.9% agreement)")
print(f"Haiku cost: ~${cost_per_draft * len(rows):.3f} ({100*haiku_agree/len(rows):.1f}% agreement)")
conn.close()

View File

@@ -0,0 +1,73 @@
#!/usr/bin/env python3
"""Fetch from all 5 sources and import into DB."""
import sys

# Make the in-repo package importable when running from the repo root.
sys.path.insert(0, "src")

from ietf_analyzer.config import Config
from ietf_analyzer.sources import FETCHERS, get_fetcher
from ietf_analyzer.db import Database
from ietf_analyzer.models import Draft
from rich.console import Console

console = Console()
cfg = Config.load()
db = Database(cfg)  # project DB wrapper; connection details come from cfg
# Only fetch from new sources (IETF and W3C already done recently)
sources_to_fetch = ["etsi", "itu", "iso"]
total_new = 0
for source_name in sources_to_fetch:
    console.print(f"\n[bold blue]{'='*60}[/]")
    console.print(f"[bold blue]Fetching from {source_name.upper()}...[/]")
    console.print(f"[bold blue]{'='*60}[/]")
    fetcher = get_fetcher(source_name, cfg)
    try:
        # NOTE(review): presumably queries the source's search API with the
        # configured keywords — semantics live in ietf_analyzer.sources.
        docs = fetcher.search(cfg.search_keywords)
        console.print(f" Found {len(docs)} documents")
        new_count = 0
        for doc in docs:
            # Skip documents already present in the DB (keyed by name).
            existing = db.get_draft(doc.name)
            if existing:
                continue
            new_count += 1
            # Convert to Draft
            draft = Draft(
                name=doc.name,
                # rev="01" is a placeholder: non-IETF sources have no
                # revision concept — TODO confirm against Draft model.
                rev="01",
                title=doc.title,
                abstract=doc.abstract,
                source=doc.source,
                source_id=doc.source_id,
                source_url=doc.source_url,
                time=doc.time,
                doc_status=doc.doc_status,
                full_text=doc.full_text,
            )
            db.upsert_draft(draft)
        console.print(f" [green]Imported {new_count} new documents[/]")
        total_new += new_count
    except Exception as e:
        # Best-effort run: a failing source is reported but must not
        # abort the remaining fetches.
        console.print(f" [red]Error: {e}[/]")
        import traceback
        traceback.print_exc()
    finally:
        fetcher.close()
console.print(f"\n[bold green]Total new documents: {total_new}[/]")
# Final stats: re-open the DB read-only-style and summarize per source.
import sqlite3

stats_conn = sqlite3.connect(cfg.db_path)
per_source = stats_conn.execute(
    "SELECT source, COUNT(*) FROM drafts GROUP BY source ORDER BY source"
).fetchall()
console.print("\n[bold]Database by source:[/]")
for src_name, doc_count in per_source:
    console.print(f" {src_name}: {doc_count}")
grand_total = stats_conn.execute("SELECT COUNT(*) FROM drafts").fetchone()[0]
console.print(f" [bold]Total: {grand_total}[/]")
stats_conn.close()