Files
ietf-draft-analyzer/workspace/packages/act/bench/bench_act.py
Christian Nennemann 3a139dfc7e feat: ACT/ECT strategy, package restructure, draft -01/-02 prep
Strategic work for IETF submission of draft-nennemann-act-01 and
draft-nennemann-wimse-ect-02:

Package restructure:
- move ACT and ECT refimpls to workspace/packages/{act,ect}/
- ietf-act and ietf-ect distribution names (sibling packages)
- cross-spec interop test plan (INTEROP-TEST-PLAN.md)

ACT draft -01 revisions:
- rename 'par' claim to 'pred' (align with ECT)
- rename 'Agent Compact Token' to 'Agent Context Token' (semantic
  alignment with ECT family)
- add Applicability section (MCP, OpenAI, LangGraph, A2A, CrewAI)
- add DAG vs Linear Delegation Chains section (differentiator vs
  txn-tokens-for-agents actchain, Agentic JWT, AIP/IBCTs)
- add Related Work: AIP, SentinelAgent, Agentic JWT, txn-tokens-for-agents,
  HDP, SCITT-AI-agent-execution
- pin SCITT arch to -22, note AUTH48 status

Outreach drafts:
- Emirdag liaison email (SCITT-AI coordination)
- OAuth ML response on txn-tokens-for-agents-06

Strategy document:
- STRATEGY.md with phased action plan, risk register, timeline

Submodule:
- update workspace/drafts/ietf-wimse-ect pointer to -02 commit
2026-04-12 07:33:08 +02:00

175 lines
5.1 KiB
Python

"""ACT performance benchmarks.
Measures Phase 1 creation time (construct + sign + encode) against
the 500µs target from the specification.
"""
import time
import uuid
import statistics
from act import (
ACTMandate,
ACTRecord,
Capability,
TaskClaim,
encode_jws,
decode_jws,
generate_ed25519_keypair,
generate_p256_keypair,
sign,
verify,
transition_to_record,
)
def bench_phase1_ed25519(n: int = 10000) -> None:
    """Benchmark Phase 1 creation (construct + sign + encode) with Ed25519.

    Args:
        n: Number of timed iterations.

    Prints mean, median, and p99 latency in microseconds, plus a PASS/FAIL
    verdict against the 500 µs specification target (judged on the mean).
    """
    # Public key is not needed for the signing path being benchmarked.
    priv, _ = generate_ed25519_keypair()
    # Warmup: amortize first-call costs (lazy imports, key schedule, caches)
    # so they do not skew the timed loop.
    for _ in range(100):
        m = ACTMandate(
            alg="EdDSA", kid="k", iss="a", sub="b", aud="b",
            iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
            task=TaskClaim(purpose="t"), cap=[Capability(action="x.y")],
        )
        sig = sign(priv, m.signing_input())
        encode_jws(m, sig)
    times = []
    for _ in range(n):
        start = time.perf_counter()
        # Timed region covers the full Phase 1 path: construct + sign + encode.
        m = ACTMandate(
            alg="EdDSA", kid="k", iss="a", sub="b", aud="b",
            iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
            task=TaskClaim(purpose="benchmark"),
            cap=[Capability(action="read.data")],
        )
        sig = sign(priv, m.signing_input())
        encode_jws(m, sig)
        elapsed = time.perf_counter() - start
        times.append(elapsed * 1_000_000)  # µs
    mean = statistics.mean(times)
    median = statistics.median(times)
    # Clamp the index so very small n can never index past the end.
    p99 = sorted(times)[min(int(n * 0.99), n - 1)]
    print(f"Phase 1 Ed25519 (n={n}):")
    print(f" Mean: {mean:.1f} µs")
    print(f" Median: {median:.1f} µs")
    print(f" P99: {p99:.1f} µs")
    print(f" Target: <= 500 µs {'PASS' if mean <= 500 else 'FAIL'}")
    print()
def bench_phase1_p256(n: int = 5000) -> None:
    """Benchmark Phase 1 creation (construct + sign + encode) with P-256.

    Args:
        n: Number of timed iterations.

    Prints mean, median, and p99 latency in microseconds, plus a PASS/FAIL
    verdict against the 500 µs specification target (judged on the mean),
    mirroring the Ed25519 benchmark's output format.
    """
    # Public key is not needed for the signing path being benchmarked.
    priv, _ = generate_p256_keypair()
    # Warmup: amortize first-call costs before the timed loop.
    for _ in range(50):
        m = ACTMandate(
            alg="ES256", kid="k", iss="a", sub="b", aud="b",
            iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
            task=TaskClaim(purpose="t"), cap=[Capability(action="x.y")],
        )
        sig = sign(priv, m.signing_input())
        encode_jws(m, sig)
    times = []
    for _ in range(n):
        start = time.perf_counter()
        # Timed region covers the full Phase 1 path: construct + sign + encode.
        m = ACTMandate(
            alg="ES256", kid="k", iss="a", sub="b", aud="b",
            iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
            task=TaskClaim(purpose="benchmark"),
            cap=[Capability(action="read.data")],
        )
        sig = sign(priv, m.signing_input())
        encode_jws(m, sig)
        elapsed = time.perf_counter() - start
        times.append(elapsed * 1_000_000)  # µs
    mean = statistics.mean(times)
    median = statistics.median(times)
    # Clamp the index so very small n can never index past the end.
    p99 = sorted(times)[min(int(n * 0.99), n - 1)]
    print(f"Phase 1 ES256 (n={n}):")
    print(f" Mean: {mean:.1f} µs")
    print(f" Median: {median:.1f} µs")
    print(f" P99: {p99:.1f} µs")
    # Consistency fix: the Ed25519 benchmark reports the spec target verdict;
    # the module docstring states the 500 µs target applies to Phase 1 creation
    # generally, so report it here too.
    print(f" Target: <= 500 µs {'PASS' if mean <= 500 else 'FAIL'}")
    print()
def bench_phase2_transition(n: int = 5000) -> None:
    """Benchmark the Phase 1 -> Phase 2 transition (mandate -> record).

    Args:
        n: Number of timed iterations.

    Prints mean and median latency in microseconds.
    """
    issuer_key, _ = generate_ed25519_keypair()
    worker_key, _ = generate_ed25519_keypair()
    mandate = ACTMandate(
        alg="EdDSA", kid="k", iss="a", sub="b", aud="b",
        iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
        task=TaskClaim(purpose="t"), cap=[Capability(action="x.y")],
    )
    # Warmup pass so first-call overhead stays out of the measurements.
    for _ in range(50):
        transition_to_record(
            mandate, sub_kid="sk", sub_private_key=worker_key,
            exec_act="x.y", pred=[], status="completed",
        )
    samples = []
    for _ in range(n):
        t0 = time.perf_counter()
        transition_to_record(
            mandate, sub_kid="sk", sub_private_key=worker_key,
            exec_act="x.y", pred=[], status="completed",
        )
        samples.append((time.perf_counter() - t0) * 1_000_000)  # µs
    print(f"Phase 2 Transition (n={n}):")
    print(f" Mean: {statistics.mean(samples):.1f} µs")
    print(f" Median: {statistics.median(samples):.1f} µs")
    print()
def bench_verify(n: int = 5000) -> None:
    """Benchmark JWS decode + signature verification of a fixed token.

    Args:
        n: Number of timed iterations.

    Prints mean and median latency in microseconds.
    """
    signer_key, verifier_key = generate_ed25519_keypair()
    mandate = ACTMandate(
        alg="EdDSA", kid="k", iss="a", sub="b", aud="b",
        iat=1772064000, exp=1772064900, jti=str(uuid.uuid4()),
        task=TaskClaim(purpose="t"), cap=[Capability(action="x.y")],
    )
    # Build one compact JWS up front; every iteration re-decodes and
    # re-verifies this same token.
    compact = encode_jws(mandate, sign(signer_key, mandate.signing_input()))
    # Warmup pass so first-call overhead stays out of the measurements.
    for _ in range(50):
        _, _, sig, signing_input = decode_jws(compact)
        verify(verifier_key, sig, signing_input)
    samples = []
    for _ in range(n):
        t0 = time.perf_counter()
        _, _, sig, signing_input = decode_jws(compact)
        verify(verifier_key, sig, signing_input)
        samples.append((time.perf_counter() - t0) * 1_000_000)  # µs
    print(f"Decode + Verify (n={n}):")
    print(f" Mean: {statistics.mean(samples):.1f} µs")
    print(f" Median: {statistics.median(samples):.1f} µs")
    print()
if __name__ == "__main__":
    # Run the full benchmark suite with default iteration counts.
    for bench in (
        bench_phase1_ed25519,
        bench_phase1_p256,
        bench_phase2_transition,
        bench_verify,
    ):
        bench()