feat: add memory, convergence, colette bridge, templates, progress, effectiveness, git integration

- skills/memory: cross-run learning from recurring findings + lib/archeflow-memory.sh
- skills/convergence: oscillation detection + early termination in multi-cycle runs
- skills/colette-bridge: auto-inject voice profiles, personas, characters from colette.yaml
- skills/templates: workflow/team/archetype gallery with init/save/share
- skills/progress: live .archeflow/progress.md during runs
- skills/effectiveness: per-archetype signal-to-noise + cost efficiency scoring
- skills/git-integration: auto-branch per run, commit per phase, rollback support
This commit is contained in:
2026-04-03 11:40:04 +02:00
parent b6df3d19fd
commit 19f8f76232
8 changed files with 2243 additions and 0 deletions

423
lib/archeflow-memory.sh Executable file
View File

@@ -0,0 +1,423 @@
#!/usr/bin/env bash
# archeflow-memory.sh — Cross-run memory for ArcheFlow orchestrations.
#
# Extracts lessons from completed runs, injects known issues into agent prompts,
# and manages lesson lifecycle (add, list, decay, forget).
#
# Usage:
# ./lib/archeflow-memory.sh extract <events.jsonl> # Extract lessons from a completed run
# ./lib/archeflow-memory.sh inject <domain> <archetype> # Output relevant lessons for injection
# ./lib/archeflow-memory.sh add <type> <description> # Manually add a lesson
# ./lib/archeflow-memory.sh list # List all active lessons
# ./lib/archeflow-memory.sh decay # Apply decay to all lessons
# ./lib/archeflow-memory.sh forget <id> # Archive a lesson by ID
#
# Dependencies: jq, bash 4+
set -euo pipefail
# Storage layout: lessons are stored as JSONL (one JSON object per line).
MEMORY_DIR=".archeflow/memory"
LESSONS_FILE="${MEMORY_DIR}/lessons.jsonl"  # active lessons
ARCHIVE_FILE="${MEMORY_DIR}/archive.jsonl"  # retired lessons (decayed to 0 or forgotten)
# --- Helpers ---
ensure_dir() {
  # Make sure the memory directory exists before any read/write.
  mkdir -p -- "$MEMORY_DIR"
}
next_id() {
  # Emit the next sequential lesson ID ("m-001", "m-002", ...), based on
  # the highest numeric suffix currently present in the lessons file.
  local highest=""
  if [[ -f "$LESSONS_FILE" ]]; then
    highest=$(jq -r '.id // ""' "$LESSONS_FILE" 2>/dev/null \
      | sed 's/^m-//' \
      | sort -n \
      | tail -1)
  fi
  case "$highest" in
    ""|null)
      echo "m-001"
      ;;
    *)
      # 10# forces base-10 so zero-padded suffixes like "009" are not octal.
      printf "m-%03d" $(( 10#$highest + 1 ))
      ;;
  esac
}
now_ts() {
  # Current time as an ISO-8601 UTC timestamp, e.g. 2024-01-01T00:00:00Z.
  date -u '+%Y-%m-%dT%H:%M:%SZ'
}
# Tokenize a description into sorted unique lowercase keywords (min 3 chars)
tokenize() {
  # Lowercase $1, split on non-alphanumeric characters, keep words of 3+
  # chars, and output them sorted and de-duplicated, one per line.
  #
  # printf (not echo): a description that happens to be a valid echo option
  # combination (e.g. "-neee") would be swallowed by the echo builtin and
  # silently produce no tokens.
  printf '%s\n' "$1" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '\n' | awk 'length >= 3' | sort -u
}
# Calculate keyword overlap ratio between two descriptions
# Returns a value 0-100 (percentage)
keyword_overlap() {
  # Percentage (0-100) of the first description's keywords that also
  # appear in the second one. Tokens come from tokenize(), which already
  # sorts them — a requirement for comm below.
  local a_tokens b_tokens
  a_tokens=$(tokenize "$1")
  b_tokens=$(tokenize "$2")
  # Either side empty means no meaningful overlap can be computed.
  if [[ -z "$a_tokens" || -z "$b_tokens" ]]; then
    echo "0"
    return
  fi
  local total shared
  total=$(printf '%s\n' "$a_tokens" | wc -l)
  shared=$(comm -12 <(printf '%s\n' "$a_tokens") <(printf '%s\n' "$b_tokens") | wc -l)
  if (( total == 0 )); then
    echo "0"
  else
    echo $(( shared * 100 / total ))
  fi
}
# --- Commands ---
cmd_extract() {
  # Harvest lessons from a completed run's events file (JSONL).
  # Each warning+ finding from review.verdict events either bumps the
  # frequency of a sufficiently similar existing lesson (>= 50% keyword
  # overlap) or is appended as a new candidate lesson with frequency 1.
  #
  # $1 - path to the run's events.jsonl
  local events_file="$1"
  if [[ ! -f "$events_file" ]]; then
    echo "Error: events file not found: $events_file" >&2
    exit 1
  fi
  ensure_dir
  # Run ID comes from the first event. Only the first line is fed to jq
  # (instead of piping the whole file through jq into head) so a SIGPIPE
  # from head cannot fail the pipeline under `set -o pipefail`.
  local run_id
  run_id=$(head -n 1 "$events_file" | jq -r '.run_id // "unknown"')
  # Collect one flat JSON object per finding from all review.verdict events.
  local findings
  findings=$(jq -c '
    select(.type == "review.verdict") |
    .data as $d |
    ($d.findings // [])[] |
    {
      source: ($d.archetype // "unknown"),
      severity: .severity,
      description: .description,
      category: (.category // "general")
    }
  ' "$events_file" 2>/dev/null || true)
  if [[ -z "$findings" ]]; then
    echo "[archeflow-memory] No findings to extract from $events_file" >&2
    return 0
  fi
  local updated=0
  local added=0
  while IFS= read -r finding; do
    local desc source severity category
    desc=$(jq -r '.description' <<<"$finding")
    source=$(jq -r '.source' <<<"$finding")
    severity=$(jq -r '.severity' <<<"$finding")
    category=$(jq -r '.category' <<<"$finding")
    # Low-severity findings are not worth remembering automatically.
    if [[ "$severity" == "info" || "$severity" == "recommendation" ]]; then
      continue
    fi
    # Look for an existing lesson that describes the same problem.
    local matched=false
    if [[ -f "$LESSONS_FILE" ]]; then
      while IFS= read -r lesson; do
        local lesson_desc lesson_id overlap
        lesson_desc=$(jq -r '.description' <<<"$lesson")
        lesson_id=$(jq -r '.id' <<<"$lesson")
        overlap=$(keyword_overlap "$desc" "$lesson_desc")
        if [[ "$overlap" -ge 50 ]]; then
          # Match: bump frequency and refresh recency fields. Dynamic
          # values are passed via --arg so quotes or backslashes in a
          # run ID cannot corrupt the jq program.
          local tmp_file="${LESSONS_FILE}.tmp"
          jq -c \
            --arg id "$lesson_id" \
            --arg ts "$(now_ts)" \
            --arg run "$run_id" '
            if .id == $id then
              .frequency += 1 |
              .ts = $ts |
              .last_seen_run = $run |
              .runs_since_last_seen = 0
            else . end
          ' "$LESSONS_FILE" > "$tmp_file"
          mv "$tmp_file" "$LESSONS_FILE"
          matched=true
          updated=$((updated + 1))
          echo "[archeflow-memory] Updated lesson $lesson_id (freq +1): $lesson_desc" >&2
          break
        fi
      done < "$LESSONS_FILE"
    fi
    if [[ "$matched" == "false" ]]; then
      # No match: record a new candidate lesson (frequency 1).
      local new_id
      new_id=$(next_id)
      # Tags: up to five lowercase words of 4+ chars from the description.
      local tags
      tags=$(printf '%s\n' "$desc" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '\n' | awk 'length >= 4' | head -5 | jq -R . | jq -sc .)
      jq -cn \
        --arg id "$new_id" \
        --arg ts "$(now_ts)" \
        --arg run_id "$run_id" \
        --arg source "$source" \
        --arg desc "$desc" \
        --arg severity "$severity" \
        --arg category "$category" \
        --argjson tags "$tags" \
        '{
          id: $id,
          ts: $ts,
          run_id: $run_id,
          type: "pattern",
          source: $source,
          description: $desc,
          frequency: 1,
          severity: $severity,
          domain: $category,
          tags: $tags,
          archetype: null,
          last_seen_run: $run_id,
          runs_since_last_seen: 0
        }' >> "$LESSONS_FILE"
      added=$((added + 1))
      echo "[archeflow-memory] Added candidate lesson $new_id: $desc" >&2
    fi
  done <<< "$findings"
  echo "[archeflow-memory] Extract complete: $updated updated, $added new candidates" >&2
}
cmd_inject() {
  # Print a "## Known Issues" section with up to 10 relevant lessons for
  # prompt injection. Silent no-op when no lessons file or no matches.
  #
  # $1 - domain filter (optional), $2 - archetype filter (optional)
  local domain="${1:-}"
  local archetype="${2:-}"
  if [[ ! -f "$LESSONS_FILE" ]]; then
    return 0
  fi
  # Selection rules:
  #   - preferences are always injected
  #   - frequency >= 5 is always injected (universal)
  #   - frequency >= 2 is injected when domain/archetype match (or are unset;
  #     "general" domain and null archetype match anything)
  # Results are sorted by frequency desc, capped at 10.
  #
  # domain/archetype are user-controlled, so they are passed via --arg
  # instead of being interpolated into the jq program — quotes or
  # backslashes in an argument would otherwise break (or inject into)
  # the filter.
  local lessons
  lessons=$(jq -c --arg domain "$domain" --arg archetype "$archetype" '
    select(
      (.type == "preference") or
      (.frequency >= 5) or
      (
        (.frequency >= 2) and
        (
          ($domain == "") or
          (.domain == $domain) or
          (.domain == "general")
        ) and
        (
          ($archetype == "") or
          (.archetype == null) or
          (.archetype == $archetype)
        )
      )
    )
  ' "$LESSONS_FILE" 2>/dev/null | jq -sc 'sort_by(-.frequency) | .[:10][]' 2>/dev/null || true)
  if [[ -z "$lessons" ]]; then
    return 0
  fi
  echo "## Known Issues (from past runs)"
  while IFS= read -r lesson; do
    local desc freq src
    desc=$(jq -r '.description' <<<"$lesson")
    freq=$(jq -r '.frequency' <<<"$lesson")
    src=$(jq -r '.source' <<<"$lesson")
    echo "- ${desc} [seen ${freq}x, ${src}]"
  done <<< "$lessons"
}
cmd_add() {
  # Manually record a lesson. Manual lessons carry run_id "manual",
  # source "user_feedback", severity "info" and domain "general".
  #
  # $1 - lesson type (pattern | preference | archetype_hint | anti_pattern);
  #      defaults to "preference"
  # $2 - free-text description (required)
  local type="${1:-preference}"
  local desc="${2:-}"
  if [[ -z "$desc" ]]; then
    echo "Usage: $0 add <type> <description>" >&2
    echo "Types: pattern, preference, archetype_hint, anti_pattern" >&2
    exit 1
  fi
  ensure_dir
  local new_id
  new_id=$(next_id)
  # Tags: up to five lowercase words of 4+ chars from the description.
  # printf (not echo): a description that looks like an echo option
  # combination (e.g. "-neee") would otherwise be swallowed by echo.
  local tags
  tags=$(printf '%s\n' "$desc" | tr '[:upper:]' '[:lower:]' | tr -cs '[:alnum:]' '\n' | awk 'length >= 4' | head -5 | jq -R . | jq -sc .)
  jq -cn \
    --arg id "$new_id" \
    --arg ts "$(now_ts)" \
    --arg type "$type" \
    --arg desc "$desc" \
    --argjson tags "$tags" \
    '{
      id: $id,
      ts: $ts,
      run_id: "manual",
      type: $type,
      source: "user_feedback",
      description: $desc,
      frequency: 1,
      severity: "info",
      domain: "general",
      tags: $tags,
      archetype: null,
      last_seen_run: "",
      runs_since_last_seen: 0
    }' >> "$LESSONS_FILE"
  echo "[archeflow-memory] Added lesson $new_id ($type): $desc" >&2
}
cmd_list() {
  # Print the active lessons as an aligned table:
  # ID, frequency, type, domain, description.
  if [[ ! -f "$LESSONS_FILE" ]]; then
    echo "No lessons stored yet." >&2
    return 0
  fi
  printf "%-8s %-5s %-16s %-8s %s\n" "ID" "Freq" "Type" "Domain" "Description"
  printf "%-8s %-5s %-16s %-8s %s\n" "----" "----" "----" "------" "-----------"
  # Process substitution keeps the loop in the current shell (no pipeline
  # subshell); jq's @tsv gives one tab-separated row per lesson.
  local id freq kind domain desc
  while IFS=$'\t' read -r id freq kind domain desc; do
    printf "%-8s %-5s %-16s %-8s %s\n" "$id" "$freq" "$kind" "$domain" "$desc"
  done < <(jq -r '[.id, (.frequency|tostring), .type, .domain, .description] | @tsv' "$LESSONS_FILE")
}
cmd_decay() {
  # Age every lesson by one run: runs_since_last_seen is incremented; once
  # a lesson goes 10 runs unseen its frequency drops by one and the counter
  # resets. A lesson whose frequency reaches 0 is moved to the archive.
  if [[ ! -f "$LESSONS_FILE" ]]; then
    return 0
  fi
  ensure_dir
  local rewrite="${LESSONS_FILE}.tmp"
  local n_archived=0
  local n_decayed=0
  : > "$rewrite"
  while IFS= read -r entry; do
    local unseen freq lesson_id
    unseen=$(jq -r '.runs_since_last_seen' <<<"$entry")
    freq=$(jq -r '.frequency' <<<"$entry")
    lesson_id=$(jq -r '.id' <<<"$entry")
    unseen=$((unseen + 1))
    if (( unseen >= 10 )); then
      freq=$((freq - 1))
      unseen=0
      n_decayed=$((n_decayed + 1))
      if (( freq <= 0 )); then
        # Frequency hit zero: retire the lesson into the archive.
        jq -c --arg ts "$(now_ts)" '.frequency = 0 | .ts = $ts' <<<"$entry" >> "$ARCHIVE_FILE"
        n_archived=$((n_archived + 1))
        echo "[archeflow-memory] Archived lesson $lesson_id (frequency reached 0)" >&2
        continue
      fi
    fi
    jq -c \
      --argjson freq "$freq" \
      --argjson runs_since "$unseen" \
      '.frequency = $freq | .runs_since_last_seen = $runs_since' <<<"$entry" >> "$rewrite"
  done < "$LESSONS_FILE"
  mv "$rewrite" "$LESSONS_FILE"
  echo "[archeflow-memory] Decay complete: $n_decayed decayed, $n_archived archived" >&2
}
cmd_forget() {
local target_id="$1"
if [[ ! -f "$LESSONS_FILE" ]]; then
echo "No lessons file found." >&2
exit 1
fi
ensure_dir
# Check if the lesson exists
if ! jq -e "select(.id == \"$target_id\")" "$LESSONS_FILE" > /dev/null 2>&1; then
echo "Error: lesson $target_id not found." >&2
exit 1
fi
# Archive the lesson
jq -c "select(.id == \"$target_id\")" "$LESSONS_FILE" >> "$ARCHIVE_FILE"
# Remove from lessons
local tmp_file="${LESSONS_FILE}.tmp"
jq -c "select(.id != \"$target_id\")" "$LESSONS_FILE" > "$tmp_file"
mv "$tmp_file" "$LESSONS_FILE"
echo "[archeflow-memory] Forgot lesson $target_id (moved to archive)" >&2
}
# --- Main ---
# Require at least a subcommand; otherwise print usage and exit non-zero.
if (( $# < 1 )); then
  {
    echo "Usage: $0 <command> [args...]"
    echo ""
    echo "Commands:"
    echo " extract <events.jsonl> Extract lessons from a completed run"
    echo " inject <domain> <archetype> Output relevant lessons for injection"
    echo " add <type> <description> Manually add a lesson"
    echo " list List all active lessons"
    echo " decay Apply decay to all lessons"
    echo " forget <id> Archive a lesson by ID"
  } >&2
  exit 1
fi
COMMAND="$1"
shift
# Dispatch to the matching cmd_* function, validating per-command arity.
case "$COMMAND" in
  extract)
    (( $# >= 1 )) || { echo "Usage: $0 extract <events.jsonl>" >&2; exit 1; }
    cmd_extract "$1"
    ;;
  inject)
    cmd_inject "${1:-}" "${2:-}"
    ;;
  add)
    (( $# >= 2 )) || { echo "Usage: $0 add <type> <description>" >&2; exit 1; }
    cmd_add "$1" "$2"
    ;;
  list)
    cmd_list
    ;;
  decay)
    cmd_decay
    ;;
  forget)
    (( $# >= 1 )) || { echo "Usage: $0 forget <id>" >&2; exit 1; }
    cmd_forget "$1"
    ;;
  *)
    echo "Unknown command: $COMMAND" >&2
    exit 1
    ;;
esac

View File

@@ -0,0 +1,392 @@
---
name: colette-bridge
description: |
Bridges ArcheFlow with the Colette writing platform. Auto-detects colette.yaml in the project
root, resolves voice profiles, personas, and character sheets, then builds a summarized context
bundle that gets injected into every agent prompt via artifact routing. Eliminates manual
copy-pasting of writing context into agent prompts.
<example>Automatically loaded when colette.yaml is detected at run.start</example>
<example>User: "archeflow:run" in a project with colette.yaml</example>
---
# Colette Bridge — Writing Context Auto-Loader
When ArcheFlow detects `colette.yaml` in the project root, this skill automatically loads voice profiles, personas, character sheets, and project rules into a context bundle that every agent receives (filtered by archetype role).
## Prerequisites
- `archeflow:domains` — Colette Bridge sets domain to `writing` automatically
- `archeflow:artifact-routing` — bundle is injected via the artifact routing system
- `archeflow:run` — bridge hooks into run initialization
## Trigger
At `run.start`, after domain detection but before the Plan phase:
1. Check if `colette.yaml` exists in the project root
2. If found, activate Colette Bridge
3. If not found, skip silently (no error, no warning)
When the bridge activates, it emits a decision event:
```bash
./lib/archeflow-event.sh "$RUN_ID" decision init "" \
'{"what":"colette_bridge","chosen":"activated","signal":"colette.yaml found","files_resolved":<count>}'
```
---
## File Resolution
Colette projects reference files by ID (e.g., `vp-giesing-gschichten-v1`) but the actual YAML files may live in different locations. The bridge resolves files using this search order:
### Search Priority (highest first)
| Priority | Location | Example |
|----------|----------|---------|
| 1 | Explicit path in `colette.yaml` | `voice.profile: ../writing.colette/profiles/custom.yaml` |
| 2 | Project root subdirectories | `./profiles/vp-giesing-gschichten-v1.yaml` |
| 3 | Parent directory + `writing.colette/` | `../writing.colette/profiles/vp-giesing-gschichten-v1.yaml` |
### What Gets Resolved
| Source | colette.yaml field | Search paths |
|--------|-------------------|-------------|
| Voice profile | `voice.profile` | `profiles/<id>.yaml`, `../writing.colette/profiles/<id>.yaml` |
| Persona | `writing.persona` or inferred from profile | `personas/<id>.yaml`, `../writing.colette/personas/<id>.yaml` |
| Characters | Auto-discovered | `characters/*.yaml` |
| Series config | `series` section (if present) | `colette.yaml` itself, `../writing.colette/series/<name>.yaml` |
| Project rules | Always | `CLAUDE.md` in project root |
### Resolution Procedure
```
for each reference in colette.yaml:
1. If the field contains a path (has / or .yaml) → use as-is, verify exists
2. If the field contains an ID (e.g., "vp-giesing-gschichten-v1"):
a. Check ./profiles/<id>.yaml (or ./personas/<id>.yaml)
b. Check ../writing.colette/profiles/<id>.yaml (or ../writing.colette/personas/<id>.yaml)
c. If not found → warn in event log, skip this file
3. For characters/ → glob characters/*.yaml in project root
4. For CLAUDE.md → check project root
```
If a referenced file cannot be found at any location, emit a warning event but do not abort:
```bash
./lib/archeflow-event.sh "$RUN_ID" decision init "" \
'{"what":"colette_bridge_warning","chosen":"skip","file":"vp-giesing-gschichten-v1","reason":"not found in any search path"}'
```
---
## Context Bundle
The bridge generates `.archeflow/context/colette-bundle.md` — a summarized, token-efficient Markdown file that agents receive as part of their prompt context.
### Bundle Structure
```markdown
# Writing Context (auto-loaded from Colette)
## Voice Profile: <id>
**Tone:** <tone_summary from meta>
**Perspective:** <perspektive>
**Density:** <dichte>
**Attitude:** <haltung>
**Sharpness:** <schaerfe>
**Humor:** <humor>
**Tempo:** <tempo>
**Reader relationship:** <leser_beziehung>
### Forbidden
- <each item from verboten>
### Allowed
- <each item from erlaubt>
### Style models
- <each item from vorbilder, name only + one-word tag>
## Persona: <id>
**Name:** <name>
**Bio:** <bio, max 2 sentences>
**Genres:** <genres, comma-separated>
### Rules
- <each item from rules>
## Characters
### <name> (<role>)
- **Age:** <age>
- **Key traits:** <first 3 personality items>
- **Speech:** <speech_pattern, first sentence only>
- **Relationships:** <key relationships, one line each>
[Repeated for each character in characters/*.yaml]
## Series Context
[Only if series config found in colette.yaml]
- **Shared concepts:** <list>
- **Glossary:** <key terms>
- **Forbidden cross-story:** <items>
## Project Rules (from CLAUDE.md)
[Key writing rules extracted from CLAUDE.md, summarized as bullet points]
- <rule 1>
- <rule 2>
- ...
```
### Summarization Rules
The bundle is **summarized**, not a raw YAML dump. This reduces token cost:
- Voice profile dimensions: key name + value (no YAML formatting, no `dimensionen:` wrapper)
- Verboten/erlaubt: bullet list, strip explanation after the dash if over 15 words
- Characters: name, role, age, top 3 traits, first sentence of speech pattern, relationships
- Persona bio: max 2 sentences
- CLAUDE.md: extract only rules/style sections, skip meta/git/cost config
- Target: bundle should be under 1500 tokens for a typical project
---
## Caching
The bundle is regenerated only when source files have changed. Cache validation uses file modification times.
### Cache Check Procedure
```
bundle_path = .archeflow/context/colette-bundle.md
if bundle_path does not exist → generate
if bundle_path exists:
bundle_mtime = mtime of bundle_path
for each resolved source file:
if source_mtime > bundle_mtime → regenerate, break
if no source file is newer → use cached bundle
```
When the cache is valid, emit:
```bash
./lib/archeflow-event.sh "$RUN_ID" decision init "" \
'{"what":"colette_bundle_cache","chosen":"reuse","reason":"all sources older than bundle"}'
```
When regenerating:
```bash
./lib/archeflow-event.sh "$RUN_ID" decision init "" \
'{"what":"colette_bundle_cache","chosen":"regenerate","reason":"<file> modified since last bundle"}'
```
---
## Per-Agent Attention Filters
Not every agent needs the full bundle. The bridge defines attention filters that control which sections each archetype receives. This extends the base attention filters from `archeflow:attention-filters`.
| Archetype | Bundle sections injected | Rationale |
|-----------|------------------------|-----------|
| **Explorer** | Full bundle | Needs all context for research — setting, characters, voice, rules |
| **Creator** | Voice dimensions + persona rules + characters | Designs outline — needs to know who speaks how, who exists, what's allowed |
| **Maker** | Full bundle | Writes prose — needs voice for style, characters for dialogue, rules for guardrails |
| **Guardian** | Characters + series shared_concepts | Checks consistency — needs character facts and cross-story constraints |
| **Sage** | Voice profile (full, including verboten/erlaubt) + persona rules | Checks voice drift — needs the complete voice spec and persona constraints |
| **Trickster** | Characters + series glossary | Tests continuity — needs character facts and terminology for contradiction checks |
### Filter Implementation
When injecting the bundle into an agent prompt, extract only the relevant sections:
```
# For Guardian:
Extract: "## Characters" section (all characters)
Extract: "## Series Context" section (if present)
Skip: everything else
# For Sage:
Extract: "## Voice Profile" section (full, with forbidden/allowed)
Extract: "## Persona" section (rules subsection)
Skip: characters, series, project rules
# For Explorer and Maker:
Inject: full bundle as-is
```
The filtering happens at prompt assembly time, not at bundle generation time. One bundle, multiple filtered views.
### Custom Archetypes
Custom archetypes (e.g., `story-explorer`, `story-sage`) inherit the filter of their closest base archetype:
| Custom archetype | Inherits filter from | Override |
|-----------------|---------------------|----------|
| `story-explorer` | Explorer | Full bundle |
| `story-sage` | Sage | Full voice profile + persona rules |
| `story-guardian` | Guardian | Characters + series |
If a custom archetype needs a different filter, define it in the archetype's markdown frontmatter:
```yaml
---
name: story-sage
colette_filter: [voice_profile, persona, characters]
---
```
The `colette_filter` field accepts section keys: `voice_profile`, `persona`, `characters`, `series`, `project_rules`, `full`.
---
## Integration with Run Skill
The Colette Bridge hooks into `archeflow:run` initialization. The sequence is:
```
run.start
├── Domain detection (from archeflow:domains)
│ └── colette.yaml found → domain = writing
├── Colette Bridge activation
│ ├── Resolve files (voice profile, persona, characters, CLAUDE.md)
│ ├── Check bundle cache
│ ├── Generate/refresh bundle → .archeflow/context/colette-bundle.md
│ └── Register bundle path in artifact routing
└── Continue to Plan phase
```
### Artifact Routing Registration
The bundle path is registered so that every phase's context injection includes the (filtered) bundle:
```
artifact_routing.register_context(
path = ".archeflow/context/colette-bundle.md",
inject_at = "all_phases",
filter_by = "archetype" # Apply per-agent attention filters
)
```
In practice, this means the run skill prepends the filtered bundle content to each agent's prompt, after the standard task description but before phase-specific artifacts.
### Prompt Injection Order
```
1. Archetype definition (from SKILL.md or custom archetype .md)
2. Domain-specific review focus (from archeflow:domains)
3. Colette bundle (filtered for this archetype)
4. Task description
5. Phase-specific artifacts (Explorer output, Creator proposal, etc.)
6. Cycle feedback (if cycle 2+)
```
---
## Example: Giesing Gschichten
Given this `colette.yaml`:
```yaml
project:
name: "Giesing Gschichten"
author: "C. Nennemann"
language: de
type: fiction
voice:
profile: vp-giesing-gschichten-v1
writing:
target_words: 6000
style: "Ich-Erzaehler, lakonisch, Eberhofer-meets-Grossstadt"
```
The bridge:
1. Reads `voice.profile: vp-giesing-gschichten-v1`
2. Searches for `./profiles/vp-giesing-gschichten-v1.yaml` — not found
3. Searches for `../writing.colette/profiles/vp-giesing-gschichten-v1.yaml` — found
4. Infers persona from voice profile ID pattern or searches `personas/` — finds `giesinger.yaml` at `../writing.colette/personas/giesinger.yaml`
5. Globs `characters/*.yaml` — finds `alex.yaml` (and others if present)
6. Reads `CLAUDE.md` for writing rules
7. Generates bundle:
```markdown
# Writing Context (auto-loaded from Colette)
## Voice Profile: vp-giesing-gschichten-v1
**Tone:** Lakonisch, warmherzig-genervt, trockener Humor
**Perspective:** Ich-Erzaehler (Alex), nah dran, subjektiv
**Density:** Alltagsdetails die Atmosphaere schaffen
**Attitude:** Lakonisch, leicht genervt, aber mit Herz
**Sharpness:** Beobachtungsscharf, sprachlich reduziert
**Humor:** Trocken, Understatement, absurde Situationen
**Tempo:** Gemaechlich mit Spannungsspitzen, Slow Burn
**Reader relationship:** Kumpel am Stammtisch
### Forbidden
- Hochdeutsch-Sterilitaet
- Krimi-Klischees (CSI, Profiler, Tatort)
- Lederhosen-Kitsch und Oktoberfest-Folklore
- Dialekt-Overkill
- Moralisieren oder Erklaeren
- Kuenstliche Spannungsaufbauten
- Adverb-Orgien und Adjektiv-Ketten
- Infodumps
### Allowed
- Bairische Einsprengsel in Hochdeutsch-Prosa
- Essen und Trinken als Leitmotiv
- Kiffer-Humor und Slow-Motion-Beobachtungen
- Gentrification-Satire
- Echte Giesinger Orte und Strassen
- Skurrile Nachbarn
- Kriminalplot aus dem Alltag
- Kurze, lakonische Dialoge
### Style models
- Rita Falk (Erzaehlton), Wolf Haas (lakonisch), Helmut Dietl (Muenchner Milieu), Friedrich Ani (duester), Bukowski (Anti-Held)
## Persona: giesinger
**Name:** Der Giesinger
**Bio:** Erzaehlt Geschichten aus Muenchen-Giesing. Eberhofer meets Grossstadt.
**Genres:** Krimi, Kurzgeschichte, Milieustudie
### Rules
- Ich-Erzaehler, immer — Alex erzaehlt
- Hauptsaechlich Hochdeutsch mit bairischen Einsprengsel
- Jede Geschichte hat einen Kriminalplot
- Essen/Trinken in jeder Geschichte
- Echte Giesinger Orte und Strassen
- Humor durch Understatement
- Alex ist kein Ermittler
- Figuren reden wie echte Menschen
## Characters
### Alex (protagonist)
- **Age:** Mitte 30
- **Key traits:** Lakonisch, funktionaler Kiffer, unmotiviert aber nicht dumm
- **Speech:** Kurze Saetze, Hochdeutsch mit bairischen Einsprengsel.
- **Relationships:** Mo — Nachbar, Kumpel und Unruhestifter
## Project Rules (from CLAUDE.md)
- Jede Geschichte beginnt mit einer Alltagsszene
- Kriminalplot ergibt sich organisch aus dem Alltag
- Essen/Trinken in jeder Geschichte
- Echte Giesinger Orte verwenden
- Kein Moralisieren, kein Erklaerbaer
- Ende muss nicht alles aufloesen
```
---
## Design Principles
1. **Summarize, don't dump.** Raw YAML wastes tokens and confuses agents. The bundle is a curated briefing.
2. **Cache aggressively.** Voice profiles and characters rarely change mid-run. Only regenerate when mtimes change.
3. **Filter per agent.** A Guardian checking plot consistency does not need the full voice profile. A Sage checking voice drift does not need character sheets.
4. **Graceful degradation.** Missing files are warned about, not fatal. A project with `colette.yaml` but no characters/ still works — the Characters section is simply empty.
5. **One bundle, filtered views.** Generate the full bundle once. Filter at injection time per archetype. This keeps caching simple.
6. **Additive to existing skills.** The bridge does not replace domain detection or artifact routing — it hooks into them. Remove the bridge, everything still works (just without auto-loaded writing context).

249
skills/convergence/SKILL.md Normal file
View File

@@ -0,0 +1,249 @@
---
name: convergence
description: |
Detects convergence, stalling, and oscillation in multi-cycle PDCA runs. Prevents wasted cycles
by stopping early when findings are not being resolved or are bouncing between cycles.
<example>Automatically loaded during Act phase before exit decision</example>
<example>User: "Is the run converging?"</example>
---
# Convergence Detection
In multi-cycle PDCA runs, the Act phase must decide whether another cycle will help or just waste tokens. This skill provides the analysis: are findings being resolved (converging), staying the same (stalling), or bouncing back (oscillating)?
## When It Runs
Convergence analysis runs **after the Check phase completes and before the Act phase exit decision**. It requires at least 2 cycles of data — on cycle 1, it is skipped (no comparison baseline).
```
Check phase → Convergence Analysis → Act phase exit decision
```
---
## Step 1: Finding Comparison
Extract findings from the current cycle and compare against the previous cycle.
### Data Sources
- **Current cycle findings:** Parsed from `check-*.md` artifacts in `.archeflow/artifacts/<run_id>/`
- **Previous cycle findings:** Parsed from `check-*.md` artifacts in `.archeflow/artifacts/<run_id>/cycle-<N-1>/`
Each finding is identified by a composite key: `source + category + file_location + description_keywords`.
### Finding Categories
Every finding from the current cycle is classified into exactly one category:
| Category | Definition |
|----------|------------|
| **NEW** | Finding not present in any previous cycle |
| **RESOLVED** | Was present in the previous cycle, absent in the current cycle |
| **PERSISTENT** | Present in both the current and previous cycle (same key) |
| **REGRESSED** | Was RESOLVED in the previous cycle (was present in N-2, absent in N-1), but returned in the current cycle |
### Matching Algorithm
Two findings match if:
1. Same `source` archetype (guardian, sage, etc.)
2. Same `category` (security, reliability, quality, etc.)
3. Same or overlapping file location (same file, line within 10 lines)
4. 50%+ keyword overlap in description (lowercase, strip punctuation)
All four conditions must hold. This prevents false matches across unrelated findings.
---
## Step 2: Convergence Score
Calculate a convergence score from the categorized findings:
```
convergence = resolved_count / (resolved_count + new_count + regressed_count)
```
If the denominator is 0 (no resolved, no new, no regressed — only persistent findings), the score is `0.0`, which the table below classifies as **Stuck**: no movement in either direction.
### Score Interpretation
| Score Range | Status | Meaning |
|-------------|--------|---------|
| > 0.8 | **Converging** | Most issues being resolved, few new ones introduced |
| 0.5 - 0.8 | **Stalling** | Fixing roughly as many as introducing |
| < 0.5 | **Diverging** | Making things worse — more new/regressed than resolved |
| 0.0 (all persistent) | **Stuck** | No progress in either direction |
---
## Step 3: Oscillation Detection
An oscillating finding is one that bounces between resolved and re-introduced across cycles:
1. Finding was present in cycle N-2
2. Finding was absent in cycle N-1 (resolved)
3. Finding is present again in cycle N (regressed)
This indicates the fix in cycle N-1 was undone or invalidated by other changes in cycle N.
### Oscillation Rules
- A single oscillating finding: **flag it** in the convergence report but continue.
- Two or more oscillating findings: **STOP** and escalate to the user.
- Message: `"Findings X and Y are oscillating between cycles. Manual intervention needed — the automated fixes are interfering with each other."`
Oscillation tracking requires 3+ cycles of data. On cycles 1-2, oscillation detection is skipped.
---
## Step 4: Early Termination Rules
The convergence analysis can override the normal Act phase exit decision. If any of these conditions hold, the recommendation is **STOP**:
| Condition | Threshold | Recommendation |
|-----------|-----------|----------------|
| Diverging | Score < 0.5 for 2 consecutive cycles | STOP — changes are making things worse |
| Stalled | 0 findings resolved between cycles | STOP — no progress, further cycles will not help |
| Stuck | All findings are PERSISTENT for 2 consecutive cycles | STOP — automated fixes cannot resolve these |
| Oscillating | 2+ findings oscillating | STOP — fixes are interfering with each other |
When STOP is recommended, the Act phase should:
1. **Not** start another PDCA cycle
2. Report all unresolved findings to the user
3. Present the best implementation so far (on its branch, not merged)
4. Include the convergence report explaining why the run was stopped
### Override Behavior
The convergence STOP recommendation overrides the normal cycle-back logic in the Act phase. Even if `CYCLE < MAX_CYCLES` and there are fixable-looking findings, if convergence says STOP, the run stops.
The user can always override by explicitly requesting another cycle: `"Run one more cycle anyway"`.
---
## Step 5: Integration with Act Phase
### Event Data
Convergence data is included in the `cycle.boundary` event emitted by the Act phase:
```json
{
"type": "cycle.boundary",
"phase": "act",
"data": {
"cycle": 2,
"max_cycles": 3,
"exit_condition": "convergence_stop",
"met": false,
"fixes_applied": 2,
"next_action": "stop",
"convergence": {
"score": 0.35,
"status": "diverging",
"resolved": 1,
"new": 2,
"regressed": 1,
"persistent": 3,
"oscillating": ["Timeline reference mismatch"],
"recommendation": "stop",
"reason": "Diverging for 2 consecutive cycles"
}
}
}
```
### Decision Tree Update
The Act phase decision tree (from `act-phase` skill Step 4) gains a new first branch:
```
┌─ Convergence analysis (cycle 2+)
├─ Convergence says STOP
│ └─ STOP: Report to user with convergence report
├─ Convergence says CONTINUE
│ └─ Fall through to normal exit decision logic
└─ Cycle 1 (no convergence data)
└─ Fall through to normal exit decision logic
```
### Act Feedback Enhancement
When the Act phase builds `act-feedback.md` for the next cycle, it includes the convergence summary at the top:
```markdown
## Convergence Analysis (Cycle 1 → 2)
Score: 0.75 (converging)
Resolved: 3 | New: 1 | Regressed: 0 | Persistent: 2
Recommendation: Continue — trend is positive
### Finding Status
| Finding | Status | Cycles |
|---------|--------|--------|
| SQL injection in user input | RESOLVED | 1 |
| Missing rate limit | RESOLVED | 1 |
| Test names unclear | RESOLVED | 1 |
| Null check missing in parser | PERSISTENT | 2 |
| Error path not tested | PERSISTENT | 2 |
| New: Unused import introduced | NEW | 1 |
```
---
## Convergence Report Format
The full convergence report is generated as part of the orchestration output:
```markdown
## Convergence Analysis (Cycle N-1 → N)
**Score:** 0.75 (converging)
**Resolved:** 3 | **New:** 1 | **Regressed:** 0 | **Persistent:** 2 | **Oscillating:** 0
### Resolved This Cycle
| Source | Category | Description |
|--------|----------|-------------|
| guardian | security | SQL injection in user input handler |
| guardian | reliability | Missing rate limit on auth endpoint |
| sage | quality | Test names don't describe behavior |
### New This Cycle
| Source | Category | Description |
|--------|----------|-------------|
| sage | quality | Unused import introduced by fix |
### Persistent (unresolved across cycles)
| Source | Category | Description | Cycles Open |
|--------|----------|-------------|-------------|
| trickster | reliability | Null check missing in parser | 2 |
| sage | testing | Error path not tested | 2 |
### Oscillating
(none)
**Recommendation:** Continue — trend is positive
```
---
## Integration with Memory Skill
When convergence detects PERSISTENT findings (present for 2+ cycles), these are strong candidates for the `memory` skill's lesson extraction:
- After a run that had persistent findings, `archeflow-memory.sh extract` will pick these up with higher confidence (they have been confirmed across multiple cycles within a single run).
- Persistent findings that also appear in `lessons.jsonl` from prior runs get a double frequency boost (cross-cycle within run + cross-run pattern).
---
## Design Principles
1. **Conservative stopping.** Requires 2 consecutive data points before recommending STOP. A single bad cycle might be noise.
2. **User has final say.** STOP is a recommendation, not an enforced shutdown. The user can override.
3. **Cheap computation.** Keyword matching on finding descriptions, simple arithmetic on counts. No ML, no embeddings.
4. **Bounded scope.** Only compares adjacent cycles (N vs N-1, with N-2 for oscillation). Does not attempt to model long-term trends across many cycles.
5. **Observable.** All convergence data is included in the `cycle.boundary` event, making it available for post-hoc analysis via the process log.

View File

@@ -0,0 +1,200 @@
---
name: effectiveness
description: |
Track archetype effectiveness across runs. Scores each archetype on signal-to-noise,
fix rate, cost efficiency, accuracy, and cycle impact. Recommends model tier changes
and archetype removal based on rolling averages.
<example>User: "Which reviewers are actually useful?"</example>
<example>User: "Show archetype effectiveness report"</example>
---
# Agent Effectiveness Scoring
Track which archetypes are most useful vs. which waste tokens. Over multiple runs, build a profile of each archetype's effectiveness and use it to optimize team composition and model selection.
## Storage
```
.archeflow/memory/effectiveness.jsonl # Per-run archetype scores (append-only)
```
## Scoring Dimensions
For each archetype that participates in a run, calculate these scores:
| Dimension | How Measured | Weight |
|-----------|-------------|--------|
| **Signal-to-noise** | useful findings / total findings | 0.30 |
| **Fix rate** | findings that led to actual fixes / total findings | 0.25 |
| **Cost efficiency** | useful findings per dollar spent | 0.20 |
| **Accuracy** | findings not contradicted by other reviewers | 0.15 |
| **Cycle impact** | did this archetype's findings lead to cycle exit? | 0.10 |
### Definitions
- **Useful finding**: A finding in a `review.verdict` event with `severity >= WARNING` (i.e., severity is `warning`, `bug`, or `critical`) AND `fix_required == true`.
- **Actual fix**: A `fix.applied` event whose `source` field matches this archetype (or whose DAG `parent` chain traces back to this archetype's `review.verdict` event).
- **Contradicted finding**: Another reviewer's `review.verdict` has `verdict == "approved"` for the same scope where this archetype flagged an issue. Approximation: if archetype A flags N findings but archetype B approves the same code with 0 findings in overlapping severity categories, A's unmatched findings are considered potentially contradicted.
- **Cycle impact**: The archetype's findings (with `fix_required == true`) resulted in fixes that were part of the final approved cycle. Determined by checking if `fix.applied` events referencing this archetype exist before the final `cycle.boundary` with `met == true`.
### Composite Score
```
composite = (signal_to_noise * 0.30)
+ (fix_rate * 0.25)
+ (cost_efficiency_normalized * 0.20)
+ (accuracy * 0.15)
+ (cycle_impact * 0.10)
```
**Cost efficiency normalization**: Raw cost efficiency is `useful_findings / cost_usd`. To normalize to 0-1 range, use: `min(1.0, raw_efficiency / 100)`. The threshold of 100 means "100 useful findings per dollar" is considered perfect efficiency (achievable with haiku on structured reviews).
## Per-Run Scoring
After `run.complete`, calculate scores for each archetype that participated. The `extract` command does this.
### Per-Run Score Record
```jsonl
{"ts":"2026-04-03T16:00:00Z","run_id":"2026-04-03-der-huster","archetype":"guardian","signal_to_noise":0.85,"fix_rate":1.0,"cost_efficiency":42.5,"accuracy":1.0,"cycle_impact":true,"composite_score":0.91,"tokens":5000,"cost_usd":0.004,"model":"haiku","findings_total":4,"findings_useful":3,"fixes_applied":3}
```
Appended to `.archeflow/memory/effectiveness.jsonl`.
### Scoring Non-Review Archetypes
Only archetypes that produce `review.verdict` events are scored (Guardian, Skeptic, Sage, Trickster, and any custom review archetypes). Non-review archetypes (Explorer, Creator, Maker) are tracked by cost-tracking but not effectiveness-scored, because their output quality is measured differently (by whether the run succeeds, not by individual findings).
## Aggregate Scoring
Across all runs, maintain rolling averages (computed on-demand, not stored):
```jsonl
{"archetype":"guardian","runs":12,"avg_composite":0.88,"avg_signal_noise":0.82,"avg_cost_efficiency":38.2,"trend":"stable","recommendation":"keep"}
{"archetype":"trickster","runs":8,"avg_composite":0.35,"avg_signal_noise":0.20,"avg_cost_efficiency":5.1,"trend":"declining","recommendation":"consider_removing"}
```
### Trend Calculation
Compare the average composite score of the last 5 runs to the 5 runs before that:
- **improving**: last-5 avg > prior-5 avg + 0.05
- **declining**: last-5 avg < prior-5 avg - 0.05
- **stable**: within +/- 0.05
If fewer than 10 runs exist, trend is `"insufficient_data"`.
### Recommendations
Based on aggregate composite scores:
| Composite Score | Recommendation | Meaning |
|----------------|---------------|---------|
| >= 0.70 | `keep` | Archetype is valuable, contributes meaningful findings |
| 0.40 - 0.69 | `optimize` | Consider cheaper model or tighter review lens |
| < 0.40 | `consider_removing` | Might be wasting tokens, review whether it adds value |
## Integration Points
### At Run Start
When the `run` skill initializes, show a brief effectiveness summary for the team's archetypes:
```
Archetype effectiveness (last 10 runs):
guardian: 0.88 (keep) — haiku, $0.004/run avg
sage: 0.72 (keep) — sonnet, $0.08/run avg
skeptic: 0.45 (optimize) — haiku, $0.003/run avg
trickster: 0.32 (consider_removing) — haiku, $0.003/run avg
```
### Model Tier Suggestions
Cross-reference effectiveness with model assignment:
- **High effectiveness on cheap model** (composite >= 0.7, model = haiku): "Keep cheap. Working well."
- **Low effectiveness on cheap model** (composite < 0.5, model = haiku): "Consider upgrading to sonnet — cheap model may not be capturing issues."
- **High effectiveness on expensive model** (composite >= 0.7, model = sonnet): "Try downgrading to haiku — may maintain quality at lower cost."
- **Low effectiveness on expensive model** (composite < 0.5, model = sonnet): "Consider removing — expensive and not contributing."
### Cost-Tracking Integration
Multiply estimated cost by effectiveness to get "value per dollar":
```
value_per_dollar = composite_score / cost_usd
```
This metric helps compare archetypes directly: a cheap archetype with moderate effectiveness may have higher value_per_dollar than an expensive one with high effectiveness.
## Effectiveness Script
**Location:** `lib/archeflow-score.sh`
```
Usage:
archeflow-score.sh extract <events.jsonl> # Score archetypes from a completed run
archeflow-score.sh report # Show aggregate effectiveness report
archeflow-score.sh recommend <team.yaml> # Recommend model tiers for a team
```
### `extract` Command
1. Read all events from the JSONL file
2. Verify a `run.complete` event exists (scoring incomplete runs is unreliable)
3. For each `review.verdict` event:
- Count total findings and useful findings (severity >= WARNING, fix_required)
- Cross-reference with `fix.applied` events via the `source` field or DAG parent chain
- Check for contradictions from other reviewers
- Determine cycle impact
4. Calculate all scoring dimensions and composite score
5. Append per-archetype score records to `.archeflow/memory/effectiveness.jsonl`
### `report` Command
1. Read `.archeflow/memory/effectiveness.jsonl`
2. Group by archetype
3. Calculate rolling averages (last 10 runs per archetype)
4. Calculate trends (last 5 vs. prior 5)
5. Output a markdown table:
```markdown
# Archetype Effectiveness Report
| Archetype | Runs | Avg Score | S/N | Fix Rate | Cost Eff | Accuracy | Trend | Rec |
|-----------|------|-----------|-----|----------|----------|----------|-------|-----|
| guardian | 12 | 0.88 | 0.82 | 0.95 | 38.2 | 0.97 | stable | keep |
| sage | 10 | 0.72 | 0.70 | 0.80 | 12.1 | 0.88 | improving | keep |
| skeptic | 8 | 0.45 | 0.40 | 0.50 | 22.5 | 0.60 | stable | optimize |
| trickster | 8 | 0.35 | 0.20 | 0.30 | 5.1 | 0.55 | declining | consider_removing |
**Model suggestions:**
- skeptic (haiku, score 0.45): Consider upgrading to sonnet or tightening review lens
- trickster (haiku, score 0.35): Consider removing — low signal, low fix rate
```
### `recommend` Command
1. Read the team preset YAML file
2. For each archetype in the team, look up its effectiveness from `.archeflow/memory/effectiveness.jsonl`
3. Cross-reference current model assignment with effectiveness
4. Output recommendations:
```markdown
# Model Recommendations for team: story-development
| Archetype | Current Model | Score | Suggestion |
|-----------|--------------|-------|------------|
| guardian | haiku | 0.88 | Keep haiku — high effectiveness at low cost |
| sage | sonnet | 0.72 | Keep sonnet — quality-sensitive role |
| skeptic | haiku | 0.45 | Try sonnet — may improve signal quality |
| trickster | haiku | 0.35 | Consider removing from team |
```
## Design Principles
1. **Append-only.** Score records are immutable facts. Aggregates are computed on-demand.
2. **Review archetypes only.** Non-review agents (Explorer, Creator, Maker) are not scored — their value is in the final product, not in individual findings.
3. **Relative, not absolute.** Scores are meaningful in comparison (guardian vs. trickster), not as standalone numbers. The thresholds (0.7, 0.4) are starting points — calibrate after 20+ runs.
4. **Actionable.** Every report ends with concrete recommendations (keep, optimize, remove, change model).
5. **Cheap to compute.** One JSONL scan per report. No databases, no external services.

View File

@@ -0,0 +1,242 @@
---
name: git-integration
description: |
Git-per-phase commit strategy for ArcheFlow runs. Creates a branch per run, commits after
every phase transition and agent completion, and merges (squash or no-ff) on success.
Enables rollback to any phase boundary and full audit trail via git history.
<example>Automatically loaded by archeflow:run when git.enabled is true</example>
<example>User: "archeflow rollback --to plan"</example>
<example>User: "Show me the git history for this run"</example>
---
# Git Integration — Per-Phase Commit Strategy
Every ArcheFlow run creates a dedicated branch. Each phase transition and agent completion produces a commit. At run completion, the branch is merged back to the base branch. On failure, the branch stays intact for inspection or rollback.
## Prerequisites
- `archeflow:orchestration` — workflow rules and safety constraints
- `archeflow:process-log` — event schema (git events are emitted alongside process events)
- `archeflow:artifact-routing` — artifact paths that get committed
## Helper Script
All git operations go through the helper script:
```bash
./lib/archeflow-git.sh <command> <run_id> [args...]
```
See `lib/archeflow-git.sh` for full usage. The skill describes *when* to call the script; the script handles *how*.
---
## Branch Strategy
```
main (or current base branch)
└── archeflow/<run_id> # Created at run.start
├── commit: "archeflow(plan): explorer research"
├── commit: "archeflow(plan): creator outline"
├── commit: "archeflow(plan→do): phase transition"
├── commit: "archeflow(do): maker draft"
├── commit: "archeflow(do→check): phase transition"
├── commit: "archeflow(check): guardian review"
├── commit: "archeflow(check): sage review"
├── commit: "archeflow(check→act): phase transition"
├── commit: "archeflow(act): apply 6 fixes"
├── commit: "archeflow(act): cycle 1 complete"
└── commit: "archeflow(run): complete — <summary>"
```
Branch naming: `archeflow/<run_id>` (e.g., `archeflow/2026-04-03-jwt-auth`).
---
## Commit Points
| Trigger | What to commit | Message format |
|---------|---------------|----------------|
| After `agent.complete` | Agent artifacts + any created/modified files | `archeflow(<phase>): <archetype> <summary>` |
| After `phase.transition` | All artifacts from completed phase | `archeflow(<from>→<to>): phase transition` |
| After each `fix.applied` | The fixed file | `archeflow(fix): <source> — <finding summary>` |
| After `cycle.boundary` | Everything staged | `archeflow(act): cycle <N> <status>` |
| After `run.complete` | Final state + process report | `archeflow(run): complete — <summary>` |
---
## Commit Protocol
1. **Stage only relevant files.** Never `git add -A`. Stage:
- `.archeflow/artifacts/<run_id>/` — artifacts produced by the current agent/phase
- `.archeflow/events/<run_id>.jsonl` — updated event log
- Any project files created or modified by the current agent (from `do-maker-files.txt` or explicit file list)
2. **Exclude ephemeral files.** Never commit:
- `.archeflow/progress.md` (live progress display, ephemeral)
- `.archeflow/explorer-cache/` (local cache, not run-specific)
- `.archeflow/session-log.md` (separate concern)
3. **Use conventional commit format:** `archeflow(<scope>): <message>`
4. **Signing:** If `git.signing_key` is configured, pass `-c user.signingkey=<key>` to `git commit`.
### Integration with the Run Skill
The `archeflow:run` skill calls git operations at these points:
```
run.start → ./lib/archeflow-git.sh init <run_id>
agent.complete → ./lib/archeflow-git.sh commit <run_id> <phase> "<archetype> <summary>" [files...]
phase.transition → ./lib/archeflow-git.sh phase-commit <run_id> <phase>
fix.applied → ./lib/archeflow-git.sh commit <run_id> fix "<source> — <finding>"
cycle.boundary → ./lib/archeflow-git.sh commit <run_id> act "cycle <N> <status>"
run.complete (ok) → ./lib/archeflow-git.sh merge <run_id> [--squash|--no-ff]
run.complete (fail) → branch preserved, not merged
```
---
## Run Lifecycle
### 1. Initialization (`run.start`)
```bash
./lib/archeflow-git.sh init <run_id>
```
This will:
1. Verify a clean working tree (or stash uncommitted changes)
2. Create branch `archeflow/<run_id>` from current HEAD
3. Switch to the new branch
### 2. During Execution (phase commits)
After each agent completes or phase transitions, the run skill calls:
```bash
# After an agent completes:
./lib/archeflow-git.sh commit <run_id> plan "explorer research" \
.archeflow/artifacts/<run_id>/plan-explorer.md
# After a phase transition:
./lib/archeflow-git.sh phase-commit <run_id> plan
```
The `commit` command stages artifact directories and event logs automatically. Additional files can be passed as trailing arguments.
The `phase-commit` command stages all artifacts matching the phase prefix and commits with a transition message.
### 3. Completion (merge)
```bash
# Success — squash merge (default):
./lib/archeflow-git.sh merge <run_id> --squash
# Success — preserve history:
./lib/archeflow-git.sh merge <run_id> --no-ff
# Failure or user abort:
# Do nothing. Branch stays for inspection.
echo "Branch archeflow/<run_id> preserved for inspection."
```
The merge command:
1. Verifies all changes on the branch are committed
2. Switches to the base branch (main or wherever the run started)
3. Merges with the chosen strategy
4. If squash: creates a single commit with `feat: <task summary>`
5. Does NOT delete the branch (user may want to inspect)
### 4. Cleanup (optional, after inspection)
```bash
./lib/archeflow-git.sh cleanup <run_id>
```
Deletes the branch after the user has confirmed the merge is correct.
---
## Rollback
Roll back to the end of any completed phase:
```bash
./lib/archeflow-git.sh rollback <run_id> --to plan
```
This will:
1. Find the last commit for the target phase by searching commit messages
2. Show the user what commits will be lost (everything after the target)
3. Perform `git reset --hard <commit>` on the branch
4. Trim the events JSONL to remove events that occurred after the rollback point
**Supported rollback targets:** `plan`, `do`, `check`, `act`, or any cycle number (`cycle-1`, `cycle-2`).
**Safety:** Rollback only works on the run's branch, never on main. The script verifies you are on `archeflow/<run_id>` before proceeding.
---
## Status
View the git state of a run:
```bash
./lib/archeflow-git.sh status <run_id>
```
Output:
```
Branch: archeflow/2026-04-03-jwt-auth
Base: main (3 commits ahead)
Commits:
abc1234 archeflow(plan): explorer research
def5678 archeflow(plan): creator outline
ghi9012 archeflow(plan→do): phase transition
jkl3456 archeflow(do): maker implementation
Current phase: do
Files changed (total): 8
Uncommitted changes: none
```
---
## Configuration
In `.archeflow/config.yaml` or a team preset:
```yaml
git:
enabled: true # Default: true. Set false to disable all git operations.
branch_prefix: "archeflow/" # Default. The run_id is appended.
commit_style: conventional # conventional (archeflow(<scope>): msg) | simple (<phase>: msg)
merge_strategy: squash # squash | no-ff | rebase
auto_push: false # Push branch to remote after each commit
signing_key: null # SSH key path for signed commits (e.g., ~/.ssh/id_ed25519.pub)
```
The helper script reads this config if it exists. All values have sensible defaults.
---
## Safety Rules
These rules are inherited from `archeflow:orchestration` and reinforced here:
1. **Never force-push.** No `--force`, no `--force-with-lease`. If a push fails, diagnose and fix.
2. **Never modify main history.** Merges are forward-only. No rebasing main.
3. **Branch stays intact on failure.** If a run fails or is aborted, the branch is preserved for inspection. Never auto-delete failed branches.
4. **All commits are individually revertable.** Each commit represents a discrete unit of work.
5. **Worktree mode compatibility.** If the Maker runs in a worktree, git-integration commits go to the worktree's branch. The merge happens at the run level, not the worktree level. The Maker's worktree branch is a sub-branch of `archeflow/<run_id>`.
6. **Clean merge or abort.** If a merge produces conflicts, do not force-resolve. Report the conflict, leave the branch intact, and let the user decide.
7. **No signing by default.** Signing is opt-in via config. If configured, all commits on the branch are signed.
---
## Design Principles
1. **Git is the audit trail.** Every phase transition is a commit. `git log` tells the full story of a run.
2. **Rollback is cheap.** Reset to any phase boundary, re-run from there. No need to start over.
3. **Merge strategy is a project decision.** Squash for clean history, no-ff for detailed history. Both are valid.
4. **Events + git = full observability.** Process events capture *what happened* (decisions, verdicts, timing). Git captures *what changed* (files, diffs). Together they provide complete run archaeology.
5. **Fail-safe by default.** Every safety rule defaults to the conservative option. The user must explicitly opt in to destructive operations.

224
skills/memory/SKILL.md Normal file
View File

@@ -0,0 +1,224 @@
---
name: memory
description: |
Cross-run memory system that learns from past ArcheFlow runs. Detects recurring findings,
stores lessons, and injects known issues into agent prompts so the same mistakes are not
repeated across orchestrations.
<example>User: "archeflow memory list"</example>
<example>User: "archeflow memory add 'User prefers single bundled PR'"</example>
<example>Automatically loaded at run start and after run.complete</example>
---
# Cross-Run Memory
ArcheFlow forgets everything after each run. If Guardian repeatedly flags the same type of issue (e.g., timeline errors in fiction, missing null checks in code), the next run starts from zero. This skill fixes that by extracting lessons from completed runs and injecting them into future agent prompts.
## Storage
```
.archeflow/memory/lessons.jsonl # Append-only, one lesson per line
```
Each lesson is a single JSON line:
```jsonl
{"id":"m-001","ts":"2026-04-03T14:00:00Z","run_id":"2026-04-03-der-huster","type":"pattern","source":"guardian","description":"Timeline references must match story start day","frequency":2,"severity":"bug","domain":"writing","tags":["continuity","timeline"],"last_seen_run":"2026-04-03-der-huster","runs_since_last_seen":0}
{"id":"m-002","ts":"2026-04-03T15:00:00Z","run_id":"2026-04-03-der-huster","type":"preference","source":"user_feedback","description":"User prefers single bundled PR over many small ones","frequency":1,"severity":"info","domain":"general","tags":["workflow"],"last_seen_run":"","runs_since_last_seen":0}
{"id":"m-003","ts":"2026-04-04T10:00:00Z","run_id":"2026-04-04-auth-fix","type":"archetype_hint","source":"sage","description":"Voice drift most common in long monologue passages","frequency":3,"severity":"warning","domain":"writing","tags":["voice","prose"],"archetype":"story-sage","last_seen_run":"2026-04-04-auth-fix","runs_since_last_seen":0}
{"id":"m-004","ts":"2026-04-04T11:00:00Z","run_id":"2026-04-04-auth-fix","type":"anti_pattern","source":"maker","description":"Splitting auth middleware into per-route handlers causes duplication","frequency":1,"severity":"warning","domain":"code","tags":["auth","middleware"],"last_seen_run":"2026-04-04-auth-fix","runs_since_last_seen":0}
```
## Lesson Types
| Type | Source | Description |
|------|--------|-------------|
| `pattern` | Auto-detected | Recurring finding across runs (same category + similar description) |
| `preference` | Manual | User correction or workflow preference (added via CLI) |
| `archetype_hint` | Auto-detected | Per-archetype insight (e.g., Sage catches voice drift in monologues) |
| `anti_pattern` | Manual or auto | Something that was tried and failed — avoid repeating |
## Lesson Fields
| Field | Type | Description |
|-------|------|-------------|
| `id` | string | Unique ID, format `m-NNN` (monotonically increasing) |
| `ts` | ISO 8601 | When the lesson was created or last updated |
| `run_id` | string | Run that created or last triggered this lesson |
| `type` | string | One of: `pattern`, `preference`, `archetype_hint`, `anti_pattern` |
| `source` | string | Archetype or `user_feedback` that originated the lesson |
| `description` | string | Human-readable lesson text |
| `frequency` | integer | How many times this lesson was triggered |
| `severity` | string | `bug`, `warning`, `info`, or `recommendation` |
| `domain` | string | `writing`, `code`, `general`, or project-specific |
| `tags` | string[] | Keywords for matching and filtering |
| `archetype` | string or null | For `archetype_hint` type — which archetype this applies to |
| `last_seen_run` | string | Run ID where this lesson was last matched |
| `runs_since_last_seen` | integer | Counter for decay — incremented each run that does NOT trigger this lesson |
---
## Auto-Detection
After each `run.complete`, the orchestrator runs lesson extraction:
```bash
./lib/archeflow-memory.sh extract .archeflow/events/<run_id>.jsonl
```
### Extraction Algorithm
1. **Read all `review.verdict` events** from the completed run's JSONL.
2. **For each finding** in each verdict:
a. Tokenize the finding description into keywords (lowercase, strip punctuation).
b. Compare keywords against each existing lesson's description + tags.
c. **Match threshold:** 50%+ keyword overlap between finding and lesson.
3. **If match found:** Update the existing lesson:
- Increment `frequency` by 1
- Update `ts` to now
- Update `last_seen_run` to current run ID
- Reset `runs_since_last_seen` to 0
4. **If no match AND severity >= WARNING:** Add as candidate lesson with `frequency: 1`.
5. **Candidates become active** when `frequency >= 2` (triggered in a second run).
### Promotion Rule
A finding that appears in only one run stays at `frequency: 1` — it might be a one-off. Once the same pattern appears in a second run (matched by keyword overlap), it gets promoted to `frequency: 2` and becomes eligible for injection.
This prevents noise from single-run anomalies while still capturing genuine recurring issues quickly.
---
## Injection
At run start, before spawning agents, the orchestrator injects relevant lessons:
```bash
LESSONS=$(./lib/archeflow-memory.sh inject <domain> <archetype>)
```
### Injection Rules
1. Read `lessons.jsonl`.
2. Filter by `domain` (exact match or `general`) and optionally by `archetype`.
3. Only include lessons with `frequency >= 2` (confirmed patterns). Exception: `preference`-type lessons are injected regardless of frequency (see Manual Management below).
4. Sort by frequency descending (most common first).
5. Cap at **10 lessons** per injection.
6. Lessons with `frequency >= 5` are **always injected** regardless of domain/archetype filter (they are universal enough to matter).
### Injection Format
Append to the agent's system prompt as a structured section:
```markdown
## Known Issues (from past runs)
- Timeline references must match story start day [seen 3x, guardian]
- Voice drift common in monologue passages >200 words [seen 2x, sage]
- Missing null checks in API response handlers [seen 5x, guardian]
```
### Integration with Run Skill
In the `run` skill, after Step 0 (Initialize) and before Step 1 (Plan Phase):
```bash
# Load cross-run memory for this domain
MEMORY_LESSONS=$(./lib/archeflow-memory.sh inject "$DOMAIN" "")
# Inject into Explorer/Creator prompts if non-empty
if [[ -n "$MEMORY_LESSONS" ]]; then
EXPLORER_PROMPT="${EXPLORER_PROMPT}
${MEMORY_LESSONS}"
CREATOR_PROMPT="${CREATOR_PROMPT}
${MEMORY_LESSONS}"
fi
```
For reviewers in the Check phase, inject archetype-specific lessons:
```bash
GUARDIAN_LESSONS=$(./lib/archeflow-memory.sh inject "$DOMAIN" "guardian")
SAGE_LESSONS=$(./lib/archeflow-memory.sh inject "$DOMAIN" "sage")
```
---
## Decay
Lessons that stop being relevant should fade out. After each `run.complete`, apply decay:
```bash
./lib/archeflow-memory.sh decay
```
### Decay Algorithm
1. For every lesson in `lessons.jsonl`:
- If `last_seen_run` is NOT the current run → increment `runs_since_last_seen` by 1
2. If `runs_since_last_seen >= 10`:
- Decrement `frequency` by 1
- Reset `runs_since_last_seen` to 0
3. If `frequency` drops to 0:
- Move the lesson to `.archeflow/memory/archive.jsonl` (append)
- Remove from `lessons.jsonl`
This means a lesson that was seen 5 times but then stops appearing will survive 50 runs of non-triggering before being fully archived (5 decrements x 10 runs each).
---
## Manual Management
### Add a lesson
```bash
archeflow memory add "User prefers single bundled PR over many small ones"
# Internally: ./lib/archeflow-memory.sh add preference "User prefers single bundled PR over many small ones"
```
Manually added lessons start at `frequency: 1` but with type `preference`, which means they are injected immediately (preferences skip the frequency >= 2 threshold).
### List lessons
```bash
archeflow memory list
# Internally: ./lib/archeflow-memory.sh list
```
Output:
```
ID Freq Type Domain Description
m-001 3 pattern writing Timeline references must match story start day
m-002 1 preference general User prefers single bundled PR over many small ones
m-003 5 archetype_hint writing Voice drift most common in long monologue passages
m-004 1 anti_pattern code Splitting auth middleware causes duplication
```
### Forget a lesson
```bash
archeflow memory forget m-002
# Internally: ./lib/archeflow-memory.sh forget m-002
```
Moves the lesson to `archive.jsonl` regardless of frequency.
---
## Integration Points
| Moment | Action | Script Command |
|--------|--------|----------------|
| After `run.complete` | Extract lessons from findings | `archeflow-memory.sh extract <events.jsonl>` |
| After extraction | Apply decay to all lessons | `archeflow-memory.sh decay` |
| Before agent spawn (run start) | Inject relevant lessons | `archeflow-memory.sh inject <domain> <archetype>` |
| User command | Add/list/forget lessons | `archeflow-memory.sh add/list/forget` |
## Design Principles
1. **Append-only storage.** `lessons.jsonl` is append-only during writes; decay rewrites the file in place but preserves all data (archived lessons move to `archive.jsonl`).
2. **Conservative promotion.** A finding must appear in 2+ runs before injection. One-offs are noise.
3. **Graceful degradation.** If `lessons.jsonl` doesn't exist, injection returns empty — no error, no block.
4. **Cheap.** Keyword matching, not embeddings. `jq` for JSON, `grep` for matching. No external services.
5. **Bounded.** Max 10 lessons injected per prompt. Prevents context pollution.

191
skills/progress/SKILL.md Normal file
View File

@@ -0,0 +1,191 @@
---
name: progress
description: |
Live progress file for ArcheFlow orchestrations. Regenerates `.archeflow/progress.md`
after every event emission, giving users real-time visibility into run status, budget
usage, and DAG shape — watchable from a second terminal.
<example>User: "What's happening with my run?"</example>
<example>watch -n 2 cat .archeflow/progress.md</example>
---
# Live Progress — Real-Time Run Visibility
During long-running orchestrations (Maker drafting, parallel reviews), users have no visibility into what is happening. This skill solves that by maintaining a live progress file that is regenerated after every event.
## Progress File
**Location:** `.archeflow/progress.md`
Updated after every event emission during a run. Users can watch it from a second terminal:
```bash
# Simple polling
watch -n 2 cat .archeflow/progress.md
# Continuous mode (built-in)
./lib/archeflow-progress.sh <run_id> --watch
# Programmatic consumption
./lib/archeflow-progress.sh <run_id> --json
```
## Progress File Format
```markdown
# ArcheFlow Run: 2026-04-03-der-huster
**Status:** DO phase — maker running (3/6 scenes drafted)
**Started:** 14:32 | **Elapsed:** 8 min
**Budget:** $1.45 / $10.00 (14%)
## Progress
- [x] PLAN: Explorer (87s, 21k tok, $0.02)
- [x] PLAN: Creator (167s, 26k tok, $0.08)
- [x] PLAN -> DO transition
- [ ] **DO: Maker** <- running (5 min elapsed)
- [ ] CHECK: Guardian
- [ ] CHECK: Sage
- [ ] ACT: Apply fixes
## Latest Event
#6 agent.start — maker (do) — 14:40
## DAG (so far)
#1 run.start
├── #2 story-explorer ✓
│ ├── #3 decision ✓
│ └── #4 creator ✓
├── #5 plan→do ✓
└── #6 maker ← running
```
## How to Use
### During Orchestration (run skill integration)
The `run` skill should call `archeflow-progress.sh` after each event emission. This keeps progress decoupled from the event emitter itself — no modification to `archeflow-event.sh` is needed.
Add this call after every `archeflow-event.sh` invocation in the run loop:
```bash
# After emitting an event:
./lib/archeflow-event.sh "$RUN_ID" agent.complete plan explorer '{"archetype":"explorer",...}'
# Update progress:
./lib/archeflow-progress.sh "$RUN_ID"
```
This is a fast operation (reads JSONL, writes one markdown file) and adds negligible overhead.
### From a Second Terminal
```bash
# One-shot: see current state
./lib/archeflow-progress.sh <run_id>
cat .archeflow/progress.md
# Continuous: auto-refresh every 2 seconds
./lib/archeflow-progress.sh <run_id> --watch
# JSON output for dashboards or scripts
./lib/archeflow-progress.sh <run_id> --json
```
### Reactive Mode (via JSONL tail)
```bash
tail -f .archeflow/events/<run_id>.jsonl | while IFS= read -r _; do
./lib/archeflow-progress.sh <run_id>
done
```
## Progress Script
**Location:** `lib/archeflow-progress.sh`
```
Usage:
archeflow-progress.sh <run_id> # Generate/update progress.md
archeflow-progress.sh <run_id> --watch # Continuous update mode (2s interval)
archeflow-progress.sh <run_id> --json # Output as JSON (for dashboards)
```
### What the Script Does
1. **Read** `.archeflow/events/<run_id>.jsonl` — the event stream for this run
2. **Determine** current phase and active agent from the latest events
3. **Build checklist** — mark completed agents with timing/cost data, show pending agents as unchecked
4. **Show partial DAG** — completed nodes with checkmarks, running node with arrow indicator
5. **Calculate budget** — sum `estimated_cost_usd` from `agent.complete` events, compare to budget from `run.start` config or `.archeflow/config.yaml`
6. **Compute elapsed time** — difference between `run.start` timestamp and now
7. **Write** to `.archeflow/progress.md`
### Output Modes
**Default (markdown):** Writes `.archeflow/progress.md` and prints the same content to stdout.
**`--watch`:** Clears the terminal every 2 seconds, re-reads the JSONL, and regenerates the display. Exits when a `run.complete` event is found.
**`--json`:** Outputs a structured JSON object to stdout (does not write progress.md):
```json
{
"run_id": "2026-04-03-der-huster",
"status": "running",
"phase": "do",
"active_agent": "maker",
"elapsed_seconds": 480,
"budget_used_usd": 1.45,
"budget_total_usd": 10.00,
"budget_percent": 14,
"completed": [
{"agent": "explorer", "phase": "plan", "duration_s": 87, "tokens": 21000, "cost_usd": 0.02},
{"agent": "creator", "phase": "plan", "duration_s": 167, "tokens": 26000, "cost_usd": 0.08}
],
"pending": ["guardian", "sage"],
"latest_event": {"seq": 6, "type": "agent.start", "agent": "maker", "phase": "do"},
"total_events": 6
}
```
## Checklist Construction
The progress checklist is built from events, not from a predefined workflow definition. Each event type maps to a checklist entry:
| Event Type | Checklist Entry |
|-----------|----------------|
| `agent.complete` | `- [x] PHASE: archetype (duration, tokens, cost)` |
| `agent.start` (no matching complete) | `- [ ] **PHASE: archetype** <- running (elapsed)` |
| `phase.transition` | `- [x] PHASE -> PHASE transition` |
| `review.verdict` | `- [x] CHECK: archetype -> VERDICT` |
| `fix.applied` | `- [x] ACT: Fix (source)` |
| `cycle.boundary` | `- [x] Cycle N complete` |
Pending agents (not yet started) are NOT shown in the checklist — only started or completed agents appear. This avoids guessing which agents will be spawned.
## Budget Display
Budget information comes from two sources:
1. **`run.start` event** — may contain `config.budget_usd`
2. **`.archeflow/config.yaml`** — global `budget.per_run_usd`
If no budget is configured, the budget line shows cost only (no percentage):
```
**Cost:** $1.45 (no budget set)
```
## Integration with Other Skills
- **`run`**: Should call `archeflow-progress.sh` after each event emission
- **`process-log`**: Progress reads the same JSONL that process-log defines
- **`cost-tracking`**: Budget data and cost calculations follow cost-tracking conventions
- **`autonomous-mode`**: Progress file is useful for monitoring autonomous overnight runs
## Design Principles
1. **Read-only on events.** Progress never modifies the JSONL. It is a derived view.
2. **Fast.** One JSONL read + one markdown write. No jq streaming, no databases.
3. **Decoupled.** No hooks in `archeflow-event.sh`. The `run` skill calls progress explicitly.
4. **Optional.** If progress is never called, orchestration works fine. No side effects.
5. **Terminal-friendly.** Output is plain markdown — renders well in `cat`, `bat`, `glow`, or any terminal.

---
<!-- skills/templates/SKILL.md (new file, 322 lines) -->
---
name: templates
description: |
Template gallery for sharing workflows, team presets, archetypes, domain configs, and complete
setup bundles across ArcheFlow projects. Supports init-from-template, save-as-template, and
clone-from-project operations.
<example>User: "archeflow init writing-short-story"</example>
<example>User: "archeflow template save my-backend-setup"</example>
<example>User: "archeflow template list"</example>
<example>User: "archeflow init --from ../book.giesing-gschichten"</example>
---
# Template Gallery — Shareable ArcheFlow Configurations
Workflows, team presets, custom archetypes, and domain configs should be reusable across projects. This skill defines the template system that makes ArcheFlow setups portable and shareable.
## Template Storage
Templates live in two locations, with project-local overriding global:
| Location | Scope | Precedence |
|----------|-------|------------|
| `.archeflow/templates/` | Project-local | Higher (checked first) |
| `~/.archeflow/templates/` | Global (user-wide) | Lower (fallback) |
### Directory Structure
```
~/.archeflow/templates/
├── workflows/
│ ├── kurzgeschichte.yaml
│ ├── feature-implementation.yaml
│ └── security-review.yaml
├── teams/
│ ├── story-development.yaml
│ ├── backend.yaml
│ └── fullstack.yaml
├── archetypes/
│ ├── story-explorer.md
│ ├── story-sage.md
│ └── db-specialist.md
├── domains/
│ ├── writing.yaml
│ ├── code.yaml
│ └── research.yaml
└── bundles/
├── writing-short-story/
│ ├── manifest.yaml
│ ├── team.yaml
│ ├── workflow.yaml
│ ├── archetypes/
│ │ ├── story-explorer.md
│ │ └── story-sage.md
│ └── domain.yaml
└── backend-feature/
├── manifest.yaml
├── team.yaml
├── workflow.yaml
└── domain.yaml
```
Individual templates (workflows/, teams/, archetypes/, domains/) are single files that can be used standalone. Bundles are complete setups that include everything a project needs.
---
## Bundle Manifest
Every bundle has a `manifest.yaml` that declares what it contains, what it requires, and what variables it exposes.
```yaml
name: writing-short-story
description: "Complete setup for short fiction writing with ArcheFlow"
version: 1
domain: writing
includes:
team: story-development.yaml
workflow: kurzgeschichte.yaml
archetypes:
- story-explorer.md
- story-sage.md
domain: writing.yaml
requires:
- colette.yaml # Project must have this file
variables:
target_words: 6000 # Default, can be overridden at init time
max_cycles: 2 # Default, can be overridden at init time
```
### Manifest Fields
| Field | Required | Description |
|-------|----------|-------------|
| `name` | Yes | Bundle identifier (used in `archeflow init <name>`) |
| `description` | Yes | Human-readable description |
| `version` | No | Bundle version (integer, default 1) |
| `domain` | No | Domain this bundle is designed for |
| `includes` | Yes | Map of file types to filenames within the bundle |
| `requires` | No | List of files that must exist in the target project |
| `variables` | No | Key-value pairs with defaults, overridable at init |
### Includes Types
| Key | Target location in `.archeflow/` | Accepts |
|-----|----------------------------------|---------|
| `team` | `teams/<filename>` | Single YAML file |
| `workflow` | `workflows/<filename>` | Single YAML file |
| `archetypes` | `archetypes/<filename>` | List of Markdown files |
| `domain` | `domains/<filename>` | Single YAML file |
| `hooks` | `hooks.yaml` | Single YAML file |
---
## Operations
### `archeflow init <bundle-name>`
Initialize a project's `.archeflow/` directory from a named bundle.
**Procedure:**
1. Search for the bundle:
- `.archeflow/templates/bundles/<name>/manifest.yaml` (project-local)
- `~/.archeflow/templates/bundles/<name>/manifest.yaml` (global)
- If not found: error with list of available bundles
2. Read `manifest.yaml`
3. Check `requires`:
- For each required file, verify it exists in the project root
- If missing: error with `"Required file not found: <file>. This bundle requires it."`
4. Check for existing `.archeflow/` setup:
- If `.archeflow/teams/`, `.archeflow/workflows/`, etc. already contain files: warn and ask before overwriting
- Never silently overwrite existing configuration
5. Copy files from bundle to `.archeflow/`:
- `team` → `.archeflow/teams/<filename>`
- `workflow` → `.archeflow/workflows/<filename>`
- `archetypes` → `.archeflow/archetypes/<filename>` (each file)
- `domain` → `.archeflow/domains/<filename>`
- `hooks` → `.archeflow/hooks.yaml`
6. Create `.archeflow/config.yaml` with variables from manifest:
```yaml
# Generated by archeflow init from bundle: <name>
bundle: <name>
bundle_version: <version>
initialized: <timestamp>
variables:
target_words: 6000
max_cycles: 2
```
7. Print setup summary:
```
ArcheFlow initialized from bundle: <name>
Team: <team filename> → .archeflow/teams/
Workflow: <workflow filename> → .archeflow/workflows/
Archetypes: <count> files → .archeflow/archetypes/
Domain: <domain filename> → .archeflow/domains/
Config: .archeflow/config.yaml (variables: target_words=6000, max_cycles=2)
Ready to run: archeflow:run
```
### `archeflow init --from <project-path>`
Clone another project's ArcheFlow setup into the current project.
**Procedure:**
1. Verify `<project-path>/.archeflow/` exists
2. Copy these subdirectories (if they exist):
- `teams/`
- `workflows/`
- `archetypes/`
- `domains/`
- `config.yaml`
- `hooks.yaml`
3. Do NOT copy (run-specific data):
- `events/`
- `artifacts/`
- `context/` (generated by colette-bridge, project-specific)
- `templates/` (project-local templates stay local)
4. Warn if target `.archeflow/` already has files
5. Print summary of what was copied
### `archeflow template save <name>`
Save the current project's `.archeflow/` setup as a reusable template bundle.
**Procedure:**
1. Verify `.archeflow/` exists and has content
2. Create bundle directory: `~/.archeflow/templates/bundles/<name>/`
- If it already exists: warn and ask before overwriting
3. Copy from `.archeflow/` to bundle:
- `teams/*.yaml` → bundle `team` (first file, or prompt if multiple)
- `workflows/*.yaml` → bundle `workflow` (first file, or prompt if multiple)
- `archetypes/*.md` → bundle `archetypes/`
- `domains/*.yaml` → bundle `domain` (first file, or prompt if multiple)
- `hooks.yaml` → bundle (if exists)
4. Generate `manifest.yaml`:
```yaml
name: <name>
description: "Saved from <project directory name>"
version: 1
domain: <from domain yaml if present>
includes:
team: <filename>
workflow: <filename>
archetypes: [<filenames>]
domain: <filename>
requires: []
variables: <from config.yaml variables section if present>
```
5. Print summary:
```
Template saved: <name>
Location: ~/.archeflow/templates/bundles/<name>/
Files: <count> files
Use with: archeflow init <name>
```
### `archeflow template list`
List all available templates — both individual files and bundles, from both global and project-local locations.
**Output format:**
```
ArcheFlow Templates
====================
Bundles:
writing-short-story Complete setup for short fiction writing [global]
backend-feature Backend feature implementation [global]
my-project-setup Saved from book.giesing-gschichten [global]
Individual Templates:
Workflows:
kurzgeschichte.yaml [global]
feature-implementation.yaml [global]
Teams:
story-development.yaml [global]
backend.yaml [global]
Archetypes:
story-explorer.md [global]
story-sage.md [global]
Domains:
writing.yaml [global]
code.yaml [global]
```
### `archeflow template share <name> <path>`
Export a template bundle to a directory for sharing (e.g., via git, email, file share).
**Procedure:**
1. Find the bundle (global or local)
2. Copy the entire bundle directory to `<path>/<name>/`
3. Print the path and a one-liner for importing:
```
Exported: <path>/<name>/
To import: cp -r <path>/<name> ~/.archeflow/templates/bundles/
```
---
## Variable Substitution
Bundle manifests can define variables with defaults. These are stored in `.archeflow/config.yaml` after init and can be overridden:
- At init time: `archeflow init writing-short-story --set target_words=8000`
- After init: edit `.archeflow/config.yaml` directly
Variables are available to workflows and the run skill via config:
```yaml
# In a workflow, reference variables:
phases:
do:
description: |
Draft the story. Target: ${target_words} words.
```
Variable substitution happens at run time, not at init time. The workflow file contains the `${variable}` placeholder; the run skill reads `.archeflow/config.yaml` and substitutes before passing to agents.
---
## Individual Template Usage
Not everything needs a bundle. Individual templates can be copied directly:
```bash
# Copy a single workflow
cp ~/.archeflow/templates/workflows/kurzgeschichte.yaml .archeflow/workflows/
# Copy a single archetype
cp ~/.archeflow/templates/archetypes/story-explorer.md .archeflow/archetypes/
# Copy a team preset
cp ~/.archeflow/templates/teams/story-development.yaml .archeflow/teams/
```
The `archeflow init` command handles bundles. For individual files, manual copy or the helper script (`lib/archeflow-init.sh`) can be used.
---
## Integration with Other Skills
- **`archeflow:run`** — Reads `.archeflow/config.yaml` for variables, applies them during run initialization
- **`archeflow:domains`** — Domain YAML from templates is loaded like any other domain config
- **`archeflow:custom-archetypes`** — Archetype .md files from templates work identically to hand-written ones
- **`archeflow:workflow-design`** — Workflow YAML from templates follows the same schema
- **`archeflow:colette-bridge`** — Bundle `requires: [colette.yaml]` ensures the bridge has what it needs
---
## Design Principles
1. **Bundles are self-contained.** Everything needed to set up a project is in the bundle directory. No external dependencies beyond `requires`.
2. **Never silently overwrite.** Init warns before replacing existing files. Templates are helpers, not bulldozers.
3. **Global + local layering.** Project-local templates override global ones. This allows per-project customization without polluting the global template directory.
4. **Skip run data.** Events, artifacts, and context are run-specific. Templates carry only configuration.
5. **Variables are late-bound.** Substitution happens at run time, not template time. This keeps templates generic.
6. **Plain files, no magic.** Templates are just directories of YAML and Markdown files. No databases, no registries, no lock files.