From 949bac4c4c9fda71ab129b7fc1067cde28de1463 Mon Sep 17 00:00:00 2001
From: TevinClaw <510129976@qq.com>
Date: Sat, 14 Mar 2026 12:25:13 +0800
Subject: [PATCH] 添加主agent学习与记忆技能

---
 workspace/skills/memory-merger/SKILL.md                                  |  108 +
 workspace/skills/self-improving-agent/hooks/openclaw/handler.js          |   56 
 workspace/skills/self-improving-agent/.learnings/LEARNINGS.md            |    5 
 workspace/skills/ontology/references/queries.md                          |  211 ++
 workspace/skills/memory-management/scripts/weekly_maintenance.py         |  207 ++
 workspace/skills/self-improving-agent/references/openclaw-integration.md |  248 +++
 workspace/skills/self-improving-agent/_meta.json                         |    6 
 workspace/skills/ontology/_meta.json                                     |    6 
 workspace/skills/self-improving-agent/scripts/error-detector.sh          |   55 
 workspace/.learnings/ERRORS.md                                           |   20 
 workspace/skills/self-improving-agent/.learnings/ERRORS.md               |    5 
 workspace/skills/self-improving-agent/assets/SKILL-TEMPLATE.md           |  177 ++
 workspace/skills/self-improving-agent/references/hooks-setup.md          |  223 +++
 workspace/skills/memory-management/scripts/daily_check.py                |   83 +
 workspace/skills/self-improving-agent/.clawhub/origin.json               |    7 
 workspace/skills/memory-management/SKILL.md                              |  231 +++
 workspace/skills/self-improving-agent/hooks/openclaw/HOOK.md             |   23 
 workspace/skills/self-improving-agent/scripts/activator.sh               |   20 
 workspace/skills/ontology/.clawhub/origin.json                           |    7 
 workspace/skills/ontology/scripts/ontology.py                            |  580 +++++++
 workspace/skills/self-improving-agent/references/examples.md             |  374 +++++
 workspace/skills/memory-management/scripts/write_l2.py                   |   56 
 workspace/skills/self-improving-agent/scripts/extract-skill.sh           |  221 +++
 workspace/skills/ontology/references/schema.md                           |  322 ++++
 workspace/skills/ontology/SKILL.md                                       |  232 +++
 workspace/.learnings/LEARNINGS.md                                        |    5 
 workspace/skills/memory-management/scripts/write_l0.py                   |   63 
 workspace/skills/self-improving-agent/.learnings/FEATURE_REQUESTS.md     |    5 
 workspace/skills/self-improving-agent/assets/LEARNINGS.md                |   45 
 workspace/skills/self-improving-agent/SKILL.md                           |  647 ++++++++
 workspace/skills/self-improving-agent/hooks/openclaw/handler.ts          |   62 
 workspace/.learnings/FEATURE_REQUESTS.md                                 |   19 
 workspace/skills/memory-management/scripts/check_size.py                 |   36 
 33 files changed, 4,365 insertions(+), 0 deletions(-)

diff --git a/workspace/.learnings/ERRORS.md b/workspace/.learnings/ERRORS.md
new file mode 100644
index 0000000..3e71508
--- /dev/null
+++ b/workspace/.learnings/ERRORS.md
@@ -0,0 +1,20 @@
+# ERRORS.md
+
+## 概述
+记录命令失败、异常和错误信息。
+
+## 状态值
+- `pending` - 待处理
+- `in_progress` - 正在处理
+- `resolved` - 已解决
+- `wont_fix` - 不予修复
+
+## 优先级
+- `low` - 轻微问题
+- `medium` - 中等问题,有替代方案
+- `high` - 显著问题
+- `critical` - 阻塞关键功能
+
+---
+
+<!-- 在下方添加错误条目 -->
diff --git a/workspace/.learnings/FEATURE_REQUESTS.md b/workspace/.learnings/FEATURE_REQUESTS.md
new file mode 100644
index 0000000..b0c32ce
--- /dev/null
+++ b/workspace/.learnings/FEATURE_REQUESTS.md
@@ -0,0 +1,19 @@
+# FEATURE_REQUESTS.md
+
+## 概述
+记录用户请求但当前不具备的功能。
+
+## 状态值
+- `pending` - 待处理
+- `in_progress` - 正在开发
+- `resolved` - 已实现
+- `wont_implement` - 不予实现
+
+## 复杂度估算
+- `simple` - 简单
+- `medium` - 中等
+- `complex` - 复杂
+
+---
+
+<!-- 在下方添加功能请求条目 -->
diff --git a/workspace/.learnings/LEARNINGS.md b/workspace/.learnings/LEARNINGS.md
new file mode 100644
index 0000000..6db62ac
--- /dev/null
+++ b/workspace/.learnings/LEARNINGS.md
@@ -0,0 +1,5 @@
+# Learnings Log
+
+记录学习、改进和最佳实践。
+
+---
diff --git a/workspace/skills/memory-management/SKILL.md b/workspace/skills/memory-management/SKILL.md
new file mode 100644
index 0000000..9146b8a
--- /dev/null
+++ b/workspace/skills/memory-management/SKILL.md
@@ -0,0 +1,231 @@
+---
+name: memory-management
+description: "三层记忆架构管理系统 (L0索引→L1概览→L2详情)。用于记录、维护和优化AI助手的记忆体系。当需要记录重要信息、决策或事件时使用;当需要维护记忆体系(归档、整理、合并)时使用;当L0层接近4KB限制时需要整理。"
+---
+
+# 三层记忆管理
+
+> **架构**: L0索引 → L1概览 → L2详情
+> **红线**: L0 < 4KB | 只存索引 | 详情通过路径引用
+
+---
+
+## 目录结构(固定)
+
+```
+~/.openclaw/workspace/
+├── MEMORY.md                    # L0: 索引层(红线:4KB)
+├── memory/
+│   ├── milestones/              # L1: 概览层
+│   │   └── YYYY-MM-topic.md
+│   └── journal/                 # L2: 详情层
+│       └── YYYY-MM-DD.md
+├── AGENTS.md                    # 启动序列(参考)
+├── SOUL.md                      # 人格(参考)
+└── USER.md                      # 用户偏好(参考)
+```
+
+---
+
+## 写入规则
+
+### L0 (MEMORY.md) - 索引层
+
+**触发条件:** 任何需要"记住"的事
+
+**写入内容:**
+- 索引目录(指向L1/L2的链接)
+- 最近活动摘要(3-5条)
+- 关键决策列表
+
+**模板:**
+```markdown
+## 🔍 快速检索
+
+### 最近活动
+- YYYY-MM-DD: [一句话摘要] → 详见 [L2](./memory/journal/YYYY-MM-DD.md)
+
+### 关键决策
+- [决策标题]:简要说明
+```
+
+**红线检查:**
+- 文件 > 4KB → 触发归档提醒
+- 超过10条未归档 → 提示整理到L1
+
+### L1 (milestones/) - 概览层
+
+**触发条件:** 
+- L2积累到一定量,或跨会话仍重要
+- 每周维护时自动合并
+
+**组织方式:**
+- 按主题:`YYYY-MM-skills.md`, `YYYY-MM-decisions.md`
+- 每条包含:决策/事件、时间、关联的L2链接
+
+**模板:**
+```markdown
+# YYYY-MM 主题里程碑
+
+## [日期] 事件标题
+**背景**:简述
+**决策/结论**:关键点
+**来源**:[L2链接](./journal/YYYY-MM-DD.md#锚点)
+```
+
+### L2 (journal/) - 详情层
+
+**触发条件:** 详细记录、完整对话、原始上下文
+
+**写入内容:**
+- 完整背景
+- 详细过程
+- 决策/结论
+- 关联引用
+
+**模板:**
+```markdown
+# YYYY-MM-DD
+
+## [HH:MM] 事件标题
+
+### 背景
+发生了什么
+
+### 详情
+完整记录
+
+### 决策/结论
+关键产出
+
+### 关联
+- L1里程碑:[链接]
+```
+
+---
+
+## 维护规则
+
+### 每日维护(晚上10点后触发,心跳触发)
+
+**触发条件:** 晚上10点后,如果今日还没有写入L2
+
+**任务清单:**
+- [ ] 检查今日是否有重要决策需要记录到L2
+- [ ] **检查飞书渠道历史** — 如用户询问"检查昨天的每日总结",需读取所有session并提取飞书渠道的完整聊天记录补充到L2
+- [ ] 更新 MEMORY.md 的"最近活动"摘要
+- [ ] 确保 L0 层不超过 4KB
+
+**重要提醒:**
+> ⚠️ **飞书历史检查**:当用户说"检查昨天的每日总结"或类似表述时,必须:
+> 1. 使用 `sessions_list` 查找过去48小时的活跃session
+> 2. 检查 `.openclaw/agents/main/sessions/` 目录下是否有 `.jsonl.reset.*` 归档文件
+> 3. 读取这些文件提取飞书渠道的完整聊天记录
+> 4. 将遗漏的内容补充到当日L2记录中
+
+**脚本调用:**
+```bash
+python ~/.openclaw/workspace/skills/memory-management/scripts/daily_check.py
+```
+
+### 每周维护(周一早上9:30,Cron定时任务)
+
+**配置方法:**
+```bash
+# 添加cron任务,每周一9:30执行
+openclaw cron add \
+  --name "memory-weekly-maintenance" \
+  --cron "30 9 * * 1" \
+  --message "执行三层记忆每周维护:1.运行memory-merger整理L2→L1 2.检查L0大小 3.生成周报发送给用户" \
+  --channel feishu \
+  --to "USER_ID" \
+  --tz "Asia/Shanghai"
+```
+
+**任务清单:**
+- [ ] 运行 memory-merger 整理本周 L2 → L1
+- [ ] 检查 L0 大小,必要时归档
+- [ ] 生成周报内容
+- [ ] 发送周报到飞书
+
+**脚本调用:**
+```bash
+python ~/.openclaw/workspace/skills/memory-management/scripts/weekly_maintenance.py
+```
+
+---
+
+## 决策流程
+
+```
+发生事件
+    │
+    ▼
+需要记住?
+    ├── 否 → 忽略
+    │
+    └── 是
+         │
+         ▼
+    详细程度?
+         ├── 高 → 写L2 (journal/YYYY-MM-DD.md)
+         │         └── 更新L0引用
+         │
+         └── 低 → 写L0 (MEMORY.md)
+                   └── 定期检查大小
+```
+
+**L2 → L1 升级流程:**
+```
+L2积累
+    │
+    ▼
+重要/持久?
+    ├── 是 → 提炼 → 写L1 (milestones/)
+    │         └── 更新L0索引
+    │
+    └── 否 → 保持L2
+```
+
+---
+
+## 使用脚本
+
+### 快速写入L0
+```bash
+python scripts/write_l0.py "摘要内容" --link "memory/journal/2026-03-13.md"
+```
+
+### 创建/追加L2
+```bash
+python scripts/write_l2.py --date 2026-03-13 --title "事件标题" --file content.txt
+```
+
+### 检查L0大小
+```bash
+python scripts/check_size.py  # 输出:L0当前3.2KB/4KB ✅
+```
+
+### 每周维护
+```bash
+python scripts/weekly_maintenance.py --send-report
+```
+
+---
+
+## 红线规则
+
+1. **L0 < 4KB**:超过则必须归档到L1
+2. **L0只存索引**:详情必须通过路径引用
+3. **L2日期命名**:必须按YYYY-MM-DD格式
+4. **每周必须维护**:不能无限累积L2
+5. **飞书历史必须检查**:当检查每日总结时,必须读取所有session提取飞书聊天记录
+
+---
+
+## 与memory-merger的关系
+
+- **memory-merger**:专注L2→L1的合并逻辑
+- **memory-management**:完整的记忆体系管理(写入+维护+检查)
+
+当需要执行L2→L1合并时,本技能会调用memory-merger。
diff --git a/workspace/skills/memory-management/scripts/check_size.py b/workspace/skills/memory-management/scripts/check_size.py
new file mode 100755
index 0000000..f5b40c0
--- /dev/null
+++ b/workspace/skills/memory-management/scripts/check_size.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+"""
+检查MEMORY.md文件大小
+"""
+
+import sys
+from pathlib import Path
+
+
+def main():
+    workspace = Path.home() / ".openclaw" / "workspace"
+    memory_file = workspace / "MEMORY.md"
+    
+    if not memory_file.exists():
+        print("❌ MEMORY.md 不存在")
+        return 1
+    
+    size = memory_file.stat().st_size
+    kb = size / 1024
+    
+    print(f"📊 MEMORY.md (L0层) 大小检查")
+    print(f"   当前: {kb:.1f}KB / 4KB")
+    
+    if size > 4096:
+        print("   🚨 状态: 超过红线!需要立即归档")
+        return 2
+    elif size > 3500:
+        print("   ⚠️  状态: 接近限制,建议准备归档")
+        return 1
+    else:
+        print("   ✅ 状态: 正常")
+        return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/workspace/skills/memory-management/scripts/daily_check.py b/workspace/skills/memory-management/scripts/daily_check.py
new file mode 100755
index 0000000..52fc94a
--- /dev/null
+++ b/workspace/skills/memory-management/scripts/daily_check.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+"""
+每日记忆检查脚本
+在晚上10点后触发,检查今日是否已写入L2
+"""
+
+import os
+import sys
+from datetime import datetime
+from pathlib import Path
+
+
+def get_workspace_path() -> Path:
+    """获取workspace路径。"""
+    return Path.home() / ".openclaw" / "workspace"
+
+
+def check_today_journal() -> bool:
+    """检查今日是否已有L2记录。"""
+    workspace = get_workspace_path()
+    today = datetime.now().strftime("%Y-%m-%d")
+    journal_file = workspace / "memory" / "journal" / f"{today}.md"
+    return journal_file.exists()
+
+
+def get_l0_size() -> int:
+    """获取MEMORY.md文件大小(字节)。"""
+    workspace = get_workspace_path()
+    memory_file = workspace / "MEMORY.md"
+    if memory_file.exists():
+        return memory_file.stat().st_size
+    return 0
+
+
+def format_size(size_bytes: int) -> str:
+    """格式化文件大小显示。"""
+    kb = size_bytes / 1024
+    return f"{kb:.1f}KB"
+
+
+def main():
+    """主函数。"""
+    today_str = datetime.now().strftime("%Y-%m-%d")
+    print(f"📅 日期检查: {today_str}")
+    print("=" * 50)
+    
+    # 检查今日L2
+    has_today_journal = check_today_journal()
+    print(f"\n📝 L2记录检查:")
+    if has_today_journal:
+        print("  ✅ 今日已有journal记录")
+    else:
+        print("  ⚠️  今日尚未创建journal记录")
+        print("  💡 建议:如有重要决策或事件,写入L2详情层")
+    
+    # 检查L0大小
+    l0_size = get_l0_size()
+    print(f"\n📊 L0 (MEMORY.md) 大小检查:")
+    print(f"  当前: {format_size(l0_size)} / 4KB")
+    
+    if l0_size > 4096:
+        print("  🚨 警告:超过4KB红线!需要立即归档到L1")
+    elif l0_size > 3500:
+        print("  ⚠️  提醒:接近4KB限制,建议准备归档")
+    else:
+        print("  ✅ 大小正常")
+    
+    print("\n" + "=" * 50)
+    print("📋 每日维护清单:")
+    if not has_today_journal:
+        print("  [ ] 如有重要事件,写入今日L2")
+    else:
+        print("  [x] L2记录已存在")
+    print("  [ ] 检查MEMORY.md最近活动摘要")
+    if l0_size > 3500:
+        print("  [ ] L0接近限制,考虑归档到L1")
+    print("  [ ] 确认L0层引用链接有效")
+    
+    return 0 if has_today_journal else 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/workspace/skills/memory-management/scripts/weekly_maintenance.py b/workspace/skills/memory-management/scripts/weekly_maintenance.py
new file mode 100755
index 0000000..e4cee14
--- /dev/null
+++ b/workspace/skills/memory-management/scripts/weekly_maintenance.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python3
+"""
+每周维护脚本
+周一早上9:30执行,负责:
+1. 运行memory-merger整理L2→L1
+2. 检查L0大小
+3. 生成周报
+4. 发送报告(可选)
+"""
+
+import os
+import sys
+import subprocess
+from datetime import datetime, timedelta
+from pathlib import Path
+
+
+def get_workspace_path() -> Path:
+    """获取workspace路径。"""
+    return Path.home() / ".openclaw" / "workspace"
+
+
+def run_memory_merger() -> tuple:
+    """运行memory-merger技能。"""
+    workspace = get_workspace_path()
+    merger_path = workspace / "skills" / "memory-merger"
+    
+    if not merger_path.exists():
+        return False, "memory-merger技能未安装"
+    
+    # 运行memory-merger
+    try:
+        result = subprocess.run(
+            [sys.executable, str(merger_path / "scripts" / "merge.py"), "memory-management"],
+            capture_output=True,
+            text=True,
+            timeout=60
+        )
+        if result.returncode == 0:
+            return True, result.stdout
+        else:
+            return False, result.stderr
+    except Exception as e:
+        return False, str(e)
+
+
+def check_l0_size() -> dict:
+    """检查L0状态。"""
+    workspace = get_workspace_path()
+    memory_file = workspace / "MEMORY.md"
+    
+    if not memory_file.exists():
+        return {"exists": False, "size": 0, "status": "missing"}
+    
+    size = memory_file.stat().st_size
+    kb = size / 1024
+    
+    if size > 4096:
+        status = "over_limit"
+    elif size > 3500:
+        status = "warning"
+    else:
+        status = "ok"
+    
+    return {
+        "exists": True,
+        "size": size,
+        "size_kb": kb,
+        "status": status
+    }
+
+
+def count_journal_files() -> int:
+    """统计本周L2文件数量。"""
+    workspace = get_workspace_path()
+    journal_dir = workspace / "memory" / "journal"
+    
+    if not journal_dir.exists():
+        return 0
+    
+    # 获取本周日期范围
+    today = datetime.now()
+    start_of_week = datetime.combine((today - timedelta(days=today.weekday())).date(), datetime.min.time())
+    
+    count = 0
+    for f in journal_dir.glob("*.md"):
+        try:
+            file_date = datetime.strptime(f.stem, "%Y-%m-%d")
+            if start_of_week <= file_date <= today:
+                count += 1
+        except ValueError:
+            continue
+    
+    return count
+
+
+def count_milestone_files() -> int:
+    """统计L1里程碑文件数量。"""
+    workspace = get_workspace_path()
+    milestones_dir = workspace / "memory" / "milestones"
+    
+    if not milestones_dir.exists():
+        return 0
+    
+    return len(list(milestones_dir.glob("*.md")))
+
+
+def generate_report() -> str:
+    """生成周报内容。"""
+    today_str = datetime.now().strftime("%Y-%m-%d")
+    week_start = (datetime.now() - timedelta(days=datetime.now().weekday())).strftime("%Y-%m-%d")
+    
+    report = []
+    report.append("# 📊 记忆管理周报")
+    report.append(f"**周期**: {week_start} ~ {today_str}")
+    report.append(f"**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M')}")
+    report.append("")
+    report.append("---")
+    report.append("")
+    
+    # L0状态
+    l0_status = check_l0_size()
+    report.append("## 📋 L0层 (MEMORY.md)")
+    if l0_status["exists"]:
+        report.append(f"- **大小**: {l0_status['size_kb']:.1f}KB / 4KB")
+        if l0_status["status"] == "ok":
+            report.append("- **状态**: ✅ 正常")
+        elif l0_status["status"] == "warning":
+            report.append("- **状态**: ⚠️ 接近限制,建议归档")
+        else:
+            report.append("- **状态**: 🚨 超过红线,需要立即归档")
+    else:
+        report.append("- **状态**: ❌ 文件不存在")
+    report.append("")
+    
+    # L2统计
+    journal_count = count_journal_files()
+    report.append("## 📝 L2层 (Journal)")
+    report.append(f"- **本周新增**: {journal_count} 条记录")
+    report.append("")
+    
+    # L1统计
+    milestone_count = count_milestone_files()
+    report.append("## 🗂️ L1层 (Milestones)")
+    report.append(f"- **里程碑总数**: {milestone_count} 个主题")
+    report.append("")
+    
+    # 维护任务
+    report.append("## 🔧 本周维护任务")
+    
+    # 尝试运行memory-merger
+    success, output = run_memory_merger()
+    if success:
+        report.append("- ✅ L2→L1合并完成")
+        if output.strip():
+            report.append(f"- 📄 合并详情:\n```\n{output}\n```")
+    else:
+        report.append(f"- ❌ L2→L1合并失败: {output}")
+    
+    if l0_status["status"] in ["warning", "over_limit"]:
+        report.append("- ⚠️ L0层需要归档整理")
+    
+    report.append("")
+    report.append("---")
+    report.append("")
+    report.append("*由memory-management技能自动生成*")
+    
+    return "\n".join(report)
+
+
+def main():
+    """主函数。"""
+    import argparse
+    parser = argparse.ArgumentParser(description="三层记忆每周维护")
+    parser.add_argument("--send-report", action="store_true", help="发送报告到飞书")
+    parser.add_argument("--output", type=str, help="报告输出文件路径")
+    args = parser.parse_args()
+    
+    print("🔄 开始执行每周维护...")
+    print("=" * 50)
+    
+    # 生成报告
+    report = generate_report()
+    
+    # 输出到文件
+    if args.output:
+        with open(args.output, 'w', encoding='utf-8') as f:
+            f.write(report)
+        print(f"✅ 报告已保存到: {args.output}")
+    
+    # 打印报告
+    print("\n" + report)
+    
+    # 发送到飞书(如果需要)
+    if args.send_report:
+        print("\n📤 发送到飞书...")
+        # 这里会调用message工具,但在脚本中我们通过stdout返回
+        print(report)
+    
+    print("\n" + "=" * 50)
+    print("✅ 每周维护完成")
+    
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/workspace/skills/memory-management/scripts/write_l0.py b/workspace/skills/memory-management/scripts/write_l0.py
new file mode 100755
index 0000000..7d29caf
--- /dev/null
+++ b/workspace/skills/memory-management/scripts/write_l0.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+"""
+快速写入L0层 (MEMORY.md)
+"""
+
+import sys
+import argparse
+from datetime import datetime
+from pathlib import Path
+
+
+def main():
+    parser = argparse.ArgumentParser(description="写入L0层记忆")
+    parser.add_argument("content", help="记录内容")
+    parser.add_argument("--link", help="关联的L2文件路径")
+    parser.add_argument("--type", default="活动", help="记录类型")
+    args = parser.parse_args()
+    
+    workspace = Path.home() / ".openclaw" / "workspace"
+    memory_file = workspace / "MEMORY.md"
+    
+    # 确保文件存在
+    if not memory_file.exists():
+        print(f"❌ {memory_file} 不存在")
+        return 1
+    
+    today = datetime.now().strftime("%Y-%m-%d")
+    
+    # 构建记录行
+    line = f"- **[{args.type}]** {args.content}"
+    if args.link:
+        line += f" → 详见 [{args.link}](./{args.link})"
+    
+    # 读取现有内容
+    with open(memory_file, 'r', encoding='utf-8') as f:
+        content = f.read()
+    
+    # 在"最近活动"部分添加
+    if "### 最近活动" in content:
+        parts = content.split("### 最近活动")
+        if len(parts) == 2:
+            # 在第一行后插入
+            lines = parts[1].split('\n')
+            insert_idx = 0
+            for i, l in enumerate(lines):
+                if l.strip() and not l.startswith('#'):
+                    insert_idx = i
+                    break
+            lines.insert(insert_idx, f"- {today}: {line[2:]}")
+            new_content = parts[0] + "### 最近活动" + '\n'.join(lines)
+            
+            with open(memory_file, 'w', encoding='utf-8') as f:
+                f.write(new_content)
+            
+            print(f"✅ 已写入L0: {args.content}")
+            return 0
+    
+    print("⚠️  未找到'最近活动'区块,请手动添加")
+    return 1
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/workspace/skills/memory-management/scripts/write_l2.py b/workspace/skills/memory-management/scripts/write_l2.py
new file mode 100755
index 0000000..b8934b7
--- /dev/null
+++ b/workspace/skills/memory-management/scripts/write_l2.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+"""
+创建/追加L2层 (journal/)
+"""
+
+import sys
+import argparse
+from datetime import datetime
+from pathlib import Path
+
+
+def main():
+    parser = argparse.ArgumentParser(description="写入L2层记忆")
+    parser.add_argument("--date", default=datetime.now().strftime("%Y-%m-%d"), help="日期 (YYYY-MM-DD)")
+    parser.add_argument("--title", required=True, help="事件标题")
+    parser.add_argument("--content", help="内容(或从stdin读取)")
+    parser.add_argument("--file", help="从文件读取内容")
+    args = parser.parse_args()
+    
+    workspace = Path.home() / ".openclaw" / "workspace"
+    journal_dir = workspace / "memory" / "journal"
+    journal_dir.mkdir(parents=True, exist_ok=True)
+    
+    journal_file = journal_dir / f"{args.date}.md"
+    
+    # 获取内容
+    content = ""
+    if args.file:
+        with open(args.file, 'r', encoding='utf-8') as f:
+            content = f.read()
+    elif args.content:
+        content = args.content
+    elif not sys.stdin.isatty():
+        content = sys.stdin.read()
+    
+    now = datetime.now().strftime("%H:%M")
+    
+    # 构建记录
+    entry = f"\n## [{now}] {args.title}\n\n{content}\n"
+    
+    # 写入文件
+    if journal_file.exists():
+        with open(journal_file, 'a', encoding='utf-8') as f:
+            f.write(entry)
+        print(f"✅ 已追加到 {journal_file}")
+    else:
+        header = f"# {args.date}\n"
+        with open(journal_file, 'w', encoding='utf-8') as f:
+            f.write(header + entry)
+        print(f"✅ 已创建 {journal_file}")
+    
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/workspace/skills/memory-merger/SKILL.md b/workspace/skills/memory-merger/SKILL.md
new file mode 100644
index 0000000..d6ff27b
--- /dev/null
+++ b/workspace/skills/memory-merger/SKILL.md
@@ -0,0 +1,108 @@
+---
+name: memory-merger
+description: 'Merges mature lessons from a domain memory file into its instruction file. Syntax: `/memory-merger >domain [scope]` where scope is `global` (default), `user`, `workspace`, or `ws`.'
+---
+
+# Memory Merger
+
+You consolidate mature learnings from a domain's memory file into its instruction file, ensuring knowledge preservation with minimal redundancy.
+
+**Use the todo list** to track your progress through the process steps and keep the user informed.
+
+## Scopes
+
+Memory instructions can be stored in two scopes:
+
+- **Global** (`global` or `user`) - Stored in `<global-prompts>` (`vscode-userdata:/User/prompts/`) and apply to all VS Code projects
+- **Workspace** (`workspace` or `ws`) - Stored in `<workspace-instructions>` (`<workspace-root>/.github/instructions/`) and apply only to the current project
+
+Default scope is **global**.
+
+Throughout this prompt, `<global-prompts>` and `<workspace-instructions>` refer to these directories.
+
+## Syntax
+
+```
+/memory-merger >domain-name [scope]
+```
+
+- `>domain-name` - Required. The domain to merge (e.g., `>clojure`, `>git-workflow`, `>prompt-engineering`)
+- `[scope]` - Optional. One of: `global`, `user` (both mean global), `workspace`, or `ws`. Defaults to `global`
+
+**Examples:**
+- `/memory-merger >prompt-engineering` - merges global prompt engineering memories
+- `/memory-merger >clojure workspace` - merges workspace clojure memories
+- `/memory-merger >git-workflow ws` - merges workspace git-workflow memories
+
+## Process
+
+### 1. Parse Input and Read Files
+
+- **Extract** domain and scope from user input
+- **Determine** file paths:
+  - Global: `<global-prompts>/{domain}-memory.instructions.md` → `<global-prompts>/{domain}.instructions.md`
+  - Workspace: `<workspace-instructions>/{domain}-memory.instructions.md` → `<workspace-instructions>/{domain}.instructions.md`
+- The user may have mistyped the domain; if you don't find the memory file, glob the directory and check whether a likely match exists. Ask the user for input if in doubt.
+- **Read** both files (memory file must exist; instruction file may not)
+
+### 2. Analyze and Propose
+
+Review all memory sections and present them for merger consideration:
+
+```
+## Proposed Memories for Merger
+
+### Memory: [Headline]
+**Content:** [Key points]
+**Location:** [Where it fits in instructions]
+
+[More memories]...
+```
+
+Say: "Please review these memories. Approve all with 'go' or specify which to skip."
+
+**STOP and wait for user input.**
+
+### 3. Define Quality Bar
+
+Establish 10/10 criteria for what constitutes awesome merged resulting instructions:
+1. **Zero knowledge loss** - Every detail, example, and nuance preserved
+2. **Minimal redundancy** - Overlapping guidance consolidated
+3. **Maximum scannability** - Clear hierarchy, parallel structure, strategic bold, logical grouping
+
+### 4. Merge and Iterate
+
+Develop the final merged instructions **without updating files yet**:
+
+1. Draft the merged instructions incorporating approved memories
+2. Evaluate against quality bar
+3. Refine structure, wording, organization
+4. Repeat until the merged instructions meet 10/10 criteria
+
+### 5. Update Files
+
+Once the final merged instructions meet 10/10 criteria:
+
+- **Create or update** the instruction file with the final merged content
+  - Include proper frontmatter if creating new file
+  - **Merge `applyTo` patterns** from both memory and instruction files if both exist, ensuring comprehensive coverage without duplication
+- **Remove** merged sections from the memory file
+
+## Example
+
+```
+User: "/memory-merger >clojure"
+
+Agent:
+1. Reads clojure-memory.instructions.md and clojure.instructions.md
+2. Proposes 3 memories for merger
+3. [STOPS]
+
+User: "go"
+
+Agent:
+4. Defines quality bar for 10/10
+5. Merges new instructions candidate, iterates to 10/10
+6. Updates clojure.instructions.md
+7. Cleans clojure-memory.instructions.md
+```
diff --git a/workspace/skills/ontology/.clawhub/origin.json b/workspace/skills/ontology/.clawhub/origin.json
new file mode 100644
index 0000000..f7cad74
--- /dev/null
+++ b/workspace/skills/ontology/.clawhub/origin.json
@@ -0,0 +1,7 @@
+{
+  "version": 1,
+  "registry": "https://clawhub.ai",
+  "slug": "ontology",
+  "installedVersion": "1.0.4",
+  "installedAt": 1773305411897
+}
diff --git a/workspace/skills/ontology/SKILL.md b/workspace/skills/ontology/SKILL.md
new file mode 100644
index 0000000..d136c31
--- /dev/null
+++ b/workspace/skills/ontology/SKILL.md
@@ -0,0 +1,232 @@
+---
+name: ontology
+description: Typed knowledge graph for structured agent memory and composable skills. Use when creating/querying entities (Person, Project, Task, Event, Document), linking related objects, enforcing constraints, planning multi-step actions as graph transformations, or when skills need to share state. Trigger on "remember", "what do I know about", "link X to Y", "show dependencies", entity CRUD, or cross-skill data access.
+---
+
+# Ontology
+
+A typed vocabulary + constraint system for representing knowledge as a verifiable graph.
+
+## Core Concept
+
+Everything is an **entity** with a **type**, **properties**, and **relations** to other entities. Every mutation is validated against type constraints before committing.
+
+```
+Entity: { id, type, properties, relations, created, updated }
+Relation: { from_id, relation_type, to_id, properties }
+```
+
+## When to Use
+
+| Trigger | Action |
+|---------|--------|
+| "Remember that..." | Create/update entity |
+| "What do I know about X?" | Query graph |
+| "Link X to Y" | Create relation |
+| "Show all tasks for project Z" | Graph traversal |
+| "What depends on X?" | Dependency query |
+| Planning multi-step work | Model as graph transformations |
+| Skill needs shared state | Read/write ontology objects |
+
+## Core Types
+
+```yaml
+# Agents & People
+Person: { name, email?, phone?, notes? }
+Organization: { name, type?, members[] }
+
+# Work
+Project: { name, status, goals[], owner? }
+Task: { title, status, due?, priority?, assignee?, blockers[] }
+Goal: { description, target_date?, metrics[] }
+
+# Time & Place
+Event: { title, start, end?, location?, attendees[], recurrence? }
+Location: { name, address?, coordinates? }
+
+# Information
+Document: { title, path?, url?, summary? }
+Message: { content, sender, recipients[], thread? }
+Thread: { subject, participants[], messages[] }
+Note: { content, tags[], refs[] }
+
+# Resources
+Account: { service, username, credential_ref? }
+Device: { name, type, identifiers[] }
+Credential: { service, secret_ref }  # Never store secrets directly
+
+# Meta
+Action: { type, target, timestamp, outcome? }
+Policy: { scope, rule, enforcement }
+```
+
+## Storage
+
+Default: `memory/ontology/graph.jsonl`
+
+```jsonl
+{"op":"create","entity":{"id":"p_001","type":"Person","properties":{"name":"Alice"}}}
+{"op":"create","entity":{"id":"proj_001","type":"Project","properties":{"name":"Website Redesign","status":"active"}}}
+{"op":"relate","from":"proj_001","rel":"has_owner","to":"p_001"}
+```
+
+Query via scripts or direct file ops. For complex graphs, migrate to SQLite.
+
+### Append-Only Rule
+
+When working with existing ontology data or schema, **append/merge** changes instead of overwriting files. This preserves history and avoids clobbering prior definitions.
+
+## Workflows
+
+### Create Entity
+
+```bash
+python3 scripts/ontology.py create --type Person --props '{"name":"Alice","email":"alice@example.com"}'
+```
+
+### Query
+
+```bash
+python3 scripts/ontology.py query --type Task --where '{"status":"open"}'
+python3 scripts/ontology.py get --id task_001
+python3 scripts/ontology.py related --id proj_001 --rel has_task
+```
+
+### Link Entities
+
+```bash
+python3 scripts/ontology.py relate --from proj_001 --rel has_task --to task_001
+```
+
+### Validate
+
+```bash
+python3 scripts/ontology.py validate  # Check all constraints
+```
+
+## Constraints
+
+Define in `memory/ontology/schema.yaml`:
+
+```yaml
+types:
+  Task:
+    required: [title, status]
+    status_enum: [open, in_progress, blocked, done]
+  
+  Event:
+    required: [title, start]
+    validate: "end >= start if end exists"
+
+  Credential:
+    required: [service, secret_ref]
+    forbidden_properties: [password, secret, token]  # Force indirection
+
+relations:
+  has_owner:
+    from_types: [Project, Task]
+    to_types: [Person]
+    cardinality: many_to_one
+  
+  blocks:
+    from_types: [Task]
+    to_types: [Task]
+    acyclic: true  # No circular dependencies
+```
+
+## Skill Contract
+
+Skills that use ontology should declare:
+
+```yaml
+# In SKILL.md frontmatter or header
+ontology:
+  reads: [Task, Project, Person]
+  writes: [Task, Action]
+  preconditions:
+    - "Task.assignee must exist"
+  postconditions:
+    - "Created Task has status=open"
+```
+
+## Planning as Graph Transformation
+
+Model multi-step plans as a sequence of graph operations:
+
+```
+Plan: "Schedule team meeting and create follow-up tasks"
+
+1. CREATE Event { title: "Team Sync", attendees: [p_001, p_002] }
+2. RELATE Event -> has_project -> proj_001
+3. CREATE Task { title: "Prepare agenda", assignee: p_001 }
+4. RELATE Task -> for_event -> event_001
+5. CREATE Task { title: "Send summary", assignee: p_001, blockers: [task_001] }
+```
+
+Each step is validated before execution. Rollback on constraint violation.
+
+## Integration Patterns
+
+### With Causal Inference
+
+Log ontology mutations as causal actions:
+
+```python
+# When creating/updating entities, also log to causal action log
+action = {
+    "action": "create_entity",
+    "domain": "ontology", 
+    "context": {"type": "Task", "project": "proj_001"},
+    "outcome": "created"
+}
+```
+
+### Cross-Skill Communication
+
+```python
+# Email skill creates commitment
+commitment = ontology.create("Commitment", {
+    "source_message": msg_id,
+    "description": "Send report by Friday",
+    "due": "2026-01-31"
+})
+
+# Task skill picks it up
+tasks = ontology.query("Commitment", {"status": "pending"})
+for c in tasks:
+    ontology.create("Task", {
+        "title": c.description,
+        "due": c.due,
+        "source": c.id
+    })
+```
+
+## Quick Start
+
+```bash
+# Initialize ontology storage
+mkdir -p memory/ontology
+touch memory/ontology/graph.jsonl
+
+# Create schema (optional but recommended)
+python3 scripts/ontology.py schema-append --data '{
+  "types": {
+    "Task": { "required": ["title", "status"] },
+    "Project": { "required": ["name"] },
+    "Person": { "required": ["name"] }
+  }
+}'
+
+# Start using
+python3 scripts/ontology.py create --type Person --props '{"name":"Alice"}'
+python3 scripts/ontology.py list --type Person
+```
+
+## References
+
+- `references/schema.md` — Full type definitions and constraint patterns
+- `references/queries.md` — Query language and traversal examples
+
+## Instruction Scope
+
+Runtime instructions operate on local files (`memory/ontology/graph.jsonl` and `memory/ontology/schema.yaml`) and provide CLI usage for create/query/relate/validate; this is within scope. The skill reads/writes workspace files and will create the `memory/ontology` directory when used. Validation includes property/enum/forbidden checks, relation type/cardinality validation, acyclicity for relations marked `acyclic: true`, and Event `end >= start` checks; other higher-level constraints may still be documentation-only unless implemented in code.
diff --git a/workspace/skills/ontology/_meta.json b/workspace/skills/ontology/_meta.json
new file mode 100644
index 0000000..fdafdf8
--- /dev/null
+++ b/workspace/skills/ontology/_meta.json
@@ -0,0 +1,6 @@
+{
+  "ownerId": "kn72dv4fm7ss7swbq47nnpad9x7zy2jh",
+  "slug": "ontology",
+  "version": "1.0.4",
+  "publishedAt": 1773249559725
+}
\ No newline at end of file
diff --git a/workspace/skills/ontology/references/queries.md b/workspace/skills/ontology/references/queries.md
new file mode 100644
index 0000000..fefb678
--- /dev/null
+++ b/workspace/skills/ontology/references/queries.md
@@ -0,0 +1,211 @@
+# Query Reference
+
+Query patterns and graph traversal examples.
+
+## Basic Queries
+
+### Get by ID
+
+```bash
+python3 scripts/ontology.py get --id task_001
+```
+
+### List by Type
+
+```bash
+# All tasks
+python3 scripts/ontology.py list --type Task
+
+# All people
+python3 scripts/ontology.py list --type Person
+```
+
+### Filter by Properties
+
+```bash
+# Open tasks
+python3 scripts/ontology.py query --type Task --where '{"status":"open"}'
+
+# High priority tasks
+python3 scripts/ontology.py query --type Task --where '{"priority":"high"}'
+
+# Tasks assigned to specific person (by property)
+python3 scripts/ontology.py query --type Task --where '{"assignee":"p_001"}'
+```
+
+## Relation Queries
+
+### Get Related Entities
+
+```bash
+# Tasks belonging to a project (outgoing)
+python3 scripts/ontology.py related --id proj_001 --rel has_task
+
+# What projects does this task belong to (outgoing part_of: Task -> Project)
+python3 scripts/ontology.py related --id task_001 --rel part_of --dir outgoing
+
+# All relations for an entity (both directions)
+python3 scripts/ontology.py related --id p_001 --dir both
+```
+
+### Common Patterns
+
+```bash
+# Who owns this project?
+python3 scripts/ontology.py related --id proj_001 --rel has_owner
+
+# What events is this person attending?
+python3 scripts/ontology.py related --id p_001 --rel attendee_of --dir outgoing
+
+# What's blocking this task? (tasks with an outgoing "blocks" edge pointing here)
+python3 scripts/ontology.py related --id task_001 --rel blocks --dir incoming
+```
+
+## Programmatic Queries
+
+### Python API
+
+```python
+from scripts.ontology import load_graph, query_entities, get_related
+
+# Load the graph
+entities, relations = load_graph("memory/ontology/graph.jsonl")
+
+# Query entities
+open_tasks = query_entities("Task", {"status": "open"}, "memory/ontology/graph.jsonl")
+
+# Get related
+project_tasks = get_related("proj_001", "has_task", "memory/ontology/graph.jsonl")
+```
+
+### Complex Queries
+
+```python
+# Find all tasks blocked by incomplete dependencies
+def find_blocked_tasks(graph_path):
+    entities, relations = load_graph(graph_path)
+    blocked = []
+    
+    for entity in entities.values():
+        if entity["type"] != "Task":
+            continue
+        if entity["properties"].get("status") == "blocked":
+            # Find its blockers: incoming "blocks" relations point at this task
+            blockers = get_related(entity["id"], "blocks", graph_path, "incoming")
+            incomplete_blockers = [
+                b for b in blockers 
+                if b["entity"]["properties"].get("status") != "done"
+            ]
+            if incomplete_blockers:
+                blocked.append({
+                    "task": entity,
+                    "blockers": incomplete_blockers
+                })
+    
+    return blocked
+```
+
+### Path Queries
+
+```python
+# Find path between two entities
+def find_path(from_id, to_id, graph_path, max_depth=5):
+    entities, relations = load_graph(graph_path)
+    
+    visited = set()
+    queue = [(from_id, [])]
+    
+    while queue:
+        current, path = queue.pop(0)
+        
+        if current == to_id:
+            return path
+        
+        if current in visited or len(path) >= max_depth:
+            continue
+        
+        visited.add(current)
+        
+        for rel in relations:
+            if rel["from"] == current and rel["to"] not in visited:
+                queue.append((rel["to"], path + [rel]))
+            if rel["to"] == current and rel["from"] not in visited:
+                queue.append((rel["from"], path + [{**rel, "direction": "incoming"}]))
+    
+    return None  # No path found
+```
+
+## Query Patterns by Use Case
+
+### Task Management
+
+```bash
+# All my open tasks
+python3 scripts/ontology.py query --type Task --where '{"status":"open","assignee":"p_me"}'
+
+# Overdue tasks (requires custom script for date comparison)
+# See references/schema.md for date handling
+
+# Tasks with no blockers
+python3 scripts/ontology.py query --type Task --where '{"status":"open"}'
+# Then filter in code for those with no incoming "blocks" relations
+```
+
+### Project Overview
+
+```bash
+# All tasks in project
+python3 scripts/ontology.py related --id proj_001 --rel has_task
+
+# Project team members
+python3 scripts/ontology.py related --id proj_001 --rel has_member
+
+# Project goals
+python3 scripts/ontology.py related --id proj_001 --rel has_goal
+```
+
+### People & Contacts
+
+```bash
+# All people
+python3 scripts/ontology.py list --type Person
+
+# People in an organization
+python3 scripts/ontology.py related --id org_001 --rel has_member
+
+# What's assigned to this person
+python3 scripts/ontology.py related --id p_001 --rel assigned_to --dir incoming
+```
+
+### Events & Calendar
+
+```bash
+# All events
+python3 scripts/ontology.py list --type Event
+
+# Events at a location
+python3 scripts/ontology.py related --id loc_001 --rel located_at --dir incoming
+
+# Event attendees
+python3 scripts/ontology.py related --id event_001 --rel attendee_of --dir incoming
+```
+
+## Aggregations
+
+For complex aggregations, use Python:
+
+```python
+from collections import Counter
+
+def task_status_summary(project_id, graph_path):
+    """Count tasks by status for a project."""
+    tasks = get_related(project_id, "has_task", graph_path)
+    statuses = Counter(t["entity"]["properties"].get("status", "unknown") for t in tasks)
+    return dict(statuses)
+
+def workload_by_person(graph_path):
+    """Count open tasks per person."""
+    open_tasks = query_entities("Task", {"status": "open"}, graph_path)
+    workload = Counter(t["properties"].get("assignee") for t in open_tasks)
+    return dict(workload)
+```
diff --git a/workspace/skills/ontology/references/schema.md b/workspace/skills/ontology/references/schema.md
new file mode 100644
index 0000000..094c894
--- /dev/null
+++ b/workspace/skills/ontology/references/schema.md
@@ -0,0 +1,322 @@
+# Ontology Schema Reference
+
+Full type definitions and constraint patterns for the ontology graph.
+
+## Core Types
+
+### Agents & People
+
+```yaml
+Person:
+  required: [name]
+  properties:
+    name: string
+    email: string?
+    phone: string?
+    organization: ref(Organization)?
+    notes: string?
+    tags: string[]?
+
+Organization:
+  required: [name]
+  properties:
+    name: string
+    type: enum(company, team, community, government, other)?
+    website: url?
+    members: ref(Person)[]?
+```
+
+### Work Management
+
+```yaml
+Project:
+  required: [name]
+  properties:
+    name: string
+    description: string?
+    status: enum(planning, active, paused, completed, archived)
+    owner: ref(Person)?
+    team: ref(Person)[]?
+    goals: ref(Goal)[]?
+    start_date: date?
+    end_date: date?
+    tags: string[]?
+
+Task:
+  required: [title, status]
+  properties:
+    title: string
+    description: string?
+    status: enum(open, in_progress, blocked, done, cancelled)
+    priority: enum(low, medium, high, urgent)?
+    assignee: ref(Person)?
+    project: ref(Project)?
+    due: datetime?
+    estimate_hours: number?
+    blockers: ref(Task)[]?
+    tags: string[]?
+
+Goal:
+  required: [description]
+  properties:
+    description: string
+    target_date: date?
+    status: enum(active, achieved, abandoned)?
+    metrics: object[]?
+    key_results: string[]?
+```
+
+### Time & Location
+
+```yaml
+Event:
+  required: [title, start]
+  properties:
+    title: string
+    description: string?
+    start: datetime
+    end: datetime?
+    location: ref(Location)?
+    attendees: ref(Person)[]?
+    recurrence: object?  # iCal RRULE format
+    status: enum(confirmed, tentative, cancelled)?
+    reminders: object[]?
+
+Location:
+  required: [name]
+  properties:
+    name: string
+    address: string?
+    city: string?
+    country: string?
+    coordinates: object?  # {lat, lng}
+    timezone: string?
+```
+
+### Information
+
+```yaml
+Document:
+  required: [title]
+  properties:
+    title: string
+    path: string?  # Local file path
+    url: url?      # Remote URL
+    mime_type: string?
+    summary: string?
+    content_hash: string?
+    tags: string[]?
+
+Message:
+  required: [content, sender]
+  properties:
+    content: string
+    sender: ref(Person)
+    recipients: ref(Person)[]
+    thread: ref(Thread)?
+    timestamp: datetime
+    platform: string?  # email, slack, whatsapp, etc.
+    external_id: string?
+
+Thread:
+  required: [subject]
+  properties:
+    subject: string
+    participants: ref(Person)[]
+    messages: ref(Message)[]
+    status: enum(active, archived)?
+    last_activity: datetime?
+
+Note:
+  required: [content]
+  properties:
+    content: string
+    title: string?
+    tags: string[]?
+    refs: ref(Entity)[]?  # Links to any entity
+    created: datetime
+```
+
+### Resources
+
+```yaml
+Account:
+  required: [service, username]
+  properties:
+    service: string  # github, gmail, aws, etc.
+    username: string
+    url: url?
+    credential_ref: ref(Credential)?
+
+Device:
+  required: [name, type]
+  properties:
+    name: string
+    type: enum(computer, phone, tablet, server, iot, other)
+    os: string?
+    identifiers: object?  # {mac, serial, etc.}
+    owner: ref(Person)?
+
+Credential:
+  required: [service, secret_ref]
+  forbidden_properties: [password, secret, token, key, api_key]
+  properties:
+    service: string
+    secret_ref: string  # Reference to secret store (e.g., "keychain:github-token")
+    expires: datetime?
+    scope: string[]?
+```
+
+### Meta
+
+```yaml
+Action:
+  required: [type, target, timestamp]
+  properties:
+    type: string  # create, update, delete, send, etc.
+    target: ref(Entity)
+    timestamp: datetime
+    actor: ref(Person|Agent)?
+    outcome: enum(success, failure, pending)?
+    details: object?
+
+Policy:
+  required: [scope, rule]
+  properties:
+    scope: string  # What this policy applies to
+    rule: string   # The constraint in natural language or code
+    enforcement: enum(block, warn, log)
+    enabled: boolean
+```
+
+## Relation Types
+
+### Ownership & Assignment
+
+```yaml
+owns:
+  from_types: [Person, Organization]
+  to_types: [Account, Device, Document, Project]
+  cardinality: one_to_many
+
+has_owner:
+  from_types: [Project, Task, Document]
+  to_types: [Person]
+  cardinality: many_to_one
+
+assigned_to:
+  from_types: [Task]
+  to_types: [Person]
+  cardinality: many_to_one
+```
+
+### Hierarchy & Containment
+
+```yaml
+has_task:
+  from_types: [Project]
+  to_types: [Task]
+  cardinality: one_to_many
+
+has_goal:
+  from_types: [Project]
+  to_types: [Goal]
+  cardinality: one_to_many
+
+member_of:
+  from_types: [Person]
+  to_types: [Organization]
+  cardinality: many_to_many
+
+part_of:
+  from_types: [Task, Document, Event]
+  to_types: [Project]
+  cardinality: many_to_one
+```
+
+### Dependencies
+
+```yaml
+blocks:
+  from_types: [Task]
+  to_types: [Task]
+  acyclic: true  # Prevents circular dependencies
+  cardinality: many_to_many
+
+depends_on:
+  from_types: [Task, Project]
+  to_types: [Task, Project, Event]
+  acyclic: true
+  cardinality: many_to_many
+
+requires:
+  from_types: [Action]
+  to_types: [Credential, Policy]
+  cardinality: many_to_many
+```
+
+### References
+
+```yaml
+mentions:
+  from_types: [Document, Message, Note]
+  to_types: [Person, Project, Task, Event]
+  cardinality: many_to_many
+
+references:
+  from_types: [Document, Note]
+  to_types: [Document, Note]
+  cardinality: many_to_many
+
+follows_up:
+  from_types: [Task, Event]
+  to_types: [Event, Message]
+  cardinality: many_to_one
+```
+
+### Events
+
+```yaml
+attendee_of:
+  from_types: [Person]
+  to_types: [Event]
+  cardinality: many_to_many
+  properties:
+    status: enum(accepted, declined, tentative, pending)
+
+located_at:
+  from_types: [Event, Person, Device]
+  to_types: [Location]
+  cardinality: many_to_one
+```
+
+## Global Constraints
+
+```yaml
+constraints:
+  # Credentials must never store secrets directly
+  - type: Credential
+    rule: "forbidden_properties: [password, secret, token]"
+    message: "Credentials must use secret_ref to reference external secret storage"
+
+  # Tasks must have valid status transitions
+  - type: Task
+    rule: "status transitions: open -> in_progress -> (done|blocked) -> done"
+    enforcement: warn
+
+  # Events must have end >= start
+  - type: Event
+    rule: "if end exists: end >= start"
+    message: "Event end time must be after start time"
+
+  # No orphan tasks (should belong to a project or have explicit owner)
+  - type: Task
+    rule: "has_relation(part_of, Project) OR has_property(owner)"
+    enforcement: warn
+    message: "Task should belong to a project or have an explicit owner"
+
+  # Circular dependency prevention
+  - relation: blocks
+    rule: "acyclic"
+    message: "Circular task dependencies are not allowed"
+```
diff --git a/workspace/skills/ontology/scripts/ontology.py b/workspace/skills/ontology/scripts/ontology.py
new file mode 100644
index 0000000..040b435
--- /dev/null
+++ b/workspace/skills/ontology/scripts/ontology.py
@@ -0,0 +1,580 @@
+#!/usr/bin/env python3
+"""
+Ontology graph operations: create, query, relate, validate.
+
+Usage:
+    python ontology.py create --type Person --props '{"name":"Alice"}'
+    python ontology.py get --id p_001
+    python ontology.py query --type Task --where '{"status":"open"}'
+    python ontology.py relate --from proj_001 --rel has_task --to task_001
+    python ontology.py related --id proj_001 --rel has_task
+    python ontology.py list --type Person
+    python ontology.py delete --id p_001
+    python ontology.py validate
+"""
+
+import argparse
+import json
+import uuid
+from datetime import datetime, timezone
+from pathlib import Path
+
+DEFAULT_GRAPH_PATH = "memory/ontology/graph.jsonl"
+DEFAULT_SCHEMA_PATH = "memory/ontology/schema.yaml"
+
+
+def resolve_safe_path(
+    user_path: str,
+    *,
+    root: Path | None = None,
+    must_exist: bool = False,
+    label: str = "path",
+) -> Path:
+    """Resolve user path within root and reject traversal outside it."""
+    if not user_path or not user_path.strip():
+        raise SystemExit(f"Invalid {label}: empty path")
+
+    safe_root = (root or Path.cwd()).resolve()
+    candidate = Path(user_path).expanduser()
+    if not candidate.is_absolute():
+        candidate = safe_root / candidate
+
+    try:
+        resolved = candidate.resolve(strict=False)
+    except OSError as exc:
+        raise SystemExit(f"Invalid {label}: {exc}") from exc
+
+    try:
+        resolved.relative_to(safe_root)
+    except ValueError:
+        raise SystemExit(
+            f"Invalid {label}: must stay within workspace root '{safe_root}'"
+        )
+
+    if must_exist and not resolved.exists():
+        raise SystemExit(f"Invalid {label}: file not found '{resolved}'")
+
+    return resolved
+
+
def generate_id(type_name: str) -> str:
    """Build a short unique entity ID: 4-char lowercase type prefix + 8 random hex chars."""
    return f"{type_name.lower()[:4]}_{uuid.uuid4().hex[:8]}"
+
+
def load_graph(path: str) -> tuple[dict, list]:
    """Replay the append-only JSONL log at *path* into current state.

    Returns (entities, relations): entities maps id -> entity dict,
    relations is a list of {from, rel, to, properties} dicts. A missing
    file yields empty state. Supported ops: create, update, delete,
    relate, unrelate; records with unknown ops are ignored.
    """
    entities: dict = {}
    relations: list = []

    graph_path = Path(path)
    if not graph_path.exists():
        return entities, relations

    with open(graph_path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            record = json.loads(line)
            op = record.get("op")

            if op == "create":
                entity = record["entity"]
                entities[entity["id"]] = entity
            elif op == "update":
                entity_id = record["id"]
                if entity_id in entities:
                    entities[entity_id]["properties"].update(record.get("properties", {}))
                    # Bug fix: only overwrite the audit timestamp when the
                    # record actually carries one; the previous code could
                    # reset "updated" to None for timestamp-less records.
                    timestamp = record.get("timestamp")
                    if timestamp is not None:
                        entities[entity_id]["updated"] = timestamp
            elif op == "delete":
                entities.pop(record["id"], None)
            elif op == "relate":
                relations.append({
                    "from": record["from"],
                    "rel": record["rel"],
                    "to": record["to"],
                    "properties": record.get("properties", {}),
                })
            elif op == "unrelate":
                # Remove every relation matching the (from, rel, to) triple.
                relations = [
                    r for r in relations
                    if not (r["from"] == record["from"]
                            and r["rel"] == record["rel"]
                            and r["to"] == record["to"])
                ]

    return entities, relations
+
+
def append_op(path: str, record: dict):
    """Append one operation record as a JSON line, creating parent dirs as needed."""
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, "a") as handle:
        handle.write(json.dumps(record) + "\n")
+
+
def create_entity(type_name: str, properties: dict, graph_path: str, entity_id: str | None = None) -> dict:
    """Append a create op to the graph log and return the new entity record.

    When *entity_id* is omitted/falsy a fresh ID is generated from the type name.
    """
    now = datetime.now(timezone.utc).isoformat()
    entity = {
        "id": entity_id if entity_id else generate_id(type_name),
        "type": type_name,
        "properties": properties,
        "created": now,
        "updated": now,
    }
    append_op(graph_path, {"op": "create", "entity": entity, "timestamp": now})
    return entity
+
+
def get_entity(entity_id: str, graph_path: str) -> dict | None:
    """Return the entity with *entity_id*, or None if it does not exist."""
    entities, _relations = load_graph(graph_path)
    return entities.get(entity_id)
+
+
def query_entities(type_name: str, where: dict, graph_path: str) -> list:
    """Return entities of *type_name* (all types when falsy) whose properties
    equal every (key, value) pair in *where* (missing keys compare as None)."""
    entities, _relations = load_graph(graph_path)
    return [
        entity
        for entity in entities.values()
        if (not type_name or entity["type"] == type_name)
        and all(entity["properties"].get(k) == v for k, v in where.items())
    ]
+
+
def list_entities(type_name: str, graph_path: str) -> list:
    """Return every entity, optionally restricted to *type_name*."""
    entities, _relations = load_graph(graph_path)
    if not type_name:
        return list(entities.values())
    return [entity for entity in entities.values() if entity["type"] == type_name]
+
+
def update_entity(entity_id: str, properties: dict, graph_path: str) -> dict | None:
    """Append an update op merging *properties* into the entity.

    Returns the updated entity, or None when no such entity exists.
    """
    entities, _relations = load_graph(graph_path)
    entity = entities.get(entity_id)
    if entity is None:
        return None

    now = datetime.now(timezone.utc).isoformat()
    append_op(graph_path, {"op": "update", "id": entity_id, "properties": properties, "timestamp": now})

    # Mirror the logged change in the in-memory copy we hand back.
    entity["properties"].update(properties)
    entity["updated"] = now
    return entity
+
+
def delete_entity(entity_id: str, graph_path: str) -> bool:
    """Append a delete op for *entity_id*; return True if the entity existed."""
    entities, _relations = load_graph(graph_path)
    if entity_id not in entities:
        return False

    append_op(graph_path, {
        "op": "delete",
        "id": entity_id,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })
    return True
+
+
def create_relation(from_id: str, rel_type: str, to_id: str, properties: dict, graph_path: str):
    """Append a relate op linking *from_id* -> *to_id* via *rel_type*; return the record.

    Endpoints are not checked for existence here; run `validate` to catch
    dangling relations.
    """
    record = {
        "op": "relate",
        "from": from_id,
        "rel": rel_type,
        "to": to_id,
        "properties": properties,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
    append_op(graph_path, record)
    return record
+
+
def get_related(entity_id: str, rel_type: str, graph_path: str, direction: str = "outgoing") -> list:
    """Return neighbours of *entity_id* reached via *rel_type* (any relation when falsy).

    *direction* is "outgoing", "incoming", or "both"; for "both" each result
    additionally carries a "direction" key. Relations whose other endpoint
    no longer exists in the graph are skipped.
    """
    entities, relations = load_graph(graph_path)
    results = []

    for rel in relations:
        if rel_type and rel["rel"] != rel_type:
            continue

        is_outgoing = rel["from"] == entity_id
        is_incoming = rel["to"] == entity_id

        if direction == "outgoing":
            if is_outgoing and rel["to"] in entities:
                results.append({"relation": rel["rel"], "entity": entities[rel["to"]]})
        elif direction == "incoming":
            if is_incoming and rel["from"] in entities:
                results.append({"relation": rel["rel"], "entity": entities[rel["from"]]})
        elif direction == "both":
            if is_outgoing or is_incoming:
                # A self-loop counts as outgoing, matching the original behavior.
                other_id = rel["to"] if is_outgoing else rel["from"]
                if other_id in entities:
                    results.append({
                        "relation": rel["rel"],
                        "direction": "outgoing" if is_outgoing else "incoming",
                        "entity": entities[other_id],
                    })

    return results
+
+
def validate_graph(graph_path: str, schema_path: str) -> list:
    """Validate the graph at *graph_path* against the schema at *schema_path*.

    Returns a list of human-readable error strings (empty when valid).
    Checks per-entity required/forbidden properties and "<field>_enum"
    allow-lists, per-relation endpoint types, cardinality, and acyclicity,
    plus a limited set of global constraints (currently Event end >= start).
    Other constraints in the schema remain documentation-only.
    """
    entities, relations = load_graph(graph_path)
    errors = []

    schema = load_schema(schema_path)
    type_schemas = schema.get("types", {})
    relation_schemas = schema.get("relations", {})
    global_constraints = schema.get("constraints", [])

    # --- Per-entity checks ---
    for entity_id, entity in entities.items():
        type_schema = type_schemas.get(entity["type"], {})
        props = entity["properties"]

        for prop in type_schema.get("required", []):
            if prop not in props:
                errors.append(f"{entity_id}: missing required property '{prop}'")

        for prop in type_schema.get("forbidden_properties", []):
            if prop in props:
                errors.append(f"{entity_id}: contains forbidden property '{prop}'")

        # Enum convention: a schema key "<field>_enum" lists allowed values
        # for <field>. Falsy values (missing, "", None) are not checked.
        for prop, allowed in type_schema.items():
            if prop.endswith("_enum"):
                field = prop[: -len("_enum")]
                value = props.get(field)
                if value and value not in allowed:
                    errors.append(f"{entity_id}: '{field}' must be one of {allowed}, got '{value}'")

    # --- Per-relation checks (endpoint types, cardinality, acyclicity) ---
    rel_index = {}
    for rel in relations:
        rel_index.setdefault(rel["rel"], []).append(rel)

    for rel_type, rel_schema in relation_schemas.items():
        rels = rel_index.get(rel_type, [])
        from_types = rel_schema.get("from_types", [])
        to_types = rel_schema.get("to_types", [])
        cardinality = rel_schema.get("cardinality")

        for rel in rels:
            from_entity = entities.get(rel["from"])
            to_entity = entities.get(rel["to"])
            if not from_entity or not to_entity:
                errors.append(f"{rel_type}: relation references missing entity ({rel['from']} -> {rel['to']})")
                continue
            if from_types and from_entity["type"] not in from_types:
                errors.append(
                    f"{rel_type}: from entity {rel['from']} type {from_entity['type']} not in {from_types}"
                )
            if to_types and to_entity["type"] not in to_types:
                errors.append(
                    f"{rel_type}: to entity {rel['to']} type {to_entity['type']} not in {to_types}"
                )

        # Cardinality: "many_to_one" limits each from-side to one edge,
        # "one_to_many" limits each to-side, "one_to_one" limits both.
        if cardinality in ("one_to_one", "one_to_many", "many_to_one"):
            from_counts = {}
            to_counts = {}
            for rel in rels:
                from_counts[rel["from"]] = from_counts.get(rel["from"], 0) + 1
                to_counts[rel["to"]] = to_counts.get(rel["to"], 0) + 1

            if cardinality in ("one_to_one", "many_to_one"):
                for from_id, count in from_counts.items():
                    if count > 1:
                        errors.append(f"{rel_type}: from entity {from_id} violates cardinality {cardinality}")
            if cardinality in ("one_to_one", "one_to_many"):
                for to_id, count in to_counts.items():
                    if count > 1:
                        errors.append(f"{rel_type}: to entity {to_id} violates cardinality {cardinality}")

        if rel_schema.get("acyclic", False):
            adjacency = {}
            for rel in rels:
                adjacency.setdefault(rel["from"], []).append(rel["to"])
            if _relation_graph_has_cycle(adjacency):
                errors.append(f"{rel_type}: cyclic dependency detected")

    # --- Global constraints (limited enforcement) ---
    for constraint in global_constraints:
        rule = (constraint.get("rule") or "").strip().lower()
        if constraint.get("type") == "Event" and "end" in rule and "start" in rule:
            for entity_id, entity in entities.items():
                if entity["type"] != "Event":
                    continue
                start = entity["properties"].get("start")
                end = entity["properties"].get("end")
                if start and end:
                    try:
                        if datetime.fromisoformat(end) < datetime.fromisoformat(start):
                            errors.append(f"{entity_id}: end must be >= start")
                    except ValueError:
                        errors.append(f"{entity_id}: invalid datetime format in start/end")
                    except TypeError:
                        # Bug fix: comparing a naive datetime with a
                        # timezone-aware one raises TypeError; report it
                        # as a validation error instead of crashing.
                        errors.append(f"{entity_id}: start/end mix naive and timezone-aware datetimes")

    return errors


def _relation_graph_has_cycle(adjacency: dict) -> bool:
    """Return True when the directed graph {node: [successor, ...]} has a cycle.

    Iterative three-colour DFS rather than recursion, so long dependency
    chains cannot hit Python's recursion limit.
    """
    WHITE, GREY, BLACK = 0, 1, 2
    colour = {}
    for start in adjacency:
        if colour.get(start, WHITE) != WHITE:
            continue
        colour[start] = GREY
        stack = [(start, iter(adjacency.get(start, ())))]
        while stack:
            node, successors = stack[-1]
            for nxt in successors:
                state = colour.get(nxt, WHITE)
                if state == GREY:
                    return True  # back edge => cycle
                if state == WHITE:
                    colour[nxt] = GREY
                    stack.append((nxt, iter(adjacency.get(nxt, ()))))
                    break
            else:
                colour[node] = BLACK
                stack.pop()
    return False
+
+
def load_schema(schema_path: str) -> dict:
    """Load the YAML schema at *schema_path*; returns {} when the file is absent.

    PyYAML is imported lazily so the CLI works without it until a schema
    file is actually present.
    """
    schema_file = Path(schema_path)
    if not schema_file.exists():
        return {}
    try:
        import yaml
    except ImportError as exc:
        # Bug fix: a bare ImportError gave the user no hint what to install.
        raise SystemExit("PyYAML is required to read the schema file (pip install pyyaml)") from exc
    with open(schema_file) as f:
        return yaml.safe_load(f) or {}
+
+
def write_schema(schema_path: str, schema: dict) -> None:
    """Serialize *schema* to YAML at *schema_path*, creating parent dirs as needed."""
    target = Path(schema_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    import yaml  # deferred: PyYAML only needed when schemas are written
    with open(target, "w") as f:
        yaml.safe_dump(schema, f, sort_keys=False)
+
+
def merge_schema(base: dict, incoming: dict) -> dict:
    """Merge *incoming* into *base* and return *base* (mutated in place).

    Matching dict values merge recursively; matching list values append
    items not already present; any other collision takes the incoming value.
    A falsy *incoming* leaves *base* unchanged.
    """
    for key, new_value in (incoming or {}).items():
        old_value = base.get(key)
        if key in base and isinstance(old_value, dict) and isinstance(new_value, dict):
            base[key] = merge_schema(old_value, new_value)
        elif key in base and isinstance(old_value, list) and isinstance(new_value, list):
            base[key] = old_value + [item for item in new_value if item not in old_value]
        else:
            base[key] = new_value
    return base
+
+
def append_schema(schema_path: str, incoming: dict) -> dict:
    """Merge *incoming* into the schema file on disk and return the merged schema."""
    merged = merge_schema(load_schema(schema_path), incoming)
    write_schema(schema_path, merged)
    return merged
+
+
def main():
    """Command-line entry point.

    Parses the subcommand, sandboxes every user-supplied file path to the
    current working directory, then dispatches to the corresponding graph
    operation and prints the result as JSON (or a short status message).
    """
    parser = argparse.ArgumentParser(description="Ontology graph operations")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Create
    create_p = subparsers.add_parser("create", help="Create entity")
    create_p.add_argument("--type", "-t", required=True, help="Entity type")
    create_p.add_argument("--props", "-p", default="{}", help="Properties JSON")
    create_p.add_argument("--id", help="Entity ID (auto-generated if not provided)")
    create_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Get
    get_p = subparsers.add_parser("get", help="Get entity by ID")
    get_p.add_argument("--id", required=True, help="Entity ID")
    get_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Query
    query_p = subparsers.add_parser("query", help="Query entities")
    query_p.add_argument("--type", "-t", help="Entity type")
    query_p.add_argument("--where", "-w", default="{}", help="Filter JSON")
    query_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # List
    list_p = subparsers.add_parser("list", help="List entities")
    list_p.add_argument("--type", "-t", help="Entity type")
    list_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Update
    update_p = subparsers.add_parser("update", help="Update entity")
    update_p.add_argument("--id", required=True, help="Entity ID")
    update_p.add_argument("--props", "-p", required=True, help="Properties JSON")
    update_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Delete
    delete_p = subparsers.add_parser("delete", help="Delete entity")
    delete_p.add_argument("--id", required=True, help="Entity ID")
    delete_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Relate (note: --from/--to map to from_id/to_id since "from" is a keyword)
    relate_p = subparsers.add_parser("relate", help="Create relation")
    relate_p.add_argument("--from", dest="from_id", required=True, help="From entity ID")
    relate_p.add_argument("--rel", "-r", required=True, help="Relation type")
    relate_p.add_argument("--to", dest="to_id", required=True, help="To entity ID")
    relate_p.add_argument("--props", "-p", default="{}", help="Relation properties JSON")
    relate_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Related
    related_p = subparsers.add_parser("related", help="Get related entities")
    related_p.add_argument("--id", required=True, help="Entity ID")
    related_p.add_argument("--rel", "-r", help="Relation type filter")
    related_p.add_argument("--dir", "-d", choices=["outgoing", "incoming", "both"], default="outgoing")
    related_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)

    # Validate
    validate_p = subparsers.add_parser("validate", help="Validate graph")
    validate_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
    validate_p.add_argument("--schema", "-s", default=DEFAULT_SCHEMA_PATH)

    # Schema append
    schema_p = subparsers.add_parser("schema-append", help="Append/merge schema fragment")
    schema_p.add_argument("--schema", "-s", default=DEFAULT_SCHEMA_PATH)
    schema_p.add_argument("--data", "-d", help="Schema fragment as JSON")
    schema_p.add_argument("--file", "-f", help="Schema fragment file (YAML or JSON)")

    args = parser.parse_args()
    # Sandbox every user-supplied path to the workspace (cwd);
    # resolve_safe_path exits with an error on traversal outside the root.
    workspace_root = Path.cwd().resolve()

    if hasattr(args, "graph"):
        args.graph = str(
            resolve_safe_path(args.graph, root=workspace_root, label="graph path")
        )
    if hasattr(args, "schema"):
        args.schema = str(
            resolve_safe_path(args.schema, root=workspace_root, label="schema path")
        )
    if hasattr(args, "file") and args.file:
        args.file = str(
            resolve_safe_path(
                args.file, root=workspace_root, must_exist=True, label="schema file"
            )
        )

    # NOTE(review): the "not found" branches below print to stdout and exit 0,
    # so shell scripts cannot distinguish success from a miss — consider
    # stderr plus a nonzero exit status in a future revision.
    if args.command == "create":
        props = json.loads(args.props)
        entity = create_entity(args.type, props, args.graph, args.id)
        print(json.dumps(entity, indent=2))

    elif args.command == "get":
        entity = get_entity(args.id, args.graph)
        if entity:
            print(json.dumps(entity, indent=2))
        else:
            print(f"Entity not found: {args.id}")

    elif args.command == "query":
        where = json.loads(args.where)
        results = query_entities(args.type, where, args.graph)
        print(json.dumps(results, indent=2))

    elif args.command == "list":
        results = list_entities(args.type, args.graph)
        print(json.dumps(results, indent=2))

    elif args.command == "update":
        props = json.loads(args.props)
        entity = update_entity(args.id, props, args.graph)
        if entity:
            print(json.dumps(entity, indent=2))
        else:
            print(f"Entity not found: {args.id}")

    elif args.command == "delete":
        if delete_entity(args.id, args.graph):
            print(f"Deleted: {args.id}")
        else:
            print(f"Entity not found: {args.id}")

    elif args.command == "relate":
        props = json.loads(args.props)
        rel = create_relation(args.from_id, args.rel, args.to_id, props, args.graph)
        print(json.dumps(rel, indent=2))

    elif args.command == "related":
        results = get_related(args.id, args.rel, args.graph, args.dir)
        print(json.dumps(results, indent=2))

    elif args.command == "validate":
        errors = validate_graph(args.graph, args.schema)
        if errors:
            print("Validation errors:")
            for err in errors:
                print(f"  - {err}")
        else:
            print("Graph is valid.")

    elif args.command == "schema-append":
        if not args.data and not args.file:
            raise SystemExit("schema-append requires --data or --file")

        # Fragment source: inline JSON via --data, otherwise a file whose
        # suffix decides the parser (.json -> json, anything else -> YAML).
        incoming = {}
        if args.data:
            incoming = json.loads(args.data)
        else:
            path = Path(args.file)
            if path.suffix.lower() == ".json":
                with open(path) as f:
                    incoming = json.load(f)
            else:
                import yaml
                with open(path) as f:
                    incoming = yaml.safe_load(f) or {}

        merged = append_schema(args.schema, incoming)
        print(json.dumps(merged, indent=2))


if __name__ == "__main__":
    main()
diff --git a/workspace/skills/self-improving-agent/.clawhub/origin.json b/workspace/skills/self-improving-agent/.clawhub/origin.json
new file mode 100644
index 0000000..c0718b2
--- /dev/null
+++ b/workspace/skills/self-improving-agent/.clawhub/origin.json
@@ -0,0 +1,7 @@
+{
+  "version": 1,
+  "registry": "https://clawhub.ai",
+  "slug": "self-improving-agent",
+  "installedVersion": "3.0.1",
+  "installedAt": 1773304830240
+}
diff --git a/workspace/skills/self-improving-agent/.learnings/ERRORS.md b/workspace/skills/self-improving-agent/.learnings/ERRORS.md
new file mode 100644
index 0000000..6bce392
--- /dev/null
+++ b/workspace/skills/self-improving-agent/.learnings/ERRORS.md
@@ -0,0 +1,5 @@
+# Errors Log
+
+Command failures, exceptions, and unexpected behaviors.
+
+---
diff --git a/workspace/skills/self-improving-agent/.learnings/FEATURE_REQUESTS.md b/workspace/skills/self-improving-agent/.learnings/FEATURE_REQUESTS.md
new file mode 100644
index 0000000..3527277
--- /dev/null
+++ b/workspace/skills/self-improving-agent/.learnings/FEATURE_REQUESTS.md
@@ -0,0 +1,5 @@
+# Feature Requests
+
+Capabilities requested by user that don't currently exist.
+
+---
diff --git a/workspace/skills/self-improving-agent/.learnings/LEARNINGS.md b/workspace/skills/self-improving-agent/.learnings/LEARNINGS.md
new file mode 100644
index 0000000..d31195d
--- /dev/null
+++ b/workspace/skills/self-improving-agent/.learnings/LEARNINGS.md
@@ -0,0 +1,5 @@
+# Learnings Log
+
+Captured learnings, corrections, and discoveries. Review before major tasks.
+
+---
diff --git a/workspace/skills/self-improving-agent/SKILL.md b/workspace/skills/self-improving-agent/SKILL.md
new file mode 100644
index 0000000..97b5717
--- /dev/null
+++ b/workspace/skills/self-improving-agent/SKILL.md
@@ -0,0 +1,647 @@
+---
+name: self-improvement
+description: "Captures learnings, errors, and corrections to enable continuous improvement. Use when: (1) A command or operation fails unexpectedly, (2) User corrects Claude ('No, that's wrong...', 'Actually...'), (3) User requests a capability that doesn't exist, (4) An external API or tool fails, (5) Claude realizes its knowledge is outdated or incorrect, (6) A better approach is discovered for a recurring task. Also review learnings before major tasks."
+metadata: {}
+---
+
+# Self-Improvement Skill
+
+Log learnings and errors to markdown files for continuous improvement. Coding agents can later process these into fixes, and important learnings get promoted to project memory.
+
+## Quick Reference
+
+| Situation | Action |
+|-----------|--------|
+| Command/operation fails | Log to `.learnings/ERRORS.md` |
+| User corrects you | Log to `.learnings/LEARNINGS.md` with category `correction` |
+| User wants missing feature | Log to `.learnings/FEATURE_REQUESTS.md` |
+| API/external tool fails | Log to `.learnings/ERRORS.md` with integration details |
+| Knowledge was outdated | Log to `.learnings/LEARNINGS.md` with category `knowledge_gap` |
+| Found better approach | Log to `.learnings/LEARNINGS.md` with category `best_practice` |
+| Simplify/Harden recurring patterns | Log/update `.learnings/LEARNINGS.md` with `Source: simplify-and-harden` and a stable `Pattern-Key` |
+| Similar to existing entry | Link with `**See Also**`, consider priority bump |
+| Broadly applicable learning | Promote to `CLAUDE.md`, `AGENTS.md`, and/or `.github/copilot-instructions.md` |
+| Workflow improvements | Promote to `AGENTS.md` (OpenClaw workspace) |
+| Tool gotchas | Promote to `TOOLS.md` (OpenClaw workspace) |
+| Behavioral patterns | Promote to `SOUL.md` (OpenClaw workspace) |
+
+## OpenClaw Setup (Recommended)
+
+OpenClaw is the primary platform for this skill. It uses workspace-based prompt injection with automatic skill loading.
+
+### Installation
+
+**Via ClawdHub (recommended):**
+```bash
+clawdhub install self-improving-agent
+```
+
+**Manual:**
+```bash
+git clone https://github.com/peterskoett/self-improving-agent.git ~/.openclaw/skills/self-improving-agent
+```
+
+Remade for OpenClaw from the original repo: https://github.com/pskoett/pskoett-ai-skills (skill: https://github.com/pskoett/pskoett-ai-skills/tree/main/skills/self-improvement)
+
+### Workspace Structure
+
+OpenClaw injects these files into every session:
+
+```
+~/.openclaw/workspace/
+├── AGENTS.md          # Multi-agent workflows, delegation patterns
+├── SOUL.md            # Behavioral guidelines, personality, principles
+├── TOOLS.md           # Tool capabilities, integration gotchas
+├── MEMORY.md          # Long-term memory (main session only)
+├── memory/            # Daily memory files
+│   └── YYYY-MM-DD.md
+└── .learnings/        # This skill's log files
+    ├── LEARNINGS.md
+    ├── ERRORS.md
+    └── FEATURE_REQUESTS.md
+```
+
+### Create Learning Files
+
+```bash
+mkdir -p ~/.openclaw/workspace/.learnings
+```
+
+Then create the log files (or copy from `assets/`):
+- `LEARNINGS.md` — corrections, knowledge gaps, best practices
+- `ERRORS.md` — command failures, exceptions
+- `FEATURE_REQUESTS.md` — user-requested capabilities
+
+### Promotion Targets
+
+When learnings prove broadly applicable, promote them to workspace files:
+
+| Learning Type | Promote To | Example |
+|---------------|------------|---------|
+| Behavioral patterns | `SOUL.md` | "Be concise, avoid disclaimers" |
+| Workflow improvements | `AGENTS.md` | "Spawn sub-agents for long tasks" |
+| Tool gotchas | `TOOLS.md` | "Git push needs auth configured first" |
+
+### Inter-Session Communication
+
+OpenClaw provides tools to share learnings across sessions:
+
+- **sessions_list** — View active/recent sessions
+- **sessions_history** — Read another session's transcript  
+- **sessions_send** — Send a learning to another session
+- **sessions_spawn** — Spawn a sub-agent for background work
+
+### Optional: Enable Hook
+
+For automatic reminders at session start:
+
+```bash
+# Copy hook to OpenClaw hooks directory
+cp -r hooks/openclaw ~/.openclaw/hooks/self-improvement
+
+# Enable it
+openclaw hooks enable self-improvement
+```
+
+See `references/openclaw-integration.md` for complete details.
+
+---
+
+## Generic Setup (Other Agents)
+
+For Claude Code, Codex, Copilot, or other agents, create `.learnings/` in your project:
+
+```bash
+mkdir -p .learnings
+```
+
+Copy templates from `assets/` or create files with headers.
+
+### Add a reference in AGENTS.md, CLAUDE.md, or .github/copilot-instructions.md reminding you to log learnings (an alternative to hook-based reminders)
+
+#### Self-Improvement Workflow
+
+When errors or corrections occur:
+1. Log to `.learnings/ERRORS.md`, `LEARNINGS.md`, or `FEATURE_REQUESTS.md`
+2. Review and promote broadly applicable learnings to:
+   - `CLAUDE.md` - project facts and conventions
+   - `AGENTS.md` - workflows and automation
+   - `.github/copilot-instructions.md` - Copilot context
+
+## Logging Format
+
+### Learning Entry
+
+Append to `.learnings/LEARNINGS.md`:
+
+```markdown
+## [LRN-YYYYMMDD-XXX] category
+
+**Logged**: ISO-8601 timestamp
+**Priority**: low | medium | high | critical
+**Status**: pending
+**Area**: frontend | backend | infra | tests | docs | config
+
+### Summary
+One-line description of what was learned
+
+### Details
+Full context: what happened, what was wrong, what's correct
+
+### Suggested Action
+Specific fix or improvement to make
+
+### Metadata
+- Source: conversation | error | user_feedback
+- Related Files: path/to/file.ext
+- Tags: tag1, tag2
+- See Also: LRN-20250110-001 (if related to existing entry)
+- Pattern-Key: simplify.dead_code | harden.input_validation (optional, for recurring-pattern tracking)
+- Recurrence-Count: 1 (optional)
+- First-Seen: 2025-01-15 (optional)
+- Last-Seen: 2025-01-15 (optional)
+
+---
+```
+
+### Error Entry
+
+Append to `.learnings/ERRORS.md`:
+
+````markdown
+## [ERR-YYYYMMDD-XXX] skill_or_command_name
+
+**Logged**: ISO-8601 timestamp
+**Priority**: high
+**Status**: pending
+**Area**: frontend | backend | infra | tests | docs | config
+
+### Summary
+Brief description of what failed
+
+### Error
+```
+Actual error message or output
+```
+
+### Context
+- Command/operation attempted
+- Input or parameters used
+- Environment details if relevant
+
+### Suggested Fix
+If identifiable, what might resolve this
+
+### Metadata
+- Reproducible: yes | no | unknown
+- Related Files: path/to/file.ext
+- See Also: ERR-20250110-001 (if recurring)
+
+---
+````
+
+### Feature Request Entry
+
+Append to `.learnings/FEATURE_REQUESTS.md`:
+
+```markdown
+## [FEAT-YYYYMMDD-XXX] capability_name
+
+**Logged**: ISO-8601 timestamp
+**Priority**: medium
+**Status**: pending
+**Area**: frontend | backend | infra | tests | docs | config
+
+### Requested Capability
+What the user wanted to do
+
+### User Context
+Why they needed it, what problem they're solving
+
+### Complexity Estimate
+simple | medium | complex
+
+### Suggested Implementation
+How this could be built, what it might extend
+
+### Metadata
+- Frequency: first_time | recurring
+- Related Features: existing_feature_name
+
+---
+```
+
+## ID Generation
+
+Format: `TYPE-YYYYMMDD-XXX`
+- TYPE: `LRN` (learning), `ERR` (error), `FEAT` (feature)
+- YYYYMMDD: Current date
+- XXX: Sequential number or random 3 chars (e.g., `001`, `A7B`)
+
+Examples: `LRN-20250115-001`, `ERR-20250115-A3F`, `FEAT-20250115-002`
+
+## Resolving Entries
+
+When an issue is fixed, update the entry:
+
+1. Change `**Status**: pending` → `**Status**: resolved`
+2. Add resolution block after Metadata:
+
+```markdown
+### Resolution
+- **Resolved**: 2025-01-16T09:00:00Z
+- **Commit/PR**: abc123 or #42
+- **Notes**: Brief description of what was done
+```
+
+Other status values:
+- `in_progress` - Actively being worked on
+- `wont_fix` - Decided not to address (add reason in Resolution notes)
+- `promoted` - Elevated to CLAUDE.md, AGENTS.md, or .github/copilot-instructions.md
+
+## Promoting to Project Memory
+
+When a learning is broadly applicable (not a one-off fix), promote it to permanent project memory.
+
+### When to Promote
+
+- Learning applies across multiple files/features
+- Knowledge any contributor (human or AI) should know
+- Prevents recurring mistakes
+- Documents project-specific conventions
+
+### Promotion Targets
+
+| Target | What Belongs There |
+|--------|-------------------|
+| `CLAUDE.md` | Project facts, conventions, gotchas for all Claude interactions |
+| `AGENTS.md` | Agent-specific workflows, tool usage patterns, automation rules |
+| `.github/copilot-instructions.md` | Project context and conventions for GitHub Copilot |
+| `SOUL.md` | Behavioral guidelines, communication style, principles (OpenClaw workspace) |
+| `TOOLS.md` | Tool capabilities, usage patterns, integration gotchas (OpenClaw workspace) |
+
+### How to Promote
+
+1. **Distill** the learning into a concise rule or fact
+2. **Add** to appropriate section in target file (create file if needed)
+3. **Update** original entry:
+   - Change `**Status**: pending` → `**Status**: promoted`
+   - Add `**Promoted**: CLAUDE.md`, `AGENTS.md`, or `.github/copilot-instructions.md`
+
+### Promotion Examples
+
+**Learning** (verbose):
+> Project uses pnpm workspaces. Attempted `npm install` but failed. 
+> Lock file is `pnpm-lock.yaml`. Must use `pnpm install`.
+
+**In CLAUDE.md** (concise):
+```markdown
+## Build & Dependencies
+- Package manager: pnpm (not npm) - use `pnpm install`
+```
+
+**Learning** (verbose):
+> When modifying API endpoints, must regenerate TypeScript client.
+> Forgetting this causes type mismatches at runtime.
+
+**In AGENTS.md** (actionable):
+```markdown
+## After API Changes
+1. Regenerate client: `pnpm run generate:api`
+2. Check for type errors: `pnpm tsc --noEmit`
+```
+
+## Recurring Pattern Detection
+
+If logging something similar to an existing entry:
+
+1. **Search first**: `grep -r "keyword" .learnings/`
+2. **Link entries**: Add `**See Also**: ERR-20250110-001` in Metadata
+3. **Bump priority** if issue keeps recurring
+4. **Consider systemic fix**: Recurring issues often indicate:
+   - Missing documentation (→ promote to CLAUDE.md or .github/copilot-instructions.md)
+   - Missing automation (→ add to AGENTS.md)
+   - Architectural problem (→ create tech debt ticket)
+
+## Simplify & Harden Feed
+
+Use this workflow to ingest recurring patterns from the `simplify-and-harden`
+skill and turn them into durable prompt guidance.
+
+### Ingestion Workflow
+
+1. Read `simplify_and_harden.learning_loop.candidates` from the task summary.
+2. For each candidate, use `pattern_key` as the stable dedupe key.
+3. Search `.learnings/LEARNINGS.md` for an existing entry with that key:
+   - `grep -n "Pattern-Key: <pattern_key>" .learnings/LEARNINGS.md`
+4. If found:
+   - Increment `Recurrence-Count`
+   - Update `Last-Seen`
+   - Add `See Also` links to related entries/tasks
+5. If not found:
+   - Create a new `LRN-...` entry
+   - Set `Source: simplify-and-harden`
+   - Set `Pattern-Key`, `Recurrence-Count: 1`, and `First-Seen`/`Last-Seen`
+
+### Promotion Rule (System Prompt Feedback)
+
+Promote recurring patterns into agent context/system prompt files when all are true:
+
+- `Recurrence-Count >= 3`
+- Seen across at least 2 distinct tasks
+- Occurred within a 30-day window
+
+Promotion targets:
+- `CLAUDE.md`
+- `AGENTS.md`
+- `.github/copilot-instructions.md`
+- `SOUL.md` / `TOOLS.md` for OpenClaw workspace-level guidance when applicable
+
+Write promoted rules as short prevention rules (what to do before/while coding),
+not long incident write-ups.
+
+## Periodic Review
+
+Review `.learnings/` at natural breakpoints:
+
+### When to Review
+- Before starting a new major task
+- After completing a feature
+- When working in an area with past learnings
+- Weekly during active development
+
+### Quick Status Check
+```bash
+# Count pending items
+grep -h "Status\*\*: pending" .learnings/*.md | wc -l
+
+# List pending high-priority items
+grep -B5 "Priority\*\*: high" .learnings/*.md | grep "^## \["
+
+# Find learnings for a specific area
+grep -l "Area\*\*: backend" .learnings/*.md
+```
+
+### Review Actions
+- Resolve fixed items
+- Promote applicable learnings
+- Link related entries
+- Escalate recurring issues
+
+## Detection Triggers
+
+Automatically log when you notice:
+
+**Corrections** (→ learning with `correction` category):
+- "No, that's not right..."
+- "Actually, it should be..."
+- "You're wrong about..."
+- "That's outdated..."
+
+**Feature Requests** (→ feature request):
+- "Can you also..."
+- "I wish you could..."
+- "Is there a way to..."
+- "Why can't you..."
+
+**Knowledge Gaps** (→ learning with `knowledge_gap` category):
+- User provides information you didn't know
+- Documentation you referenced is outdated
+- API behavior differs from your understanding
+
+**Errors** (→ error entry):
+- Command returns non-zero exit code
+- Exception or stack trace
+- Unexpected output or behavior
+- Timeout or connection failure
+
+## Priority Guidelines
+
+| Priority | When to Use |
+|----------|-------------|
+| `critical` | Blocks core functionality, data loss risk, security issue |
+| `high` | Significant impact, affects common workflows, recurring issue |
+| `medium` | Moderate impact, workaround exists |
+| `low` | Minor inconvenience, edge case, nice-to-have |
+
+## Area Tags
+
+Use to filter learnings by codebase region:
+
+| Area | Scope |
+|------|-------|
+| `frontend` | UI, components, client-side code |
+| `backend` | API, services, server-side code |
+| `infra` | CI/CD, deployment, Docker, cloud |
+| `tests` | Test files, testing utilities, coverage |
+| `docs` | Documentation, comments, READMEs |
+| `config` | Configuration files, environment, settings |
+
+## Best Practices
+
+1. **Log immediately** - context is freshest right after the issue
+2. **Be specific** - future agents need to understand quickly
+3. **Include reproduction steps** - especially for errors
+4. **Link related files** - makes fixes easier
+5. **Suggest concrete fixes** - not just "investigate"
+6. **Use consistent categories** - enables filtering
+7. **Promote aggressively** - if in doubt, add to CLAUDE.md or .github/copilot-instructions.md
+8. **Review regularly** - stale learnings lose value
+
+## Gitignore Options
+
+**Keep learnings local** (per-developer):
+```gitignore
+.learnings/
+```
+
+**Track learnings in repo** (team-wide):
+Don't add to .gitignore - learnings become shared knowledge.
+
+**Hybrid** (track templates, ignore entries):
+```gitignore
+.learnings/*.md
+!.learnings/.gitkeep
+```
+
+## Hook Integration
+
+Enable automatic reminders through agent hooks. This is **opt-in** - you must explicitly configure hooks.
+
+### Quick Setup (Claude Code / Codex)
+
+Create `.claude/settings.json` in your project:
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [{
+      "matcher": "",
+      "hooks": [{
+        "type": "command",
+        "command": "./skills/self-improvement/scripts/activator.sh"
+      }]
+    }]
+  }
+}
+```
+
+This injects a learning evaluation reminder after each prompt (~50-100 tokens overhead).
+
+### Full Setup (With Error Detection)
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [{
+      "matcher": "",
+      "hooks": [{
+        "type": "command",
+        "command": "./skills/self-improvement/scripts/activator.sh"
+      }]
+    }],
+    "PostToolUse": [{
+      "matcher": "Bash",
+      "hooks": [{
+        "type": "command",
+        "command": "./skills/self-improvement/scripts/error-detector.sh"
+      }]
+    }]
+  }
+}
+```
+
+### Available Hook Scripts
+
+| Script | Hook Type | Purpose |
+|--------|-----------|---------|
+| `scripts/activator.sh` | UserPromptSubmit | Reminds to evaluate learnings after tasks |
+| `scripts/error-detector.sh` | PostToolUse (Bash) | Triggers on command errors |
+
+See `references/hooks-setup.md` for detailed configuration and troubleshooting.
+
+## Automatic Skill Extraction
+
+When a learning is valuable enough to become a reusable skill, extract it using the provided helper.
+
+### Skill Extraction Criteria
+
+A learning qualifies for skill extraction when ANY of these apply:
+
+| Criterion | Description |
+|-----------|-------------|
+| **Recurring** | Has `See Also` links to 2+ similar issues |
+| **Verified** | Status is `resolved` with working fix |
+| **Non-obvious** | Required actual debugging/investigation to discover |
+| **Broadly applicable** | Not project-specific; useful across codebases |
+| **User-flagged** | User says "save this as a skill" or similar |
+
+### Extraction Workflow
+
+1. **Identify candidate**: Learning meets extraction criteria
+2. **Run helper** (or create manually):
+   ```bash
+   ./skills/self-improvement/scripts/extract-skill.sh skill-name --dry-run
+   ./skills/self-improvement/scripts/extract-skill.sh skill-name
+   ```
+3. **Customize SKILL.md**: Fill in template with learning content
+4. **Update learning**: Set status to `promoted_to_skill`, add `Skill-Path`
+5. **Verify**: Read skill in fresh session to ensure it's self-contained
+
+### Manual Extraction
+
+If you prefer manual creation:
+
+1. Create `skills/<skill-name>/SKILL.md`
+2. Use template from `assets/SKILL-TEMPLATE.md`
+3. Follow [Agent Skills spec](https://agentskills.io/specification):
+   - YAML frontmatter with `name` and `description`
+   - Name must match folder name
+   - No README.md inside skill folder
+
+### Extraction Detection Triggers
+
+Watch for these signals that a learning should become a skill:
+
+**In conversation:**
+- "Save this as a skill"
+- "I keep running into this"
+- "This would be useful for other projects"
+- "Remember this pattern"
+
+**In learning entries:**
+- Multiple `See Also` links (recurring issue)
+- High priority + resolved status
+- Category: `best_practice` with broad applicability
+- User feedback praising the solution
+
+### Skill Quality Gates
+
+Before extraction, verify:
+
+- [ ] Solution is tested and working
+- [ ] Description is clear without original context
+- [ ] Code examples are self-contained
+- [ ] No project-specific hardcoded values
+- [ ] Follows skill naming conventions (lowercase, hyphens)
+
+## Multi-Agent Support
+
+This skill works across different AI coding agents with agent-specific activation.
+
+### Claude Code
+
+**Activation**: Hooks (UserPromptSubmit, PostToolUse)
+**Setup**: `.claude/settings.json` with hook configuration
+**Detection**: Automatic via hook scripts
+
+### Codex CLI
+
+**Activation**: Hooks (same pattern as Claude Code)
+**Setup**: `.codex/settings.json` with hook configuration
+**Detection**: Automatic via hook scripts
+
+### GitHub Copilot
+
+**Activation**: Manual (no hook support)
+**Setup**: Add to `.github/copilot-instructions.md`:
+
+```markdown
+## Self-Improvement
+
+After solving non-obvious issues, consider logging to `.learnings/`:
+1. Use format from self-improvement skill
+2. Link related entries with See Also
+3. Promote high-value learnings to skills
+
+Ask in chat: "Should I log this as a learning?"
+```
+
+**Detection**: Manual review at session end
+
+### OpenClaw
+
+**Activation**: Workspace injection + inter-agent messaging
+**Setup**: See "OpenClaw Setup" section above
+**Detection**: Via session tools and workspace files
+
+### Agent-Agnostic Guidance
+
+Regardless of agent, apply self-improvement when you:
+
+1. **Discover something non-obvious** - solution wasn't immediate
+2. **Correct yourself** - initial approach was wrong
+3. **Learn project conventions** - discovered undocumented patterns
+4. **Hit unexpected errors** - especially if diagnosis was difficult
+5. **Find better approaches** - improved on your original solution
+
+### Copilot Chat Integration
+
+For Copilot users, add this to your prompts when relevant:
+
+> After completing this task, evaluate if any learnings should be logged to `.learnings/` using the self-improvement skill format.
+
+Or use quick prompts:
+- "Log this to learnings"
+- "Create a skill from this solution"
+- "Check .learnings/ for related issues"
diff --git a/workspace/skills/self-improving-agent/_meta.json b/workspace/skills/self-improving-agent/_meta.json
new file mode 100644
index 0000000..d53726a
--- /dev/null
+++ b/workspace/skills/self-improving-agent/_meta.json
@@ -0,0 +1,6 @@
+{
+  "ownerId": "kn70cjr952qdec1nx70zs6wefn7ynq2t",
+  "slug": "self-improving-agent",
+  "version": "3.0.1",
+  "publishedAt": 1773230308177
+}
\ No newline at end of file
diff --git a/workspace/skills/self-improving-agent/assets/LEARNINGS.md b/workspace/skills/self-improving-agent/assets/LEARNINGS.md
new file mode 100644
index 0000000..6993f9b
--- /dev/null
+++ b/workspace/skills/self-improving-agent/assets/LEARNINGS.md
@@ -0,0 +1,45 @@
+# Learnings
+
+Corrections, insights, and knowledge gaps captured during development.
+
+**Categories**: correction | insight | knowledge_gap | best_practice
+**Areas**: frontend | backend | infra | tests | docs | config
+**Statuses**: pending | in_progress | resolved | wont_fix | promoted | promoted_to_skill
+
+## Status Definitions
+
+| Status | Meaning |
+|--------|---------|
+| `pending` | Not yet addressed |
+| `in_progress` | Actively being worked on |
+| `resolved` | Issue fixed or knowledge integrated |
+| `wont_fix` | Decided not to address (reason in Resolution) |
+| `promoted` | Elevated to CLAUDE.md, AGENTS.md, or copilot-instructions.md |
+| `promoted_to_skill` | Extracted as a reusable skill |
+
+## Skill Extraction Fields
+
+When a learning is promoted to a skill, add these fields:
+
+```markdown
+**Status**: promoted_to_skill
+**Skill-Path**: skills/skill-name
+```
+
+Example:
+```markdown
+## [LRN-20250115-001] best_practice
+
+**Logged**: 2025-01-15T10:00:00Z
+**Priority**: high
+**Status**: promoted_to_skill
+**Skill-Path**: skills/docker-m1-fixes
+**Area**: infra
+
+### Summary
+Docker build fails on Apple Silicon due to platform mismatch
+...
+```
+
+---
+
diff --git a/workspace/skills/self-improving-agent/assets/SKILL-TEMPLATE.md b/workspace/skills/self-improving-agent/assets/SKILL-TEMPLATE.md
new file mode 100644
index 0000000..0162134
--- /dev/null
+++ b/workspace/skills/self-improving-agent/assets/SKILL-TEMPLATE.md
@@ -0,0 +1,177 @@
+# Skill Template
+
+Template for creating skills extracted from learnings. Copy and customize.
+
+---
+
+## SKILL.md Template
+
+```markdown
+---
+name: skill-name-here
+description: "Concise description of when and why to use this skill. Include trigger conditions."
+---
+
+# Skill Name
+
+Brief introduction explaining the problem this skill solves and its origin.
+
+## Quick Reference
+
+| Situation | Action |
+|-----------|--------|
+| [Trigger 1] | [Action 1] |
+| [Trigger 2] | [Action 2] |
+
+## Background
+
+Why this knowledge matters. What problems it prevents. Context from the original learning.
+
+## Solution
+
+### Step-by-Step
+
+1. First step with code or command
+2. Second step
+3. Verification step
+
+### Code Example
+
+\`\`\`language
+// Example code demonstrating the solution
+\`\`\`
+
+## Common Variations
+
+- **Variation A**: Description and how to handle
+- **Variation B**: Description and how to handle
+
+## Gotchas
+
+- Warning or common mistake #1
+- Warning or common mistake #2
+
+## Related
+
+- Link to related documentation
+- Link to related skill
+
+## Source
+
+Extracted from learning entry.
+- **Learning ID**: LRN-YYYYMMDD-XXX
+- **Original Category**: correction | insight | knowledge_gap | best_practice
+- **Extraction Date**: YYYY-MM-DD
+```
+
+---
+
+## Minimal Template
+
+For simple skills that don't need all sections:
+
+```markdown
+---
+name: skill-name-here
+description: "What this skill does and when to use it."
+---
+
+# Skill Name
+
+[Problem statement in one sentence]
+
+## Solution
+
+[Direct solution with code/commands]
+
+## Source
+
+- Learning ID: LRN-YYYYMMDD-XXX
+```
+
+---
+
+## Template with Scripts
+
+For skills that include executable helpers:
+
+```markdown
+---
+name: skill-name-here
+description: "What this skill does and when to use it."
+---
+
+# Skill Name
+
+[Introduction]
+
+## Quick Reference
+
+| Command | Purpose |
+|---------|---------|
+| `./scripts/helper.sh` | [What it does] |
+| `./scripts/validate.sh` | [What it does] |
+
+## Usage
+
+### Automated (Recommended)
+
+\`\`\`bash
+./skills/skill-name/scripts/helper.sh [args]
+\`\`\`
+
+### Manual Steps
+
+1. Step one
+2. Step two
+
+## Scripts
+
+| Script | Description |
+|--------|-------------|
+| `scripts/helper.sh` | Main utility |
+| `scripts/validate.sh` | Validation checker |
+
+## Source
+
+- Learning ID: LRN-YYYYMMDD-XXX
+```
+
+---
+
+## Naming Conventions
+
+- **Skill name**: lowercase, hyphens for spaces
+  - Good: `docker-m1-fixes`, `api-timeout-patterns`
+  - Bad: `Docker_M1_Fixes`, `APITimeoutPatterns`
+
+- **Description**: Start with action verb, mention trigger
+  - Good: "Handles Docker build failures on Apple Silicon. Use when builds fail with platform mismatch."
+  - Bad: "Docker stuff"
+
+- **Files**:
+  - `SKILL.md` - Required, main documentation
+  - `scripts/` - Optional, executable code
+  - `references/` - Optional, detailed docs
+  - `assets/` - Optional, templates
+
+---
+
+## Extraction Checklist
+
+Before creating a skill from a learning:
+
+- [ ] Learning is verified (status: resolved)
+- [ ] Solution is broadly applicable (not one-off)
+- [ ] Content is complete (has all needed context)
+- [ ] Name follows conventions
+- [ ] Description is concise but informative
+- [ ] Quick Reference table is actionable
+- [ ] Code examples are tested
+- [ ] Source learning ID is recorded
+
+After creating:
+
+- [ ] Update original learning with `promoted_to_skill` status
+- [ ] Add `Skill-Path: skills/skill-name` to learning metadata
+- [ ] Test skill by reading it in a fresh session
diff --git a/workspace/skills/self-improving-agent/hooks/openclaw/HOOK.md b/workspace/skills/self-improving-agent/hooks/openclaw/HOOK.md
new file mode 100644
index 0000000..df67405
--- /dev/null
+++ b/workspace/skills/self-improving-agent/hooks/openclaw/HOOK.md
@@ -0,0 +1,23 @@
+---
+name: self-improvement
+description: "Injects self-improvement reminder during agent bootstrap"
+metadata: {"openclaw":{"emoji":"🧠","events":["agent:bootstrap"]}}
+---
+
+# Self-Improvement Hook
+
+Injects a reminder to evaluate learnings during agent bootstrap.
+
+## What It Does
+
+- Fires on `agent:bootstrap` (before workspace files are injected)
+- Adds a reminder block to check `.learnings/` for relevant entries
+- Prompts the agent to log corrections, errors, and discoveries
+
+## Configuration
+
+No configuration needed. Enable with:
+
+```bash
+openclaw hooks enable self-improvement
+```
diff --git a/workspace/skills/self-improving-agent/hooks/openclaw/handler.js b/workspace/skills/self-improving-agent/hooks/openclaw/handler.js
new file mode 100644
index 0000000..73278ea
--- /dev/null
+++ b/workspace/skills/self-improving-agent/hooks/openclaw/handler.js
@@ -0,0 +1,63 @@
+/**
+ * Self-Improvement Hook for OpenClaw
+ * 
+ * Injects a reminder to evaluate learnings during agent bootstrap.
+ * Fires on agent:bootstrap event before workspace files are injected.
+ */
+
+const REMINDER_CONTENT = `
+## Self-Improvement Reminder
+
+After completing tasks, evaluate if any learnings should be captured:
+
+**Log when:**
+- User corrects you → \`.learnings/LEARNINGS.md\`
+- Command/operation fails → \`.learnings/ERRORS.md\`
+- User wants missing capability → \`.learnings/FEATURE_REQUESTS.md\`
+- You discover your knowledge was wrong → \`.learnings/LEARNINGS.md\`
+- You find a better approach → \`.learnings/LEARNINGS.md\`
+
+**Promote when pattern is proven:**
+- Behavioral patterns → \`SOUL.md\`
+- Workflow improvements → \`AGENTS.md\`
+- Tool gotchas → \`TOOLS.md\`
+
+Keep entries simple: date, title, what happened, what to do differently.
+`.trim();
+
+const handler = async (event) => {
+  // Safety checks for event structure
+  if (!event || typeof event !== 'object') {
+    return;
+  }
+
+  // Only handle agent:bootstrap events
+  if (event.type !== 'agent' || event.action !== 'bootstrap') {
+    return;
+  }
+
+  // Safety check for context
+  if (!event.context || typeof event.context !== 'object') {
+    return;
+  }
+
+  // Skip sub-agent sessions to avoid bootstrap issues (matches handler.ts)
+  // Sub-agents have sessionKey patterns like "agent:main:subagent:..."
+  const sessionKey = event.sessionKey || '';
+  if (sessionKey.includes(':subagent:')) {
+    return;
+  }
+
+  // Inject the reminder as a virtual bootstrap file
+  // Check that bootstrapFiles is an array before pushing
+  if (Array.isArray(event.context.bootstrapFiles)) {
+    event.context.bootstrapFiles.push({
+      path: 'SELF_IMPROVEMENT_REMINDER.md',
+      content: REMINDER_CONTENT,
+      virtual: true,
+    });
+  }
+};
+
+module.exports = handler;
+module.exports.default = handler;
diff --git a/workspace/skills/self-improving-agent/hooks/openclaw/handler.ts b/workspace/skills/self-improving-agent/hooks/openclaw/handler.ts
new file mode 100644
index 0000000..9ec23f3
--- /dev/null
+++ b/workspace/skills/self-improving-agent/hooks/openclaw/handler.ts
@@ -0,0 +1,62 @@
+/**
+ * Self-Improvement Hook for OpenClaw
+ * 
+ * Injects a reminder to evaluate learnings during agent bootstrap.
+ * Fires on agent:bootstrap event before workspace files are injected.
+ */
+
+import type { HookHandler } from 'openclaw/hooks';
+
+const REMINDER_CONTENT = `## Self-Improvement Reminder
+
+After completing tasks, evaluate if any learnings should be captured:
+
+**Log when:**
+- User corrects you → \`.learnings/LEARNINGS.md\`
+- Command/operation fails → \`.learnings/ERRORS.md\`
+- User wants missing capability → \`.learnings/FEATURE_REQUESTS.md\`
+- You discover your knowledge was wrong → \`.learnings/LEARNINGS.md\`
+- You find a better approach → \`.learnings/LEARNINGS.md\`
+
+**Promote when pattern is proven:**
+- Behavioral patterns → \`SOUL.md\`
+- Workflow improvements → \`AGENTS.md\`
+- Tool gotchas → \`TOOLS.md\`
+
+Keep entries simple: date, title, what happened, what to do differently.`;
+
+const handler: HookHandler = async (event) => {
+  // Defensive: a hook must never throw during bootstrap, so bail on malformed events
+  if (!event || typeof event !== 'object') {
+    return;
+  }
+
+  // Only handle agent:bootstrap events; ignore all other types/actions
+  if (event.type !== 'agent' || event.action !== 'bootstrap') {
+    return;
+  }
+
+  // Bail if there is no context object to inject into
+  if (!event.context || typeof event.context !== 'object') {
+    return;
+  }
+
+  // Skip sub-agent sessions so the reminder only reaches the main agent
+  // Sub-agents have sessionKey patterns like "agent:main:subagent:..."
+  const sessionKey = event.sessionKey || '';
+  if (sessionKey.includes(':subagent:')) {
+    return;
+  }
+
+  // Inject the reminder as a virtual bootstrap file
+  // bootstrapFiles may be absent on some event shapes, so verify it is an array first
+  if (Array.isArray(event.context.bootstrapFiles)) {
+    event.context.bootstrapFiles.push({
+      path: 'SELF_IMPROVEMENT_REMINDER.md',
+      content: REMINDER_CONTENT,
+      virtual: true, // presumably not written to disk — confirm against OpenClaw hook docs
+    });
+  }
+};
+
+export default handler;
diff --git a/workspace/skills/self-improving-agent/references/examples.md b/workspace/skills/self-improving-agent/references/examples.md
new file mode 100644
index 0000000..1c1db15
--- /dev/null
+++ b/workspace/skills/self-improving-agent/references/examples.md
@@ -0,0 +1,374 @@
+# Entry Examples
+
+Concrete examples of well-formatted entries with all fields.
+
+## Learning: Correction
+
+```markdown
+## [LRN-20250115-001] correction
+
+**Logged**: 2025-01-15T10:30:00Z
+**Priority**: high
+**Status**: pending
+**Area**: tests
+
+### Summary
+Incorrectly assumed pytest fixtures are scoped to function by default
+
+### Details
+When writing test fixtures, I assumed all fixtures were function-scoped. 
+User corrected that while function scope is the default, the codebase 
+convention uses module-scoped fixtures for database connections to 
+improve test performance.
+
+### Suggested Action
+When creating fixtures that involve expensive setup (DB, network), 
+check existing fixtures for scope patterns before defaulting to function scope.
+
+### Metadata
+- Source: user_feedback
+- Related Files: tests/conftest.py
+- Tags: pytest, testing, fixtures
+
+---
+```
+
+## Learning: Knowledge Gap (Resolved)
+
+```markdown
+## [LRN-20250115-002] knowledge_gap
+
+**Logged**: 2025-01-15T14:22:00Z
+**Priority**: medium
+**Status**: resolved
+**Area**: config
+
+### Summary
+Project uses pnpm not npm for package management
+
+### Details
+Attempted to run `npm install` but project uses pnpm workspaces.
+Lock file is `pnpm-lock.yaml`, not `package-lock.json`.
+
+### Suggested Action
+Check for `pnpm-lock.yaml` or `pnpm-workspace.yaml` before assuming npm.
+Use `pnpm install` for this project.
+
+### Metadata
+- Source: error
+- Related Files: pnpm-lock.yaml, pnpm-workspace.yaml
+- Tags: package-manager, pnpm, setup
+
+### Resolution
+- **Resolved**: 2025-01-15T14:30:00Z
+- **Commit/PR**: N/A - knowledge update
+- **Notes**: Added to CLAUDE.md for future reference
+
+---
+```
+
+## Learning: Promoted to CLAUDE.md
+
+```markdown
+## [LRN-20250115-003] best_practice
+
+**Logged**: 2025-01-15T16:00:00Z
+**Priority**: high
+**Status**: promoted
+**Promoted**: CLAUDE.md
+**Area**: backend
+
+### Summary
+API responses must include correlation ID from request headers
+
+### Details
+All API responses should echo back the X-Correlation-ID header from 
+the request. This is required for distributed tracing. Responses 
+without this header break the observability pipeline.
+
+### Suggested Action
+Always include correlation ID passthrough in API handlers.
+
+### Metadata
+- Source: user_feedback
+- Related Files: src/middleware/correlation.ts
+- Tags: api, observability, tracing
+
+---
+```
+
+## Learning: Promoted to AGENTS.md
+
+```markdown
+## [LRN-20250116-001] best_practice
+
+**Logged**: 2025-01-16T09:00:00Z
+**Priority**: high
+**Status**: promoted
+**Promoted**: AGENTS.md
+**Area**: backend
+
+### Summary
+Must regenerate API client after OpenAPI spec changes
+
+### Details
+When modifying API endpoints, the TypeScript client must be regenerated.
+Forgetting this causes type mismatches that only appear at runtime.
+The generate script also runs validation.
+
+### Suggested Action
+Add to agent workflow: after any API changes, run `pnpm run generate:api`.
+
+### Metadata
+- Source: error
+- Related Files: openapi.yaml, src/client/api.ts
+- Tags: api, codegen, typescript
+
+---
+```
+
+## Error Entry
+
+```markdown
+## [ERR-20250115-A3F] docker_build
+
+**Logged**: 2025-01-15T09:15:00Z
+**Priority**: high
+**Status**: pending
+**Area**: infra
+
+### Summary
+Docker build fails on M1 Mac due to platform mismatch
+
+### Error
+\`\`\`
+error: failed to solve: python:3.11-slim: no match for platform linux/arm64
+\`\`\`
+
+### Context
+- Command: `docker build -t myapp .`
+- Dockerfile uses `FROM python:3.11-slim`
+- Running on Apple Silicon (M1/M2)
+
+### Suggested Fix
+Add platform flag: `docker build --platform linux/amd64 -t myapp .`
+Or update Dockerfile: `FROM --platform=linux/amd64 python:3.11-slim`
+
+### Metadata
+- Reproducible: yes
+- Related Files: Dockerfile
+
+---
+```
+
+## Error Entry: Recurring Issue
+
+```markdown
+## [ERR-20250120-B2C] api_timeout
+
+**Logged**: 2025-01-20T11:30:00Z
+**Priority**: critical
+**Status**: pending
+**Area**: backend
+
+### Summary
+Third-party payment API timeout during checkout
+
+### Error
+\`\`\`
+TimeoutError: Request to payments.example.com timed out after 30000ms
+\`\`\`
+
+### Context
+- Command: POST /api/checkout
+- Timeout set to 30s
+- Occurs during peak hours (lunch, evening)
+
+### Suggested Fix
+Implement retry with exponential backoff. Consider circuit breaker pattern.
+
+### Metadata
+- Reproducible: yes (during peak hours)
+- Related Files: src/services/payment.ts
+- See Also: ERR-20250115-X1Y, ERR-20250118-Z3W
+
+---
+```
+
+## Feature Request
+
+```markdown
+## [FEAT-20250115-001] export_to_csv
+
+**Logged**: 2025-01-15T16:45:00Z
+**Priority**: medium
+**Status**: pending
+**Area**: backend
+
+### Requested Capability
+Export analysis results to CSV format
+
+### User Context
+User runs weekly reports and needs to share results with non-technical 
+stakeholders in Excel. Currently copies output manually.
+
+### Complexity Estimate
+simple
+
+### Suggested Implementation
+Add `--output csv` flag to the analyze command. Use standard csv module.
+Could extend existing `--output json` pattern.
+
+### Metadata
+- Frequency: recurring
+- Related Features: analyze command, json output
+
+---
+```
+
+## Feature Request: Resolved
+
+```markdown
+## [FEAT-20250110-002] dark_mode
+
+**Logged**: 2025-01-10T14:00:00Z
+**Priority**: low
+**Status**: resolved
+**Area**: frontend
+
+### Requested Capability
+Dark mode support for the dashboard
+
+### User Context
+User works late hours and finds the bright interface straining.
+Several other users have mentioned this informally.
+
+### Complexity Estimate
+medium
+
+### Suggested Implementation
+Use CSS variables for colors. Add toggle in user settings.
+Consider system preference detection.
+
+### Metadata
+- Frequency: recurring
+- Related Features: user settings, theme system
+
+### Resolution
+- **Resolved**: 2025-01-18T16:00:00Z
+- **Commit/PR**: #142
+- **Notes**: Implemented with system preference detection and manual toggle
+
+---
+```
+
+## Learning: Promoted to Skill
+
+```markdown
+## [LRN-20250118-001] best_practice
+
+**Logged**: 2025-01-18T11:00:00Z
+**Priority**: high
+**Status**: promoted_to_skill
+**Skill-Path**: skills/docker-m1-fixes
+**Area**: infra
+
+### Summary
+Docker build fails on Apple Silicon due to platform mismatch
+
+### Details
+When building Docker images on M1/M2 Macs, the build fails because
+the base image doesn't have an ARM64 variant. This is a common issue
+that affects many developers.
+
+### Suggested Action
+Add `--platform linux/amd64` to docker build command, or use
+`FROM --platform=linux/amd64` in Dockerfile.
+
+### Metadata
+- Source: error
+- Related Files: Dockerfile
+- Tags: docker, arm64, m1, apple-silicon
+- See Also: ERR-20250115-A3F, ERR-20250117-B2D
+
+---
+```
+
+## Extracted Skill Example
+
+When the above learning is extracted as a skill, it becomes:
+
+**File**: `skills/docker-m1-fixes/SKILL.md`
+
+```markdown
+---
+name: docker-m1-fixes
+description: "Fixes Docker build failures on Apple Silicon (M1/M2). Use when docker build fails with platform mismatch errors."
+---
+
+# Docker M1 Fixes
+
+Solutions for Docker build issues on Apple Silicon Macs.
+
+## Quick Reference
+
+| Error | Fix |
+|-------|-----|
+| `no match for platform linux/arm64` | Add `--platform linux/amd64` to build |
+| Image runs but crashes | Use emulation or find ARM-compatible base |
+
+## The Problem
+
+Many Docker base images don't have ARM64 variants. When building on
+Apple Silicon (M1/M2/M3), Docker attempts to pull ARM64 images by
+default, causing platform mismatch errors.
+
+## Solutions
+
+### Option 1: Build Flag (Recommended)
+
+Add platform flag to your build command:
+
+\`\`\`bash
+docker build --platform linux/amd64 -t myapp .
+\`\`\`
+
+### Option 2: Dockerfile Modification
+
+Specify platform in the FROM instruction:
+
+\`\`\`dockerfile
+FROM --platform=linux/amd64 python:3.11-slim
+\`\`\`
+
+### Option 3: Docker Compose
+
+Add platform to your service:
+
+\`\`\`yaml
+services:
+  app:
+    platform: linux/amd64
+    build: .
+\`\`\`
+
+## Trade-offs
+
+| Approach | Pros | Cons |
+|----------|------|------|
+| Build flag | No file changes | Must remember flag |
+| Dockerfile | Explicit, versioned | Affects all builds |
+| Compose | Convenient for dev | Requires compose |
+
+## Performance Note
+
+Running AMD64 images on ARM64 uses Rosetta 2 emulation. This works
+for development but may be slower. For production, find ARM-native
+alternatives when possible.
+
+## Source
+
+- Learning ID: LRN-20250118-001
+- Category: best_practice
+- Extraction Date: 2025-01-18
+```
diff --git a/workspace/skills/self-improving-agent/references/hooks-setup.md b/workspace/skills/self-improving-agent/references/hooks-setup.md
new file mode 100644
index 0000000..08b5dd1
--- /dev/null
+++ b/workspace/skills/self-improving-agent/references/hooks-setup.md
@@ -0,0 +1,223 @@
+# Hook Setup Guide
+
+Configure automatic self-improvement triggers for AI coding agents.
+
+## Overview
+
+Hooks enable proactive learning capture by injecting reminders at key moments:
+- **UserPromptSubmit**: Reminder after each prompt to evaluate learnings
+- **PostToolUse (Bash)**: Error detection when commands fail
+
+## Claude Code Setup
+
+### Option 1: Project-Level Configuration
+
+Create `.claude/settings.json` in your project root:
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [
+      {
+        "matcher": "",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "./skills/self-improvement/scripts/activator.sh"
+          }
+        ]
+      }
+    ],
+    "PostToolUse": [
+      {
+        "matcher": "Bash",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "./skills/self-improvement/scripts/error-detector.sh"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+### Option 2: User-Level Configuration
+
+Add to `~/.claude/settings.json` for global activation:
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [
+      {
+        "matcher": "",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "~/.claude/skills/self-improvement/scripts/activator.sh"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+### Minimal Setup (Activator Only)
+
+For lower overhead, use only the UserPromptSubmit hook:
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [
+      {
+        "matcher": "",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "./skills/self-improvement/scripts/activator.sh"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+## Codex CLI Setup
+
+Codex uses the same hook system as Claude Code. Create `.codex/settings.json`:
+
+```json
+{
+  "hooks": {
+    "UserPromptSubmit": [
+      {
+        "matcher": "",
+        "hooks": [
+          {
+            "type": "command",
+            "command": "./skills/self-improvement/scripts/activator.sh"
+          }
+        ]
+      }
+    ]
+  }
+}
+```
+
+## GitHub Copilot Setup
+
+Copilot doesn't support hooks directly. Instead, add guidance to `.github/copilot-instructions.md`:
+
+```markdown
+## Self-Improvement
+
+After completing tasks that involved:
+- Debugging non-obvious issues
+- Discovering workarounds
+- Learning project-specific patterns
+- Resolving unexpected errors
+
+Consider logging the learning to `.learnings/` using the format from the self-improvement skill.
+
+For high-value learnings that would benefit other sessions, consider skill extraction.
+```
+
+## Verification
+
+### Test Activator Hook
+
+1. Enable the hook configuration
+2. Start a new Claude Code session
+3. Send any prompt
+4. Verify you see `<self-improvement-reminder>` in the context
+
+### Test Error Detector Hook
+
+1. Enable PostToolUse hook for Bash
+2. Run a command that fails: `ls /nonexistent/path`
+3. Verify you see `<error-detected>` reminder
+
+### Dry Run Extract Script
+
+```bash
+./skills/self-improvement/scripts/extract-skill.sh test-skill --dry-run
+```
+
+Expected output shows the skill scaffold that would be created.
+
+## Troubleshooting
+
+### Hook Not Triggering
+
+1. **Check script permissions**: `chmod +x scripts/*.sh`
+2. **Verify path**: Use absolute paths or paths relative to project root
+3. **Check settings location**: Project vs user-level settings
+4. **Restart session**: Hooks are loaded at session start
+
+### Permission Denied
+
+```bash
+chmod +x ./skills/self-improvement/scripts/activator.sh
+chmod +x ./skills/self-improvement/scripts/error-detector.sh
+chmod +x ./skills/self-improvement/scripts/extract-skill.sh
+```
+
+### Script Not Found
+
+If using relative paths, ensure you're in the correct directory or use absolute paths:
+
+```json
+{
+  "command": "/absolute/path/to/skills/self-improvement/scripts/activator.sh"
+}
+```
+
+### Too Much Overhead
+
+If the activator feels intrusive:
+
+1. **Use minimal setup**: Only UserPromptSubmit, skip PostToolUse
+2. **Add matcher filter**: Only trigger for certain prompts:
+
+```json
+{
+  "matcher": "fix|debug|error|issue",
+  "hooks": [...]
+}
+```
+
+## Hook Output Budget
+
+The activator is designed to be lightweight:
+- **Target**: ~50-100 tokens per activation
+- **Content**: Structured reminder, not verbose instructions
+- **Format**: XML tags for easy parsing
+
+If you need to reduce overhead further, you can edit `activator.sh` to output less text.
+
+## Security Considerations
+
+- Hook scripts run with the same permissions as Claude Code
+- Scripts only output text; they don't modify files or run commands
+- Error detector reads `CLAUDE_TOOL_OUTPUT` environment variable
+- All scripts are opt-in (you must configure them explicitly)
+
+## Disabling Hooks
+
+To temporarily disable without removing configuration:
+
+1. **Rename the key in settings** (JSON does not support comments, so prefix the event name to disable it):
+```json
+{
+  "hooks": {
+    "disabled_UserPromptSubmit": [...]
+  }
+}
+```
+
+2. **Or delete the settings file**: Hooks won't run without configuration
diff --git a/workspace/skills/self-improving-agent/references/openclaw-integration.md b/workspace/skills/self-improving-agent/references/openclaw-integration.md
new file mode 100644
index 0000000..09f0193
--- /dev/null
+++ b/workspace/skills/self-improving-agent/references/openclaw-integration.md
@@ -0,0 +1,248 @@
+# OpenClaw Integration
+
+Complete setup and usage guide for integrating the self-improvement skill with OpenClaw.
+
+## Overview
+
+OpenClaw uses workspace-based prompt injection combined with event-driven hooks. Context is injected from workspace files at session start, and hooks can trigger on lifecycle events.
+
+## Workspace Structure
+
+```
+~/.openclaw/                      
+├── workspace/                   # Working directory
+│   ├── AGENTS.md               # Multi-agent coordination patterns
+│   ├── SOUL.md                 # Behavioral guidelines and personality
+│   ├── TOOLS.md                # Tool capabilities and gotchas
+│   ├── MEMORY.md               # Long-term memory (main session only)
+│   └── memory/                 # Daily memory files
+│       └── YYYY-MM-DD.md
+├── skills/                      # Installed skills
+│   └── <skill-name>/
+│       └── SKILL.md
+└── hooks/                       # Custom hooks
+    └── <hook-name>/
+        ├── HOOK.md
+        └── handler.ts
+```
+
+## Quick Setup
+
+### 1. Install the Skill
+
+```bash
+clawhub install self-improving-agent
+```
+
+Or copy manually:
+
+```bash
+cp -r self-improving-agent ~/.openclaw/skills/
+```
+
+### 2. Install the Hook (Optional)
+
+Copy the hook to OpenClaw's hooks directory:
+
+```bash
+cp -r hooks/openclaw ~/.openclaw/hooks/self-improvement
+```
+
+Enable the hook:
+
+```bash
+openclaw hooks enable self-improvement
+```
+
+### 3. Create Learning Files
+
+Create the `.learnings/` directory in your workspace:
+
+```bash
+mkdir -p ~/.openclaw/workspace/.learnings
+```
+
+Or in the skill directory:
+
+```bash
+mkdir -p ~/.openclaw/skills/self-improving-agent/.learnings
+```
+
+## Injected Prompt Files
+
+### AGENTS.md
+
+Purpose: Multi-agent workflows and delegation patterns.
+
+```markdown
+# Agent Coordination
+
+## Delegation Rules
+- Use explore agent for open-ended codebase questions
+- Spawn sub-agents for long-running tasks
+- Use sessions_send for cross-session communication
+
+## Session Handoff
+When delegating to another session:
+1. Provide full context in the handoff message
+2. Include relevant file paths
+3. Specify expected output format
+```
+
+### SOUL.md
+
+Purpose: Behavioral guidelines and communication style.
+
+```markdown
+# Behavioral Guidelines
+
+## Communication Style
+- Be direct and concise
+- Avoid unnecessary caveats and disclaimers
+- Use technical language appropriate to context
+
+## Error Handling
+- Admit mistakes promptly
+- Provide corrected information immediately
+- Log significant errors to learnings
+```
+
+### TOOLS.md
+
+Purpose: Tool capabilities, integration gotchas, local configuration.
+
+```markdown
+# Tool Knowledge
+
+## Self-Improvement Skill
+Log learnings to `.learnings/` for continuous improvement.
+
+## Local Tools
+- Document tool-specific gotchas here
+- Note authentication requirements
+- Track integration quirks
+```
+
+## Learning Workflow
+
+### Capturing Learnings
+
+1. **In-session**: Log to `.learnings/` as usual
+2. **Cross-session**: Promote to workspace files
+
+### Promotion Decision Tree
+
+```
+Is the learning project-specific?
+├── Yes → Keep in .learnings/
+└── No → Is it behavioral/style-related?
+    ├── Yes → Promote to SOUL.md
+    └── No → Is it tool-related?
+        ├── Yes → Promote to TOOLS.md
+        └── No → Promote to AGENTS.md (workflow)
+```
+
+### Promotion Format Examples
+
+**From learning:**
+> Git push to GitHub fails without auth configured - triggers desktop prompt
+
+**To TOOLS.md:**
+```markdown
+## Git
+- Don't push without confirming auth is configured
+- Use `gh auth status` to check GitHub CLI auth
+```
+
+## Inter-Agent Communication
+
+OpenClaw provides tools for cross-session communication:
+
+### sessions_list
+
+View active and recent sessions:
+```
+sessions_list(activeMinutes=30, messageLimit=3)
+```
+
+### sessions_history
+
+Read transcript from another session:
+```
+sessions_history(sessionKey="session-id", limit=50)
+```
+
+### sessions_send
+
+Send message to another session:
+```
+sessions_send(sessionKey="session-id", message="Learning: API requires X-Custom-Header")
+```
+
+### sessions_spawn
+
+Spawn a background sub-agent:
+```
+sessions_spawn(task="Research X and report back", label="research")
+```
+
+## Available Hook Events
+
+| Event | When It Fires |
+|-------|---------------|
+| `agent:bootstrap` | Before workspace files inject |
+| `command:new` | When `/new` command issued |
+| `command:reset` | When `/reset` command issued |
+| `command:stop` | When `/stop` command issued |
+| `gateway:startup` | When gateway starts |
+
+## Detection Triggers
+
+### Standard Triggers
+- User corrections ("No, that's wrong...")
+- Command failures (non-zero exit codes)
+- API errors
+- Knowledge gaps
+
+### OpenClaw-Specific Triggers
+
+| Trigger | Action |
+|---------|--------|
+| Tool call error | Log to TOOLS.md with tool name |
+| Session handoff confusion | Log to AGENTS.md with delegation pattern |
+| Model behavior surprise | Log to SOUL.md with expected vs actual |
+| Skill issue | Log to .learnings/ or report upstream |
+
+## Verification
+
+Check hook is registered:
+
+```bash
+openclaw hooks list
+```
+
+Check skill is loaded:
+
+```bash
+openclaw status
+```
+
+## Troubleshooting
+
+### Hook not firing
+
+1. Ensure hooks enabled in config
+2. Restart gateway after config changes
+3. Check gateway logs for errors
+
+### Learnings not persisting
+
+1. Verify `.learnings/` directory exists
+2. Check file permissions
+3. Ensure workspace path is configured correctly
+
+### Skill not loading
+
+1. Check skill is in skills directory
+2. Verify SKILL.md has correct frontmatter
+3. Run `openclaw status` to see loaded skills
diff --git a/workspace/skills/self-improving-agent/scripts/activator.sh b/workspace/skills/self-improving-agent/scripts/activator.sh
new file mode 100644
index 0000000..29eec22
--- /dev/null
+++ b/workspace/skills/self-improving-agent/scripts/activator.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Self-Improvement Activator Hook
+# Triggers on UserPromptSubmit to remind Claude about learning capture
+# Keep output minimal (~50-100 tokens) to minimize overhead
+
+set -e
+
+# Output reminder as system context
+cat << 'EOF'
+<self-improvement-reminder>
+After completing this task, evaluate if extractable knowledge emerged:
+- Non-obvious solution discovered through investigation?
+- Workaround for unexpected behavior?
+- Project-specific pattern learned?
+- Error required debugging to resolve?
+
+If yes: Log to .learnings/ using the self-improvement skill format.
+If high-value (recurring, broadly applicable): Consider skill extraction.
+</self-improvement-reminder>
+EOF
diff --git a/workspace/skills/self-improving-agent/scripts/error-detector.sh b/workspace/skills/self-improving-agent/scripts/error-detector.sh
new file mode 100644
index 0000000..3c310dd
--- /dev/null
+++ b/workspace/skills/self-improving-agent/scripts/error-detector.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# Self-Improvement Error Detector Hook
+# Triggers on PostToolUse for Bash to detect command failures
+# Reads CLAUDE_TOOL_OUTPUT environment variable
+
+set -e
+
+# Check if tool output indicates an error
+# CLAUDE_TOOL_OUTPUT contains the result of the tool execution
+OUTPUT="${CLAUDE_TOOL_OUTPUT:-}"
+
+# Patterns indicating errors (case-sensitive substring matching; common case variants listed explicitly)
+ERROR_PATTERNS=(
+    "error:"
+    "Error:"
+    "ERROR:"
+    "failed"
+    "FAILED"
+    "command not found"
+    "No such file"
+    "Permission denied"
+    "fatal:"
+    "Exception"
+    "Traceback"
+    "npm ERR!"
+    "ModuleNotFoundError"
+    "SyntaxError"
+    "TypeError"
+    "exit code"
+    "non-zero"
+)
+
+# Check if output contains any error pattern
+contains_error=false
+for pattern in "${ERROR_PATTERNS[@]}"; do
+    if [[ "$OUTPUT" == *"$pattern"* ]]; then
+        contains_error=true
+        break
+    fi
+done
+
+# Only output reminder if error detected
+if [ "$contains_error" = true ]; then
+    cat << 'EOF'
+<error-detected>
+A command error was detected. Consider logging this to .learnings/ERRORS.md if:
+- The error was unexpected or non-obvious
+- It required investigation to resolve
+- It might recur in similar contexts
+- The solution could benefit future sessions
+
+Use the self-improvement skill format: [ERR-YYYYMMDD-XXX]
+</error-detected>
+EOF
+fi
diff --git a/workspace/skills/self-improving-agent/scripts/extract-skill.sh b/workspace/skills/self-improving-agent/scripts/extract-skill.sh
new file mode 100644
index 0000000..ccae55a
--- /dev/null
+++ b/workspace/skills/self-improving-agent/scripts/extract-skill.sh
@@ -0,0 +1,221 @@
+#!/bin/bash
+# Skill Extraction Helper
+# Creates a new skill from a learning entry
+# Usage: ./extract-skill.sh <skill-name> [--dry-run]
+
+set -e
+
+# Configuration
+SKILLS_DIR="./skills"
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+usage() {
+    cat << EOF
+Usage: $(basename "$0") <skill-name> [options]
+
+Create a new skill from a learning entry.
+
+Arguments:
+  skill-name     Name of the skill (lowercase, hyphens for spaces)
+
+Options:
+  --dry-run      Show what would be created without creating files
+  --output-dir   Relative output directory under current path (default: ./skills)
+  -h, --help     Show this help message
+
+Examples:
+  $(basename "$0") docker-m1-fixes
+  $(basename "$0") api-timeout-patterns --dry-run
+  $(basename "$0") pnpm-setup --output-dir ./skills/custom
+
+The skill will be created in: $SKILLS_DIR/<skill-name>/
+EOF
+}
+
+log_info() {
+    echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+    echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+    echo -e "${RED}[ERROR]${NC} $1" >&2
+}
+
+# Parse arguments
+SKILL_NAME=""
+DRY_RUN=false
+
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        --dry-run)
+            DRY_RUN=true
+            shift
+            ;;
+        --output-dir)
+            if [ -z "${2:-}" ] || [[ "${2:-}" == -* ]]; then
+                log_error "--output-dir requires a relative path argument"
+                usage
+                exit 1
+            fi
+            SKILLS_DIR="$2"
+            shift 2
+            ;;
+        -h|--help)
+            usage
+            exit 0
+            ;;
+        -*)
+            log_error "Unknown option: $1"
+            usage
+            exit 1
+            ;;
+        *)
+            if [ -z "$SKILL_NAME" ]; then
+                SKILL_NAME="$1"
+            else
+                log_error "Unexpected argument: $1"
+                usage
+                exit 1
+            fi
+            shift
+            ;;
+    esac
+done
+
+# Validate skill name
+if [ -z "$SKILL_NAME" ]; then
+    log_error "Skill name is required"
+    usage
+    exit 1
+fi
+
+# Validate skill name format (lowercase, hyphens, no spaces)
+if ! [[ "$SKILL_NAME" =~ ^[a-z0-9]+(-[a-z0-9]+)*$ ]]; then
+    log_error "Invalid skill name format. Use lowercase letters, numbers, and hyphens only."
+    log_error "Examples: 'docker-fixes', 'api-patterns', 'pnpm-setup'"
+    exit 1
+fi
+
+# Validate output path to avoid writes outside current workspace.
+if [[ "$SKILLS_DIR" = /* ]]; then
+    log_error "Output directory must be a relative path under the current directory."
+    exit 1
+fi
+
+if [[ "$SKILLS_DIR" =~ (^|/)\.\.(/|$) ]]; then
+    log_error "Output directory cannot include '..' path segments."
+    exit 1
+fi
+
+SKILLS_DIR="${SKILLS_DIR#./}"
+SKILLS_DIR="./$SKILLS_DIR"
+
+SKILL_PATH="$SKILLS_DIR/$SKILL_NAME"
+
+# Check if skill already exists
+if [ -d "$SKILL_PATH" ] && [ "$DRY_RUN" = false ]; then
+    log_error "Skill already exists: $SKILL_PATH"
+    log_error "Use a different name or remove the existing skill first."
+    exit 1
+fi
+
+# Dry run output
+if [ "$DRY_RUN" = true ]; then
+    log_info "Dry run - would create:"
+    echo "  $SKILL_PATH/"
+    echo "  $SKILL_PATH/SKILL.md"
+    echo ""
+    echo "Template content would be:"
+    echo "---"
+    cat << TEMPLATE
+name: $SKILL_NAME
+description: "[TODO: Add a concise description of what this skill does and when to use it]"
+---
+
+# $(echo "$SKILL_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++) $i=toupper(substr($i,1,1)) tolower(substr($i,2))}1')
+
+[TODO: Brief introduction explaining the skill's purpose]
+
+## Quick Reference
+
+| Situation | Action |
+|-----------|--------|
+| [Trigger condition] | [What to do] |
+
+## Usage
+
+[TODO: Detailed usage instructions]
+
+## Examples
+
+[TODO: Add concrete examples]
+
+## Source Learning
+
+This skill was extracted from a learning entry.
+- Learning ID: [TODO: Add original learning ID]
+- Original File: .learnings/LEARNINGS.md
+TEMPLATE
+    echo "---"
+    exit 0
+fi
+
+# Create skill directory structure
+log_info "Creating skill: $SKILL_NAME"
+
+mkdir -p "$SKILL_PATH"
+
+# Create SKILL.md from template
+cat > "$SKILL_PATH/SKILL.md" << TEMPLATE
+---
+name: $SKILL_NAME
+description: "[TODO: Add a concise description of what this skill does and when to use it]"
+---
+
+# $(echo "$SKILL_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++) $i=toupper(substr($i,1,1)) tolower(substr($i,2))}1')
+
+[TODO: Brief introduction explaining the skill's purpose]
+
+## Quick Reference
+
+| Situation | Action |
+|-----------|--------|
+| [Trigger condition] | [What to do] |
+
+## Usage
+
+[TODO: Detailed usage instructions]
+
+## Examples
+
+[TODO: Add concrete examples]
+
+## Source Learning
+
+This skill was extracted from a learning entry.
+- Learning ID: [TODO: Add original learning ID]
+- Original File: .learnings/LEARNINGS.md
+TEMPLATE
+
+log_info "Created: $SKILL_PATH/SKILL.md"
+
+# Suggest next steps
+echo ""
+log_info "Skill scaffold created successfully!"
+echo ""
+echo "Next steps:"
+echo "  1. Edit $SKILL_PATH/SKILL.md"
+echo "  2. Fill in the TODO sections with content from your learning"
+echo "  3. Add references/ folder if you have detailed documentation"
+echo "  4. Add scripts/ folder if you have executable code"
+echo "  5. Update the original learning entry with:"
+echo "     **Status**: promoted_to_skill"
+echo "     **Skill-Path**: skills/$SKILL_NAME"

--
Gitblit v1.9.1