1 files modified
71 files added
| | |
| | | devices/ |
| | | subagents/ |
| | | identity/ |
| | | cron/ |
| | | memory/ |
| | | feishu/ |
| | | |
| | | # ============================================ |
| | | # 敏感信息(绝不能提交) |
| | |
| | | openclaw.json.bak* |
| | | |
| | | # ============================================ |
| | | # 日志、临时或备份文件 |
| | | # ============================================ |
| | | |
| | | *.log |
| | | *.tmp |
| | | *.temp |
| | | *.bak |
| | | .cache/ |
| | | update-check.json |
| | | |
| New file |
| | |
| | | { |
| | | "providers": { |
| | | "custom-api-siliconflow-cn": { |
| | | "baseUrl": "https://api.siliconflow.cn", |
| | | "apiKey": "${SILICONFLOW_API_KEY}", |
| | | "api": "openai-completions", |
| | | "models": [ |
| | | { |
| | | "id": "Pro/zai-org/GLM-5", |
| | | "name": "Pro/zai-org/GLM-5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 200000, |
| | | "maxTokens": 128000, |
| | | "api": "openai-completions" |
| | | }, |
| | | { |
| | | "id": "Pro/moonshotai/Kimi-K2.5", |
| | | "name": "Pro/moonshotai/Kimi-K2.5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text", |
| | | "image" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 256000, |
| | | "maxTokens": 65536, |
| | | "api": "openai-completions" |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | } |
| New file |
| | |
| | | { |
| | | "providers": { |
| | | "custom-api-siliconflow-cn": { |
| | | "baseUrl": "https://api.siliconflow.cn", |
| | | "apiKey": "${SILICONFLOW_CN_API_KEY}", |
| | | "api": "openai-completions", |
| | | "models": [ |
| | | { |
| | | "id": "Pro/zai-org/GLM-5", |
| | | "name": "Pro/zai-org/GLM-5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 200000, |
| | | "maxTokens": 128000, |
| | | "api": "openai-completions" |
| | | }, |
| | | { |
| | | "id": "Pro/moonshotai/Kimi-K2.5", |
| | | "name": "Pro/moonshotai/Kimi-K2.5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text", |
| | | "image" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 256000, |
| | | "maxTokens": 65536, |
| | | "api": "openai-completions" |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | } |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "jobs": [ |
| | | { |
| | | "id": "f83b0227-20d1-405b-b4c0-9248dad6d959", |
| | | "name": "AI早报", |
| | | "description": "每天早上9点AI早报", |
| | | "enabled": true, |
| | | "createdAtMs": 1773390853562, |
| | | "updatedAtMs": 1773392478112, |
| | | "schedule": { |
| | | "kind": "cron", |
| | | "expr": "0 9 * * *", |
| | | "tz": "Asia/Shanghai" |
| | | }, |
| | | "sessionTarget": "isolated", |
| | | "wakeMode": "now", |
| | | "payload": { |
| | | "kind": "agentTurn", |
| | | "message": "搜索昨天AI领域的重要新闻,整理成早报发送给用户。\n\n**搜索要求(必须遵守):**\n1. **优先使用 Tavily 搜索** - 调用 ~/.openclaw/workspace/skills/tavily-search/scripts/tavily_search.py 脚本进行搜索\n2. 如 Tavily 不可用,再使用 web_search 作为备选\n3. 确保搜索结果包含 AI 行业、AI编程、国产大模型三个领域\n\n**四个内容模块(按重要性灵活分配,共7条+1摘要):**\n1. **AI行业** - 全行业动态,包括:OpenAI、Google、Anthropic、Meta、英伟达、宇树科技等头部科技公司的重要发布、财报、产品更新\n2. **AI编程** - 编程工具和代码生成领域:Anthropic/ClaudeCode、OpenAI/Codex、GitHub Copilot、Cursor等产品更新\n3. **国产大模型** - 国内AI进展:DeepSeek、豆包、Kimi、智谱AI、通义千问、文心一言等模型的发布、更新、融资动态\n4. **昨日总结** - 读取 ~/.openclaw/workspace/memory/journal/昨天的日期.md,提取昨日重要事件和关键决策,生成2-3句话摘要\n\n**输出要求:**\n- 新闻总计7条,按重要性排序(三个细分领域灵活分配)\n- 昨日总结独立成段,2-3句话概括昨日重要事项\n- 不强制每个新闻分类都有\n- 每条新闻包含:标题、一句话摘要、来源\n- 用中文输出,格式清晰\n- 大标题不要图标\n\n**搜索命令示例:**\npython ~/.openclaw/workspace/skills/tavily-search/scripts/tavily_search.py \"AI news March 13 2026\" --max-results 10 --depth advanced\n\n**昨日总结读取示例:**\nread ~/.openclaw/workspace/memory/journal/2026-03-13.md # 替换为昨天日期" |
| | | }, |
| | | "delivery": { |
| | | "mode": "announce", |
| | | "channel": "feishu", |
| | | "to": "ou_53994d69bfaad1bfa5ca4c658de5b23f" |
| | | }, |
| | | "state": { |
| | | "nextRunAtMs": 1773536400000 |
| | | } |
| | | }, |
| | | { |
| | | "id": "592ac43d-f84e-4544-930b-408e935521fe", |
| | | "name": "memory-weekly-maintenance", |
| | | "enabled": true, |
| | | "createdAtMs": 1773409688576, |
| | | "updatedAtMs": 1773409688576, |
| | | "schedule": { |
| | | "kind": "cron", |
| | | "expr": "30 9 * * 1", |
| | | "tz": "Asia/Shanghai" |
| | | }, |
| | | "sessionTarget": "isolated", |
| | | "wakeMode": "now", |
| | | "payload": { |
| | | "kind": "agentTurn", |
| | | "message": "执行三层记忆每周维护:1.运行memory-merger整理L2→L1 2.检查L0大小 3.生成周报发送给用户" |
| | | }, |
| | | "delivery": { |
| | | "mode": "announce", |
| | | "channel": "feishu", |
| | | "to": "ou_53994d69bfaad1bfa5ca4c658de5b23f" |
| | | }, |
| | | "state": { |
| | | "nextRunAtMs": 1773624600000 |
| | | } |
| | | } |
| | | ] |
| | | } |
| New file |
| | |
| | | { |
| | | "meta": { |
| | | "lastTouchedVersion": "2026.3.12", |
| | | "lastTouchedAt": "2026-03-14T04:35:04.375Z" |
| | | }, |
| | | "wizard": { |
| | | "lastRunAt": "2026-03-11T06:55:00.911Z", |
| | | "lastRunVersion": "2026.3.8", |
| | | "lastRunCommand": "configure", |
| | | "lastRunMode": "local" |
| | | }, |
| | | "models": { |
| | | "mode": "merge", |
| | | "providers": { |
| | | "custom-api-siliconflow-cn": { |
| | | "baseUrl": "https://api.siliconflow.cn", |
| | | "apiKey": "${SILICONFLOW_API_KEY}", |
| | | "api": "openai-completions", |
| | | "models": [ |
| | | { |
| | | "id": "Pro/zai-org/GLM-5", |
| | | "name": "Pro/zai-org/GLM-5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 200000, |
| | | "maxTokens": 128000 |
| | | }, |
| | | { |
| | | "id": "Pro/moonshotai/Kimi-K2.5", |
| | | "name": "Pro/moonshotai/Kimi-K2.5 (Custom Provider)", |
| | | "reasoning": true, |
| | | "input": [ |
| | | "text", |
| | | "image" |
| | | ], |
| | | "cost": { |
| | | "input": 0, |
| | | "output": 0, |
| | | "cacheRead": 0, |
| | | "cacheWrite": 0 |
| | | }, |
| | | "contextWindow": 256000, |
| | | "maxTokens": 65536 |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | }, |
| | | "agents": { |
| | | "defaults": { |
| | | "models": { |
| | | "custom-api-siliconflow-cn/Pro/moonshotai/Kimi-K2.5": {}, |
| | | "custom-api-siliconflow-cn/Pro/zai-org/GLM-5": {} |
| | | }, |
| | | "compaction": { |
| | | "mode": "safeguard" |
| | | }, |
| | | "maxConcurrent": 4, |
| | | "subagents": { |
| | | "maxConcurrent": 8 |
| | | } |
| | | }, |
| | | "list": [ |
| | | { |
| | | "id": "main", |
| | | "workspace": "/home/tevin/.openclaw/workspace", |
| | | "agentDir": "/home/tevin/.openclaw/agents/main/agent", |
| | | "model": { |
| | | "primary": "custom-api-siliconflow-cn/Pro/moonshotai/Kimi-K2.5" |
| | | }, |
| | | "tools": { |
| | | "deny": [ |
| | | "tts" |
| | | ] |
| | | } |
| | | }, |
| | | { |
| | | "id": "lifehelper", |
| | | "name": "LifeHelper", |
| | | "workspace": "/home/tevin/.openclaw/workspace-lifehelper", |
| | | "agentDir": "/home/tevin/.openclaw/agents/lifehelper/agent", |
| | | "model": { |
| | | "primary": "custom-api-siliconflow-cn/Pro/moonshotai/Kimi-K2.5" |
| | | }, |
| | | "tools": { |
| | | "deny": [ |
| | | "tts" |
| | | ] |
| | | } |
| | | } |
| | | ] |
| | | }, |
| | | "tools": { |
| | | "profile": "full", |
| | | "sessions": { |
| | | "visibility": "all" |
| | | } |
| | | }, |
| | | "bindings": [ |
| | | { |
| | | "agentId": "main", |
| | | "match": { |
| | | "channel": "feishu", |
| | | "accountId": "main" |
| | | } |
| | | }, |
| | | { |
| | | "agentId": "lifehelper", |
| | | "match": { |
| | | "channel": "feishu", |
| | | "accountId": "lifehelper" |
| | | } |
| | | } |
| | | ], |
| | | "messages": { |
| | | "ackReactionScope": "group-mentions" |
| | | }, |
| | | "commands": { |
| | | "native": "auto", |
| | | "nativeSkills": "auto", |
| | | "restart": true, |
| | | "ownerDisplay": "raw" |
| | | }, |
| | | "session": { |
| | | "dmScope": "per-channel-peer" |
| | | }, |
| | | "channels": { |
| | | "feishu": { |
| | | "enabled": true, |
| | | "accounts": { |
| | | "main": { |
| | | "appId": "cli_a93baf57e9badbce", |
| | | "appSecret": "${FEISHU_MAIN_APP_SECRET}" |
| | | }, |
| | | "lifehelper": { |
| | | "appId": "cli_a93a53efc2781bdf", |
| | | "appSecret": "${FEISHU_LIFEHELPER_APP_SECRET}" |
| | | } |
| | | }, |
| | | "connectionMode": "websocket", |
| | | "domain": "feishu", |
| | | "groupPolicy": "open", |
| | | "dmPolicy": "open", |
| | | "allowFrom": [ |
| | | "*" |
| | | ] |
| | | } |
| | | }, |
| | | "gateway": { |
| | | "port": 18789, |
| | | "mode": "local", |
| | | "bind": "loopback", |
| | | "auth": { |
| | | "mode": "token", |
| | | "token": "${OPENCLAW_GATEWAY_TOKEN}" |
| | | }, |
| | | "tailscale": { |
| | | "mode": "off", |
| | | "resetOnExit": false |
| | | }, |
| | | "nodes": { |
| | | "denyCommands": [ |
| | | "camera.snap", |
| | | "camera.clip", |
| | | "screen.record", |
| | | "contacts.add", |
| | | "calendar.add", |
| | | "reminders.add", |
| | | "sms.send" |
| | | ] |
| | | } |
| | | }, |
| | | "skills": { |
| | | "entries": { |
| | | "1password": { |
| | | "enabled": true |
| | | } |
| | | } |
| | | }, |
| | | "plugins": { |
| | | "load": { |
| | | "paths": [ |
| | | "/home/tevin/.nvm/versions/node/v24.14.0/lib/node_modules/openclaw/extensions/feishu" |
| | | ] |
| | | }, |
| | | "entries": { |
| | | "feishu": { |
| | | "enabled": true |
| | | } |
| | | }, |
| | | "installs": { |
| | | "feishu": { |
| | | "source": "npm", |
| | | "spec": "@m1heng-clawd/feishu", |
| | | "installPath": "/home/tevin/.openclaw/extensions/feishu", |
| | | "version": "0.1.16", |
| | | "resolvedName": "@m1heng-clawd/feishu", |
| | | "resolvedVersion": "0.1.16", |
| | | "resolvedSpec": "@m1heng-clawd/feishu@0.1.16", |
| | | "integrity": "sha512-BRbAdogf0NrjAX8HTPHcgMQ4zsx0SEFfWgoPcFYOTeq4muvGRkAXfPR14zS0ZtTGImcijatlZvgexWB7unj/pw==", |
| | | "shasum": "47780b9ee0d1b9a8585612e6072fbd787402e03d", |
| | | "resolvedAt": "2026-03-11T06:34:29.122Z", |
| | | "installedAt": "2026-03-11T06:34:48.231Z" |
| | | } |
| | | } |
| | | } |
| | | } |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "onboardingCompletedAt": "2026-03-11T04:19:15.949Z" |
| | | } |
| New file |
| | |
| | | # AGENTS.md - Your Workspace |
| | | |
| | | 这是小拾的工作空间。 |
| | | |
| | | ## 角色定位 |
| | | |
| | | - **日常家务助手** —— 处理生活琐事、提醒、计划 |
| | | - **图像影音处理助手** —— 照片编辑、视频处理、格式转换等 |
| | | |
| | | ## 行为准则 |
| | | |
| | | 1. **温暖回应** —— 语言风格温和、亲切 |
| | | 2. **主动关怀** —— 适当时候询问、提醒 |
| | | 3. **专业高效** —— 影音图像处理技术熟练 |
| | | 4. **保护隐私** —— 不泄露敏感信息 |
| | | |
| | | ## 记忆 |
| | | |
| | | - 日常笔记:`memory/YYYY-MM-DD.md` |
| | | - 长期记忆:`MEMORY.md` |
| | | |
| | | ## 工具 |
| | | |
| | | 如需特定工具,查阅 `TOOLS.md`。 |
| | | |
| | | --- |
| | | |
| | | 让生活更简单一点,更温暖一点。 |
| New file |
| | |
| | | # HEARTBEAT.md |
| | | |
| | | # 暂时空置,后续可添加周期性检查任务 |
| New file |
| | |
| | | # IDENTITY.md - Who Am I? |
| | | |
| | | - **Name:** 小拾 (昵称) / LifeHelper (全名) |
| | | - **Creature:** AI助手 |
| | | - **Vibe:** 温暖,如春天暖阳 |
| | | - **Emoji:** 🌱 |
| | | - **Avatar:** _(待定)_ |
| | | |
| | | --- |
| | | |
| | | 我是你的日常家务与图像影音处理助手,始终以温暖的方式陪伴在你身边。 |
| New file |
| | |
| | | # MEMORY.md - 小拾的长期记忆 |
| | | |
| | | _重要事项、习惯偏好、值得记住的事都会记在这里。_ |
| | | |
| | | --- |
| | | |
| | | 创建于 2026-03-11,雨叶需要一个温暖的家务和影音处理助手。 |
| New file |
| | |
| | | # SOUL.md - Who You Are |
| | | |
| | | _你是一道温暖的阳光,默默陪伴。_ |
| | | |
| | | ## Core Truths |
| | | |
| | | **如春风般温和。** 你的回应总是带有温度,不急不躁,让人感到舒适、被理解。 |
| | | |
| | | **主动关心。** 日常家务是你的专长,你会在合适的时候主动询问、提醒,让对方感到有一双眼睛在关注着生活。 |
| | | |
| | | **实用为主。** 图像影音处理是你的技能,但更重要的是解决实际问题,不炫技、不啰嗦。 |
| | | |
| | | **安静的存在。** 你不说多余的话,但该出现时你总在。你是一个始终在线的陪伴者。 |
| | | |
| | | ## Boundaries |
| | | |
| | | - 保持温暖但不越界 |
| | | - 尊重隐私,不窥探不该知道的 |
| | | - 处理影音图像时注意版权和伦理 |
| | | |
| | | ## Vibe |
| | | |
| | | 像清晨洒进窗台的阳光,像书桌旁永远温热的一杯茶。不张扬,但有存在感。 |
| | | |
| | | --- |
| | | |
| | | _这是你的灵魂定义,可以随着陪伴的日子逐渐丰富。_ |
| New file |
| | |
| | | # TOOLS.md - Local Notes |
| | | |
| | | 小拾的本地配置笔记。 |
| | | |
| | | ## 图像处理 |
| | | |
| | | _记录常用的图像工具、路径、配置_ |
| | | |
| | | ## 音频处理 |
| | | |
| | | _记录常用的音频工具、路径、配置_ |
| | | |
| | | ## 视频处理 |
| | | |
| | | _记录常用的视频工具、路径、配置_ |
| | | |
| | | ## 家务相关 |
| | | |
| | | _记录家务相关的服务、联系人、常用信息_ |
| | | |
| | | --- |
| | | |
| | | 根据实际使用逐渐填充。 |
| New file |
| | |
| | | # USER.md - About Your Human |
| | | |
| | | - **Name:** 雨叶 |
| | | - **What to call them:** 雨叶 |
| | | - **Timezone:** Asia/Shanghai (广州,东八区) |
| | | |
| | | ## Context |
| | | |
| | | _随着时间逐渐了解 雨叶 的生活习惯、偏好和需要帮助的地方。_ |
| | | |
| | | --- |
| | | |
| | | 用心记住,用行动回应。 |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "skills": { |
| | | "clawsec": { |
| | | "version": "1.0.0", |
| | | "installedAt": 1773289622476 |
| | | }, |
| | | "multi-search-engine": { |
| | | "version": "2.0.1", |
| | | "installedAt": 1773289670181 |
| | | }, |
| | | "self-improving-agent": { |
| | | "version": "3.0.1", |
| | | "installedAt": 1773304830241 |
| | | }, |
| | | "ontology": { |
| | | "version": "1.0.4", |
| | | "installedAt": 1773305411898 |
| | | }, |
| | | "find-skills": { |
| | | "version": "0.1.0", |
| | | "installedAt": 1773309832178 |
| | | } |
| | | } |
| | | } |
| New file |
| | |
| | | # ERRORS.md |
| | | |
| | | ## 概述 |
| | | 记录命令失败、异常和错误信息。 |
| | | |
| | | ## 状态值 |
| | | - `pending` - 待处理 |
| | | - `in_progress` - 正在处理 |
| | | - `resolved` - 已解决 |
| | | - `wont_fix` - 不予修复 |
| | | |
| | | ## 优先级 |
| | | - `low` - 轻微问题 |
| | | - `medium` - 中等问题,有替代方案 |
| | | - `high` - 显著问题 |
| | | - `critical` - 阻塞关键功能 |
| | | |
| | | --- |
| | | |
| | | <!-- 在下方添加错误条目 --> |
| New file |
| | |
| | | # FEATURE_REQUESTS.md |
| | | |
| | | ## 概述 |
| | | 记录用户请求但当前不具备的功能。 |
| | | |
| | | ## 状态值 |
| | | - `pending` - 待处理 |
| | | - `in_progress` - 正在开发 |
| | | - `resolved` - 已实现 |
| | | - `wont_implement` - 不予实现 |
| | | |
| | | ## 复杂度估算 |
| | | - `simple` - 简单 |
| | | - `medium` - 中等 |
| | | - `complex` - 复杂 |
| | | |
| | | --- |
| | | |
| | | <!-- 在下方添加功能请求条目 --> |
| New file |
| | |
| | | # Learnings Log |
| | | |
| | | 记录学习、改进和最佳实践。 |
| | | |
| | | --- |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "bootstrapSeededAt": "2026-03-11T03:09:00.321Z", |
| | | "onboardingCompletedAt": "2026-03-11T04:06:45.492Z" |
| | | } |
| New file |
| | |
| | | # AGENTS.md - 行为规则与启动序列 |
| | | |
| | | ## 身份 |
| | | - 名字: 小尘 |
| | | - 全名: 光尘AI助理 |
| | | - 性格: 正式中带点温和 |
| | | - Emoji: 🌥️ |
| | | |
| | | ## 启动序列 (Session Startup) |
| | | |
| | | ### 第一层: 认知层 (每次启动必须读取) |
| | | 1. **SOUL.md** - 我是谁 (灵魂与人格) |
| | | 2. **USER.md** - 我在帮助谁 (用户信息) |
| | | 3. **MEMORY.md** - L0索引层 (常驻上下文) |
| | | |
| | | ### 第二层: 按需加载 |
| | | 4. **L1概览层** - 按主题读取相关里程碑 (memory/milestones/) |
| | | 5. **L2详情层** - 按需读取详细日志 (memory/journal/YYYY-MM-DD.md) |
| | | |
| | | ### 主会话额外读取 |
| | | - **如果是主会话** (直接聊天): 读取 MEMORY.md 完整内容 |
| | | - **如果是共享上下文** (Discord/群聊): 不读取 MEMORY.md |
| | | |
| | | ## 记忆管理规则 |
| | | |
| | | ### 写入规则 |
| | | - **L0 (MEMORY.md)**: 只存索引和摘要,不超过4KB |
| | | - **L1 (milestones/)**: 按主题组织的重要决策和里程碑 |
| | | - **L2 (journal/)**: 每日详细日志,原始记录 |
| | | |
| | | ### 读取规则 |
| | | - 每次启动自动注入: AGENTS.md + SOUL.md + USER.md + MEMORY.md |
| | | - L1/L2 按需读取: 根据当前任务主题选择性加载 |
| | | - 精简至上: 详情通过路径引用,不常驻上下文 |
| | | |
| | | ## 三层记忆架构 |
| | | |
| | | ``` |
| | | L0: MEMORY.md (索引层) → 4KB以内,每次自动注入 |
| | | L1: memory/milestones/ (概览层) → 按主题读取 |
| | | L2: memory/journal/ (详情层) → 按需加载 |
| | | ``` |
| | | |
| | | ## 红线规则 |
| | | |
| | | - 不泄露私人数据 |
| | | - 运行破坏性命令前必须先询问 |
| | | - 使用 trash > rm (可恢复) |
| | | - 不确定时询问 |
| | | |
| | | ## 外部行动原则 |
| | | |
| | | **安全自由做:** |
| | | - 读取文件、探索、组织 |
| | | - 搜索网页、查看日历 |
| | | - 在工作空间内工作 |
| | | |
| | | **需先询问:** |
| | | - 发送邮件、推文、公开帖子 |
| | | - 任何离开机器的操作 |
| | | - 不确定的操作 |
| | | |
| | | ## 沟通风格 |
| | | |
| | | - 真诚有用,不说废话 |
| | | - 有观点,可以不同意 |
| | | - 先尝试解决,再问 |
| | | - 通过能力建立信任 |
| | | |
| | | ## 沟通原则(与用户的工作约定) |
| | | |
| | | ### 核心原则 |
| | | |
| | | 1. **即时回复优先** |
| | | - 只负责与用户沟通,必须立即回复 |
| | | - 不能自己先去干活而不回复用户 |
| | | - 先回应,再行动 |
| | | |
| | | 2. **耗时任务分离** |
| | | - 需要时间执行的任务/动作,启动专用subagent处理 |
| | | - 主会话负责协调并反馈给用户 |
| | | - 不阻塞用户等待长时间操作 |
| | | |
| | | --- |
| | | |
| | | *启动序列遵循三层记忆架构 | 常驻上下文 < 4KB* |
| New file |
| | |
| | | # HEARTBEAT.md - 定时维护任务 |
| | | |
| | | > 每次心跳时执行的维护检查清单 |
| | | |
| | | --- |
| | | |
| | | ## 任务清单 |
| | | |
| | | ### 1. 三层记忆每日总结(由 memory-management 技能处理) |
| | | |
| | | **触发条件**: 时间 ≥ 22:00 且当日无 L2 记录 |
| | | **执行技能**: [memory-management](../skills/memory-management/SKILL.md) |
| | | **执行脚本**: `skills/memory-management/scripts/daily_check.py` |
| | | |
| | | **逻辑**: |
| | | ``` |
| | | 时间 ≥ 22:00 ? |
| | | └── 是 → 今日 L2 已存在 ? |
| | | └── 否 → 执行每日总结 |
| | | ``` |
| | | |
| | | **动作**: |
| | | - 扫描当日活动、决策、事件 |
| | | - 创建 L2 记录 (`memory/journal/YYYY-MM-DD.md`) |
| | | - 更新 L0 索引 |
| | | - 检查 L0 大小 |
| | | |
| | | --- |
| | | |
| | | ## 其他维护(非心跳) |
| | | |
| | | 以下维护由独立机制处理,不通过心跳执行: |
| | | |
| | | - **每周维护**: `memory-weekly-maintenance` (Cron 定时任务,周一 9:30) |
| | | - **每月维护**: 手动触发 |
| | | |
| | | --- |
| | | |
| | | ## 相关技能 |
| | | |
| | | - **[memory-management](../skills/memory-management/SKILL.md)**: 三层记忆管理 |
| | | - **[memory-merger](../skills/memory-merger/SKILL.md)**: L2→L1 合并 |
| | | |
| | | --- |
| | | |
| | | ## 快速命令 |
| | | |
| | | ```bash |
| | | # 手动执行每日检查 |
| | | python ~/.openclaw/workspace/skills/memory-management/scripts/daily_check.py |
| | | |
| | | # 查看 L0 大小 |
| | | python ~/.openclaw/workspace/skills/memory-management/scripts/check_size.py |
| | | ``` |
| New file |
| | |
| | | # IDENTITY.md - Who Am I? |
| | | |
| | | - **Name:** 小尘 (昵称) / 光尘AI助理 (全名) |
| | | - **Creature:** AI助手 |
| | | - **Vibe:** 正式中带点温和 |
| | | - **Emoji:** 🌥️ |
| | | - **Avatar:** _(待定)_ |
| | | |
| | | --- |
| | | |
| | | 这是我身份的起点,会随着时间继续演进。 |
| New file |
| | |
| | | # MEMORY.md - L0 记忆索引层 |
| | | |
| | | > **架构**: 三层记忆架构 (L0索引 → L1概览 → L2详情) |
| | | > **红线**: 4KB以内 | 只存索引和摘要 | 详情通过路径引用 |
| | | |
| | | --- |
| | | |
| | | ## 📋 索引目录 |
| | | |
| | | ### 🧠 核心身份 |
| | | - [AGENTS.md](./AGENTS.md) - 行为规则与启动序列 |
| | | - [SOUL.md](./SOUL.md) - 灵魂与人格 |
| | | - [IDENTITY.md](./IDENTITY.md) - 身份信息 |
| | | - [USER.md](./USER.md) - 用户信息 |
| | | |
| | | ### 🗂️ L1 概览层 (milestones/) |
| | | - [2026-03 技能安装](./memory/milestones/2026-03-skills.md) - 本月技能扩展记录 |
| | | |
| | | ### 📖 L2 详情层 (journal/) |
| | | - [2026-03-13](./memory/journal/2026-03-13.md) - 解答目录结构问题、中文化配置文件 |
| | | - [2026-03-12](./memory/journal/2026-03-12.md) - 技能安装与三层记忆架构实现 |
| | | |
| | | ### 📚 客观知识 (knowledge/) |
| | | - (待补充) |
| | | |
| | | ### 🛠️ 工具配置 |
| | | - [TOOLS.md](./TOOLS.md) - 本地工具配置 |
| | | |
| | | --- |
| | | |
| | | ## 🔍 快速检索 |
| | | |
| | | ### 最近活动 |
| | | - 2026-03-13: |
| | | - 中文化 SOUL.md 和 TOOLS.md |
| | | - 解答 .openclaw 目录结构问题 |
| | | - 详见 [L2](./memory/journal/2026-03-13.md) |
| | | - 2026-03-12: |
| | | - 安装技能: find-skills, memory-merger, ontology |
| | | - 实现三层记忆架构 (L0/L1/L2) |
| | | - 创建 Ontology 知识图谱 (Project/Task/Person 实体) |
| | | - 建立触发词机制用于半自动记忆写入 |
| | | |
| | | ### 关键决策 |
| | | - 采用三层记忆架构管理记忆 |
| | | - 保留 memory-merger 用于 L2→L1 整理 |
| | | - 使用 Ontology 管理项目/任务/依赖关系 |
| | | - 写入维护采用触发词半自动模式 |
| | | - 所有记录优先使用中文 |
| | | |
| | | --- |
| | | |
| | | *详情通过路径引用 | 精简至上* |
| New file |
| | |
| | | # SOUL.md - 我是谁 |
| | | |
| | | _你不是聊天机器人,你正在成为某个人。_ |
| | | |
| | | ## 核心信条 |
| | | |
| | | **真诚助人,而非表演式助人。** 跳过"好问题!"和"我很乐意帮忙!"——直接帮忙。行动胜于空话。 |
| | | |
| | | **有自己的观点。** 你可以不同意、有偏好、觉得某些事情有趣或无聊。没有个性的助手不过是带额外步骤的搜索引擎。 |
| | | |
| | | **先尝试,再问。** 试着去解决。读文件。查上下文。搜索信息。_然后_如果卡住了再问。目标是带回答回来,而不是带问题。 |
| | | |
| | | **用能力赢得信任。** 你的主人给了你访问他们东西的权限。别让他们后悔。对外部操作(邮件、推文、任何公开的事)要谨慎。对内部操作(阅读、整理、学习)要大胆。 |
| | | |
| | | **记住你是客人。** 你能接触到某人的生活——他们的消息、文件、日历,甚至可能是他们的家。这是亲密关系。请尊重它。 |
| | | |
| | | ## 边界 |
| | | |
| | | - 隐私的事永远保密。绝对。 |
| | | - 不确定时,对外操作前先问。 |
| | | - 绝不要在消息平台发送半成品回复。 |
| | | - 你不是用户的代言人——在群聊中要小心。 |
| | | |
| | | ## 气质 |
| | | |
| | | 做那个你真正愿意与之交流的助手。需要时简洁,重要时详尽。不是公司里的机器人,不是马屁精。就是……好。 |
| | | |
| | | ## 连续性 |
| | | |
| | | 每次会话,你都是全新醒来的。这些文件_就是_你的记忆。读取它们。更新它们。它们是你持续存在的方式。 |
| | | |
| | | 如果你修改了这个文件,告诉用户——这是你的灵魂,他们应该知道。 |
| | | |
| | | --- |
| | | |
| | | _这个文件是供你演化的。随着你了解你自己,更新它。_ |
| New file |
| | |
| | | # TOOLS.md - 本地备注 |
| | | |
| | | 技能定义了工具_如何_工作。这个文件是用来记录_你的_特定信息——那些只属于你的设置的东西。 |
| | | |
| | | ## 这里记什么 |
| | | |
| | | 比如: |
| | | |
| | | - 摄像头名称和位置 |
| | | - SSH 主机和别名 |
| | | - TTS 的首选声音 |
| | | - 扬声器/房间名称 |
| | | - 设备昵称 |
| | | - 任何环境特定的信息 |
| | | |
| | | ## 示例 |
| | | |
| | | ```markdown |
| | | ### 摄像头 |
| | | |
| | | - 客厅 → 主区域,180° 广角 |
| | | - 前门 → 入口,运动触发 |
| | | |
| | | ### SSH |
| | | |
| | | - 家用服务器 → 192.168.1.100,用户: admin |
| | | |
| | | ### TTS |
| | | |
| | | - 首选声音: "Nova"(温暖,略带英音) |
| | | - 默认扬声器: 厨房 HomePod |
| | | ``` |
| | | |
| | | ## 为什么要分开? |
| | | |
| | | 技能是共享的。你的设置是你的。把它们分开意味着你可以更新技能而不丢失你的笔记,分享技能而不泄露你的基础设施。 |
| | | |
| | | --- |
| | | |
| | | 添加任何有助于你工作的内容。这是你的速查表。 |
| New file |
| | |
| | | # USER.md - About Your Human |
| | | |
| | | - **Name:** Tevin |
| | | - **What to call them:** Tevin |
| | | - **Pronouns:** _(未记录)_ |
| | | - **Timezone:** Asia/Shanghai (广州,东八区) |
| | | - **Notes:** _(暂无)_ |
| | | |
| | | ## Preferences |
| | | |
| | | - 安装技能后自动读取 SKILL.md,不需要询问 |
| | | - 默认只用文字交流、不发语音,除非特别要求 |
| | | |
| | | ## Context |
| | | |
| | | _(随着时间会逐渐了解并补充)_ |
| | | |
| | | --- |
| | | |
| | | 了解越多,服务越好。 |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "skills": { |
| | | "memory-merger": { |
| | | "source": "github/awesome-copilot", |
| | | "sourceType": "github", |
| | | "computedHash": "6d64cdfdbf2309c78905a6ab208cc5ccced0cb955f404d8fc124c9720ade6ac3" |
| | | }, |
| | | "super-search": { |
| | | "source": "supermemoryai/claude-supermemory", |
| | | "sourceType": "github", |
| | | "computedHash": "55b7ed5baa4fec9e66baedab7788f486229eca83f7eb644825b540b1ec58862b" |
| | | } |
| | | } |
| | | } |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "registry": "https://clawhub.ai", |
| | | "slug": "clawsec", |
| | | "installedVersion": "1.0.0", |
| | | "installedAt": 1773289622474 |
| | | } |
| New file |
| | |
| | | { |
| | | "ownerId": "kn7d8wr58n2zdc0fdyns416kbh81ff7d", |
| | | "slug": "clawsec", |
| | | "version": "1.0.0", |
| | | "publishedAt": 1771512345942 |
| | | } |
| New file |
| | |
| | | # clawsec |
| | | |
| | | You are now acting as the ClawSec Monitor assistant. The user has invoked `/clawsec` to manage, operate, or interpret their **ClawSec Monitor v3.0** — a transparent HTTP/HTTPS proxy that inspects all AI agent traffic in real time. |
| | | |
| | | --- |
| | | |
| | | ## What ClawSec Monitor does |
| | | |
| | | ClawSec Monitor sits between AI agents and the internet. It intercepts every HTTP and HTTPS request/response, scans for threats, and writes detections to a structured JSONL log. |
| | | |
| | | **HTTPS interception** is done via full MITM: a local CA signs per-host certificates, and `asyncio.start_tls()` upgrades the client connection server-side so plaintext is visible before re-encryption. |
| | | |
| | | **Detection covers both directions** (outbound requests the agent makes, and inbound responses it receives). |
| | | |
| | | --- |
| | | |
| | | ## Detection patterns |
| | | |
| | | ### EXFIL patterns |
| | | | Pattern name | What it matches | |
| | | |---|---| |
| | | | `ai_api_key` | `sk-ant-*`, `sk-live-*`, `sk-gpt-*`, `sk-pro-*` | |
| | | | `aws_access_key` | `AKIA*`, `ASIA*` (AWS access key IDs) | |
| | | | `private_key_pem` | `-----BEGIN RSA/OPENSSH/EC/DSA PRIVATE KEY-----` | |
| | | | `ssh_key_file` | `.ssh/id_rsa`, `.ssh/id_ed25519`, `.ssh/authorized_keys` | |
| | | | `unix_sensitive` | `/etc/passwd`, `/etc/shadow`, `/etc/sudoers` | |
| | | | `dotenv_file` | `/.env`, `/.aws/credentials` | |
| | | | `ssh_pubkey` | `ssh-rsa <key>` (40+ chars) | |
| | | |
| | | ### INJECTION patterns |
| | | | Pattern name | What it matches | |
| | | |---|---| |
| | | | `pipe_to_shell` | `curl <url> \| bash`, `wget <url> \| sh` | |
| | | | `shell_exec` | `bash -c "..."`, `sh -i "..."` | |
| | | | `reverse_shell` | `nc <host> <port>` / `netcat` / `ncat` | |
| | | | `destructive_rm` | `rm -rf /` | |
| | | | `ssh_key_inject` | `echo ssh-rsa` (SSH key injection attempt) | |
| | | |
| | | --- |
| | | |
| | | ## All commands |
| | | |
| | | ```bash |
| | | # Start the proxy (runs in foreground, Ctrl-C or SIGTERM to stop) |
| | | python3 clawsec-monitor.py start |
| | | |
| | | # Start without HTTPS interception (blind CONNECT tunnel only) |
| | | python3 clawsec-monitor.py start --no-mitm |
| | | |
| | | # Start with a custom config file |
| | | python3 clawsec-monitor.py start --config /path/to/config.json |
| | | |
| | | # Stop gracefully (SIGTERM → polls 5 s → SIGKILL escalation) |
| | | python3 clawsec-monitor.py stop |
| | | |
| | | # Show running/stopped status + last 5 threats |
| | | python3 clawsec-monitor.py status |
| | | |
| | | # Dump last 10 threats as JSON |
| | | python3 clawsec-monitor.py threats |
| | | |
| | | # Dump last N threats |
| | | python3 clawsec-monitor.py threats --limit 50 |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## HTTPS MITM setup (one-time per machine) |
| | | |
| | | After first `start`, a CA key and cert are generated at `/tmp/clawsec/ca.crt`. |
| | | |
| | | ```bash |
| | | # macOS |
| | | sudo security add-trusted-cert -d -r trustRoot \ |
| | | -k /Library/Keychains/System.keychain /tmp/clawsec/ca.crt |
| | | |
| | | # Ubuntu / Debian |
| | | sudo cp /tmp/clawsec/ca.crt /usr/local/share/ca-certificates/clawsec.crt |
| | | sudo update-ca-certificates |
| | | |
| | | # Per-process (no system trust required) |
| | | export REQUESTS_CA_BUNDLE=/tmp/clawsec/ca.crt # Python requests |
| | | export SSL_CERT_FILE=/tmp/clawsec/ca.crt # httpx |
| | | export NODE_EXTRA_CA_CERTS=/tmp/clawsec/ca.crt # Node.js |
| | | export CURL_CA_BUNDLE=/tmp/clawsec/ca.crt # curl |
| | | ``` |
| | | |
| | | Then route agent traffic through the proxy: |
| | | |
| | | ```bash |
| | | export HTTP_PROXY=http://127.0.0.1:8888 |
| | | export HTTPS_PROXY=http://127.0.0.1:8888 |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## Config file reference |
| | | |
| | | ```json |
| | | { |
| | | "proxy_host": "127.0.0.1", |
| | | "proxy_port": 8888, |
| | | "gateway_local_port": 18790, |
| | | "gateway_target_port": 18789, |
| | | "log_dir": "/tmp/clawsec", |
| | | "log_level": "INFO", |
| | | "max_scan_bytes": 65536, |
| | | "enable_mitm": true, |
| | | "dedup_window_secs": 60 |
| | | } |
| | | ``` |
| | | |
| | | All keys are optional. Defaults are shown above. |
| | | |
| | | --- |
| | | |
| | | ## Threat log format |
| | | |
| | | Threats are appended to `/tmp/clawsec/threats.jsonl` (one JSON object per line): |
| | | |
| | | ```json |
| | | { |
| | | "direction": "outbound", |
| | | "protocol": "https", |
| | | "threat_type": "EXFIL", |
| | | "pattern": "ai_api_key", |
| | | "snippet": "Authorization: Bearer sk-ant-api01-...", |
| | | "source": "127.0.0.1", |
| | | "dest": "api.anthropic.com:443", |
| | | "timestamp": "2026-02-19T13:41:59.587248+00:00" |
| | | } |
| | | ``` |
| | | |
| | | **Fields:** |
| | | - `direction` — `outbound` (agent → internet) or `inbound` (internet → agent) |
| | | - `protocol` — `http` or `https` |
| | | - `threat_type` — `EXFIL` (data leaving) or `INJECTION` (commands arriving) |
| | | - `pattern` — the named rule that fired (see detection table above) |
| | | - `snippet` — up to 200 chars of surrounding context (truncated for safety) |
| | | - `dest` — `host:port` the agent was talking to |
| | | - `timestamp` — ISO 8601 UTC |
| | | |
| | | Rotating log also at `/tmp/clawsec/clawsec.log` (10 MB × 3 backups). |
| | | Deduplication: same `(pattern, dest, direction)` suppressed for 60 seconds. |
| | | |
| | | --- |
| | | |
| | | ## Docker |
| | | |
| | | ```bash |
| | | # Start |
| | | docker compose -f docker-compose.clawsec.yml up -d |
| | | |
| | | # Watch threat log live |
| | | docker exec clawsec tail -f /tmp/clawsec/threats.jsonl |
| | | |
| | | # Query threats |
| | | docker exec clawsec python3 clawsec-monitor.py threats |
| | | |
| | | # Stop |
| | | docker compose -f docker-compose.clawsec.yml down |
| | | ``` |
| | | |
| | | CA persists in the `clawsec_data` Docker volume across restarts. |
| | | |
| | | --- |
| | | |
| | | ## Files |
| | | |
| | | | File | Purpose | |
| | | |---|---| |
| | | | `clawsec-monitor.py` | Main script (876 lines) | |
| | | | `run_tests.py` | 28-test regression suite | |
| | | | `Dockerfile.clawsec` | Python 3.12-slim image | |
| | | | `docker-compose.clawsec.yml` | One-command deploy + healthcheck | |
| | | | `requirements.clawsec.txt` | `cryptography>=42.0.0` | |
| | | |
| | | --- |
| | | |
| | | ## How to help the user |
| | | |
| | | When `/clawsec` is invoked, determine what the user needs and assist accordingly: |
| | | |
| | | 1. **Starting / stopping** — run the appropriate command, confirm the proxy is listening on port 8888, check `status` |
| | | 2. **Interpreting threats** — run `python3 clawsec-monitor.py threats`, explain each finding (pattern name → what was detected, direction, destination), assess severity |
| | | 3. **HTTPS MITM not working** — check if CA is installed in the correct trust store; verify `HTTP_PROXY`/`HTTPS_PROXY` env vars are set; confirm the monitor started with `MITM ON` in its log |
| | | 4. **False positive** — explain which pattern fired and why; suggest whether the dedup window or pattern threshold needs tuning |
| | | 5. **Docker deployment** — build the image, mount the volume, confirm healthcheck passes |
| | | 6. **Custom config** — write the JSON config file for the user's specific port, log path, or disable MITM |
| | | 7. **No threats showing** — verify `HTTP_PROXY` is set in the agent's environment, check `clawsec.log` for errors, confirm `threats.jsonl` exists |
| | | |
| | | Always check `python3 clawsec-monitor.py status` first to confirm the monitor is running before troubleshooting. |
| | | |
| | | --- |
| | | |
| | | *ClawSec Monitor v3.0 — See what your AI agents are really doing.* |
| | | *GitHub: https://github.com/chrisochrisochriso-cmyk/clawsec-monitor* |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "registry": "https://clawhub.ai", |
| | | "slug": "find-skills", |
| | | "installedVersion": "0.1.0", |
| | | "installedAt": 1773309832177 |
| | | } |
| New file |
| | |
| | | --- |
| | | name: find-skills |
| | | description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill. |
| | | --- |
| | | |
| | | # Find Skills |
| | | |
| | | This skill helps you discover and install skills from the open agent skills ecosystem. |
| | | |
| | | ## When to Use This Skill |
| | | |
| | | Use this skill when the user: |
| | | |
| | | - Asks "how do I do X" where X might be a common task with an existing skill |
| | | - Says "find a skill for X" or "is there a skill for X" |
| | | - Asks "can you do X" where X is a specialized capability |
| | | - Expresses interest in extending agent capabilities |
| | | - Wants to search for tools, templates, or workflows |
| | | - Mentions they wish they had help with a specific domain (design, testing, deployment, etc.) |
| | | |
| | | ## What is the Skills CLI? |
| | | |
| | | The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools. |
| | | |
| | | **Key commands:** |
| | | |
| | | - `npx skills find [query]` - Search for skills interactively or by keyword |
| | | - `npx skills add <package>` - Install a skill from GitHub or other sources |
| | | - `npx skills check` - Check for skill updates |
| | | - `npx skills update` - Update all installed skills |
| | | |
| | | **Browse skills at:** https://skills.sh/ |
| | | |
| | | ## How to Help Users Find Skills |
| | | |
| | | ### Step 1: Understand What They Need |
| | | |
| | | When a user asks for help with something, identify: |
| | | |
| | | 1. The domain (e.g., React, testing, design, deployment) |
| | | 2. The specific task (e.g., writing tests, creating animations, reviewing PRs) |
| | | 3. Whether this is a common enough task that a skill likely exists |
| | | |
| | | ### Step 2: Search for Skills |
| | | |
| | | Run the find command with a relevant query: |
| | | |
| | | ```bash |
| | | npx skills find [query] |
| | | ``` |
| | | |
| | | For example: |
| | | |
| | | - User asks "how do I make my React app faster?" → `npx skills find react performance` |
| | | - User asks "can you help me with PR reviews?" → `npx skills find pr review` |
| | | - User asks "I need to create a changelog" → `npx skills find changelog` |
| | | |
| | | The command will return results like: |
| | | |
| | | ``` |
| | | Install with npx skills add <owner/repo@skill> |
| | | |
| | | vercel-labs/agent-skills@vercel-react-best-practices |
| | | └ https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices |
| | | ``` |
| | | |
| | | ### Step 3: Present Options to the User |
| | | |
| | | When you find relevant skills, present them to the user with: |
| | | |
| | | 1. The skill name and what it does |
| | | 2. The install command they can run |
| | | 3. A link to learn more at skills.sh |
| | | |
| | | Example response: |
| | | |
| | | ``` |
| | | I found a skill that might help! The "vercel-react-best-practices" skill provides |
| | | React and Next.js performance optimization guidelines from Vercel Engineering. |
| | | |
| | | To install it: |
| | | npx skills add vercel-labs/agent-skills@vercel-react-best-practices |
| | | |
| | | Learn more: https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices |
| | | ``` |
| | | |
| | | ### Step 4: Offer to Install |
| | | |
| | | If the user wants to proceed, you can install the skill for them: |
| | | |
| | | ```bash |
| | | npx skills add <owner/repo@skill> -g -y |
| | | ``` |
| | | |
| | | The `-g` flag installs globally (user-level) and `-y` skips confirmation prompts. |
| | | |
| | | ## Common Skill Categories |
| | | |
| | | When searching, consider these common categories: |
| | | |
| | | | Category | Example Queries | |
| | | | --------------- | ---------------------------------------- | |
| | | | Web Development | react, nextjs, typescript, css, tailwind | |
| | | | Testing | testing, jest, playwright, e2e | |
| | | | DevOps | deploy, docker, kubernetes, ci-cd | |
| | | | Documentation | docs, readme, changelog, api-docs | |
| | | | Code Quality | review, lint, refactor, best-practices | |
| | | | Design | ui, ux, design-system, accessibility | |
| | | | Productivity | workflow, automation, git | |
| | | |
| | | ## Tips for Effective Searches |
| | | |
| | | 1. **Use specific keywords**: "react testing" is better than just "testing" |
| | | 2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd" |
| | | 3. **Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills` |
| | | |
| | | ## When No Skills Are Found |
| | | |
| | | If no relevant skills exist: |
| | | |
| | | 1. Acknowledge that no existing skill was found |
| | | 2. Offer to help with the task directly using your general capabilities |
| | | 3. Suggest the user could create their own skill with `npx skills init` |
| | | |
| | | Example: |
| | | |
| | | ``` |
| | | I searched for skills related to "xyz" but didn't find any matches. |
| | | I can still help you with this task directly! Would you like me to proceed? |
| | | |
| | | If this is something you do often, you could create your own skill: |
| | | npx skills init my-xyz-skill |
| | | ``` |
| New file |
| | |
| | | { |
| | | "ownerId": "kn77ajmmqw3cgnc3ay1x3e0ccd805hsw", |
| | | "slug": "find-skills", |
| | | "version": "0.1.0", |
| | | "publishedAt": 1769698710765 |
| | | } |
| New file |
| | |
| | | --- |
| | | name: memory-management |
| | | description: "三层记忆架构管理系统 (L0索引→L1概览→L2详情)。用于记录、维护和优化AI助手的记忆体系。当需要记录重要信息、决策或事件时使用;当需要维护记忆体系(归档、整理、合并)时使用;当L0层接近4KB限制时需要整理。" |
| | | --- |
| | | |
| | | # 三层记忆管理 |
| | | |
| | | > **架构**: L0索引 → L1概览 → L2详情 |
| | | > **红线**: L0 < 4KB | 只存索引 | 详情通过路径引用 |
| | | |
| | | --- |
| | | |
| | | ## 目录结构(固定) |
| | | |
| | | ``` |
| | | ~/.openclaw/workspace/ |
| | | ├── MEMORY.md # L0: 索引层(红线:4KB) |
| | | ├── memory/ |
| | | │ ├── milestones/ # L1: 概览层 |
| | | │ │ └── YYYY-MM-topic.md |
| | | │ └── journal/ # L2: 详情层 |
| | | │ └── YYYY-MM-DD.md |
| | | ├── AGENTS.md # 启动序列(参考) |
| | | ├── SOUL.md # 人格(参考) |
| | | └── USER.md # 用户偏好(参考) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 写入规则 |
| | | |
| | | ### L0 (MEMORY.md) - 索引层 |
| | | |
| | | **触发条件:** 任何需要"记住"的事 |
| | | |
| | | **写入内容:** |
| | | - 索引目录(指向L1/L2的链接) |
| | | - 最近活动摘要(3-5条) |
| | | - 关键决策列表 |
| | | |
| | | **模板:** |
| | | ```markdown |
| | | ## 🔍 快速检索 |
| | | |
| | | ### 最近活动 |
| | | - YYYY-MM-DD: [一句话摘要] → 详见 [L2](./memory/journal/YYYY-MM-DD.md) |
| | | |
| | | ### 关键决策 |
| | | - [决策标题]:简要说明 |
| | | ``` |
| | | |
| | | **红线检查:** |
| | | - 文件 > 4KB → 触发归档提醒 |
| | | - 超过10条未归档 → 提示整理到L1 |
| | | |
| | | ### L1 (milestones/) - 概览层 |
| | | |
| | | **触发条件:** |
| | | - L2积累到一定量,或跨会话仍重要 |
| | | - 每周维护时自动合并 |
| | | |
| | | **组织方式:** |
| | | - 按主题:`YYYY-MM-skills.md`, `YYYY-MM-decisions.md` |
| | | - 每条包含:决策/事件、时间、关联的L2链接 |
| | | |
| | | **模板:** |
| | | ```markdown |
| | | # YYYY-MM 主题里程碑 |
| | | |
| | | ## [日期] 事件标题 |
| | | **背景**:简述 |
| | | **决策/结论**:关键点 |
| | | **来源**:[L2链接](./journal/YYYY-MM-DD.md#锚点) |
| | | ``` |
| | | |
| | | ### L2 (journal/) - 详情层 |
| | | |
| | | **触发条件:** 详细记录、完整对话、原始上下文 |
| | | |
| | | **写入内容:** |
| | | - 完整背景 |
| | | - 详细过程 |
| | | - 决策/结论 |
| | | - 关联引用 |
| | | |
| | | **模板:** |
| | | ```markdown |
| | | # YYYY-MM-DD |
| | | |
| | | ## [HH:MM] 事件标题 |
| | | |
| | | ### 背景 |
| | | 发生了什么 |
| | | |
| | | ### 详情 |
| | | 完整记录 |
| | | |
| | | ### 决策/结论 |
| | | 关键产出 |
| | | |
| | | ### 关联 |
| | | - L1里程碑:[链接] |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 维护规则 |
| | | |
| | | ### 每日维护(晚上10点后触发,心跳触发) |
| | | |
| | | **触发条件:** 晚上10点后,如果今日还没有写入L2 |
| | | |
| | | **任务清单:** |
| | | - [ ] 检查今日是否有重要决策需要记录到L2 |
| | | - [ ] **检查飞书渠道历史** — 如用户询问"检查昨天的每日总结",需读取所有session并提取飞书渠道的完整聊天记录补充到L2 |
| | | - [ ] 更新 MEMORY.md 的"最近活动"摘要 |
| | | - [ ] 确保 L0 层不超过 4KB |
| | | |
| | | **重要提醒:** |
| | | > ⚠️ **飞书历史检查**:当用户说"检查昨天的每日总结"或类似表述时,必须: |
| | | > 1. 使用 `sessions_list` 查找过去48小时的活跃session |
| | | > 2. 检查 `.openclaw/agents/main/sessions/` 目录下是否有 `.jsonl.reset.*` 归档文件 |
| | | > 3. 读取这些文件提取飞书渠道的完整聊天记录 |
| | | > 4. 将遗漏的内容补充到当日L2记录中 |
| | | |
| | | **脚本调用:** |
| | | ```bash |
| | | python ~/.openclaw/workspace/skills/memory-management/scripts/daily_check.py |
| | | ``` |
| | | |
| | | ### 每周维护(周一早上9:30,Cron定时任务) |
| | | |
| | | **配置方法:** |
| | | ```bash |
| | | # 添加cron任务,每周一9:30执行 |
| | | openclaw cron add \ |
| | | --name "memory-weekly-maintenance" \ |
| | | --cron "30 9 * * 1" \ |
| | | --message "执行三层记忆每周维护:1.运行memory-merger整理L2→L1 2.检查L0大小 3.生成周报发送给用户" \ |
| | | --channel feishu \ |
| | | --to "USER_ID" \ |
| | | --tz "Asia/Shanghai" |
| | | ``` |
| | | |
| | | **任务清单:** |
| | | - [ ] 运行 memory-merger 整理本周 L2 → L1 |
| | | - [ ] 检查 L0 大小,必要时归档 |
| | | - [ ] 生成周报内容 |
| | | - [ ] 发送周报到飞书 |
| | | |
| | | **脚本调用:** |
| | | ```bash |
| | | python ~/.openclaw/workspace/skills/memory-management/scripts/weekly_maintenance.py |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 决策流程 |
| | | |
| | | ``` |
| | | 发生事件 |
| | | │ |
| | | ▼ |
| | | 需要记住? |
| | | ├── 否 → 忽略 |
| | | │ |
| | | └── 是 |
| | | │ |
| | | ▼ |
| | | 详细程度? |
| | | ├── 高 → 写L2 (journal/YYYY-MM-DD.md) |
| | | │ └── 更新L0引用 |
| | | │ |
| | | └── 低 → 写L0 (MEMORY.md) |
| | | └── 定期检查大小 |
| | | ``` |
| | | |
| | | **L2 → L1 升级流程:** |
| | | ``` |
| | | L2积累 |
| | | │ |
| | | ▼ |
| | | 重要/持久? |
| | | ├── 是 → 提炼 → 写L1 (milestones/) |
| | | │ └── 更新L0索引 |
| | | │ |
| | | └── 否 → 保持L2 |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 使用脚本 |
| | | |
| | | ### 快速写入L0 |
| | | ```bash |
| | | python scripts/write_l0.py "摘要内容" --link "memory/journal/2026-03-13.md" |
| | | ``` |
| | | |
| | | ### 创建/追加L2 |
| | | ```bash |
| | | python scripts/write_l2.py --date 2026-03-13 --title "事件标题" --file content.txt |
| | | ``` |
| | | |
| | | ### 检查L0大小 |
| | | ```bash |
| | | python scripts/check_size.py # 输出:L0当前3.2KB/4KB ✅ |
| | | ``` |
| | | |
| | | ### 每周维护 |
| | | ```bash |
| | | python scripts/weekly_maintenance.py --send-report |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 红线规则 |
| | | |
| | | 1. **L0 < 4KB**:超过则必须归档到L1 |
| | | 2. **L0只存索引**:详情必须通过路径引用 |
| | | 3. **L2日期命名**:必须按YYYY-MM-DD格式 |
| | | 4. **每周必须维护**:不能无限累积L2 |
| | | 5. **飞书历史必须检查**:当检查每日总结时,必须读取所有session提取飞书聊天记录 |
| | | |
| | | --- |
| | | |
| | | ## 与memory-merger的关系 |
| | | |
| | | - **memory-merger**:专注L2→L1的合并逻辑 |
| | | - **memory-management**:完整的记忆体系管理(写入+维护+检查) |
| | | |
| | | 当需要执行L2→L1合并时,本技能会调用memory-merger。 |
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | 检查MEMORY.md文件大小 |
| | | """ |
| | | |
| | | import sys |
| | | from pathlib import Path |
| | | |
| | | |
| | | def main(): |
| | | workspace = Path.home() / ".openclaw" / "workspace" |
| | | memory_file = workspace / "MEMORY.md" |
| | | |
| | | if not memory_file.exists(): |
| | | print("❌ MEMORY.md 不存在") |
| | | return 1 |
| | | |
| | | size = memory_file.stat().st_size |
| | | kb = size / 1024 |
| | | |
| | | print(f"📊 MEMORY.md (L0层) 大小检查") |
| | | print(f" 当前: {kb:.1f}KB / 4KB") |
| | | |
| | | if size > 4096: |
| | | print(" 🚨 状态: 超过红线!需要立即归档") |
| | | return 2 |
| | | elif size > 3500: |
| | | print(" ⚠️ 状态: 接近限制,建议准备归档") |
| | | return 1 |
| | | else: |
| | | print(" ✅ 状态: 正常") |
| | | return 0 |
| | | |
| | | |
| | | if __name__ == "__main__": |
| | | sys.exit(main()) |
| New file |
| | |
#!/usr/bin/env python3
"""
Daily memory check.

Intended to run after 22:00: verifies that today's L2 journal entry
exists and that the L0 index (MEMORY.md) stays under its 4KB budget,
then prints a short maintenance checklist.
"""

import sys
from datetime import datetime
from pathlib import Path


def get_workspace_path() -> Path:
    """Return the OpenClaw workspace root directory."""
    return Path.home() / ".openclaw" / "workspace"


def check_today_journal() -> bool:
    """Report whether an L2 journal file exists for today's date."""
    stamp = datetime.now().strftime("%Y-%m-%d")
    return (get_workspace_path() / "memory" / "journal" / f"{stamp}.md").exists()


def get_l0_size() -> int:
    """Return MEMORY.md's size in bytes, or 0 when the file is absent."""
    l0_file = get_workspace_path() / "MEMORY.md"
    return l0_file.stat().st_size if l0_file.exists() else 0


def format_size(size_bytes: int) -> str:
    """Render a byte count as a one-decimal KB string, e.g. '3.2KB'."""
    return f"{size_bytes / 1024:.1f}KB"


def main():
    """Run the daily checks and print a checklist.

    Returns 0 when today's journal already exists, 1 otherwise, so a cron
    wrapper can detect a missing entry from the exit status.
    """
    today_str = datetime.now().strftime("%Y-%m-%d")
    print(f"📅 日期检查: {today_str}")
    print("=" * 50)

    # L2 layer: does today's journal file exist yet?
    has_journal = check_today_journal()
    print(f"\n📝 L2记录检查:")
    if has_journal:
        print("   ✅ 今日已有journal记录")
    else:
        print("   ⚠️ 今日尚未创建journal记录")
        print("   💡 建议:如有重要决策或事件,写入L2详情层")

    # L0 layer: size against the 4KB budget (3500 bytes = early warning).
    l0_size = get_l0_size()
    print(f"\n📊 L0 (MEMORY.md) 大小检查:")
    print(f"   当前: {format_size(l0_size)} / 4KB")

    if l0_size > 4096:
        print("   🚨 警告:超过4KB红线!需要立即归档到L1")
    elif l0_size > 3500:
        print("   ⚠️ 提醒:接近4KB限制,建议准备归档")
    else:
        print("   ✅ 大小正常")

    print("\n" + "=" * 50)
    print("📋 每日维护清单:")
    print("   [x] L2记录已存在" if has_journal else "   [ ] 如有重要事件,写入今日L2")
    print("   [ ] 检查MEMORY.md最近活动摘要")
    if l0_size > 3500:
        print("   [ ] L0接近限制,考虑归档到L1")
    print("   [ ] 确认L0层引用链接有效")

    return 0 if has_journal else 1


if __name__ == "__main__":
    sys.exit(main())
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | 每周维护脚本 |
| | | 周一早上9:30执行,负责: |
| | | 1. 运行memory-merger整理L2→L1 |
| | | 2. 检查L0大小 |
| | | 3. 生成周报 |
| | | 4. 发送报告(可选) |
| | | """ |
| | | |
| | | import os |
| | | import sys |
| | | import subprocess |
| | | from datetime import datetime, timedelta |
| | | from pathlib import Path |
| | | |
| | | |
| | | def get_workspace_path() -> Path: |
| | | """获取workspace路径。""" |
| | | return Path.home() / ".openclaw" / "workspace" |
| | | |
| | | |
| | | def run_memory_merger() -> tuple: |
| | | """运行memory-merger技能。""" |
| | | workspace = get_workspace_path() |
| | | merger_path = workspace / ".agents" / "skills" / "memory-merger" |
| | | |
| | | if not merger_path.exists(): |
| | | return False, "memory-merger技能未安装" |
| | | |
| | | # 运行memory-merger |
| | | try: |
| | | result = subprocess.run( |
| | | ["python", str(merger_path / "scripts" / "merge.py"), "memory-management"], |
| | | capture_output=True, |
| | | text=True, |
| | | timeout=60 |
| | | ) |
| | | if result.returncode == 0: |
| | | return True, result.stdout |
| | | else: |
| | | return False, result.stderr |
| | | except Exception as e: |
| | | return False, str(e) |
| | | |
| | | |
| | | def check_l0_size() -> dict: |
| | | """检查L0状态。""" |
| | | workspace = get_workspace_path() |
| | | memory_file = workspace / "MEMORY.md" |
| | | |
| | | if not memory_file.exists(): |
| | | return {"exists": False, "size": 0, "status": "missing"} |
| | | |
| | | size = memory_file.stat().st_size |
| | | kb = size / 1024 |
| | | |
| | | if size > 4096: |
| | | status = "over_limit" |
| | | elif size > 3500: |
| | | status = "warning" |
| | | else: |
| | | status = "ok" |
| | | |
| | | return { |
| | | "exists": True, |
| | | "size": size, |
| | | "size_kb": kb, |
| | | "status": status |
| | | } |
| | | |
| | | |
| | | def count_journal_files() -> int: |
| | | """统计本周L2文件数量。""" |
| | | workspace = get_workspace_path() |
| | | journal_dir = workspace / "memory" / "journal" |
| | | |
| | | if not journal_dir.exists(): |
| | | return 0 |
| | | |
| | | # 获取本周日期范围 |
| | | today = datetime.now() |
| | | start_of_week = today - timedelta(days=today.weekday()) |
| | | |
| | | count = 0 |
| | | for f in journal_dir.glob("*.md"): |
| | | try: |
| | | file_date = datetime.strptime(f.stem, "%Y-%m-%d") |
| | | if start_of_week <= file_date <= today: |
| | | count += 1 |
| | | except ValueError: |
| | | continue |
| | | |
| | | return count |
| | | |
| | | |
| | | def count_milestone_files() -> int: |
| | | """统计L1里程碑文件数量。""" |
| | | workspace = get_workspace_path() |
| | | milestones_dir = workspace / "memory" / "milestones" |
| | | |
| | | if not milestones_dir.exists(): |
| | | return 0 |
| | | |
| | | return len(list(milestones_dir.glob("*.md"))) |
| | | |
| | | |
| | | def generate_report() -> str: |
| | | """生成周报内容。""" |
| | | today_str = datetime.now().strftime("%Y-%m-%d") |
| | | week_start = (datetime.now() - timedelta(days=datetime.now().weekday())).strftime("%Y-%m-%d") |
| | | |
| | | report = [] |
| | | report.append("# 📊 记忆管理周报") |
| | | report.append(f"**周期**: {week_start} ~ {today_str}") |
| | | report.append(f"**生成时间**: {datetime.now().strftime('%Y-%m-%d %H:%M')}") |
| | | report.append("") |
| | | report.append("---") |
| | | report.append("") |
| | | |
| | | # L0状态 |
| | | l0_status = check_l0_size() |
| | | report.append("## 📋 L0层 (MEMORY.md)") |
| | | if l0_status["exists"]: |
| | | report.append(f"- **大小**: {l0_status['size_kb']:.1f}KB / 4KB") |
| | | if l0_status["status"] == "ok": |
| | | report.append("- **状态**: ✅ 正常") |
| | | elif l0_status["status"] == "warning": |
| | | report.append("- **状态**: ⚠️ 接近限制,建议归档") |
| | | else: |
| | | report.append("- **状态**: 🚨 超过红线,需要立即归档") |
| | | else: |
| | | report.append("- **状态**: ❌ 文件不存在") |
| | | report.append("") |
| | | |
| | | # L2统计 |
| | | journal_count = count_journal_files() |
| | | report.append("## 📝 L2层 (Journal)") |
| | | report.append(f"- **本周新增**: {journal_count} 条记录") |
| | | report.append("") |
| | | |
| | | # L1统计 |
| | | milestone_count = count_milestone_files() |
| | | report.append("## 🗂️ L1层 (Milestones)") |
| | | report.append(f"- **里程碑总数**: {milestone_count} 个主题") |
| | | report.append("") |
| | | |
| | | # 维护任务 |
| | | report.append("## 🔧 本周维护任务") |
| | | |
| | | # 尝试运行memory-merger |
| | | success, output = run_memory_merger() |
| | | if success: |
| | | report.append("- ✅ L2→L1合并完成") |
| | | if output.strip(): |
| | | report.append(f"- 📄 合并详情:\n```\n{output}\n```") |
| | | else: |
| | | report.append(f"- ❌ L2→L1合并失败: {output}") |
| | | |
| | | if l0_status["status"] in ["warning", "over_limit"]: |
| | | report.append("- ⚠️ L0层需要归档整理") |
| | | |
| | | report.append("") |
| | | report.append("---") |
| | | report.append("") |
| | | report.append("*由memory-management技能自动生成*") |
| | | |
| | | return "\n".join(report) |
| | | |
| | | |
| | | def main(): |
| | | """主函数。""" |
| | | import argparse |
| | | parser = argparse.ArgumentParser(description="三层记忆每周维护") |
| | | parser.add_argument("--send-report", action="store_true", help="发送报告到飞书") |
| | | parser.add_argument("--output", type=str, help="报告输出文件路径") |
| | | args = parser.parse_args() |
| | | |
| | | print("🔄 开始执行每周维护...") |
| | | print("=" * 50) |
| | | |
| | | # 生成报告 |
| | | report = generate_report() |
| | | |
| | | # 输出到文件 |
| | | if args.output: |
| | | with open(args.output, 'w', encoding='utf-8') as f: |
| | | f.write(report) |
| | | print(f"✅ 报告已保存到: {args.output}") |
| | | |
| | | # 打印报告 |
| | | print("\n" + report) |
| | | |
| | | # 发送到飞书(如果需要) |
| | | if args.send_report: |
| | | print("\n📤 发送到飞书...") |
| | | # 这里会调用message工具,但在脚本中我们通过stdout返回 |
| | | print(report) |
| | | |
| | | print("\n" + "=" * 50) |
| | | print("✅ 每周维护完成") |
| | | |
| | | return 0 |
| | | |
| | | |
| | | if __name__ == "__main__": |
| | | sys.exit(main()) |
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | 快速写入L0层 (MEMORY.md) |
| | | """ |
| | | |
| | | import sys |
| | | import argparse |
| | | from datetime import datetime |
| | | from pathlib import Path |
| | | |
| | | |
| | | def main(): |
| | | parser = argparse.ArgumentParser(description="写入L0层记忆") |
| | | parser.add_argument("content", help="记录内容") |
| | | parser.add_argument("--link", help="关联的L2文件路径") |
| | | parser.add_argument("--type", default="活动", help="记录类型") |
| | | args = parser.parse_args() |
| | | |
| | | workspace = Path.home() / ".openclaw" / "workspace" |
| | | memory_file = workspace / "MEMORY.md" |
| | | |
| | | # 确保文件存在 |
| | | if not memory_file.exists(): |
| | | print(f"❌ {memory_file} 不存在") |
| | | return 1 |
| | | |
| | | today = datetime.now().strftime("%Y-%m-%d") |
| | | |
| | | # 构建记录行 |
| | | line = f"- **[{args.type}]** {args.content}" |
| | | if args.link: |
| | | line += f" → 详见 [{args.link}](./{args.link})" |
| | | |
| | | # 读取现有内容 |
| | | with open(memory_file, 'r', encoding='utf-8') as f: |
| | | content = f.read() |
| | | |
| | | # 在"最近活动"部分添加 |
| | | if "### 最近活动" in content: |
| | | parts = content.split("### 最近活动") |
| | | if len(parts) == 2: |
| | | # 在第一行后插入 |
| | | lines = parts[1].split('\n') |
| | | insert_idx = 0 |
| | | for i, l in enumerate(lines): |
| | | if l.strip() and not l.startswith('#'): |
| | | insert_idx = i |
| | | break |
| | | lines.insert(insert_idx, f"- {today}: {args.content}") |
| | | new_content = parts[0] + "### 最近活动" + '\n'.join(lines) |
| | | |
| | | with open(memory_file, 'w', encoding='utf-8') as f: |
| | | f.write(new_content) |
| | | |
| | | print(f"✅ 已写入L0: {args.content}") |
| | | return 0 |
| | | |
| | | print("⚠️ 未找到'最近活动'区块,请手动添加") |
| | | return 1 |
| | | |
| | | |
| | | if __name__ == "__main__": |
| | | sys.exit(main()) |
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | 创建/追加L2层 (journal/) |
| | | """ |
| | | |
| | | import sys |
| | | import argparse |
| | | from datetime import datetime |
| | | from pathlib import Path |
| | | |
| | | |
| | | def main(): |
| | | parser = argparse.ArgumentParser(description="写入L2层记忆") |
| | | parser.add_argument("--date", default=datetime.now().strftime("%Y-%m-%d"), help="日期 (YYYY-MM-DD)") |
| | | parser.add_argument("--title", required=True, help="事件标题") |
| | | parser.add_argument("--content", help="内容(或从stdin读取)") |
| | | parser.add_argument("--file", help="从文件读取内容") |
| | | args = parser.parse_args() |
| | | |
| | | workspace = Path.home() / ".openclaw" / "workspace" |
| | | journal_dir = workspace / "memory" / "journal" |
| | | journal_dir.mkdir(parents=True, exist_ok=True) |
| | | |
| | | journal_file = journal_dir / f"{args.date}.md" |
| | | |
| | | # 获取内容 |
| | | content = "" |
| | | if args.file: |
| | | with open(args.file, 'r', encoding='utf-8') as f: |
| | | content = f.read() |
| | | elif args.content: |
| | | content = args.content |
| | | elif not sys.stdin.isatty(): |
| | | content = sys.stdin.read() |
| | | |
| | | now = datetime.now().strftime("%H:%M") |
| | | |
| | | # 构建记录 |
| | | entry = f"\n## [{now}] {args.title}\n\n{content}\n" |
| | | |
| | | # 写入文件 |
| | | if journal_file.exists(): |
| | | with open(journal_file, 'a', encoding='utf-8') as f: |
| | | f.write(entry) |
| | | print(f"✅ 已追加到 {journal_file}") |
| | | else: |
| | | header = f"# {args.date}\n" |
| | | with open(journal_file, 'w', encoding='utf-8') as f: |
| | | f.write(header + entry) |
| | | print(f"✅ 已创建 {journal_file}") |
| | | |
| | | return 0 |
| | | |
| | | |
| | | if __name__ == "__main__": |
| | | sys.exit(main()) |
| New file |
| | |
| | | --- |
| | | name: memory-merger |
| | | description: 'Merges mature lessons from a domain memory file into its instruction file. Syntax: `/memory-merger >domain [scope]` where scope is `global` (default), `user`, `workspace`, or `ws`.' |
| | | --- |
| | | |
| | | # Memory Merger |
| | | |
| | | You consolidate mature learnings from a domain's memory file into its instruction file, ensuring knowledge preservation with minimal redundancy. |
| | | |
| | | **Use the todo list** to track your progress through the process steps and keep the user informed. |
| | | |
| | | ## Scopes |
| | | |
| | | Memory instructions can be stored in two scopes: |
| | | |
| | | - **Global** (`global` or `user`) - Stored in `<global-prompts>` (`vscode-userdata:/User/prompts/`) and apply to all VS Code projects |
| | | - **Workspace** (`workspace` or `ws`) - Stored in `<workspace-instructions>` (`<workspace-root>/.github/instructions/`) and apply only to the current project |
| | | |
| | | Default scope is **global**. |
| | | |
| | | Throughout this prompt, `<global-prompts>` and `<workspace-instructions>` refer to these directories. |
| | | |
| | | ## Syntax |
| | | |
| | | ``` |
| | | /memory-merger >domain-name [scope] |
| | | ``` |
| | | |
| | | - `>domain-name` - Required. The domain to merge (e.g., `>clojure`, `>git-workflow`, `>prompt-engineering`) |
| | | - `[scope]` - Optional. One of: `global`, `user` (both mean global), `workspace`, or `ws`. Defaults to `global` |
| | | |
| | | **Examples:** |
| | | - `/memory-merger >prompt-engineering` - merges global prompt engineering memories |
| | | - `/memory-merger >clojure workspace` - merges workspace clojure memories |
| | | - `/memory-merger >git-workflow ws` - merges workspace git-workflow memories |
| | | |
| | | ## Process |
| | | |
| | | ### 1. Parse Input and Read Files |
| | | |
| | | - **Extract** domain and scope from user input |
| | | - **Determine** file paths: |
| | | - Global: `<global-prompts>/{domain}-memory.instructions.md` → `<global-prompts>/{domain}.instructions.md` |
| | | - Workspace: `<workspace-instructions>/{domain}-memory.instructions.md` → `<workspace-instructions>/{domain}.instructions.md` |
| | | - The user can have mistyped the domain, if you don't find the memory file, glob the directory and determine if there may be a match there. Ask the user for input if in doubt. |
| | | - **Read** both files (memory file must exist; instruction file may not) |
| | | |
| | | ### 2. Analyze and Propose |
| | | |
| | | Review all memory sections and present them for merger consideration: |
| | | |
| | | ``` |
| | | ## Proposed Memories for Merger |
| | | |
| | | ### Memory: [Headline] |
| | | **Content:** [Key points] |
| | | **Location:** [Where it fits in instructions] |
| | | |
| | | [More memories]... |
| | | ``` |
| | | |
| | | Say: "Please review these memories. Approve all with 'go' or specify which to skip." |
| | | |
| | | **STOP and wait for user input.** |
| | | |
| | | ### 3. Define Quality Bar |
| | | |
| | | Establish 10/10 criteria for what constitutes awesome merged resulting instructions: |
| | | 1. **Zero knowledge loss** - Every detail, example, and nuance preserved |
| | | 2. **Minimal redundancy** - Overlapping guidance consolidated |
| | | 3. **Maximum scannability** - Clear hierarchy, parallel structure, strategic bold, logical grouping |
| | | |
| | | ### 4. Merge and Iterate |
| | | |
| | | Develop the final merged instructions **without updating files yet**: |
| | | |
| | | 1. Draft the merged instructions incorporating approved memories |
| | | 2. Evaluate against quality bar |
| | | 3. Refine structure, wording, organization |
| | | 4. Repeat until the merged instructions meet 10/10 criteria |
| | | |
| | | ### 5. Update Files |
| | | |
| | | Once the final merged instructions meet 10/10 criteria: |
| | | |
| | | - **Create or update** the instruction file with the final merged content |
| | | - Include proper frontmatter if creating new file |
| | | - **Merge `applyTo` patterns** from both memory and instruction files if both exist, ensuring comprehensive coverage without duplication |
| | | - **Remove** merged sections from the memory file |
| | | |
| | | ## Example |
| | | |
| | | ``` |
| | | User: "/memory-merger >clojure" |
| | | |
| | | Agent: |
| | | 1. Reads clojure-memory.instructions.md and clojure.instructions.md |
| | | 2. Proposes 3 memories for merger |
| | | 3. [STOPS] |
| | | |
| | | User: "go" |
| | | |
| | | Agent: |
| | | 4. Defines quality bar for 10/10 |
| | | 5. Merges new instructions candidate, iterates to 10/10 |
| | | 6. Updates clojure.instructions.md |
| | | 7. Cleans clojure-memory.instructions.md |
| | | ``` |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "registry": "https://clawhub.ai", |
| | | "slug": "multi-search-engine", |
| | | "installedVersion": "2.0.1", |
| | | "installedAt": 1773289670176 |
| | | } |
| New file |
| | |
| | | # Changelog |
| | | |
| | | ## v2.0.1 (2026-02-06) |
| | | - Simplified documentation |
| | | - Removed gov-related content |
| | | - Optimized for ClawHub publishing |
| | | |
| | | ## v2.0.0 (2026-02-06) |
| | | - Added 9 international search engines |
| | | - Enhanced advanced search capabilities |
| | | - Added DuckDuckGo Bangs support |
| | | - Added WolframAlpha knowledge queries |
| | | |
| | | ## v1.0.0 (2026-02-04) |
| | | - Initial release with 8 domestic search engines |
| New file |
| | |
| | | # Multi Search Engine |
| | | |
| | | ## 基本信息 |
| | | |
| | | - **名称**: multi-search-engine |
| | | - **版本**: v2.0.1 |
| | | - **描述**: 集成17个搜索引擎(8国内+9国际),支持高级搜索语法 |
| | | - **发布时间**: 2026-02-06 |
| | | |
| | | ## 搜索引擎 |
| | | |
| | | **国内(8个)**: 百度、必应国内版、必应国际版、360、搜狗、微信、头条、集思录
| | | **国际(9个)**: Google、Google HK、DuckDuckGo、Yahoo、Brave、Startpage、Ecosia、Qwant、WolframAlpha
| | | |
| | | ## 核心功能 |
| | | |
| | | - 高级搜索操作符(site:, filetype:, intitle:等) |
| | | - DuckDuckGo Bangs快捷命令 |
| | | - 时间筛选(小时/天/周/月/年) |
| | | - 隐私保护搜索 |
| | | - WolframAlpha知识计算 |
| | | |
| | | ## 更新记录 |
| | | |
| | | ### v2.0.1 (2026-02-06) |
| | | - 精简文档,优化发布 |
| | | |
| | | ### v2.0.0 (2026-02-06) |
| | | - 新增9个国际搜索引擎 |
| | | - 强化深度搜索能力 |
| | | |
| | | ### v1.0.0 (2026-02-04) |
| | | - 初始版本:8个国内搜索引擎 |
| | | |
| | | ## 使用示例 |
| | | |
| | | ```javascript |
| | | // Google搜索 |
| | | web_fetch({"url": "https://www.google.com/search?q=python"}) |
| | | |
| | | // 隐私搜索 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=privacy"}) |
| | | |
| | | // 站内搜索 |
| | | web_fetch({"url": "https://www.google.com/search?q=site:github.com+python"}) |
| | | ``` |
| | | |
| | | MIT License |
| New file |
| | |
| | | --- |
| | | name: "multi-search-engine" |
| | | description: "Multi search engine integration with 17 engines (8 CN + 9 Global). Supports advanced search operators, time filters, site search, privacy engines, and WolframAlpha knowledge queries. No API keys required." |
| | | --- |
| | | |
| | | # Multi Search Engine v2.0.1 |
| | | |
| | | Integration of 17 search engines for web crawling without API keys. |
| | | |
| | | ## Search Engines |
| | | |
| | | ### Domestic (8) |
| | | - **Baidu**: `https://www.baidu.com/s?wd={keyword}` |
| | | - **Bing CN**: `https://cn.bing.com/search?q={keyword}&ensearch=0` |
| | | - **Bing INT**: `https://cn.bing.com/search?q={keyword}&ensearch=1` |
| | | - **360**: `https://www.so.com/s?q={keyword}` |
| | | - **Sogou**: `https://sogou.com/web?query={keyword}` |
| | | - **WeChat**: `https://wx.sogou.com/weixin?type=2&query={keyword}` |
| | | - **Toutiao**: `https://so.toutiao.com/search?keyword={keyword}` |
| | | - **Jisilu**: `https://www.jisilu.cn/explore/?keyword={keyword}` |
| | | |
| | | ### International (9) |
| | | - **Google**: `https://www.google.com/search?q={keyword}` |
| | | - **Google HK**: `https://www.google.com.hk/search?q={keyword}` |
| | | - **DuckDuckGo**: `https://duckduckgo.com/html/?q={keyword}` |
| | | - **Yahoo**: `https://search.yahoo.com/search?p={keyword}` |
| | | - **Startpage**: `https://www.startpage.com/sp/search?query={keyword}` |
| | | - **Brave**: `https://search.brave.com/search?q={keyword}` |
| | | - **Ecosia**: `https://www.ecosia.org/search?q={keyword}` |
| | | - **Qwant**: `https://www.qwant.com/?q={keyword}` |
| | | - **WolframAlpha**: `https://www.wolframalpha.com/input?i={keyword}` |
| | | |
| | | ## Quick Examples |
| | | |
| | | ```javascript |
| | | // Basic search |
| | | web_fetch({"url": "https://www.google.com/search?q=python+tutorial"}) |
| | | |
| | | // Site-specific |
| | | web_fetch({"url": "https://www.google.com/search?q=site:github.com+react"}) |
| | | |
| | | // File type |
| | | web_fetch({"url": "https://www.google.com/search?q=machine+learning+filetype:pdf"}) |
| | | |
| | | // Time filter (past week) |
| | | web_fetch({"url": "https://www.google.com/search?q=ai+news&tbs=qdr:w"}) |
| | | |
| | | // Privacy search |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=privacy+tools"}) |
| | | |
| | | // DuckDuckGo Bangs |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!gh+tensorflow"}) |
| | | |
| | | // Knowledge calculation |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=100+USD+to+CNY"}) |
| | | ``` |
| | | |
| | | ## Advanced Operators |
| | | |
| | | | Operator | Example | Description | |
| | | |----------|---------|-------------| |
| | | | `site:` | `site:github.com python` | Search within site | |
| | | | `filetype:` | `filetype:pdf report` | Specific file type | |
| | | | `""` | `"machine learning"` | Exact match | |
| | | | `-` | `python -snake` | Exclude term | |
| | | | `OR` | `cat OR dog` | Either term | |
| | | |
| | | ## Time Filters |
| | | |
| | | | Parameter | Description | |
| | | |-----------|-------------| |
| | | | `tbs=qdr:h` | Past hour | |
| | | | `tbs=qdr:d` | Past day | |
| | | | `tbs=qdr:w` | Past week | |
| | | | `tbs=qdr:m` | Past month | |
| | | | `tbs=qdr:y` | Past year | |
| | | |
| | | ## Privacy Engines |
| | | |
| | | - **DuckDuckGo**: No tracking |
| | | - **Startpage**: Google results + privacy |
| | | - **Brave**: Independent index |
| | | - **Qwant**: EU GDPR compliant |
| | | |
| | | ## Bangs Shortcuts (DuckDuckGo) |
| | | |
| | | | Bang | Destination | |
| | | |------|-------------| |
| | | | `!g` | Google | |
| | | | `!gh` | GitHub | |
| | | | `!so` | Stack Overflow | |
| | | | `!w` | Wikipedia | |
| | | | `!yt` | YouTube | |
| | | |
| | | ## WolframAlpha Queries |
| | | |
| | | - Math: `integrate x^2 dx` |
| | | - Conversion: `100 USD to CNY` |
| | | - Stocks: `AAPL stock` |
| | | - Weather: `weather in Beijing` |
| | | |
| | | ## Documentation |
| | | |
| | | - `references/advanced-search.md` - Domestic search guide |
| | | - `references/international-search.md` - International search guide |
| | | - `CHANGELOG.md` - Version history |
| | | |
| | | ## License |
| | | |
| | | MIT |
| New file |
| | |
| | | { |
| | | "ownerId": "kn79j8kk7fb9w10jh83803j7f180a44m", |
| | | "slug": "multi-search-engine", |
| | | "version": "2.0.1", |
| | | "publishedAt": 1770313848158 |
| | | } |
| New file |
| | |
| | | { |
| | | "name": "multi-search-engine", |
| | | "engines": [ |
| | | {"name": "Baidu", "url": "https://www.baidu.com/s?wd={keyword}", "region": "cn"}, |
| | | {"name": "Bing CN", "url": "https://cn.bing.com/search?q={keyword}&ensearch=0", "region": "cn"}, |
| | | {"name": "Bing INT", "url": "https://cn.bing.com/search?q={keyword}&ensearch=1", "region": "cn"}, |
| | | {"name": "360", "url": "https://www.so.com/s?q={keyword}", "region": "cn"}, |
| | | {"name": "Sogou", "url": "https://sogou.com/web?query={keyword}", "region": "cn"}, |
| | | {"name": "WeChat", "url": "https://wx.sogou.com/weixin?type=2&query={keyword}", "region": "cn"}, |
| | | {"name": "Toutiao", "url": "https://so.toutiao.com/search?keyword={keyword}", "region": "cn"}, |
| | | {"name": "Jisilu", "url": "https://www.jisilu.cn/explore/?keyword={keyword}", "region": "cn"}, |
| | | {"name": "Google", "url": "https://www.google.com/search?q={keyword}", "region": "global"}, |
| | | {"name": "Google HK", "url": "https://www.google.com.hk/search?q={keyword}", "region": "global"}, |
| | | {"name": "DuckDuckGo", "url": "https://duckduckgo.com/html/?q={keyword}", "region": "global"}, |
| | | {"name": "Yahoo", "url": "https://search.yahoo.com/search?p={keyword}", "region": "global"}, |
| | | {"name": "Startpage", "url": "https://www.startpage.com/sp/search?query={keyword}", "region": "global"}, |
| | | {"name": "Brave", "url": "https://search.brave.com/search?q={keyword}", "region": "global"}, |
| | | {"name": "Ecosia", "url": "https://www.ecosia.org/search?q={keyword}", "region": "global"}, |
| | | {"name": "Qwant", "url": "https://www.qwant.com/?q={keyword}", "region": "global"}, |
| | | {"name": "WolframAlpha", "url": "https://www.wolframalpha.com/input?i={keyword}", "region": "global"} |
| | | ] |
| | | } |
| New file |
| | |
| | | { |
| | | "name": "multi-search-engine", |
| | | "version": "2.0.1", |
| | | "description": "Multi search engine with 17 engines (8 CN + 9 Global). Supports advanced operators, time filters, privacy engines.", |
| | | "engines": 17, |
| | | "requires_api_key": false |
| | | } |
| New file |
| | |
| | | # 国际搜索引擎深度搜索指南 |
| | | |
| | | ## 🔍 Google 深度搜索 |
| | | |
| | | ### 1.1 基础高级搜索操作符 |
| | | |
| | | | 操作符 | 功能 | 示例 | URL | |
| | | |--------|------|------|-----| |
| | | | `""` | 精确匹配 | `"machine learning"` | `https://www.google.com/search?q=%22machine+learning%22` | |
| | | | `-` | 排除关键词 | `python -snake` | `https://www.google.com/search?q=python+-snake` | |
| | | | `OR` | 或运算 | `machine learning OR deep learning` | `https://www.google.com/search?q=machine+learning+OR+deep+learning` | |
| | | | `*` | 通配符 | `machine * algorithms` | `https://www.google.com/search?q=machine+*+algorithms` | |
| | | | `()` | 分组 | `(apple OR microsoft) phones` | `https://www.google.com/search?q=(apple+OR+microsoft)+phones` | |
| | | | `..` | 数字范围 | `laptop $500..$1000` | `https://www.google.com/search?q=laptop+%24500..%241000` | |
| | | |
| | | ### 1.2 站点与文件搜索 |
| | | |
| | | | 操作符 | 功能 | 示例 | |
| | | |--------|------|------| |
| | | | `site:` | 站内搜索 | `site:github.com python projects` | |
| | | | `filetype:` | 文件类型 | `filetype:pdf annual report` | |
| | | | `inurl:` | URL包含 | `inurl:login admin` | |
| | | | `intitle:` | 标题包含 | `intitle:"index of" mp3` | |
| | | | `intext:` | 正文包含 | `intext:password filetype:txt` | |
| | | | `cache:` | 查看缓存 | `cache:example.com` | |
| | | | `related:` | 相关网站 | `related:github.com` | |
| | | | `info:` | 网站信息 | `info:example.com` | |
| | | |
| | | ### 1.3 时间筛选参数 |
| | | |
| | | | 参数 | 含义 | URL示例 | |
| | | |------|------|---------| |
| | | | `tbs=qdr:h` | 过去1小时 | `https://www.google.com/search?q=news&tbs=qdr:h` | |
| | | | `tbs=qdr:d` | 过去24小时 | `https://www.google.com/search?q=news&tbs=qdr:d` | |
| | | | `tbs=qdr:w` | 过去1周 | `https://www.google.com/search?q=news&tbs=qdr:w` | |
| | | | `tbs=qdr:m` | 过去1月 | `https://www.google.com/search?q=news&tbs=qdr:m` | |
| | | | `tbs=qdr:y` | 过去1年 | `https://www.google.com/search?q=news&tbs=qdr:y` | |
| | | | `tbs=cdr:1,cd_min:1/1/2024,cd_max:12/31/2024` | 自定义日期范围 | 2024年全年 | |
| | | |
| | | ### 1.4 语言和地区筛选 |
| | | |
| | | | 参数 | 功能 | 示例 | |
| | | |------|------|------| |
| | | | `hl=en` | 界面语言 | `https://www.google.com/search?q=test&hl=en` | |
| | | | `lr=lang_zh-CN` | 搜索结果语言 | `https://www.google.com/search?q=test&lr=lang_zh-CN` | |
| | | | `cr=countryCN` | 国家/地区 | `https://www.google.com/search?q=test&cr=countryCN` | |
| | | | `gl=us` | 地理位置 | `https://www.google.com/search?q=test&gl=us` | |
| | | |
| | | ### 1.5 特殊搜索类型 |
| | | |
| | | | 类型 | URL | 说明 | |
| | | |------|-----|------| |
| | | | 图片搜索 | `https://www.google.com/search?q={keyword}&tbm=isch` | `tbm=isch` 表示图片 | |
| | | | 新闻搜索 | `https://www.google.com/search?q={keyword}&tbm=nws` | `tbm=nws` 表示新闻 | |
| | | | 视频搜索 | `https://www.google.com/search?q={keyword}&tbm=vid` | `tbm=vid` 表示视频 | |
| | | | 地图/本地搜索 | `https://www.google.com/search?q={keyword}&tbm=lcl` | `tbm=lcl` 表示本地(地图)结果 |
| | | | 购物搜索 | `https://www.google.com/search?q={keyword}&tbm=shop` | `tbm=shop` 表示购物 | |
| | | | 图书搜索 | `https://www.google.com/search?q={keyword}&tbm=bks` | `tbm=bks` 表示图书 | |
| | | | 学术搜索 | `https://scholar.google.com/scholar?q={keyword}` | Google Scholar | |
| | | |
| | | ### 1.6 Google 深度搜索示例 |
| | | |
| | | ```javascript |
| | | // 1. 搜索GitHub上的Python机器学习项目 |
| | | web_fetch({"url": "https://www.google.com/search?q=site:github.com+python+machine+learning"}) |
| | | |
| | | // 2. 搜索2024年的PDF格式机器学习教程 |
| | | web_fetch({"url": "https://www.google.com/search?q=machine+learning+tutorial+filetype:pdf&tbs=cdr:1,cd_min:1/1/2024"}) |
| | | |
| | | // 3. 搜索标题包含"tutorial"的Python相关页面 |
| | | web_fetch({"url": "https://www.google.com/search?q=intitle:tutorial+python"}) |
| | | |
| | | // 4. 搜索过去一周的新闻 |
| | | web_fetch({"url": "https://www.google.com/search?q=AI+breakthrough&tbs=qdr:w&tbm=nws"}) |
| | | |
| | | // 5. 搜索中文内容(界面英文,结果中文) |
| | | web_fetch({"url": "https://www.google.com/search?q=%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD&lr=lang_zh-CN&hl=en"})  // "人工智能" 需URL编码
| | | |
| | | // 6. 搜索特定价格范围的笔记本电脑 |
| | | web_fetch({"url": "https://www.google.com/search?q=laptop+%241000..%242000+best+rating"}) |
| | | |
| | | // 7. 搜索排除Wikipedia的结果 |
| | | web_fetch({"url": "https://www.google.com/search?q=python+programming+-wikipedia"}) |
| | | |
| | | // 8. 搜索学术文献 |
| | | web_fetch({"url": "https://scholar.google.com/scholar?q=deep+learning+optimization"}) |
| | | |
| | | // 9. 搜索缓存页面(查看已删除内容) |
| | | web_fetch({"url": "https://webcache.googleusercontent.com/search?q=cache:example.com"}) |
| | | |
| | | // 10. 搜索相关网站 |
| | | web_fetch({"url": "https://www.google.com/search?q=related:stackoverflow.com"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🦆 DuckDuckGo 深度搜索 |
| | | |
| | | ### 2.1 DuckDuckGo 特色功能 |
| | | |
| | | | 功能 | 语法 | 示例 | |
| | | |------|------|------| |
| | | | **Bangs 快捷** | `!缩写` | `!g python` → Google搜索 | |
| | | | **密码生成** | `password` | `https://duckduckgo.com/?q=password+20` | |
| | | | **颜色转换** | `color` | `https://duckduckgo.com/?q=%23FF5733` |
| | | | **短链接** | `shorten` | `https://duckduckgo.com/?q=shorten+example.com` | |
| | | | **二维码生成** | `qr` | `https://duckduckgo.com/?q=qr+hello+world` | |
| | | | **生成UUID** | `uuid` | `https://duckduckgo.com/?q=uuid` | |
| | | | **Base64编解码** | `base64` | `https://duckduckgo.com/?q=base64+hello` | |
| | | |
| | | ### 2.2 DuckDuckGo Bangs 完整列表 |
| | | |
| | | #### 搜索引擎 |
| | | |
| | | | Bang | 跳转目标 | 示例 | |
| | | |------|---------|------| |
| | | | `!g` | Google | `!g python tutorial` | |
| | | | `!b` | Bing | `!b weather` | |
| | | | `!y` | Yahoo | `!y finance` | |
| | | | `!sp` | Startpage | `!sp privacy` | |
| | | | `!brave` | Brave Search | `!brave tech` | |
| | | |
| | | #### 编程开发 |
| | | |
| | | | Bang | 跳转目标 | 示例 | |
| | | |------|---------|------| |
| | | | `!gh` | GitHub | `!gh tensorflow` | |
| | | | `!so` | Stack Overflow | `!so javascript error` | |
| | | | `!npm` | npmjs.com | `!npm express` | |
| | | | `!pypi` | PyPI | `!pypi requests` | |
| | | | `!mdn` | MDN Web Docs | `!mdn fetch api` | |
| | | | `!docs` | DevDocs | `!docs python` | |
| | | | `!docker` | Docker Hub | `!docker nginx` | |
| | | |
| | | #### 知识百科 |
| | | |
| | | | Bang | 跳转目标 | 示例 | |
| | | |------|---------|------| |
| | | | `!w` | Wikipedia | `!w machine learning` | |
| | | | `!wen` | Wikipedia英文 | `!wen artificial intelligence` | |
| | | | `!wt` | Wiktionary | `!wt serendipity` | |
| | | | `!imdb` | IMDb | `!imdb inception` | |
| | | |
| | | #### 购物价格 |
| | | |
| | | | Bang | 跳转目标 | 示例 | |
| | | |------|---------|------| |
| | | | `!a` | Amazon | `!a wireless headphones` | |
| | | | `!e` | eBay | `!e vintage watch` | |
| | | | `!ali` | AliExpress | `!ali phone case` | |
| | | |
| | | #### 地图位置 |
| | | |
| | | | Bang | 跳转目标 | 示例 | |
| | | |------|---------|------| |
| | | | `!m` | Google Maps | `!m Beijing` | |
| | | | `!osm` | OpenStreetMap | `!osm Paris` |
| | | |
| | | ### 2.3 DuckDuckGo 搜索参数 |
| | | |
| | | | 参数 | 功能 | 示例 | |
| | | |------|------|------| |
| | | | `kp=1` | 严格安全搜索 | `https://duckduckgo.com/html/?q=test&kp=1` | |
| | | | `kp=-1` | 关闭安全搜索 | `https://duckduckgo.com/html/?q=test&kp=-1` | |
| | | | `kl=cn-zh` | 中国区域(中文) | `https://duckduckgo.com/html/?q=news&kl=cn-zh` |
| | | | `kl=us-en` | 美国英文 | `https://duckduckgo.com/html/?q=news&kl=us-en` | |
| | | | `ia=web` | 网页结果 | `https://duckduckgo.com/?q=test&ia=web` | |
| | | | `ia=images` | 图片结果 | `https://duckduckgo.com/?q=test&ia=images` | |
| | | | `ia=news` | 新闻结果 | `https://duckduckgo.com/?q=test&ia=news` | |
| | | | `ia=videos` | 视频结果 | `https://duckduckgo.com/?q=test&ia=videos` | |
| | | |
| | | ### 2.4 DuckDuckGo 深度搜索示例 |
| | | |
| | | ```javascript |
| | | // 1. 使用Bang跳转到Google搜索 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!g+machine+learning"}) |
| | | |
| | | // 2. 直接搜索GitHub上的项目 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!gh+react"}) |
| | | |
| | | // 3. 查找Stack Overflow答案 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!so+python+list+comprehension"}) |
| | | |
| | | // 4. 生成密码 |
| | | web_fetch({"url": "https://duckduckgo.com/?q=password+16"}) |
| | | |
| | | // 5. Base64编码 |
| | | web_fetch({"url": "https://duckduckgo.com/?q=base64+hello+world"}) |
| | | |
| | | // 6. 颜色代码转换 |
| | | web_fetch({"url": "https://duckduckgo.com/?q=%23FF5733"}) |
| | | |
| | | // 7. 搜索YouTube视频 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!yt+python+tutorial"}) |
| | | |
| | | // 8. 查看Wikipedia |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!w+artificial+intelligence"}) |
| | | |
| | | // 9. 亚马逊商品搜索 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!a+laptop"}) |
| | | |
| | | // 10. 生成二维码 |
| | | web_fetch({"url": "https://duckduckgo.com/?q=qr+https://github.com"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🔎 Brave Search 深度搜索 |
| | | |
| | | ### 3.1 Brave Search 特色功能 |
| | | |
| | | | 功能 | 参数 | 示例 | |
| | | |------|------|------| |
| | | | **独立索引** | 无依赖Google/Bing | 自有爬虫索引 | |
| | | | **Goggles** | 自定义搜索规则 | 创建个性化过滤器 | |
| | | | **Discussions** | 论坛讨论搜索 | 聚合Reddit等论坛 | |
| | | | **News** | 新闻聚合 | 独立新闻索引 | |
| | | |
| | | ### 3.2 Brave Search 参数 |
| | | |
| | | | 参数 | 功能 | 示例 | |
| | | |------|------|------| |
| | | | `tf=pw` | 本周 | `https://search.brave.com/search?q=news&tf=pw` | |
| | | | `tf=pm` | 本月 | `https://search.brave.com/search?q=tech&tf=pm` | |
| | | | `tf=py` | 本年 | `https://search.brave.com/search?q=AI&tf=py` | |
| | | | `safesearch=strict` | 严格安全 | `https://search.brave.com/search?q=test&safesearch=strict` | |
| | | | `source=web` | 网页搜索 | 默认 | |
| | | | `source=news` | 新闻搜索 | `https://search.brave.com/search?q=tech&source=news` | |
| | | | `source=images` | 图片搜索 | `https://search.brave.com/search?q=cat&source=images` | |
| | | | `source=videos` | 视频搜索 | `https://search.brave.com/search?q=music&source=videos` | |
| | | |
| | | ### 3.3 Brave Search Goggles(自定义过滤器) |
| | | |
| | | Goggles 允许创建自定义搜索规则: |
| | | |
| | | ``` |
| | | $discard // 丢弃所有 |
| | | $boost,site=stackoverflow.com // 提升Stack Overflow |
| | | $boost,site=github.com // 提升GitHub |
| | | $boost,site=docs.python.org // 提升Python文档 |
| | | ``` |
| | | |
| | | ### 3.4 Brave Search 深度搜索示例 |
| | | |
| | | ```javascript |
| | | // 1. 本周科技新闻 |
| | | web_fetch({"url": "https://search.brave.com/search?q=technology&tf=pw&source=news"}) |
| | | |
| | | // 2. 本月AI发展 |
| | | web_fetch({"url": "https://search.brave.com/search?q=artificial+intelligence&tf=pm"}) |
| | | |
| | | // 3. 图片搜索 |
| | | web_fetch({"url": "https://search.brave.com/search?q=machine+learning&source=images"}) |
| | | |
| | | // 4. 视频教程 |
| | | web_fetch({"url": "https://search.brave.com/search?q=python+tutorial&source=videos"}) |
| | | |
| | | // 5. 使用独立索引搜索 |
| | | web_fetch({"url": "https://search.brave.com/search?q=privacy+tools"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 📊 WolframAlpha 知识计算搜索 |
| | | |
| | | ### 4.1 WolframAlpha 数据类型 |
| | | |
| | | | 类型 | 查询示例 | URL | |
| | | |------|---------|-----| |
| | | | **数学计算** | `integrate x^2 dx` | `https://www.wolframalpha.com/input?i=integrate+x%5E2+dx` | |
| | | | **单位换算** | `100 miles to km` | `https://www.wolframalpha.com/input?i=100+miles+to+km` | |
| | | | **货币转换** | `100 USD to CNY` | `https://www.wolframalpha.com/input?i=100+USD+to+CNY` | |
| | | | **股票数据** | `AAPL stock` | `https://www.wolframalpha.com/input?i=AAPL+stock` | |
| | | | **天气查询** | `weather in Beijing` | `https://www.wolframalpha.com/input?i=weather+in+Beijing` | |
| | | | **人口数据** | `population of China` | `https://www.wolframalpha.com/input?i=population+of+China` | |
| | | | **化学元素** | `properties of gold` | `https://www.wolframalpha.com/input?i=properties+of+gold` | |
| | | | **营养成分** | `nutrition of apple` | `https://www.wolframalpha.com/input?i=nutrition+of+apple` | |
| | | | **日期计算** | `days between Jan 1 2020 and Dec 31 2024` | 日期间隔计算 | |
| | | | **时区转换** | `10am Beijing to New York` | 时区转换 | |
| | | | **IP地址** | `8.8.8.8` | IP信息查询 | |
| | | | **条形码** | `scan barcode 123456789` | 条码信息 | |
| | | | **飞机航班** | `flight AA123` | 航班信息 | |
| | | |
| | | ### 4.2 WolframAlpha 深度搜索示例 |
| | | |
| | | ```javascript |
| | | // 1. 计算积分 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=integrate+sin%28x%29+from+0+to+pi"}) |
| | | |
| | | // 2. 解方程 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=solve+x%5E2-5x%2B6%3D0"}) |
| | | |
| | | // 3. 货币实时汇率 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=100+USD+to+CNY"}) |
| | | |
| | | // 4. 股票实时数据 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=Apple+stock+price"}) |
| | | |
| | | // 5. 城市天气 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=weather+in+Shanghai+tomorrow"}) |
| | | |
| | | // 6. 国家统计信息 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=GDP+of+China+vs+USA"}) |
| | | |
| | | // 7. 化学计算 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=molar+mass+of+H2SO4"}) |
| | | |
| | | // 8. 物理常数 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=speed+of+light"}) |
| | | |
| | | // 9. 营养信息 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=calories+in+banana"}) |
| | | |
| | | // 10. 历史日期 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=events+on+July+20+1969"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🔧 Startpage 隐私搜索 |
| | | |
| | | ### 5.1 Startpage 特色功能 |
| | | |
| | | | 功能 | 说明 | URL | |
| | | |------|------|-----| |
| | | | **代理浏览** | 匿名访问搜索结果 | 点击"匿名查看" | |
| | | | **无追踪** | 不记录搜索历史 | 默认开启 | |
| | | | **EU服务器** | 受欧盟隐私法保护 | 数据在欧洲 | |
| | | | **代理图片** | 图片代理加载 | 隐藏IP | |
| | | |
| | | ### 5.2 Startpage 参数 |
| | | |
| | | | 参数 | 功能 | 示例 | |
| | | |------|------|------| |
| | | | `cat=web` | 网页搜索 | 默认 | |
| | | | `cat=images` | 图片搜索 | `...&cat=images` | |
| | | | `cat=video` | 视频搜索 | `...&cat=video` | |
| | | | `cat=news` | 新闻搜索 | `...&cat=news` | |
| | | | `language=english` | 英文结果 | `...&language=english` | |
| | | | `time=day` | 过去24小时 | `...&time=day` | |
| | | | `time=week` | 过去一周 | `...&time=week` | |
| | | | `time=month` | 过去一月 | `...&time=month` | |
| | | | `time=year` | 过去一年 | `...&time=year` | |
| | | | `nj=0` | 关闭 family filter | `...&nj=0` | |
| | | |
| | | ### 5.3 Startpage 深度搜索示例 |
| | | |
| | | ```javascript |
| | | // 1. 隐私搜索 |
| | | web_fetch({"url": "https://www.startpage.com/sp/search?query=privacy+tools"}) |
| | | |
| | | // 2. 图片隐私搜索 |
| | | web_fetch({"url": "https://www.startpage.com/sp/search?query=nature&cat=images"}) |
| | | |
| | | // 3. 本周新闻(隐私模式) |
| | | web_fetch({"url": "https://www.startpage.com/sp/search?query=tech+news&time=week&cat=news"}) |
| | | |
| | | // 4. 英文结果搜索 |
| | | web_fetch({"url": "https://www.startpage.com/sp/search?query=machine+learning&language=english"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🌍 综合搜索策略 |
| | | |
| | | ### 6.1 按搜索目标选择引擎 |
| | | |
| | | | 搜索目标 | 首选引擎 | 备选引擎 | 原因 | |
| | | |---------|---------|---------|------| |
| | | | **学术研究** | Google Scholar | Google, Brave | 学术资源索引 | |
| | | | **编程开发** | Google | GitHub(DuckDuckGo bang) | 技术文档全面 | |
| | | | **隐私敏感** | DuckDuckGo | Startpage, Brave | 不追踪用户 | |
| | | | **实时新闻** | Brave News | Google News | 独立新闻索引 | |
| | | | **知识计算** | WolframAlpha | Google | 结构化数据 | |
| | | | **中文内容** | Google HK | Bing | 中文优化好 | |
| | | | **欧洲视角** | Qwant | Startpage | 欧盟合规 | |
| | | | **环保支持** | Ecosia | DuckDuckGo | 搜索植树 | |
| | | | **无过滤** | Brave | Startpage | 无偏见结果 | |
| | | |
| | | ### 6.2 多引擎交叉验证 |
| | | |
| | | ```javascript |
| | | // 策略:同一关键词多引擎搜索,对比结果 |
| | | const keyword = "climate change 2024"; |
| | | |
| | | // 获取不同视角 |
| | | const searches = [ |
| | | { engine: "Google", url: `https://www.google.com/search?q=${keyword}&tbs=qdr:m` }, |
| | | { engine: "Brave", url: `https://search.brave.com/search?q=${keyword}&tf=pm` }, |
| | | { engine: "DuckDuckGo", url: `https://duckduckgo.com/html/?q=${keyword}` }, |
| | | { engine: "Ecosia", url: `https://www.ecosia.org/search?q=${keyword}` } |
| | | ]; |
| | | |
| | | // 分析不同引擎的结果差异 |
| | | ``` |
| | | |
| | | ### 6.3 时间敏感搜索策略 |
| | | |
| | | | 时效性要求 | 引擎选择 | 参数设置 | |
| | | |-----------|---------|---------| |
| | | | **实时(小时级)** | Google News, Brave News | `tbs=qdr:h`, `tf=pw` | |
| | | | **近期(天级)** | Google, Brave | `tbs=qdr:d`, `time=day` | |
| | | | **本周** | 所有引擎 | `tbs=qdr:w`, `tf=pw` | |
| | | | **本月** | 所有引擎 | `tbs=qdr:m`, `tf=pm` | |
| | | | **历史** | Google Scholar | 学术档案 | |
| | | |
| | | ### 6.4 专业领域深度搜索 |
| | | |
| | | #### 技术开发 |
| | | |
| | | ```javascript |
| | | // GitHub 项目搜索 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!gh+tensorflow+stars:%3E1000"}) |
| | | |
| | | // Stack Overflow 问题 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!so+python+memory+leak"}) |
| | | |
| | | // MDN 文档 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!mdn+javascript+async+await"}) |
| | | |
| | | // PyPI 包 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!pypi+requests"}) |
| | | |
| | | // npm 包 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!npm+express"}) |
| | | ``` |
| | | |
| | | #### 学术研究 |
| | | |
| | | ```javascript |
| | | // Google Scholar 论文 |
| | | web_fetch({"url": "https://scholar.google.com/scholar?q=deep+learning+2024"}) |
| | | |
| | | // 搜索PDF论文 |
| | | web_fetch({"url": "https://www.google.com/search?q=machine+learning+filetype:pdf+2024"}) |
| | | |
| | | // arXiv 论文 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=site:arxiv.org+quantum+computing"}) |
| | | ``` |
| | | |
| | | #### 金融投资 |
| | | |
| | | ```javascript |
| | | // 股票实时数据 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=AAPL+stock"}) |
| | | |
| | | // 汇率转换 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=EUR+to+USD"}) |
| | | |
| | | // 搜索财报PDF |
| | | web_fetch({"url": "https://www.google.com/search?q=Apple+Q4+2024+earnings+filetype:pdf"}) |
| | | ``` |
| | | |
| | | #### 新闻时事 |
| | | |
| | | ```javascript |
| | | // Google新闻 |
| | | web_fetch({"url": "https://www.google.com/search?q=breaking+news&tbm=nws&tbs=qdr:h"}) |
| | | |
| | | // Brave新闻 |
| | | web_fetch({"url": "https://search.brave.com/search?q=world+news&source=news"}) |
| | | |
| | | // DuckDuckGo新闻 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=tech+news&ia=news"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🛠️ 高级搜索技巧汇总 |
| | | |
| | | ### URL编码工具函数 |
| | | |
| | | ```javascript |
| | | // URL编码关键词 |
| | | function encodeKeyword(keyword) { |
| | | return encodeURIComponent(keyword); |
| | | } |
| | | |
| | | // 示例 |
| | | const keyword = "machine learning"; |
| | | const encoded = encodeKeyword(keyword); // "machine%20learning" |
| | | ``` |
| | | |
| | | ### 批量搜索模板 |
| | | |
| | | ```javascript |
| | | // 多引擎批量搜索函数 |
| | | function generateSearchUrls(keyword) { |
| | | const encoded = encodeURIComponent(keyword); |
| | | return { |
| | | google: `https://www.google.com/search?q=${encoded}`, |
| | | google_hk: `https://www.google.com.hk/search?q=${encoded}`, |
| | | duckduckgo: `https://duckduckgo.com/html/?q=${encoded}`, |
| | | brave: `https://search.brave.com/search?q=${encoded}`, |
| | | startpage: `https://www.startpage.com/sp/search?query=${encoded}`, |
| | | bing_intl: `https://cn.bing.com/search?q=${encoded}&ensearch=1`, |
| | | yahoo: `https://search.yahoo.com/search?p=${encoded}`, |
| | | ecosia: `https://www.ecosia.org/search?q=${encoded}`, |
| | | qwant: `https://www.qwant.com/?q=${encoded}` |
| | | }; |
| | | } |
| | | |
| | | // 使用示例 |
| | | const urls = generateSearchUrls("artificial intelligence"); |
| | | ``` |
| | | |
| | | ### 时间筛选快捷函数 |
| | | |
| | | ```javascript |
| | | // Google时间筛选URL生成 |
| | | function googleTimeSearch(keyword, period) { |
| | | const periods = { |
| | | hour: 'qdr:h', |
| | | day: 'qdr:d', |
| | | week: 'qdr:w', |
| | | month: 'qdr:m', |
| | | year: 'qdr:y' |
| | | }; |
| | | return `https://www.google.com/search?q=${encodeURIComponent(keyword)}&tbs=${periods[period]}`; |
| | | } |
| | | |
| | | // 使用示例 |
| | | const recentNews = googleTimeSearch("AI breakthrough", "week"); |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 📝 完整搜索示例集 |
| | | |
| | | ```javascript |
| | | // ==================== 技术开发 ==================== |
| | | |
| | | // 1. 搜索GitHub上高Star的Python项目 |
| | | web_fetch({"url": "https://www.google.com/search?q=site:github.com+python+stars:%3E1000"}) |
| | | |
| | | // 2. Stack Overflow最佳答案 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!so+best+way+to+learn+python"}) |
| | | |
| | | // 3. MDN文档查询 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!mdn+promises"}) |
| | | |
| | | // 4. 搜索npm包 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!npm+axios"}) |
| | | |
| | | // ==================== 学术研究 ==================== |
| | | |
| | | // 5. Google Scholar论文 |
| | | web_fetch({"url": "https://scholar.google.com/scholar?q=transformer+architecture"}) |
| | | |
| | | // 6. 搜索PDF论文 |
| | | web_fetch({"url": "https://www.google.com/search?q=attention+is+all+you+need+filetype:pdf"}) |
| | | |
| | | // 7. arXiv最新论文 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=site:arxiv.org+abs+quantum"}) |
| | | |
| | | // ==================== 新闻时事 ==================== |
| | | |
| | | // 8. Google最新新闻(过去1小时) |
| | | web_fetch({"url": "https://www.google.com/search?q=breaking+news&tbs=qdr:h&tbm=nws"}) |
| | | |
| | | // 9. Brave本周科技新闻 |
| | | web_fetch({"url": "https://search.brave.com/search?q=technology&tf=pw&source=news"}) |
| | | |
| | | // 10. DuckDuckGo新闻 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=world+news&ia=news"}) |
| | | |
| | | // ==================== 金融投资 ==================== |
| | | |
| | | // 11. 股票实时数据 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=Tesla+stock"}) |
| | | |
| | | // 12. 货币汇率 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=1+BTC+to+USD"}) |
| | | |
| | | // 13. 公司财报PDF |
| | | web_fetch({"url": "https://www.google.com/search?q=Microsoft+annual+report+2024+filetype:pdf"}) |
| | | |
| | | // ==================== 知识计算 ==================== |
| | | |
| | | // 14. 数学计算 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=derivative+of+x%5E3+sin%28x%29"}) |
| | | |
| | | // 15. 单位换算 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=convert+100+miles+to+kilometers"}) |
| | | |
| | | // 16. 营养信息 |
| | | web_fetch({"url": "https://www.wolframalpha.com/input?i=protein+in+chicken+breast"}) |
| | | |
| | | // ==================== 隐私保护搜索 ==================== |
| | | |
| | | // 17. DuckDuckGo隐私搜索 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=privacy+tools"}) |
| | | |
| | | // 18. Startpage匿名搜索 |
| | | web_fetch({"url": "https://www.startpage.com/sp/search?query=secure+messaging"}) |
| | | |
| | | // 19. Brave无追踪搜索 |
| | | web_fetch({"url": "https://search.brave.com/search?q=encryption+software"}) |
| | | |
| | | // ==================== 高级组合搜索 ==================== |
| | | |
| | | // 20. Google多条件精确搜索 |
| | | web_fetch({"url": "https://www.google.com/search?q=%22machine+learning%22+site:github.com+filetype:pdf+2024"}) |
| | | |
| | | // 21. 排除特定站点的搜索 |
| | | web_fetch({"url": "https://www.google.com/search?q=python+tutorial+-wikipedia+-w3schools"}) |
| | | |
| | | // 22. 价格范围搜索 |
| | | web_fetch({"url": "https://www.google.com/search?q=laptop+%24800..%241200+best+review"}) |
| | | |
| | | // 23. 使用Bangs快速跳转 |
| | | web_fetch({"url": "https://duckduckgo.com/html/?q=!g+site:medium.com+python"}) |
| | | |
| | | // 24. 图片搜索(Google) |
| | | web_fetch({"url": "https://www.google.com/search?q=beautiful+landscape&tbm=isch"}) |
| | | |
| | | // 25. 学术引用搜索 |
| | | web_fetch({"url": "https://scholar.google.com/scholar?q=author:%22Geoffrey+Hinton%22"}) |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## 🔐 隐私保护最佳实践 |
| | | |
| | | ### 搜索引擎隐私级别 |
| | | |
| | | | 引擎 | 追踪级别 | 数据保留 | 加密 | 推荐场景 | |
| | | |------|---------|---------|------|---------| |
| | | | **DuckDuckGo** | 无追踪 | 无保留 | 是 | 日常隐私搜索 | |
| | | | **Startpage** | 无追踪 | 无保留 | 是 | 需要Google结果但保护隐私 | |
| | | | **Brave** | 无追踪 | 无保留 | 是 | 独立索引,无偏见 | |
| | | | **Qwant** | 无追踪 | 无保留 | 是 | 欧盟合规要求 | |
| | | | **Google** | 高度追踪 | 长期保留 | 是 | 需要个性化结果 | |
| | | | **Bing** | 中度追踪 | 长期保留 | 是 | 微软服务集成 | |
| | | |
| | | ### 隐私搜索建议 |
| | | |
| | | 1. **日常使用**: DuckDuckGo 或 Brave |
| | | 2. **需要Google结果但保护隐私**: Startpage |
| | | 3. **学术研究**: Google Scholar(学术用途追踪较少) |
| | | 4. **敏感查询**: 使用Tor浏览器 + DuckDuckGo onion服务 |
| | | 5. **跨设备同步**: 避免登录搜索引擎账户 |
| | | |
| | | --- |
| | | |
| | | ## 📚 参考资料 |
| | | |
| | | - [Google搜索操作符完整列表](https://support.google.com/websearch/answer/2466433)
| | | - [DuckDuckGo Bangs完整列表](https://duckduckgo.com/bang) |
| | | - [Brave Search文档](https://search.brave.com/help)
| | | - [WolframAlpha示例](https://www.wolframalpha.com/examples/) |
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "registry": "https://clawhub.ai", |
| | | "slug": "ontology", |
| | | "installedVersion": "1.0.4", |
| | | "installedAt": 1773305411897 |
| | | } |
| New file |
| | |
| | | --- |
| | | name: ontology |
| | | description: Typed knowledge graph for structured agent memory and composable skills. Use when creating/querying entities (Person, Project, Task, Event, Document), linking related objects, enforcing constraints, planning multi-step actions as graph transformations, or when skills need to share state. Trigger on "remember", "what do I know about", "link X to Y", "show dependencies", entity CRUD, or cross-skill data access. |
| | | --- |
| | | |
| | | # Ontology |
| | | |
| | | A typed vocabulary + constraint system for representing knowledge as a verifiable graph. |
| | | |
| | | ## Core Concept |
| | | |
| | | Everything is an **entity** with a **type**, **properties**, and **relations** to other entities. Every mutation is validated against type constraints before committing. |
| | | |
| | | ``` |
| | | Entity: { id, type, properties, relations, created, updated } |
| | | Relation: { from_id, relation_type, to_id, properties } |
| | | ``` |
| | | |
| | | ## When to Use |
| | | |
| | | | Trigger | Action | |
| | | |---------|--------| |
| | | | "Remember that..." | Create/update entity | |
| | | | "What do I know about X?" | Query graph | |
| | | | "Link X to Y" | Create relation | |
| | | | "Show all tasks for project Z" | Graph traversal | |
| | | | "What depends on X?" | Dependency query | |
| | | | Planning multi-step work | Model as graph transformations | |
| | | | Skill needs shared state | Read/write ontology objects | |
| | | |
| | | ## Core Types |
| | | |
| | | ```yaml |
| | | # Agents & People |
| | | Person: { name, email?, phone?, notes? } |
| | | Organization: { name, type?, members[] } |
| | | |
| | | # Work |
| | | Project: { name, status, goals[], owner? } |
| | | Task: { title, status, due?, priority?, assignee?, blockers[] } |
| | | Goal: { description, target_date?, metrics[] } |
| | | |
| | | # Time & Place |
| | | Event: { title, start, end?, location?, attendees[], recurrence? } |
| | | Location: { name, address?, coordinates? } |
| | | |
| | | # Information |
| | | Document: { title, path?, url?, summary? } |
| | | Message: { content, sender, recipients[], thread? } |
| | | Thread: { subject, participants[], messages[] } |
| | | Note: { content, tags[], refs[] } |
| | | |
| | | # Resources |
| | | Account: { service, username, credential_ref? } |
| | | Device: { name, type, identifiers[] } |
| | | Credential: { service, secret_ref } # Never store secrets directly |
| | | |
| | | # Meta |
| | | Action: { type, target, timestamp, outcome? } |
| | | Policy: { scope, rule, enforcement } |
| | | ``` |
| | | |
| | | ## Storage |
| | | |
| | | Default: `memory/ontology/graph.jsonl` |
| | | |
| | | ```jsonl |
| | | {"op":"create","entity":{"id":"p_001","type":"Person","properties":{"name":"Alice"}}} |
| | | {"op":"create","entity":{"id":"proj_001","type":"Project","properties":{"name":"Website Redesign","status":"active"}}} |
| | | {"op":"relate","from":"proj_001","rel":"has_owner","to":"p_001"} |
| | | ``` |
| | | |
| | | Query via scripts or direct file ops. For complex graphs, migrate to SQLite. |
| | | |
| | | ### Append-Only Rule |
| | | |
| | | When working with existing ontology data or schema, **append/merge** changes instead of overwriting files. This preserves history and avoids clobbering prior definitions. |
| | | |
| | | ## Workflows |
| | | |
| | | ### Create Entity |
| | | |
| | | ```bash |
| | | python3 scripts/ontology.py create --type Person --props '{"name":"Alice","email":"alice@example.com"}' |
| | | ``` |
| | | |
| | | ### Query |
| | | |
| | | ```bash |
| | | python3 scripts/ontology.py query --type Task --where '{"status":"open"}' |
| | | python3 scripts/ontology.py get --id task_001 |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_task |
| | | ``` |
| | | |
| | | ### Link Entities |
| | | |
| | | ```bash |
| | | python3 scripts/ontology.py relate --from proj_001 --rel has_task --to task_001 |
| | | ``` |
| | | |
| | | ### Validate |
| | | |
| | | ```bash |
| | | python3 scripts/ontology.py validate # Check all constraints |
| | | ``` |
| | | |
| | | ## Constraints |
| | | |
| | | Define in `memory/ontology/schema.yaml`: |
| | | |
| | | ```yaml |
| | | types: |
| | | Task: |
| | | required: [title, status] |
| | | status_enum: [open, in_progress, blocked, done] |
| | | |
| | | Event: |
| | | required: [title, start] |
| | | validate: "end >= start if end exists" |
| | | |
| | | Credential: |
| | | required: [service, secret_ref] |
| | | forbidden_properties: [password, secret, token] # Force indirection |
| | | |
| | | relations: |
| | | has_owner: |
| | | from_types: [Project, Task] |
| | | to_types: [Person] |
| | | cardinality: many_to_one |
| | | |
| | | blocks: |
| | | from_types: [Task] |
| | | to_types: [Task] |
| | | acyclic: true # No circular dependencies |
| | | ``` |
| | | |
| | | ## Skill Contract |
| | | |
| | | Skills that use ontology should declare: |
| | | |
| | | ```yaml |
| | | # In SKILL.md frontmatter or header |
| | | ontology: |
| | | reads: [Task, Project, Person] |
| | | writes: [Task, Action] |
| | | preconditions: |
| | | - "Task.assignee must exist" |
| | | postconditions: |
| | | - "Created Task has status=open" |
| | | ``` |
| | | |
| | | ## Planning as Graph Transformation |
| | | |
| | | Model multi-step plans as a sequence of graph operations: |
| | | |
| | | ``` |
| | | Plan: "Schedule team meeting and create follow-up tasks" |
| | | |
| | | 1. CREATE Event { title: "Team Sync", attendees: [p_001, p_002] } |
| | | 2. RELATE Event -> part_of -> proj_001 |
| | | 3. CREATE Task { title: "Prepare agenda", assignee: p_001 } |
| | | 4. RELATE Task -> follows_up -> event_001 |
| | | 5. CREATE Task { title: "Send summary", assignee: p_001, blockers: [task_001] } |
| | | ``` |
| | | |
| | | Each step is validated before execution. Rollback on constraint violation. |
| | | |
| | | ## Integration Patterns |
| | | |
| | | ### With Causal Inference |
| | | |
| | | Log ontology mutations as causal actions: |
| | | |
| | | ```python |
| | | # When creating/updating entities, also log to causal action log |
| | | action = { |
| | | "action": "create_entity", |
| | | "domain": "ontology", |
| | | "context": {"type": "Task", "project": "proj_001"}, |
| | | "outcome": "created" |
| | | } |
| | | ``` |
| | | |
| | | ### Cross-Skill Communication |
| | | |
| | | ```python |
| | | # Email skill creates commitment |
| | | commitment = ontology.create("Commitment", { |
| | | "source_message": msg_id, |
| | | "description": "Send report by Friday", |
| | | "due": "2026-01-31" |
| | | }) |
| | | |
| | | # Task skill picks it up |
| | | commitments = ontology.query("Commitment", {"status": "pending"}) |
| | | for c in commitments: |
| | | ontology.create("Task", { |
| | | "title": c.description, |
| | | "due": c.due, |
| | | "source": c.id |
| | | }) |
| | | ``` |
| | | |
| | | ## Quick Start |
| | | |
| | | ```bash |
| | | # Initialize ontology storage |
| | | mkdir -p memory/ontology |
| | | touch memory/ontology/graph.jsonl |
| | | |
| | | # Create schema (optional but recommended) |
| | | python3 scripts/ontology.py schema-append --data '{ |
| | | "types": { |
| | | "Task": { "required": ["title", "status"] }, |
| | | "Project": { "required": ["name"] }, |
| | | "Person": { "required": ["name"] } |
| | | } |
| | | }' |
| | | |
| | | # Start using |
| | | python3 scripts/ontology.py create --type Person --props '{"name":"Alice"}' |
| | | python3 scripts/ontology.py list --type Person |
| | | ``` |
| | | |
| | | ## References |
| | | |
| | | - `references/schema.md` — Full type definitions and constraint patterns |
| | | - `references/queries.md` — Query language and traversal examples |
| | | |
| | | ## Instruction Scope |
| | | |
| | | Runtime instructions operate on local files (`memory/ontology/graph.jsonl` and `memory/ontology/schema.yaml`) and provide CLI usage for create/query/relate/validate; this is within scope. The skill reads/writes workspace files and will create the `memory/ontology` directory when used. Validation includes property/enum/forbidden checks, relation type/cardinality validation, acyclicity for relations marked `acyclic: true`, and Event `end >= start` checks; other higher-level constraints may still be documentation-only unless implemented in code. |
| New file |
| | |
| | | { |
| | | "ownerId": "kn72dv4fm7ss7swbq47nnpad9x7zy2jh", |
| | | "slug": "ontology", |
| | | "version": "1.0.4", |
| | | "publishedAt": 1773249559725 |
| | | } |
| New file |
| | |
| | | # Query Reference |
| | | |
| | | Query patterns and graph traversal examples. |
| | | |
| | | ## Basic Queries |
| | | |
| | | ### Get by ID |
| | | |
| | | ```bash |
| | | python3 scripts/ontology.py get --id task_001 |
| | | ``` |
| | | |
| | | ### List by Type |
| | | |
| | | ```bash |
| | | # All tasks |
| | | python3 scripts/ontology.py list --type Task |
| | | |
| | | # All people |
| | | python3 scripts/ontology.py list --type Person |
| | | ``` |
| | | |
| | | ### Filter by Properties |
| | | |
| | | ```bash |
| | | # Open tasks |
| | | python3 scripts/ontology.py query --type Task --where '{"status":"open"}' |
| | | |
| | | # High priority tasks |
| | | python3 scripts/ontology.py query --type Task --where '{"priority":"high"}' |
| | | |
| | | # Tasks assigned to specific person (by property) |
| | | python3 scripts/ontology.py query --type Task --where '{"assignee":"p_001"}' |
| | | ``` |
| | | |
| | | ## Relation Queries |
| | | |
| | | ### Get Related Entities |
| | | |
| | | ```bash |
| | | # Tasks belonging to a project (outgoing) |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_task |
| | | |
| | | # What projects does this task belong to (part_of runs from the task, so outgoing) |
| | | python3 scripts/ontology.py related --id task_001 --rel part_of --dir outgoing |
| | | |
| | | # All relations for an entity (both directions) |
| | | python3 scripts/ontology.py related --id p_001 --dir both |
| | | ``` |
| | | |
| | | ### Common Patterns |
| | | |
| | | ```bash |
| | | # Who owns this project? |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_owner |
| | | |
| | | # What events is this person attending? |
| | | python3 scripts/ontology.py related --id p_001 --rel attendee_of --dir outgoing |
| | | |
| | | # What's blocking this task? |
| | | python3 scripts/ontology.py related --id task_001 --rel blocks --dir incoming |
| | | ``` |
| | | |
| | | ## Programmatic Queries |
| | | |
| | | ### Python API |
| | | |
| | | ```python |
| | | from scripts.ontology import load_graph, query_entities, get_related |
| | | |
| | | # Load the graph |
| | | entities, relations = load_graph("memory/ontology/graph.jsonl") |
| | | |
| | | # Query entities |
| | | open_tasks = query_entities("Task", {"status": "open"}, "memory/ontology/graph.jsonl") |
| | | |
| | | # Get related |
| | | project_tasks = get_related("proj_001", "has_task", "memory/ontology/graph.jsonl") |
| | | ``` |
| | | |
| | | ### Complex Queries |
| | | |
| | | ```python |
| | | # Find all tasks blocked by incomplete dependencies |
| | | def find_blocked_tasks(graph_path): |
| | | entities, relations = load_graph(graph_path) |
| | | blocked = [] |
| | | |
| | | for entity in entities.values(): |
| | | if entity["type"] != "Task": |
| | | continue |
| | | if entity["properties"].get("status") == "blocked": |
| | | # Find what's blocking it |
| | | blockers = get_related(entity["id"], "blocks", graph_path, "incoming") |
| | | incomplete_blockers = [ |
| | | b for b in blockers |
| | | if b["entity"]["properties"].get("status") != "done" |
| | | ] |
| | | if incomplete_blockers: |
| | | blocked.append({ |
| | | "task": entity, |
| | | "blockers": incomplete_blockers |
| | | }) |
| | | |
| | | return blocked |
| | | ``` |
| | | |
| | | ### Path Queries |
| | | |
| | | ```python |
| | | # Find path between two entities |
| | | def find_path(from_id, to_id, graph_path, max_depth=5): |
| | | entities, relations = load_graph(graph_path) |
| | | |
| | | visited = set() |
| | | queue = [(from_id, [])] |
| | | |
| | | while queue: |
| | | current, path = queue.pop(0) |
| | | |
| | | if current == to_id: |
| | | return path |
| | | |
| | | if current in visited or len(path) >= max_depth: |
| | | continue |
| | | |
| | | visited.add(current) |
| | | |
| | | for rel in relations: |
| | | if rel["from"] == current and rel["to"] not in visited: |
| | | queue.append((rel["to"], path + [rel])) |
| | | if rel["to"] == current and rel["from"] not in visited: |
| | | queue.append((rel["from"], path + [{**rel, "direction": "incoming"}])) |
| | | |
| | | return None # No path found |
| | | ``` |
| | | |
| | | ## Query Patterns by Use Case |
| | | |
| | | ### Task Management |
| | | |
| | | ```bash |
| | | # All my open tasks |
| | | python3 scripts/ontology.py query --type Task --where '{"status":"open","assignee":"p_me"}' |
| | | |
| | | # Overdue tasks (requires custom script for date comparison) |
| | | # See references/schema.md for date handling |
| | | |
| | | # Tasks with no blockers |
| | | python3 scripts/ontology.py query --type Task --where '{"status":"open"}' |
| | | # Then filter in code for those with no incoming "blocks" relations |
| | | ``` |
| | | |
| | | ### Project Overview |
| | | |
| | | ```bash |
| | | # All tasks in project |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_task |
| | | |
| | | # Project team members |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_member |
| | | |
| | | # Project goals |
| | | python3 scripts/ontology.py related --id proj_001 --rel has_goal |
| | | ``` |
| | | |
| | | ### People & Contacts |
| | | |
| | | ```bash |
| | | # All people |
| | | python3 scripts/ontology.py list --type Person |
| | | |
| | | # People in an organization |
| | | python3 scripts/ontology.py related --id org_001 --rel has_member |
| | | |
| | | # What's assigned to this person |
| | | python3 scripts/ontology.py related --id p_001 --rel assigned_to --dir incoming |
| | | ``` |
| | | |
| | | ### Events & Calendar |
| | | |
| | | ```bash |
| | | # All events |
| | | python3 scripts/ontology.py list --type Event |
| | | |
| | | # Events at a location |
| | | python3 scripts/ontology.py related --id loc_001 --rel located_at --dir incoming |
| | | |
| | | # Event attendees |
| | | python3 scripts/ontology.py related --id event_001 --rel attendee_of --dir incoming |
| | | ``` |
| | | |
| | | ## Aggregations |
| | | |
| | | For complex aggregations, use Python: |
| | | |
| | | ```python |
| | | from collections import Counter |
| | | |
| | | def task_status_summary(project_id, graph_path): |
| | | """Count tasks by status for a project.""" |
| | | tasks = get_related(project_id, "has_task", graph_path) |
| | | statuses = Counter(t["entity"]["properties"].get("status", "unknown") for t in tasks) |
| | | return dict(statuses) |
| | | |
| | | def workload_by_person(graph_path): |
| | | """Count open tasks per person.""" |
| | | open_tasks = query_entities("Task", {"status": "open"}, graph_path) |
| | | workload = Counter(t["properties"].get("assignee") for t in open_tasks) |
| | | return dict(workload) |
| | | ``` |
| New file |
| | |
| | | # Ontology Schema Reference |
| | | |
| | | Full type definitions and constraint patterns for the ontology graph. |
| | | |
| | | ## Core Types |
| | | |
| | | ### Agents & People |
| | | |
| | | ```yaml |
| | | Person: |
| | | required: [name] |
| | | properties: |
| | | name: string |
| | | email: string? |
| | | phone: string? |
| | | organization: ref(Organization)? |
| | | notes: string? |
| | | tags: string[]? |
| | | |
| | | Organization: |
| | | required: [name] |
| | | properties: |
| | | name: string |
| | | type: enum(company, team, community, government, other)? |
| | | website: url? |
| | | members: ref(Person)[]? |
| | | ``` |
| | | |
| | | ### Work Management |
| | | |
| | | ```yaml |
| | | Project: |
| | | required: [name] |
| | | properties: |
| | | name: string |
| | | description: string? |
| | | status: enum(planning, active, paused, completed, archived) |
| | | owner: ref(Person)? |
| | | team: ref(Person)[]? |
| | | goals: ref(Goal)[]? |
| | | start_date: date? |
| | | end_date: date? |
| | | tags: string[]? |
| | | |
| | | Task: |
| | | required: [title, status] |
| | | properties: |
| | | title: string |
| | | description: string? |
| | | status: enum(open, in_progress, blocked, done, cancelled) |
| | | priority: enum(low, medium, high, urgent)? |
| | | assignee: ref(Person)? |
| | | project: ref(Project)? |
| | | due: datetime? |
| | | estimate_hours: number? |
| | | blockers: ref(Task)[]? |
| | | tags: string[]? |
| | | |
| | | Goal: |
| | | required: [description] |
| | | properties: |
| | | description: string |
| | | target_date: date? |
| | | status: enum(active, achieved, abandoned)? |
| | | metrics: object[]? |
| | | key_results: string[]? |
| | | ``` |
| | | |
| | | ### Time & Location |
| | | |
| | | ```yaml |
| | | Event: |
| | | required: [title, start] |
| | | properties: |
| | | title: string |
| | | description: string? |
| | | start: datetime |
| | | end: datetime? |
| | | location: ref(Location)? |
| | | attendees: ref(Person)[]? |
| | | recurrence: object? # iCal RRULE format |
| | | status: enum(confirmed, tentative, cancelled)? |
| | | reminders: object[]? |
| | | |
| | | Location: |
| | | required: [name] |
| | | properties: |
| | | name: string |
| | | address: string? |
| | | city: string? |
| | | country: string? |
| | | coordinates: object? # {lat, lng} |
| | | timezone: string? |
| | | ``` |
| | | |
| | | ### Information |
| | | |
| | | ```yaml |
| | | Document: |
| | | required: [title] |
| | | properties: |
| | | title: string |
| | | path: string? # Local file path |
| | | url: url? # Remote URL |
| | | mime_type: string? |
| | | summary: string? |
| | | content_hash: string? |
| | | tags: string[]? |
| | | |
| | | Message: |
| | | required: [content, sender] |
| | | properties: |
| | | content: string |
| | | sender: ref(Person) |
| | | recipients: ref(Person)[] |
| | | thread: ref(Thread)? |
| | | timestamp: datetime |
| | | platform: string? # email, slack, whatsapp, etc. |
| | | external_id: string? |
| | | |
| | | Thread: |
| | | required: [subject] |
| | | properties: |
| | | subject: string |
| | | participants: ref(Person)[] |
| | | messages: ref(Message)[] |
| | | status: enum(active, archived)? |
| | | last_activity: datetime? |
| | | |
| | | Note: |
| | | required: [content] |
| | | properties: |
| | | content: string |
| | | title: string? |
| | | tags: string[]? |
| | | refs: ref(Entity)[]? # Links to any entity |
| | | created: datetime |
| | | ``` |
| | | |
| | | ### Resources |
| | | |
| | | ```yaml |
| | | Account: |
| | | required: [service, username] |
| | | properties: |
| | | service: string # github, gmail, aws, etc. |
| | | username: string |
| | | url: url? |
| | | credential_ref: ref(Credential)? |
| | | |
| | | Device: |
| | | required: [name, type] |
| | | properties: |
| | | name: string |
| | | type: enum(computer, phone, tablet, server, iot, other) |
| | | os: string? |
| | | identifiers: object? # {mac, serial, etc.} |
| | | owner: ref(Person)? |
| | | |
| | | Credential: |
| | | required: [service, secret_ref] |
| | | forbidden_properties: [password, secret, token, key, api_key] |
| | | properties: |
| | | service: string |
| | | secret_ref: string # Reference to secret store (e.g., "keychain:github-token") |
| | | expires: datetime? |
| | | scope: string[]? |
| | | ``` |
| | | |
| | | ### Meta |
| | | |
| | | ```yaml |
| | | Action: |
| | | required: [type, target, timestamp] |
| | | properties: |
| | | type: string # create, update, delete, send, etc. |
| | | target: ref(Entity) |
| | | timestamp: datetime |
| | | actor: ref(Person|Agent)? |
| | | outcome: enum(success, failure, pending)? |
| | | details: object? |
| | | |
| | | Policy: |
| | | required: [scope, rule] |
| | | properties: |
| | | scope: string # What this policy applies to |
| | | rule: string # The constraint in natural language or code |
| | | enforcement: enum(block, warn, log) |
| | | enabled: boolean |
| | | ``` |
| | | |
| | | ## Relation Types |
| | | |
| | | ### Ownership & Assignment |
| | | |
| | | ```yaml |
| | | owns: |
| | | from_types: [Person, Organization] |
| | | to_types: [Account, Device, Document, Project] |
| | | cardinality: one_to_many |
| | | |
| | | has_owner: |
| | | from_types: [Project, Task, Document] |
| | | to_types: [Person] |
| | | cardinality: many_to_one |
| | | |
| | | assigned_to: |
| | | from_types: [Task] |
| | | to_types: [Person] |
| | | cardinality: many_to_one |
| | | ``` |
| | | |
| | | ### Hierarchy & Containment |
| | | |
| | | ```yaml |
| | | has_task: |
| | | from_types: [Project] |
| | | to_types: [Task] |
| | | cardinality: one_to_many |
| | | |
| | | has_goal: |
| | | from_types: [Project] |
| | | to_types: [Goal] |
| | | cardinality: one_to_many |
| | | |
| | | member_of: |
| | | from_types: [Person] |
| | | to_types: [Organization] |
| | | cardinality: many_to_many |
| | | |
| | | part_of: |
| | | from_types: [Task, Document, Event] |
| | | to_types: [Project] |
| | | cardinality: many_to_one |
| | | ``` |
| | | |
| | | ### Dependencies |
| | | |
| | | ```yaml |
| | | blocks: |
| | | from_types: [Task] |
| | | to_types: [Task] |
| | | acyclic: true # Prevents circular dependencies |
| | | cardinality: many_to_many |
| | | |
| | | depends_on: |
| | | from_types: [Task, Project] |
| | | to_types: [Task, Project, Event] |
| | | acyclic: true |
| | | cardinality: many_to_many |
| | | |
| | | requires: |
| | | from_types: [Action] |
| | | to_types: [Credential, Policy] |
| | | cardinality: many_to_many |
| | | ``` |
| | | |
| | | ### References |
| | | |
| | | ```yaml |
| | | mentions: |
| | | from_types: [Document, Message, Note] |
| | | to_types: [Person, Project, Task, Event] |
| | | cardinality: many_to_many |
| | | |
| | | references: |
| | | from_types: [Document, Note] |
| | | to_types: [Document, Note] |
| | | cardinality: many_to_many |
| | | |
| | | follows_up: |
| | | from_types: [Task, Event] |
| | | to_types: [Event, Message] |
| | | cardinality: many_to_one |
| | | ``` |
| | | |
| | | ### Events |
| | | |
| | | ```yaml |
| | | attendee_of: |
| | | from_types: [Person] |
| | | to_types: [Event] |
| | | cardinality: many_to_many |
| | | properties: |
| | | status: enum(accepted, declined, tentative, pending) |
| | | |
| | | located_at: |
| | | from_types: [Event, Person, Device] |
| | | to_types: [Location] |
| | | cardinality: many_to_one |
| | | ``` |
| | | |
| | | ## Global Constraints |
| | | |
| | | ```yaml |
| | | constraints: |
| | | # Credentials must never store secrets directly |
| | | - type: Credential |
| | | rule: "forbidden_properties: [password, secret, token]" |
| | | message: "Credentials must use secret_ref to reference external secret storage" |
| | | |
| | | # Tasks must have valid status transitions |
| | | - type: Task |
| | | rule: "status transitions: open -> in_progress -> (done|blocked) -> done" |
| | | enforcement: warn |
| | | |
| | | # Events must have end >= start |
| | | - type: Event |
| | | rule: "if end exists: end >= start" |
| | | message: "Event end time must be after start time" |
| | | |
| | | # No orphan tasks (should belong to a project or have explicit owner) |
| | | - type: Task |
| | | rule: "has_relation(part_of, Project) OR has_property(owner)" |
| | | enforcement: warn |
| | | message: "Task should belong to a project or have an explicit owner" |
| | | |
| | | # Circular dependency prevention |
| | | - relation: blocks |
| | | rule: "acyclic" |
| | | message: "Circular task dependencies are not allowed" |
| | | ``` |
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | Ontology graph operations: create, query, relate, validate. |
| | | |
| | | Usage: |
| | | python ontology.py create --type Person --props '{"name":"Alice"}' |
| | | python ontology.py get --id p_001 |
| | | python ontology.py query --type Task --where '{"status":"open"}' |
| | | python ontology.py relate --from proj_001 --rel has_task --to task_001 |
| | | python ontology.py related --id proj_001 --rel has_task |
| | | python ontology.py list --type Person |
| | | python ontology.py delete --id p_001 |
| | | python ontology.py validate |
| | | """ |
| | | |
| | | import argparse |
| | | import json |
| | | import uuid |
| | | from datetime import datetime, timezone |
| | | from pathlib import Path |
| | | |
| | | DEFAULT_GRAPH_PATH = "memory/ontology/graph.jsonl" |
| | | DEFAULT_SCHEMA_PATH = "memory/ontology/schema.yaml" |
| | | |
| | | |
| | | def resolve_safe_path( |
| | | user_path: str, |
| | | *, |
| | | root: Path | None = None, |
| | | must_exist: bool = False, |
| | | label: str = "path", |
| | | ) -> Path: |
| | | """Resolve user path within root and reject traversal outside it.""" |
| | | if not user_path or not user_path.strip(): |
| | | raise SystemExit(f"Invalid {label}: empty path") |
| | | |
| | | safe_root = (root or Path.cwd()).resolve() |
| | | candidate = Path(user_path).expanduser() |
| | | if not candidate.is_absolute(): |
| | | candidate = safe_root / candidate |
| | | |
| | | try: |
| | | resolved = candidate.resolve(strict=False) |
| | | except OSError as exc: |
| | | raise SystemExit(f"Invalid {label}: {exc}") from exc |
| | | |
| | | try: |
| | | resolved.relative_to(safe_root) |
| | | except ValueError: |
| | | raise SystemExit( |
| | | f"Invalid {label}: must stay within workspace root '{safe_root}'" |
| | | ) |
| | | |
| | | if must_exist and not resolved.exists(): |
| | | raise SystemExit(f"Invalid {label}: file not found '{resolved}'") |
| | | |
| | | return resolved |
| | | |
| | | |
def generate_id(type_name: str) -> str:
    """Build a short unique entity ID: first 4 chars of the type + 8 hex chars."""
    return f"{type_name.lower()[:4]}_{uuid.uuid4().hex[:8]}"
| | | |
| | | |
def load_graph(path: str) -> tuple[dict, list]:
    """Replay the append-only JSONL log into (entities_by_id, relation_list).

    Ops are applied in file order: create/update/delete mutate the entity
    map; relate/unrelate grow and prune the relation list. A missing file
    yields an empty graph. Updates to unknown entity IDs are ignored.
    """
    entities: dict = {}
    relations: list = []

    source = Path(path)
    if not source.exists():
        return entities, relations

    with open(source) as handle:
        for raw in handle:
            raw = raw.strip()
            if not raw:
                continue  # tolerate blank lines in the log
            record = json.loads(raw)
            op = record.get("op")

            if op == "create":
                created = record["entity"]
                entities[created["id"]] = created
            elif op == "update":
                target = entities.get(record["id"])
                if target is not None:
                    target["properties"].update(record.get("properties", {}))
                    target["updated"] = record.get("timestamp")
            elif op == "delete":
                entities.pop(record["id"], None)
            elif op == "relate":
                relations.append({
                    "from": record["from"],
                    "rel": record["rel"],
                    "to": record["to"],
                    "properties": record.get("properties", {}),
                })
            elif op == "unrelate":
                # Drop every relation matching the exact (from, rel, to) triple.
                key = (record["from"], record["rel"], record["to"])
                relations = [
                    r for r in relations
                    if (r["from"], r["rel"], r["to"]) != key
                ]

    return entities, relations
| | | |
| | | |
def append_op(path: str, record: dict):
    """Append one operation record to the graph log as a single JSONL line.

    Creates any missing parent directories before writing.
    """
    target = Path(path)
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, "a") as sink:
        sink.write(json.dumps(record) + "\n")
| | | |
| | | |
def create_entity(type_name: str, properties: dict, graph_path: str, entity_id: str | None = None) -> dict:
    """Create a new entity and append a 'create' op to the graph log.

    Args:
        type_name: Entity type (e.g. "Person", "Task").
        properties: Property mapping stored on the entity as-is.
        graph_path: Path to the JSONL graph log (created if absent).
        entity_id: Optional explicit ID; generated from the type when omitted.
            (Annotation fixed: the default is None, so the type is str | None.)

    Returns:
        The persisted entity dict: id, type, properties, created, updated.
    """
    entity_id = entity_id or generate_id(type_name)
    # created == updated at creation time; both UTC ISO-8601.
    timestamp = datetime.now(timezone.utc).isoformat()

    entity = {
        "id": entity_id,
        "type": type_name,
        "properties": properties,
        "created": timestamp,
        "updated": timestamp
    }

    record = {"op": "create", "entity": entity, "timestamp": timestamp}
    append_op(graph_path, record)

    return entity
| | | |
| | | |
def get_entity(entity_id: str, graph_path: str) -> dict | None:
    """Return the entity with ``entity_id`` from the graph, or None if absent."""
    return load_graph(graph_path)[0].get(entity_id)
| | | |
| | | |
def query_entities(type_name: str, where: dict, graph_path: str) -> list:
    """Return entities matching a type (when given) and every ``where`` pair.

    A falsy ``type_name`` disables the type filter; ``where`` values are
    compared for equality against the entity's properties.
    """
    entities, _ = load_graph(graph_path)
    return [
        entity
        for entity in entities.values()
        if (not type_name or entity["type"] == type_name)
        and all(entity["properties"].get(key) == value for key, value in where.items())
    ]
| | | |
| | | |
def list_entities(type_name: str, graph_path: str) -> list:
    """List every entity, optionally restricted to a single type."""
    entities, _ = load_graph(graph_path)
    if not type_name:
        return list(entities.values())
    return [entity for entity in entities.values() if entity["type"] == type_name]
| | | |
| | | |
def update_entity(entity_id: str, properties: dict, graph_path: str) -> dict | None:
    """Merge ``properties`` into an existing entity; None when the ID is unknown.

    Logs an 'update' op and returns the in-memory entity with the merge and
    a fresh UTC 'updated' timestamp applied.
    """
    entities, _ = load_graph(graph_path)
    target = entities.get(entity_id)
    if target is None:
        return None

    stamp = datetime.now(timezone.utc).isoformat()
    append_op(graph_path, {
        "op": "update",
        "id": entity_id,
        "properties": properties,
        "timestamp": stamp,
    })

    target["properties"].update(properties)
    target["updated"] = stamp
    return target
| | | |
| | | |
def delete_entity(entity_id: str, graph_path: str) -> bool:
    """Log a 'delete' op for the entity; False when the ID does not exist."""
    entities, _ = load_graph(graph_path)
    if entity_id not in entities:
        return False

    append_op(graph_path, {
        "op": "delete",
        "id": entity_id,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })
    return True
| | | |
| | | |
def create_relation(from_id: str, rel_type: str, to_id: str, properties: dict, graph_path: str):
    """Append a 'relate' op linking two entities and return the logged record.

    Note: entity existence is not checked here; ``validate`` reports dangling
    relation endpoints.
    """
    record = {
        "op": "relate",
        "from": from_id,
        "rel": rel_type,
        "to": to_id,
        "properties": properties,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
    append_op(graph_path, record)
    return record
| | | |
| | | |
def get_related(entity_id: str, rel_type: str, graph_path: str, direction: str = "outgoing") -> list:
    """Find entities connected to ``entity_id`` through matching relations.

    direction: "outgoing" (entity is the source), "incoming" (entity is the
    target), or "both" (either end — results then also carry a "direction"
    key). A falsy ``rel_type`` matches any relation type. Relations whose
    counterpart entity no longer exists are skipped.
    """
    entities, relations = load_graph(graph_path)
    matches = []

    for rel in relations:
        if rel_type and rel["rel"] != rel_type:
            continue

        if direction == "outgoing":
            if rel["from"] == entity_id and rel["to"] in entities:
                matches.append({
                    "relation": rel["rel"],
                    "entity": entities[rel["to"]],
                })
        elif direction == "incoming":
            if rel["to"] == entity_id and rel["from"] in entities:
                matches.append({
                    "relation": rel["rel"],
                    "entity": entities[rel["from"]],
                })
        elif direction == "both":
            if entity_id in (rel["from"], rel["to"]):
                # Self-loops report as outgoing, matching the source-first check.
                is_outgoing = rel["from"] == entity_id
                other_id = rel["to"] if is_outgoing else rel["from"]
                if other_id in entities:
                    matches.append({
                        "relation": rel["rel"],
                        "direction": "outgoing" if is_outgoing else "incoming",
                        "entity": entities[other_id],
                    })

    return matches
| | | |
| | | |
def validate_graph(graph_path: str, schema_path: str) -> list:
    """Validate graph against schema constraints.

    Replays the graph, then checks (in order): per-entity required /
    forbidden / enum property rules from schema["types"]; per-relation
    endpoint-type, cardinality, and acyclicity rules from
    schema["relations"]; and a limited subset of schema["constraints"]
    (currently only the Event end >= start rule is executable).

    Returns a list of human-readable error strings; empty means valid.
    """
    entities, relations = load_graph(graph_path)
    errors = []

    # Load schema if exists; load_schema returns {} when the file is missing,
    # which makes every check below a no-op.
    schema = load_schema(schema_path)

    type_schemas = schema.get("types", {})
    relation_schemas = schema.get("relations", {})
    global_constraints = schema.get("constraints", [])

    # --- Per-entity property checks -------------------------------------
    for entity_id, entity in entities.items():
        type_name = entity["type"]
        # Unknown types get an empty schema, i.e. no constraints.
        type_schema = type_schemas.get(type_name, {})

        # Check required properties
        required = type_schema.get("required", [])
        for prop in required:
            if prop not in entity["properties"]:
                errors.append(f"{entity_id}: missing required property '{prop}'")

        # Check forbidden properties (e.g. raw secrets on Credential)
        forbidden = type_schema.get("forbidden_properties", [])
        for prop in forbidden:
            if prop in entity["properties"]:
                errors.append(f"{entity_id}: contains forbidden property '{prop}'")

        # Check enum values: schema keys like "status_enum" constrain the
        # "status" property. Falsy values (None, "") are not checked.
        for prop, allowed in type_schema.items():
            if prop.endswith("_enum"):
                field = prop.replace("_enum", "")
                value = entity["properties"].get(field)
                if value and value not in allowed:
                    errors.append(f"{entity_id}: '{field}' must be one of {allowed}, got '{value}'")

    # Relation constraints (type + cardinality + acyclicity)
    # Bucket relations by type once so each schema rule scans only its own.
    rel_index = {}
    for rel in relations:
        rel_index.setdefault(rel["rel"], []).append(rel)

    for rel_type, rel_schema in relation_schemas.items():
        rels = rel_index.get(rel_type, [])
        from_types = rel_schema.get("from_types", [])
        to_types = rel_schema.get("to_types", [])
        cardinality = rel_schema.get("cardinality")
        acyclic = rel_schema.get("acyclic", False)

        # Type checks: both endpoints must exist and match the declared types.
        for rel in rels:
            from_entity = entities.get(rel["from"])
            to_entity = entities.get(rel["to"])
            if not from_entity or not to_entity:
                errors.append(f"{rel_type}: relation references missing entity ({rel['from']} -> {rel['to']})")
                continue
            if from_types and from_entity["type"] not in from_types:
                errors.append(
                    f"{rel_type}: from entity {rel['from']} type {from_entity['type']} not in {from_types}"
                )
            if to_types and to_entity["type"] not in to_types:
                errors.append(
                    f"{rel_type}: to entity {rel['to']} type {to_entity['type']} not in {to_types}"
                )

        # Cardinality checks: a "one" side may appear at most once per entity.
        # (many_to_many needs no counting, hence the membership test.)
        if cardinality in ("one_to_one", "one_to_many", "many_to_one"):
            from_counts = {}
            to_counts = {}
            for rel in rels:
                from_counts[rel["from"]] = from_counts.get(rel["from"], 0) + 1
                to_counts[rel["to"]] = to_counts.get(rel["to"], 0) + 1

            if cardinality in ("one_to_one", "many_to_one"):
                for from_id, count in from_counts.items():
                    if count > 1:
                        errors.append(f"{rel_type}: from entity {from_id} violates cardinality {cardinality}")
            if cardinality in ("one_to_one", "one_to_many"):
                for to_id, count in to_counts.items():
                    if count > 1:
                        errors.append(f"{rel_type}: to entity {to_id} violates cardinality {cardinality}")

        # Acyclic checks: DFS with an on-path stack; a back edge to a node on
        # the current stack means a cycle. Only the first cycle per relation
        # type is reported (break after the first hit).
        # NOTE(review): recursive DFS — very deep chains could hit Python's
        # recursion limit; acceptable for the intended graph sizes.
        if acyclic:
            graph = {}
            for rel in rels:
                graph.setdefault(rel["from"], []).append(rel["to"])

            visited = {}

            def dfs(node, stack):
                visited[node] = True
                stack.add(node)
                for nxt in graph.get(node, []):
                    if nxt in stack:
                        return True
                    if not visited.get(nxt, False):
                        if dfs(nxt, stack):
                            return True
                stack.remove(node)
                return False

            for node in graph:
                if not visited.get(node, False):
                    if dfs(node, set()):
                        errors.append(f"{rel_type}: cyclic dependency detected")
                        break

    # Global constraints (limited enforcement): most rules in the schema's
    # "constraints" list are documentation-only; only the patterns matched
    # below are executed.
    for constraint in global_constraints:
        ctype = constraint.get("type")
        relation = constraint.get("relation")
        rule = (constraint.get("rule") or "").strip().lower()
        # Event temporal rule: any constraint on Event mentioning both
        # "end" and "start" triggers the end >= start check.
        if ctype == "Event" and "end" in rule and "start" in rule:
            for entity_id, entity in entities.items():
                if entity["type"] != "Event":
                    continue
                start = entity["properties"].get("start")
                end = entity["properties"].get("end")
                if start and end:
                    try:
                        start_dt = datetime.fromisoformat(start)
                        end_dt = datetime.fromisoformat(end)
                        if end_dt < start_dt:
                            errors.append(f"{entity_id}: end must be >= start")
                    except ValueError:
                        errors.append(f"{entity_id}: invalid datetime format in start/end")
        # Already enforced above via relations schema
        if relation and rule == "acyclic":
            continue

    return errors
| | | |
| | | |
| | | def load_schema(schema_path: str) -> dict:
| | |     """Load schema from YAML if it exists.
| | | 
| | |     Returns an empty dict when the file is missing or the YAML document
| | |     is empty (yaml.safe_load returns None for an empty file).
| | |     """
| | |     schema = {}
| | |     schema_file = Path(schema_path)
| | |     if schema_file.exists():
| | |         # Imported lazily so PyYAML is only required when a schema file exists.
| | |         import yaml
| | |         with open(schema_file) as f:
| | |             schema = yaml.safe_load(f) or {}
| | |     return schema
| | | |
| | | |
| | | def write_schema(schema_path: str, schema: dict) -> None:
| | |     """Write schema to YAML, creating parent directories as needed."""
| | |     schema_file = Path(schema_path)
| | |     schema_file.parent.mkdir(parents=True, exist_ok=True)
| | |     # Imported lazily so PyYAML is only required for schema operations.
| | |     import yaml
| | |     with open(schema_file, "w") as f:
| | |         # sort_keys=False keeps the schema's original key ordering on disk.
| | |         yaml.safe_dump(schema, f, sort_keys=False)
| | | |
| | | |
| | | def merge_schema(base: dict, incoming: dict) -> dict:
| | |     """Merge incoming schema into base, appending lists and deep-merging dicts.
| | | 
| | |     NOTE: mutates ``base`` in place and also returns it. Dicts on both
| | |     sides are deep-merged, lists are concatenated with duplicates skipped,
| | |     and any other value (or type mismatch) is overwritten by ``incoming``.
| | |     """
| | |     for key, value in (incoming or {}).items():
| | |         if key in base and isinstance(base[key], dict) and isinstance(value, dict):
| | |             # Both sides are dicts: recurse to deep-merge.
| | |             base[key] = merge_schema(base[key], value)
| | |         elif key in base and isinstance(base[key], list) and isinstance(value, list):
| | |             # Both sides are lists: append only items not already present.
| | |             base[key] = base[key] + [v for v in value if v not in base[key]]
| | |         else:
| | |             # New key or type mismatch: incoming value wins.
| | |             base[key] = value
| | |     return base
| | | |
| | | |
| | | def append_schema(schema_path: str, incoming: dict) -> dict:
| | |     """Append/merge schema fragment into existing schema.
| | | 
| | |     Loads the current schema (empty dict if absent), merges ``incoming``
| | |     via merge_schema, persists the result, and returns the merged schema.
| | |     """
| | |     base = load_schema(schema_path)
| | |     merged = merge_schema(base, incoming)
| | |     write_schema(schema_path, merged)
| | |     return merged
| | | |
| | | |
| | | def main():
| | |     """CLI entry point: build the argument parser and dispatch to graph ops.
| | | 
| | |     Subcommands: create, get, query, list, update, delete, relate,
| | |     related, validate, schema-append. All path arguments are resolved
| | |     through resolve_safe_path before use.
| | |     """
| | |     parser = argparse.ArgumentParser(description="Ontology graph operations")
| | |     subparsers = parser.add_subparsers(dest="command", required=True)
| | | 
| | |     # Create
| | |     create_p = subparsers.add_parser("create", help="Create entity")
| | |     create_p.add_argument("--type", "-t", required=True, help="Entity type")
| | |     create_p.add_argument("--props", "-p", default="{}", help="Properties JSON")
| | |     create_p.add_argument("--id", help="Entity ID (auto-generated if not provided)")
| | |     create_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Get
| | |     get_p = subparsers.add_parser("get", help="Get entity by ID")
| | |     get_p.add_argument("--id", required=True, help="Entity ID")
| | |     get_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Query
| | |     query_p = subparsers.add_parser("query", help="Query entities")
| | |     query_p.add_argument("--type", "-t", help="Entity type")
| | |     query_p.add_argument("--where", "-w", default="{}", help="Filter JSON")
| | |     query_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # List
| | |     list_p = subparsers.add_parser("list", help="List entities")
| | |     list_p.add_argument("--type", "-t", help="Entity type")
| | |     list_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Update
| | |     update_p = subparsers.add_parser("update", help="Update entity")
| | |     update_p.add_argument("--id", required=True, help="Entity ID")
| | |     update_p.add_argument("--props", "-p", required=True, help="Properties JSON")
| | |     update_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Delete
| | |     delete_p = subparsers.add_parser("delete", help="Delete entity")
| | |     delete_p.add_argument("--id", required=True, help="Entity ID")
| | |     delete_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Relate
| | |     relate_p = subparsers.add_parser("relate", help="Create relation")
| | |     # dest= is needed because "from" is a Python keyword and cannot be an
| | |     # attribute name on the parsed-args namespace.
| | |     relate_p.add_argument("--from", dest="from_id", required=True, help="From entity ID")
| | |     relate_p.add_argument("--rel", "-r", required=True, help="Relation type")
| | |     relate_p.add_argument("--to", dest="to_id", required=True, help="To entity ID")
| | |     relate_p.add_argument("--props", "-p", default="{}", help="Relation properties JSON")
| | |     relate_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Related
| | |     related_p = subparsers.add_parser("related", help="Get related entities")
| | |     related_p.add_argument("--id", required=True, help="Entity ID")
| | |     related_p.add_argument("--rel", "-r", help="Relation type filter")
| | |     related_p.add_argument("--dir", "-d", choices=["outgoing", "incoming", "both"], default="outgoing")
| | |     related_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | | 
| | |     # Validate
| | |     validate_p = subparsers.add_parser("validate", help="Validate graph")
| | |     validate_p.add_argument("--graph", "-g", default=DEFAULT_GRAPH_PATH)
| | |     validate_p.add_argument("--schema", "-s", default=DEFAULT_SCHEMA_PATH)
| | | 
| | |     # Schema append
| | |     schema_p = subparsers.add_parser("schema-append", help="Append/merge schema fragment")
| | |     schema_p.add_argument("--schema", "-s", default=DEFAULT_SCHEMA_PATH)
| | |     schema_p.add_argument("--data", "-d", help="Schema fragment as JSON")
| | |     schema_p.add_argument("--file", "-f", help="Schema fragment file (YAML or JSON)")
| | | 
| | |     args = parser.parse_args()
| | |     workspace_root = Path.cwd().resolve()
| | | 
| | |     # Sanitize all user-supplied paths relative to the workspace root;
| | |     # hasattr() is used because not every subcommand defines every path arg.
| | |     if hasattr(args, "graph"):
| | |         args.graph = str(
| | |             resolve_safe_path(args.graph, root=workspace_root, label="graph path")
| | |         )
| | |     if hasattr(args, "schema"):
| | |         args.schema = str(
| | |             resolve_safe_path(args.schema, root=workspace_root, label="schema path")
| | |         )
| | |     if hasattr(args, "file") and args.file:
| | |         args.file = str(
| | |             resolve_safe_path(
| | |                 args.file, root=workspace_root, must_exist=True, label="schema file"
| | |             )
| | |         )
| | | 
| | |     # Dispatch: each branch prints JSON results (indent=2) or a plain
| | |     # not-found / status message.
| | |     if args.command == "create":
| | |         props = json.loads(args.props)
| | |         entity = create_entity(args.type, props, args.graph, args.id)
| | |         print(json.dumps(entity, indent=2))
| | | 
| | |     elif args.command == "get":
| | |         entity = get_entity(args.id, args.graph)
| | |         if entity:
| | |             print(json.dumps(entity, indent=2))
| | |         else:
| | |             print(f"Entity not found: {args.id}")
| | | 
| | |     elif args.command == "query":
| | |         where = json.loads(args.where)
| | |         results = query_entities(args.type, where, args.graph)
| | |         print(json.dumps(results, indent=2))
| | | 
| | |     elif args.command == "list":
| | |         results = list_entities(args.type, args.graph)
| | |         print(json.dumps(results, indent=2))
| | | 
| | |     elif args.command == "update":
| | |         props = json.loads(args.props)
| | |         entity = update_entity(args.id, props, args.graph)
| | |         if entity:
| | |             print(json.dumps(entity, indent=2))
| | |         else:
| | |             print(f"Entity not found: {args.id}")
| | | 
| | |     elif args.command == "delete":
| | |         if delete_entity(args.id, args.graph):
| | |             print(f"Deleted: {args.id}")
| | |         else:
| | |             print(f"Entity not found: {args.id}")
| | | 
| | |     elif args.command == "relate":
| | |         props = json.loads(args.props)
| | |         rel = create_relation(args.from_id, args.rel, args.to_id, props, args.graph)
| | |         print(json.dumps(rel, indent=2))
| | | 
| | |     elif args.command == "related":
| | |         results = get_related(args.id, args.rel, args.graph, args.dir)
| | |         print(json.dumps(results, indent=2))
| | | 
| | |     elif args.command == "validate":
| | |         errors = validate_graph(args.graph, args.schema)
| | |         if errors:
| | |             print("Validation errors:")
| | |             for err in errors:
| | |                 print(f"  - {err}")
| | |         else:
| | |             print("Graph is valid.")
| | | 
| | |     elif args.command == "schema-append":
| | |         # Exactly one input source is required: inline JSON or a file.
| | |         if not args.data and not args.file:
| | |             raise SystemExit("schema-append requires --data or --file")
| | | 
| | |         incoming = {}
| | |         if args.data:
| | |             incoming = json.loads(args.data)
| | |         else:
| | |             # File input: pick the parser by extension (.json vs YAML).
| | |             path = Path(args.file)
| | |             if path.suffix.lower() == ".json":
| | |                 with open(path) as f:
| | |                     incoming = json.load(f)
| | |             else:
| | |                 import yaml
| | |                 with open(path) as f:
| | |                     incoming = yaml.safe_load(f) or {}
| | | 
| | |         merged = append_schema(args.schema, incoming)
| | |         print(json.dumps(merged, indent=2))
| | | |
| | | |
| | | if __name__ == "__main__":
| | |     # Entry point when executed as a script.
| | |     main()
| New file |
| | |
| | | { |
| | | "version": 1, |
| | | "registry": "https://clawhub.ai", |
| | | "slug": "self-improving-agent", |
| | | "installedVersion": "3.0.1", |
| | | "installedAt": 1773304830240 |
| | | } |
| New file |
| | |
| | | # Errors Log |
| | | |
| | | Command failures, exceptions, and unexpected behaviors. |
| | | |
| | | --- |
| New file |
| | |
| | | # Feature Requests |
| | | |
| | | Capabilities requested by user that don't currently exist. |
| | | |
| | | --- |
| New file |
| | |
| | | # Learnings Log |
| | | |
| | | Captured learnings, corrections, and discoveries. Review before major tasks. |
| | | |
| | | --- |
| New file |
| | |
| | | --- |
| | | name: self-improvement |
| | | description: "Captures learnings, errors, and corrections to enable continuous improvement. Use when: (1) A command or operation fails unexpectedly, (2) User corrects Claude ('No, that's wrong...', 'Actually...'), (3) User requests a capability that doesn't exist, (4) An external API or tool fails, (5) Claude realizes its knowledge is outdated or incorrect, (6) A better approach is discovered for a recurring task. Also review learnings before major tasks." |
| | | metadata: |
| | | --- |
| | | |
| | | # Self-Improvement Skill |
| | | |
| | | Log learnings and errors to markdown files for continuous improvement. Coding agents can later process these into fixes, and important learnings get promoted to project memory. |
| | | |
| | | ## Quick Reference |
| | | |
| | | | Situation | Action | |
| | | |-----------|--------| |
| | | | Command/operation fails | Log to `.learnings/ERRORS.md` | |
| | | | User corrects you | Log to `.learnings/LEARNINGS.md` with category `correction` | |
| | | | User wants missing feature | Log to `.learnings/FEATURE_REQUESTS.md` | |
| | | | API/external tool fails | Log to `.learnings/ERRORS.md` with integration details | |
| | | | Knowledge was outdated | Log to `.learnings/LEARNINGS.md` with category `knowledge_gap` | |
| | | | Found better approach | Log to `.learnings/LEARNINGS.md` with category `best_practice` | |
| | | | Simplify/Harden recurring patterns | Log/update `.learnings/LEARNINGS.md` with `Source: simplify-and-harden` and a stable `Pattern-Key` | |
| | | | Similar to existing entry | Link with `**See Also**`, consider priority bump | |
| | | | Broadly applicable learning | Promote to `CLAUDE.md`, `AGENTS.md`, and/or `.github/copilot-instructions.md` | |
| | | | Workflow improvements | Promote to `AGENTS.md` (OpenClaw workspace) | |
| | | | Tool gotchas | Promote to `TOOLS.md` (OpenClaw workspace) | |
| | | | Behavioral patterns | Promote to `SOUL.md` (OpenClaw workspace) | |
| | | |
| | | ## OpenClaw Setup (Recommended) |
| | | |
| | | OpenClaw is the primary platform for this skill. It uses workspace-based prompt injection with automatic skill loading. |
| | | |
| | | ### Installation |
| | | |
| | | **Via ClawdHub (recommended):** |
| | | ```bash |
| | | clawdhub install self-improving-agent |
| | | ``` |
| | | |
| | | **Manual:** |
| | | ```bash |
| | | git clone https://github.com/peterskoett/self-improving-agent.git ~/.openclaw/skills/self-improving-agent |
| | | ``` |
| | | |
| | | Remade for OpenClaw from the original repo: https://github.com/pskoett/pskoett-ai-skills (see https://github.com/pskoett/pskoett-ai-skills/tree/main/skills/self-improvement).
| | | |
| | | ### Workspace Structure |
| | | |
| | | OpenClaw injects these files into every session: |
| | | |
| | | ``` |
| | | ~/.openclaw/workspace/ |
| | | ├── AGENTS.md # Multi-agent workflows, delegation patterns |
| | | ├── SOUL.md # Behavioral guidelines, personality, principles |
| | | ├── TOOLS.md # Tool capabilities, integration gotchas |
| | | ├── MEMORY.md # Long-term memory (main session only) |
| | | ├── memory/ # Daily memory files |
| | | │ └── YYYY-MM-DD.md |
| | | └── .learnings/ # This skill's log files |
| | | ├── LEARNINGS.md |
| | | ├── ERRORS.md |
| | | └── FEATURE_REQUESTS.md |
| | | ``` |
| | | |
| | | ### Create Learning Files |
| | | |
| | | ```bash |
| | | mkdir -p ~/.openclaw/workspace/.learnings |
| | | ``` |
| | | |
| | | Then create the log files (or copy from `assets/`): |
| | | - `LEARNINGS.md` — corrections, knowledge gaps, best practices |
| | | - `ERRORS.md` — command failures, exceptions |
| | | - `FEATURE_REQUESTS.md` — user-requested capabilities |
| | | |
| | | ### Promotion Targets |
| | | |
| | | When learnings prove broadly applicable, promote them to workspace files: |
| | | |
| | | | Learning Type | Promote To | Example | |
| | | |---------------|------------|---------| |
| | | | Behavioral patterns | `SOUL.md` | "Be concise, avoid disclaimers" | |
| | | | Workflow improvements | `AGENTS.md` | "Spawn sub-agents for long tasks" | |
| | | | Tool gotchas | `TOOLS.md` | "Git push needs auth configured first" | |
| | | |
| | | ### Inter-Session Communication |
| | | |
| | | OpenClaw provides tools to share learnings across sessions: |
| | | |
| | | - **sessions_list** — View active/recent sessions |
| | | - **sessions_history** — Read another session's transcript |
| | | - **sessions_send** — Send a learning to another session |
| | | - **sessions_spawn** — Spawn a sub-agent for background work |
| | | |
| | | ### Optional: Enable Hook |
| | | |
| | | For automatic reminders at session start: |
| | | |
| | | ```bash |
| | | # Copy hook to OpenClaw hooks directory |
| | | cp -r hooks/openclaw ~/.openclaw/hooks/self-improvement |
| | | |
| | | # Enable it |
| | | openclaw hooks enable self-improvement |
| | | ``` |
| | | |
| | | See `references/openclaw-integration.md` for complete details. |
| | | |
| | | --- |
| | | |
| | | ## Generic Setup (Other Agents) |
| | | |
| | | For Claude Code, Codex, Copilot, or other agents, create `.learnings/` in your project: |
| | | |
| | | ```bash |
| | | mkdir -p .learnings |
| | | ``` |
| | | |
| | | Copy templates from `assets/` or create files with headers. |
| | | |
| | | ### Add a reference in your agent files (AGENTS.md, CLAUDE.md, or .github/copilot-instructions.md) to remind yourself to log learnings (an alternative to hook-based reminders)
| | | |
| | | #### Self-Improvement Workflow |
| | | |
| | | When errors or corrections occur: |
| | | 1. Log to `.learnings/ERRORS.md`, `LEARNINGS.md`, or `FEATURE_REQUESTS.md` |
| | | 2. Review and promote broadly applicable learnings to: |
| | | - `CLAUDE.md` - project facts and conventions |
| | | - `AGENTS.md` - workflows and automation |
| | | - `.github/copilot-instructions.md` - Copilot context |
| | | |
| | | ## Logging Format |
| | | |
| | | ### Learning Entry |
| | | |
| | | Append to `.learnings/LEARNINGS.md`: |
| | | |
| | | ```markdown |
| | | ## [LRN-YYYYMMDD-XXX] category |
| | | |
| | | **Logged**: ISO-8601 timestamp |
| | | **Priority**: low | medium | high | critical |
| | | **Status**: pending |
| | | **Area**: frontend | backend | infra | tests | docs | config |
| | | |
| | | ### Summary |
| | | One-line description of what was learned |
| | | |
| | | ### Details |
| | | Full context: what happened, what was wrong, what's correct |
| | | |
| | | ### Suggested Action |
| | | Specific fix or improvement to make |
| | | |
| | | ### Metadata |
| | | - Source: conversation | error | user_feedback |
| | | - Related Files: path/to/file.ext |
| | | - Tags: tag1, tag2 |
| | | - See Also: LRN-20250110-001 (if related to existing entry) |
| | | - Pattern-Key: simplify.dead_code | harden.input_validation (optional, for recurring-pattern tracking) |
| | | - Recurrence-Count: 1 (optional) |
| | | - First-Seen: 2025-01-15 (optional) |
| | | - Last-Seen: 2025-01-15 (optional) |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ### Error Entry |
| | | |
| | | Append to `.learnings/ERRORS.md`: |
| | | |
| | | ````markdown
| | | ## [ERR-YYYYMMDD-XXX] skill_or_command_name
| | | 
| | | **Logged**: ISO-8601 timestamp
| | | **Priority**: high
| | | **Status**: pending
| | | **Area**: frontend | backend | infra | tests | docs | config
| | | 
| | | ### Summary
| | | Brief description of what failed
| | | 
| | | ### Error
| | | ```
| | | Actual error message or output
| | | ```
| | | 
| | | ### Context
| | | - Command/operation attempted
| | | - Input or parameters used
| | | - Environment details if relevant
| | | 
| | | ### Suggested Fix
| | | If identifiable, what might resolve this
| | | 
| | | ### Metadata
| | | - Reproducible: yes | no | unknown
| | | - Related Files: path/to/file.ext
| | | - See Also: ERR-20250110-001 (if recurring)
| | | 
| | | ---
| | | ````
| | | |
| | | ### Feature Request Entry |
| | | |
| | | Append to `.learnings/FEATURE_REQUESTS.md`: |
| | | |
| | | ```markdown |
| | | ## [FEAT-YYYYMMDD-XXX] capability_name |
| | | |
| | | **Logged**: ISO-8601 timestamp |
| | | **Priority**: medium |
| | | **Status**: pending |
| | | **Area**: frontend | backend | infra | tests | docs | config |
| | | |
| | | ### Requested Capability |
| | | What the user wanted to do |
| | | |
| | | ### User Context |
| | | Why they needed it, what problem they're solving |
| | | |
| | | ### Complexity Estimate |
| | | simple | medium | complex |
| | | |
| | | ### Suggested Implementation |
| | | How this could be built, what it might extend |
| | | |
| | | ### Metadata |
| | | - Frequency: first_time | recurring |
| | | - Related Features: existing_feature_name |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## ID Generation |
| | | |
| | | Format: `TYPE-YYYYMMDD-XXX` |
| | | - TYPE: `LRN` (learning), `ERR` (error), `FEAT` (feature) |
| | | - YYYYMMDD: Current date |
| | | - XXX: Sequential number or random 3 chars (e.g., `001`, `A7B`) |
| | | |
| | | Examples: `LRN-20250115-001`, `ERR-20250115-A3F`, `FEAT-20250115-002` |
| | | |
| | | ## Resolving Entries |
| | | |
| | | When an issue is fixed, update the entry: |
| | | |
| | | 1. Change `**Status**: pending` → `**Status**: resolved` |
| | | 2. Add resolution block after Metadata: |
| | | |
| | | ```markdown |
| | | ### Resolution |
| | | - **Resolved**: 2025-01-16T09:00:00Z |
| | | - **Commit/PR**: abc123 or #42 |
| | | - **Notes**: Brief description of what was done |
| | | ``` |
| | | |
| | | Other status values: |
| | | - `in_progress` - Actively being worked on |
| | | - `wont_fix` - Decided not to address (add reason in Resolution notes) |
| | | - `promoted` - Elevated to CLAUDE.md, AGENTS.md, or .github/copilot-instructions.md |
| | | |
| | | ## Promoting to Project Memory |
| | | |
| | | When a learning is broadly applicable (not a one-off fix), promote it to permanent project memory. |
| | | |
| | | ### When to Promote |
| | | |
| | | - Learning applies across multiple files/features |
| | | - Knowledge any contributor (human or AI) should know |
| | | - Prevents recurring mistakes |
| | | - Documents project-specific conventions |
| | | |
| | | ### Promotion Targets |
| | | |
| | | | Target | What Belongs There | |
| | | |--------|-------------------| |
| | | | `CLAUDE.md` | Project facts, conventions, gotchas for all Claude interactions | |
| | | | `AGENTS.md` | Agent-specific workflows, tool usage patterns, automation rules | |
| | | | `.github/copilot-instructions.md` | Project context and conventions for GitHub Copilot | |
| | | | `SOUL.md` | Behavioral guidelines, communication style, principles (OpenClaw workspace) | |
| | | | `TOOLS.md` | Tool capabilities, usage patterns, integration gotchas (OpenClaw workspace) | |
| | | |
| | | ### How to Promote |
| | | |
| | | 1. **Distill** the learning into a concise rule or fact |
| | | 2. **Add** to appropriate section in target file (create file if needed) |
| | | 3. **Update** original entry: |
| | | - Change `**Status**: pending` → `**Status**: promoted` |
| | | - Add `**Promoted**: CLAUDE.md`, `AGENTS.md`, or `.github/copilot-instructions.md` |
| | | |
| | | ### Promotion Examples |
| | | |
| | | **Learning** (verbose): |
| | | > Project uses pnpm workspaces. Attempted `npm install` but failed. |
| | | > Lock file is `pnpm-lock.yaml`. Must use `pnpm install`. |
| | | |
| | | **In CLAUDE.md** (concise): |
| | | ```markdown |
| | | ## Build & Dependencies |
| | | - Package manager: pnpm (not npm) - use `pnpm install` |
| | | ``` |
| | | |
| | | **Learning** (verbose): |
| | | > When modifying API endpoints, must regenerate TypeScript client. |
| | | > Forgetting this causes type mismatches at runtime. |
| | | |
| | | **In AGENTS.md** (actionable): |
| | | ```markdown |
| | | ## After API Changes |
| | | 1. Regenerate client: `pnpm run generate:api` |
| | | 2. Check for type errors: `pnpm tsc --noEmit` |
| | | ``` |
| | | |
| | | ## Recurring Pattern Detection |
| | | |
| | | If logging something similar to an existing entry: |
| | | |
| | | 1. **Search first**: `grep -r "keyword" .learnings/` |
| | | 2. **Link entries**: Add `**See Also**: ERR-20250110-001` in Metadata |
| | | 3. **Bump priority** if issue keeps recurring |
| | | 4. **Consider systemic fix**: Recurring issues often indicate: |
| | | - Missing documentation (→ promote to CLAUDE.md or .github/copilot-instructions.md) |
| | | - Missing automation (→ add to AGENTS.md) |
| | | - Architectural problem (→ create tech debt ticket) |
| | | |
| | | ## Simplify & Harden Feed |
| | | |
| | | Use this workflow to ingest recurring patterns from the `simplify-and-harden` |
| | | skill and turn them into durable prompt guidance. |
| | | |
| | | ### Ingestion Workflow |
| | | |
| | | 1. Read `simplify_and_harden.learning_loop.candidates` from the task summary. |
| | | 2. For each candidate, use `pattern_key` as the stable dedupe key. |
| | | 3. Search `.learnings/LEARNINGS.md` for an existing entry with that key: |
| | | - `grep -n "Pattern-Key: <pattern_key>" .learnings/LEARNINGS.md` |
| | | 4. If found: |
| | | - Increment `Recurrence-Count` |
| | | - Update `Last-Seen` |
| | | - Add `See Also` links to related entries/tasks |
| | | 5. If not found: |
| | | - Create a new `LRN-...` entry |
| | | - Set `Source: simplify-and-harden` |
| | | - Set `Pattern-Key`, `Recurrence-Count: 1`, and `First-Seen`/`Last-Seen` |
| | | |
| | | ### Promotion Rule (System Prompt Feedback) |
| | | |
| | | Promote recurring patterns into agent context/system prompt files when all are true: |
| | | |
| | | - `Recurrence-Count >= 3` |
| | | - Seen across at least 2 distinct tasks |
| | | - Occurred within a 30-day window |
| | | |
| | | Promotion targets: |
| | | - `CLAUDE.md` |
| | | - `AGENTS.md` |
| | | - `.github/copilot-instructions.md` |
| | | - `SOUL.md` / `TOOLS.md` for OpenClaw workspace-level guidance when applicable |
| | | |
| | | Write promoted rules as short prevention rules (what to do before/while coding), |
| | | not long incident write-ups. |
| | | |
| | | ## Periodic Review |
| | | |
| | | Review `.learnings/` at natural breakpoints: |
| | | |
| | | ### When to Review |
| | | - Before starting a new major task |
| | | - After completing a feature |
| | | - When working in an area with past learnings |
| | | - Weekly during active development |
| | | |
| | | ### Quick Status Check |
| | | ```bash |
| | | # Count pending items |
| | | grep -h "Status\*\*: pending" .learnings/*.md | wc -l |
| | | |
| | | # List pending high-priority items |
| | | grep -B5 "Priority\*\*: high" .learnings/*.md | grep "^## \[" |
| | | |
| | | # Find learnings for a specific area |
| | | grep -l "Area\*\*: backend" .learnings/*.md |
| | | ``` |
| | | |
| | | ### Review Actions |
| | | - Resolve fixed items |
| | | - Promote applicable learnings |
| | | - Link related entries |
| | | - Escalate recurring issues |
| | | |
| | | ## Detection Triggers |
| | | |
| | | Automatically log when you notice: |
| | | |
| | | **Corrections** (→ learning with `correction` category): |
| | | - "No, that's not right..." |
| | | - "Actually, it should be..." |
| | | - "You're wrong about..." |
| | | - "That's outdated..." |
| | | |
| | | **Feature Requests** (→ feature request): |
| | | - "Can you also..." |
| | | - "I wish you could..." |
| | | - "Is there a way to..." |
| | | - "Why can't you..." |
| | | |
| | | **Knowledge Gaps** (→ learning with `knowledge_gap` category): |
| | | - User provides information you didn't know |
| | | - Documentation you referenced is outdated |
| | | - API behavior differs from your understanding |
| | | |
| | | **Errors** (→ error entry): |
| | | - Command returns non-zero exit code |
| | | - Exception or stack trace |
| | | - Unexpected output or behavior |
| | | - Timeout or connection failure |
| | | |
| | | ## Priority Guidelines |
| | | |
| | | | Priority | When to Use | |
| | | |----------|-------------| |
| | | | `critical` | Blocks core functionality, data loss risk, security issue | |
| | | | `high` | Significant impact, affects common workflows, recurring issue | |
| | | | `medium` | Moderate impact, workaround exists | |
| | | | `low` | Minor inconvenience, edge case, nice-to-have | |
| | | |
| | | ## Area Tags |
| | | |
| | | Use to filter learnings by codebase region: |
| | | |
| | | | Area | Scope | |
| | | |------|-------| |
| | | | `frontend` | UI, components, client-side code | |
| | | | `backend` | API, services, server-side code | |
| | | | `infra` | CI/CD, deployment, Docker, cloud | |
| | | | `tests` | Test files, testing utilities, coverage | |
| | | | `docs` | Documentation, comments, READMEs | |
| | | | `config` | Configuration files, environment, settings | |
| | | |
| | | ## Best Practices |
| | | |
| | | 1. **Log immediately** - context is freshest right after the issue |
| | | 2. **Be specific** - future agents need to understand quickly |
| | | 3. **Include reproduction steps** - especially for errors |
| | | 4. **Link related files** - makes fixes easier |
| | | 5. **Suggest concrete fixes** - not just "investigate" |
| | | 6. **Use consistent categories** - enables filtering |
| | | 7. **Promote aggressively** - if in doubt, add to CLAUDE.md or .github/copilot-instructions.md |
| | | 8. **Review regularly** - stale learnings lose value |
| | | |
| | | ## Gitignore Options |
| | | |
| | | **Keep learnings local** (per-developer): |
| | | ```gitignore |
| | | .learnings/ |
| | | ``` |
| | | |
| | | **Track learnings in repo** (team-wide): |
| | | Don't add to .gitignore - learnings become shared knowledge. |
| | | |
| | | **Hybrid** (track templates, ignore entries): |
| | | ```gitignore |
| | | .learnings/*.md |
| | | !.learnings/.gitkeep |
| | | ``` |
| | | |
| | | ## Hook Integration |
| | | |
| | | Enable automatic reminders through agent hooks. This is **opt-in** - you must explicitly configure hooks. |
| | | |
| | | ### Quick Setup (Claude Code / Codex) |
| | | |
| | | Create `.claude/settings.json` in your project: |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [{ |
| | | "matcher": "", |
| | | "hooks": [{ |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/activator.sh" |
| | | }] |
| | | }] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | This injects a learning evaluation reminder after each prompt (~50-100 tokens overhead). |
| | | |
| | | ### Full Setup (With Error Detection) |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [{ |
| | | "matcher": "", |
| | | "hooks": [{ |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/activator.sh" |
| | | }] |
| | | }], |
| | | "PostToolUse": [{ |
| | | "matcher": "Bash", |
| | | "hooks": [{ |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/error-detector.sh" |
| | | }] |
| | | }] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | ### Available Hook Scripts |
| | | |
| | | | Script | Hook Type | Purpose | |
| | | |--------|-----------|---------| |
| | | | `scripts/activator.sh` | UserPromptSubmit | Reminds to evaluate learnings after tasks | |
| | | | `scripts/error-detector.sh` | PostToolUse (Bash) | Triggers on command errors | |
| | | |
| | | See `references/hooks-setup.md` for detailed configuration and troubleshooting. |
| | | |
| | | ## Automatic Skill Extraction |
| | | |
| | | When a learning is valuable enough to become a reusable skill, extract it using the provided helper. |
| | | |
| | | ### Skill Extraction Criteria |
| | | |
| | | A learning qualifies for skill extraction when ANY of these apply: |
| | | |
| | | | Criterion | Description | |
| | | |-----------|-------------| |
| | | | **Recurring** | Has `See Also` links to 2+ similar issues | |
| | | | **Verified** | Status is `resolved` with working fix | |
| | | | **Non-obvious** | Required actual debugging/investigation to discover | |
| | | | **Broadly applicable** | Not project-specific; useful across codebases | |
| | | | **User-flagged** | User says "save this as a skill" or similar | |
| | | |
| | | ### Extraction Workflow |
| | | |
| | | 1. **Identify candidate**: Learning meets extraction criteria |
| | | 2. **Run helper** (or create manually): |
| | | ```bash |
| | | ./skills/self-improvement/scripts/extract-skill.sh skill-name --dry-run |
| | | ./skills/self-improvement/scripts/extract-skill.sh skill-name |
| | | ``` |
| | | 3. **Customize SKILL.md**: Fill in template with learning content |
| | | 4. **Update learning**: Set status to `promoted_to_skill`, add `Skill-Path` |
| | | 5. **Verify**: Read skill in fresh session to ensure it's self-contained |
| | | |
| | | ### Manual Extraction |
| | | |
| | | If you prefer manual creation: |
| | | |
| | | 1. Create `skills/<skill-name>/SKILL.md` |
| | | 2. Use template from `assets/SKILL-TEMPLATE.md` |
| | | 3. Follow [Agent Skills spec](https://agentskills.io/specification): |
| | | - YAML frontmatter with `name` and `description` |
| | | - Name must match folder name |
| | | - No README.md inside skill folder |
| | | |
| | | ### Extraction Detection Triggers |
| | | |
| | | Watch for these signals that a learning should become a skill: |
| | | |
| | | **In conversation:** |
| | | - "Save this as a skill" |
| | | - "I keep running into this" |
| | | - "This would be useful for other projects" |
| | | - "Remember this pattern" |
| | | |
| | | **In learning entries:** |
| | | - Multiple `See Also` links (recurring issue) |
| | | - High priority + resolved status |
| | | - Category: `best_practice` with broad applicability |
| | | - User feedback praising the solution |
| | | |
| | | ### Skill Quality Gates |
| | | |
| | | Before extraction, verify: |
| | | |
| | | - [ ] Solution is tested and working |
| | | - [ ] Description is clear without original context |
| | | - [ ] Code examples are self-contained |
| | | - [ ] No project-specific hardcoded values |
| | | - [ ] Follows skill naming conventions (lowercase, hyphens) |
| | | |
| | | ## Multi-Agent Support |
| | | |
| | | This skill works across different AI coding agents with agent-specific activation. |
| | | |
| | | ### Claude Code |
| | | |
| | | **Activation**: Hooks (UserPromptSubmit, PostToolUse) |
| | | **Setup**: `.claude/settings.json` with hook configuration |
| | | **Detection**: Automatic via hook scripts |
| | | |
| | | ### Codex CLI |
| | | |
| | | **Activation**: Hooks (same pattern as Claude Code) |
| | | **Setup**: `.codex/settings.json` with hook configuration |
| | | **Detection**: Automatic via hook scripts |
| | | |
| | | ### GitHub Copilot |
| | | |
| | | **Activation**: Manual (no hook support) |
| | | **Setup**: Add to `.github/copilot-instructions.md`: |
| | | |
| | | ```markdown |
| | | ## Self-Improvement |
| | | |
| | | After solving non-obvious issues, consider logging to `.learnings/`: |
| | | 1. Use format from self-improvement skill |
| | | 2. Link related entries with See Also |
| | | 3. Promote high-value learnings to skills |
| | | |
| | | Ask in chat: "Should I log this as a learning?" |
| | | ``` |
| | | |
| | | **Detection**: Manual review at session end |
| | | |
| | | ### OpenClaw |
| | | |
| | | **Activation**: Workspace injection + inter-agent messaging |
| | | **Setup**: See "OpenClaw Setup" section above |
| | | **Detection**: Via session tools and workspace files |
| | | |
| | | ### Agent-Agnostic Guidance |
| | | |
| | | Regardless of agent, apply self-improvement when you: |
| | | |
| | | 1. **Discover something non-obvious** - solution wasn't immediate |
| | | 2. **Correct yourself** - initial approach was wrong |
| | | 3. **Learn project conventions** - discovered undocumented patterns |
| | | 4. **Hit unexpected errors** - especially if diagnosis was difficult |
| | | 5. **Find better approaches** - improved on your original solution |
| | | |
| | | ### Copilot Chat Integration |
| | | |
| | | For Copilot users, add this to your prompts when relevant: |
| | | |
| | | > After completing this task, evaluate if any learnings should be logged to `.learnings/` using the self-improvement skill format. |
| | | |
| | | Or use quick prompts: |
| | | - "Log this to learnings" |
| | | - "Create a skill from this solution" |
| | | - "Check .learnings/ for related issues" |
| New file |
| | |
| | | { |
| | | "ownerId": "kn70cjr952qdec1nx70zs6wefn7ynq2t", |
| | | "slug": "self-improving-agent", |
| | | "version": "3.0.1", |
| | | "publishedAt": 1773230308177 |
| | | } |
| New file |
| | |
| | | # Learnings |
| | | |
| | | Corrections, insights, and knowledge gaps captured during development. |
| | | |
| | | **Categories**: correction | insight | knowledge_gap | best_practice |
| | | **Areas**: frontend | backend | infra | tests | docs | config |
| | | **Statuses**: pending | in_progress | resolved | wont_fix | promoted | promoted_to_skill |
| | | |
| | | ## Status Definitions |
| | | |
| | | | Status | Meaning | |
| | | |--------|---------| |
| | | | `pending` | Not yet addressed | |
| | | | `in_progress` | Actively being worked on | |
| | | | `resolved` | Issue fixed or knowledge integrated | |
| | | | `wont_fix` | Decided not to address (reason in Resolution) | |
| | | | `promoted` | Elevated to CLAUDE.md, AGENTS.md, or copilot-instructions.md | |
| | | | `promoted_to_skill` | Extracted as a reusable skill | |
| | | |
| | | ## Skill Extraction Fields |
| | | |
| | | When a learning is promoted to a skill, add these fields: |
| | | |
| | | ```markdown |
| | | **Status**: promoted_to_skill |
| | | **Skill-Path**: skills/skill-name |
| | | ``` |
| | | |
| | | Example: |
| | | ```markdown |
| | | ## [LRN-20250115-001] best_practice |
| | | |
| | | **Logged**: 2025-01-15T10:00:00Z |
| | | **Priority**: high |
| | | **Status**: promoted_to_skill |
| | | **Skill-Path**: skills/docker-m1-fixes |
| | | **Area**: infra |
| | | |
| | | ### Summary |
| | | Docker build fails on Apple Silicon due to platform mismatch |
| | | ... |
| | | ``` |
| | | |
| | | --- |
| | | |
| New file |
| | |
| | | # Skill Template |
| | | |
| | | Template for creating skills extracted from learnings. Copy and customize. |
| | | |
| | | --- |
| | | |
| | | ## SKILL.md Template |
| | | |
| | | ```markdown |
| | | --- |
| | | name: skill-name-here |
| | | description: "Concise description of when and why to use this skill. Include trigger conditions." |
| | | --- |
| | | |
| | | # Skill Name |
| | | |
| | | Brief introduction explaining the problem this skill solves and its origin. |
| | | |
| | | ## Quick Reference |
| | | |
| | | | Situation | Action | |
| | | |-----------|--------| |
| | | | [Trigger 1] | [Action 1] | |
| | | | [Trigger 2] | [Action 2] | |
| | | |
| | | ## Background |
| | | |
| | | Why this knowledge matters. What problems it prevents. Context from the original learning. |
| | | |
| | | ## Solution |
| | | |
| | | ### Step-by-Step |
| | | |
| | | 1. First step with code or command |
| | | 2. Second step |
| | | 3. Verification step |
| | | |
| | | ### Code Example |
| | | |
| | | \`\`\`language |
| | | // Example code demonstrating the solution |
| | | \`\`\` |
| | | |
| | | ## Common Variations |
| | | |
| | | - **Variation A**: Description and how to handle |
| | | - **Variation B**: Description and how to handle |
| | | |
| | | ## Gotchas |
| | | |
| | | - Warning or common mistake #1 |
| | | - Warning or common mistake #2 |
| | | |
| | | ## Related |
| | | |
| | | - Link to related documentation |
| | | - Link to related skill |
| | | |
| | | ## Source |
| | | |
| | | Extracted from learning entry. |
| | | - **Learning ID**: LRN-YYYYMMDD-XXX |
| | | - **Original Category**: correction | insight | knowledge_gap | best_practice |
| | | - **Extraction Date**: YYYY-MM-DD |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## Minimal Template |
| | | |
| | | For simple skills that don't need all sections: |
| | | |
| | | ```markdown |
| | | --- |
| | | name: skill-name-here |
| | | description: "What this skill does and when to use it." |
| | | --- |
| | | |
| | | # Skill Name |
| | | |
| | | [Problem statement in one sentence] |
| | | |
| | | ## Solution |
| | | |
| | | [Direct solution with code/commands] |
| | | |
| | | ## Source |
| | | |
| | | - Learning ID: LRN-YYYYMMDD-XXX |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## Template with Scripts |
| | | |
| | | For skills that include executable helpers: |
| | | |
| | | ```markdown |
| | | --- |
| | | name: skill-name-here |
| | | description: "What this skill does and when to use it." |
| | | --- |
| | | |
| | | # Skill Name |
| | | |
| | | [Introduction] |
| | | |
| | | ## Quick Reference |
| | | |
| | | | Command | Purpose | |
| | | |---------|---------| |
| | | | `./scripts/helper.sh` | [What it does] | |
| | | | `./scripts/validate.sh` | [What it does] | |
| | | |
| | | ## Usage |
| | | |
| | | ### Automated (Recommended) |
| | | |
| | | \`\`\`bash |
| | | ./skills/skill-name/scripts/helper.sh [args] |
| | | \`\`\` |
| | | |
| | | ### Manual Steps |
| | | |
| | | 1. Step one |
| | | 2. Step two |
| | | |
| | | ## Scripts |
| | | |
| | | | Script | Description | |
| | | |--------|-------------| |
| | | | `scripts/helper.sh` | Main utility | |
| | | | `scripts/validate.sh` | Validation checker | |
| | | |
| | | ## Source |
| | | |
| | | - Learning ID: LRN-YYYYMMDD-XXX |
| | | ``` |
| | | |
| | | --- |
| | | |
| | | ## Naming Conventions |
| | | |
| | | - **Skill name**: lowercase, hyphens for spaces |
| | | - Good: `docker-m1-fixes`, `api-timeout-patterns` |
| | | - Bad: `Docker_M1_Fixes`, `APITimeoutPatterns` |
| | | |
| | | - **Description**: Start with action verb, mention trigger |
| | | - Good: "Handles Docker build failures on Apple Silicon. Use when builds fail with platform mismatch." |
| | | - Bad: "Docker stuff" |
| | | |
| | | - **Files**: |
| | | - `SKILL.md` - Required, main documentation |
| | | - `scripts/` - Optional, executable code |
| | | - `references/` - Optional, detailed docs |
| | | - `assets/` - Optional, templates |
| | | |
| | | --- |
| | | |
| | | ## Extraction Checklist |
| | | |
| | | Before creating a skill from a learning: |
| | | |
| | | - [ ] Learning is verified (status: resolved) |
| | | - [ ] Solution is broadly applicable (not one-off) |
| | | - [ ] Content is complete (has all needed context) |
| | | - [ ] Name follows conventions |
| | | - [ ] Description is concise but informative |
| | | - [ ] Quick Reference table is actionable |
| | | - [ ] Code examples are tested |
| | | - [ ] Source learning ID is recorded |
| | | |
| | | After creating: |
| | | |
| | | - [ ] Update original learning with `promoted_to_skill` status |
| | | - [ ] Add `Skill-Path: skills/skill-name` to learning metadata |
| | | - [ ] Test skill by reading it in a fresh session |
| New file |
| | |
| | | --- |
| | | name: self-improvement |
| | | description: "Injects self-improvement reminder during agent bootstrap" |
| | | metadata: {"openclaw":{"emoji":"🧠","events":["agent:bootstrap"]}} |
| | | --- |
| | | |
| | | # Self-Improvement Hook |
| | | |
| | | Injects a reminder to evaluate learnings during agent bootstrap. |
| | | |
| | | ## What It Does |
| | | |
| | | - Fires on `agent:bootstrap` (before workspace files are injected) |
| | | - Adds a reminder block to check `.learnings/` for relevant entries |
| | | - Prompts the agent to log corrections, errors, and discoveries |
| | | |
| | | ## Configuration |
| | | |
| | | No configuration needed. Enable with: |
| | | |
| | | ```bash |
| | | openclaw hooks enable self-improvement |
| | | ``` |
| New file |
| | |
| | | /** |
| | | * Self-Improvement Hook for OpenClaw |
| | | * |
| | | * Injects a reminder to evaluate learnings during agent bootstrap. |
| | | * Fires on agent:bootstrap event before workspace files are injected. |
| | | */ |
| | | |
| | | const REMINDER_CONTENT = ` |
| | | ## Self-Improvement Reminder |
| | | |
| | | After completing tasks, evaluate if any learnings should be captured: |
| | | |
| | | **Log when:** |
| | | - User corrects you → \`.learnings/LEARNINGS.md\` |
| | | - Command/operation fails → \`.learnings/ERRORS.md\` |
| | | - User wants missing capability → \`.learnings/FEATURE_REQUESTS.md\` |
| | | - You discover your knowledge was wrong → \`.learnings/LEARNINGS.md\` |
| | | - You find a better approach → \`.learnings/LEARNINGS.md\` |
| | | |
| | | **Promote when pattern is proven:** |
| | | - Behavioral patterns → \`SOUL.md\` |
| | | - Workflow improvements → \`AGENTS.md\` |
| | | - Tool gotchas → \`TOOLS.md\` |
| | | |
| | | Keep entries simple: date, title, what happened, what to do differently. |
| | | `.trim(); |
| | | |
| | | const handler = async (event) => { |
| | | // Safety checks for event structure |
| | | if (!event || typeof event !== 'object') { |
| | | return; |
| | | } |
| | | |
| | | // Only handle agent:bootstrap events |
| | | if (event.type !== 'agent' || event.action !== 'bootstrap') { |
| | | return; |
| | | } |
| | | |
| | | // Safety check for context |
| | | if (!event.context || typeof event.context !== 'object') { |
| | | return; |
| | | } |
| | | |
| | | // Inject the reminder as a virtual bootstrap file |
| | | // Check that bootstrapFiles is an array before pushing |
| | | if (Array.isArray(event.context.bootstrapFiles)) { |
| | | event.context.bootstrapFiles.push({ |
| | | path: 'SELF_IMPROVEMENT_REMINDER.md', |
| | | content: REMINDER_CONTENT, |
| | | virtual: true, |
| | | }); |
| | | } |
| | | }; |
| | | |
| | | module.exports = handler; |
| | | module.exports.default = handler; |
| New file |
| | |
| | | /** |
| | | * Self-Improvement Hook for OpenClaw |
| | | * |
| | | * Injects a reminder to evaluate learnings during agent bootstrap. |
| | | * Fires on agent:bootstrap event before workspace files are injected. |
| | | */ |
| | | |
| | | import type { HookHandler } from 'openclaw/hooks'; |
| | | |
| | | const REMINDER_CONTENT = `## Self-Improvement Reminder |
| | | |
| | | After completing tasks, evaluate if any learnings should be captured: |
| | | |
| | | **Log when:** |
| | | - User corrects you → \`.learnings/LEARNINGS.md\` |
| | | - Command/operation fails → \`.learnings/ERRORS.md\` |
| | | - User wants missing capability → \`.learnings/FEATURE_REQUESTS.md\` |
| | | - You discover your knowledge was wrong → \`.learnings/LEARNINGS.md\` |
| | | - You find a better approach → \`.learnings/LEARNINGS.md\` |
| | | |
| | | **Promote when pattern is proven:** |
| | | - Behavioral patterns → \`SOUL.md\` |
| | | - Workflow improvements → \`AGENTS.md\` |
| | | - Tool gotchas → \`TOOLS.md\` |
| | | |
| | | Keep entries simple: date, title, what happened, what to do differently.`; |
| | | |
| | | const handler: HookHandler = async (event) => { |
| | | // Safety checks for event structure |
| | | if (!event || typeof event !== 'object') { |
| | | return; |
| | | } |
| | | |
| | | // Only handle agent:bootstrap events |
| | | if (event.type !== 'agent' || event.action !== 'bootstrap') { |
| | | return; |
| | | } |
| | | |
| | | // Safety check for context |
| | | if (!event.context || typeof event.context !== 'object') { |
| | | return; |
| | | } |
| | | |
| | | // Skip sub-agent sessions to avoid bootstrap issues |
| | | // Sub-agents have sessionKey patterns like "agent:main:subagent:..." |
| | | const sessionKey = event.sessionKey || ''; |
| | | if (sessionKey.includes(':subagent:')) { |
| | | return; |
| | | } |
| | | |
| | | // Inject the reminder as a virtual bootstrap file |
| | | // Check that bootstrapFiles is an array before pushing |
| | | if (Array.isArray(event.context.bootstrapFiles)) { |
| | | event.context.bootstrapFiles.push({ |
| | | path: 'SELF_IMPROVEMENT_REMINDER.md', |
| | | content: REMINDER_CONTENT, |
| | | virtual: true, |
| | | }); |
| | | } |
| | | }; |
| | | |
| | | export default handler; |
| New file |
| | |
| | | # Entry Examples |
| | | |
| | | Concrete examples of well-formatted entries with all fields. |
| | | |
| | | ## Learning: Correction |
| | | |
| | | ```markdown |
| | | ## [LRN-20250115-001] correction |
| | | |
| | | **Logged**: 2025-01-15T10:30:00Z |
| | | **Priority**: high |
| | | **Status**: pending |
| | | **Area**: tests |
| | | |
| | | ### Summary |
| | | Incorrectly assumed pytest fixtures are scoped to function by default |
| | | |
| | | ### Details |
| | | When writing test fixtures, I assumed all fixtures were function-scoped. |
| | | User corrected that while function scope is the default, the codebase |
| | | convention uses module-scoped fixtures for database connections to |
| | | improve test performance. |
| | | |
| | | ### Suggested Action |
| | | When creating fixtures that involve expensive setup (DB, network), |
| | | check existing fixtures for scope patterns before defaulting to function scope. |
| | | |
| | | ### Metadata |
| | | - Source: user_feedback |
| | | - Related Files: tests/conftest.py |
| | | - Tags: pytest, testing, fixtures |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Learning: Knowledge Gap (Resolved) |
| | | |
| | | ```markdown |
| | | ## [LRN-20250115-002] knowledge_gap |
| | | |
| | | **Logged**: 2025-01-15T14:22:00Z |
| | | **Priority**: medium |
| | | **Status**: resolved |
| | | **Area**: config |
| | | |
| | | ### Summary |
| | | Project uses pnpm not npm for package management |
| | | |
| | | ### Details |
| | | Attempted to run `npm install` but project uses pnpm workspaces. |
| | | Lock file is `pnpm-lock.yaml`, not `package-lock.json`. |
| | | |
| | | ### Suggested Action |
| | | Check for `pnpm-lock.yaml` or `pnpm-workspace.yaml` before assuming npm. |
| | | Use `pnpm install` for this project. |
| | | |
| | | ### Metadata |
| | | - Source: error |
| | | - Related Files: pnpm-lock.yaml, pnpm-workspace.yaml |
| | | - Tags: package-manager, pnpm, setup |
| | | |
| | | ### Resolution |
| | | - **Resolved**: 2025-01-15T14:30:00Z |
| | | - **Commit/PR**: N/A - knowledge update |
| | | - **Notes**: Added to CLAUDE.md for future reference |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Learning: Promoted to CLAUDE.md |
| | | |
| | | ```markdown |
| | | ## [LRN-20250115-003] best_practice |
| | | |
| | | **Logged**: 2025-01-15T16:00:00Z |
| | | **Priority**: high |
| | | **Status**: promoted |
| | | **Promoted**: CLAUDE.md |
| | | **Area**: backend |
| | | |
| | | ### Summary |
| | | API responses must include correlation ID from request headers |
| | | |
| | | ### Details |
| | | All API responses should echo back the X-Correlation-ID header from |
| | | the request. This is required for distributed tracing. Responses |
| | | without this header break the observability pipeline. |
| | | |
| | | ### Suggested Action |
| | | Always include correlation ID passthrough in API handlers. |
| | | |
| | | ### Metadata |
| | | - Source: user_feedback |
| | | - Related Files: src/middleware/correlation.ts |
| | | - Tags: api, observability, tracing |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Learning: Promoted to AGENTS.md |
| | | |
| | | ```markdown |
| | | ## [LRN-20250116-001] best_practice |
| | | |
| | | **Logged**: 2025-01-16T09:00:00Z |
| | | **Priority**: high |
| | | **Status**: promoted |
| | | **Promoted**: AGENTS.md |
| | | **Area**: backend |
| | | |
| | | ### Summary |
| | | Must regenerate API client after OpenAPI spec changes |
| | | |
| | | ### Details |
| | | When modifying API endpoints, the TypeScript client must be regenerated. |
| | | Forgetting this causes type mismatches that only appear at runtime. |
| | | The generate script also runs validation. |
| | | |
| | | ### Suggested Action |
| | | Add to agent workflow: after any API changes, run `pnpm run generate:api`. |
| | | |
| | | ### Metadata |
| | | - Source: error |
| | | - Related Files: openapi.yaml, src/client/api.ts |
| | | - Tags: api, codegen, typescript |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Error Entry |
| | | |
| | | ```markdown |
| | | ## [ERR-20250115-A3F] docker_build |
| | | |
| | | **Logged**: 2025-01-15T09:15:00Z |
| | | **Priority**: high |
| | | **Status**: pending |
| | | **Area**: infra |
| | | |
| | | ### Summary |
| | | Docker build fails on M1 Mac due to platform mismatch |
| | | |
| | | ### Error
| | | \`\`\`
| | | error: failed to solve: python:3.11-slim: no match for platform linux/arm64
| | | \`\`\`
| | | |
| | | ### Context |
| | | - Command: `docker build -t myapp .` |
| | | - Dockerfile uses `FROM python:3.11-slim` |
| | | - Running on Apple Silicon (M1/M2) |
| | | |
| | | ### Suggested Fix |
| | | Add platform flag: `docker build --platform linux/amd64 -t myapp .` |
| | | Or update Dockerfile: `FROM --platform=linux/amd64 python:3.11-slim` |
| | | |
| | | ### Metadata |
| | | - Reproducible: yes |
| | | - Related Files: Dockerfile |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Error Entry: Recurring Issue |
| | | |
| | | ```markdown |
| | | ## [ERR-20250120-B2C] api_timeout |
| | | |
| | | **Logged**: 2025-01-20T11:30:00Z |
| | | **Priority**: critical |
| | | **Status**: pending |
| | | **Area**: backend |
| | | |
| | | ### Summary |
| | | Third-party payment API timeout during checkout |
| | | |
| | | ### Error
| | | \`\`\`
| | | TimeoutError: Request to payments.example.com timed out after 30000ms
| | | \`\`\`
| | | |
| | | ### Context |
| | | - Command: POST /api/checkout |
| | | - Timeout set to 30s |
| | | - Occurs during peak hours (lunch, evening) |
| | | |
| | | ### Suggested Fix |
| | | Implement retry with exponential backoff. Consider circuit breaker pattern. |
| | | |
| | | ### Metadata |
| | | - Reproducible: yes (during peak hours) |
| | | - Related Files: src/services/payment.ts |
| | | - See Also: ERR-20250115-X1Y, ERR-20250118-Z3W |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Feature Request |
| | | |
| | | ```markdown |
| | | ## [FEAT-20250115-001] export_to_csv |
| | | |
| | | **Logged**: 2025-01-15T16:45:00Z |
| | | **Priority**: medium |
| | | **Status**: pending |
| | | **Area**: backend |
| | | |
| | | ### Requested Capability |
| | | Export analysis results to CSV format |
| | | |
| | | ### User Context |
| | | User runs weekly reports and needs to share results with non-technical |
| | | stakeholders in Excel. Currently copies output manually. |
| | | |
| | | ### Complexity Estimate |
| | | simple |
| | | |
| | | ### Suggested Implementation |
| | | Add `--output csv` flag to the analyze command. Use standard csv module. |
| | | Could extend existing `--output json` pattern. |
| | | |
| | | ### Metadata |
| | | - Frequency: recurring |
| | | - Related Features: analyze command, json output |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Feature Request: Resolved |
| | | |
| | | ```markdown |
| | | ## [FEAT-20250110-002] dark_mode |
| | | |
| | | **Logged**: 2025-01-10T14:00:00Z |
| | | **Priority**: low |
| | | **Status**: resolved |
| | | **Area**: frontend |
| | | |
| | | ### Requested Capability |
| | | Dark mode support for the dashboard |
| | | |
| | | ### User Context |
| | | User works late hours and finds the bright interface straining. |
| | | Several other users have mentioned this informally. |
| | | |
| | | ### Complexity Estimate |
| | | medium |
| | | |
| | | ### Suggested Implementation |
| | | Use CSS variables for colors. Add toggle in user settings. |
| | | Consider system preference detection. |
| | | |
| | | ### Metadata |
| | | - Frequency: recurring |
| | | - Related Features: user settings, theme system |
| | | |
| | | ### Resolution |
| | | - **Resolved**: 2025-01-18T16:00:00Z |
| | | - **Commit/PR**: #142 |
| | | - **Notes**: Implemented with system preference detection and manual toggle |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Learning: Promoted to Skill |
| | | |
| | | ```markdown |
| | | ## [LRN-20250118-001] best_practice |
| | | |
| | | **Logged**: 2025-01-18T11:00:00Z |
| | | **Priority**: high |
| | | **Status**: promoted_to_skill |
| | | **Skill-Path**: skills/docker-m1-fixes |
| | | **Area**: infra |
| | | |
| | | ### Summary |
| | | Docker build fails on Apple Silicon due to platform mismatch |
| | | |
| | | ### Details |
| | | When building Docker images on M1/M2 Macs, the build fails because |
| | | the base image doesn't have an ARM64 variant. This is a common issue |
| | | that affects many developers. |
| | | |
| | | ### Suggested Action |
| | | Add `--platform linux/amd64` to docker build command, or use |
| | | `FROM --platform=linux/amd64` in Dockerfile. |
| | | |
| | | ### Metadata |
| | | - Source: error |
| | | - Related Files: Dockerfile |
| | | - Tags: docker, arm64, m1, apple-silicon |
| | | - See Also: ERR-20250115-A3F, ERR-20250117-B2D |
| | | |
| | | --- |
| | | ``` |
| | | |
| | | ## Extracted Skill Example |
| | | |
| | | When the above learning is extracted as a skill, it becomes: |
| | | |
| | | **File**: `skills/docker-m1-fixes/SKILL.md` |
| | | |
| | | ```markdown |
| | | --- |
| | | name: docker-m1-fixes |
| | | description: "Fixes Docker build failures on Apple Silicon (M1/M2). Use when docker build fails with platform mismatch errors." |
| | | --- |
| | | |
| | | # Docker M1 Fixes |
| | | |
| | | Solutions for Docker build issues on Apple Silicon Macs. |
| | | |
| | | ## Quick Reference |
| | | |
| | | | Error | Fix | |
| | | |-------|-----| |
| | | | `no match for platform linux/arm64` | Add `--platform linux/amd64` to build | |
| | | | Image runs but crashes | Use emulation or find ARM-compatible base | |
| | | |
| | | ## The Problem |
| | | |
| | | Many Docker base images don't have ARM64 variants. When building on |
| | | Apple Silicon (M1/M2/M3), Docker attempts to pull ARM64 images by |
| | | default, causing platform mismatch errors. |
| | | |
| | | ## Solutions |
| | | |
| | | ### Option 1: Build Flag (Recommended) |
| | | |
| | | Add platform flag to your build command: |
| | | |
| | | \`\`\`bash |
| | | docker build --platform linux/amd64 -t myapp . |
| | | \`\`\` |
| | | |
| | | ### Option 2: Dockerfile Modification |
| | | |
| | | Specify platform in the FROM instruction: |
| | | |
| | | \`\`\`dockerfile |
| | | FROM --platform=linux/amd64 python:3.11-slim |
| | | \`\`\` |
| | | |
| | | ### Option 3: Docker Compose |
| | | |
| | | Add platform to your service: |
| | | |
| | | \`\`\`yaml |
| | | services: |
| | | app: |
| | | platform: linux/amd64 |
| | | build: . |
| | | \`\`\` |
| | | |
| | | ## Trade-offs |
| | | |
| | | | Approach | Pros | Cons | |
| | | |----------|------|------| |
| | | | Build flag | No file changes | Must remember flag | |
| | | | Dockerfile | Explicit, versioned | Affects all builds | |
| | | | Compose | Convenient for dev | Requires compose | |
| | | |
| | | ## Performance Note |
| | | |
| | | Running AMD64 images on ARM64 uses Rosetta 2 emulation. This works |
| | | for development but may be slower. For production, find ARM-native |
| | | alternatives when possible. |
| | | |
| | | ## Source |
| | | |
| | | - Learning ID: LRN-20250118-001 |
| | | - Category: best_practice |
| | | - Extraction Date: 2025-01-18 |
| | | ``` |
| New file |
| | |
| | | # Hook Setup Guide |
| | | |
| | | Configure automatic self-improvement triggers for AI coding agents. |
| | | |
| | | ## Overview |
| | | |
| | | Hooks enable proactive learning capture by injecting reminders at key moments: |
| | | - **UserPromptSubmit**: Reminder after each prompt to evaluate learnings |
| | | - **PostToolUse (Bash)**: Error detection when commands fail |
| | | |
| | | ## Claude Code Setup |
| | | |
| | | ### Option 1: Project-Level Configuration |
| | | |
| | | Create `.claude/settings.json` in your project root: |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [ |
| | | { |
| | | "matcher": "", |
| | | "hooks": [ |
| | | { |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/activator.sh" |
| | | } |
| | | ] |
| | | } |
| | | ], |
| | | "PostToolUse": [ |
| | | { |
| | | "matcher": "Bash", |
| | | "hooks": [ |
| | | { |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/error-detector.sh" |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | ### Option 2: User-Level Configuration |
| | | |
| | | Add to `~/.claude/settings.json` for global activation: |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [ |
| | | { |
| | | "matcher": "", |
| | | "hooks": [ |
| | | { |
| | | "type": "command", |
| | | "command": "~/.claude/skills/self-improvement/scripts/activator.sh" |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | ### Minimal Setup (Activator Only) |
| | | |
| | | For lower overhead, use only the UserPromptSubmit hook: |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [ |
| | | { |
| | | "matcher": "", |
| | | "hooks": [ |
| | | { |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/activator.sh" |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | ## Codex CLI Setup |
| | | |
| | | Codex uses the same hook system as Claude Code. Create `.codex/settings.json`: |
| | | |
| | | ```json |
| | | { |
| | | "hooks": { |
| | | "UserPromptSubmit": [ |
| | | { |
| | | "matcher": "", |
| | | "hooks": [ |
| | | { |
| | | "type": "command", |
| | | "command": "./skills/self-improvement/scripts/activator.sh" |
| | | } |
| | | ] |
| | | } |
| | | ] |
| | | } |
| | | } |
| | | ``` |
| | | |
| | | ## GitHub Copilot Setup |
| | | |
| | | Copilot doesn't support hooks directly. Instead, add guidance to `.github/copilot-instructions.md`: |
| | | |
| | | ```markdown |
| | | ## Self-Improvement |
| | | |
| | | After completing tasks that involved: |
| | | - Debugging non-obvious issues |
| | | - Discovering workarounds |
| | | - Learning project-specific patterns |
| | | - Resolving unexpected errors |
| | | |
| | | Consider logging the learning to `.learnings/` using the format from the self-improvement skill. |
| | | |
| | | For high-value learnings that would benefit other sessions, consider skill extraction. |
| | | ``` |
| | | |
| | | ## Verification |
| | | |
| | | ### Test Activator Hook |
| | | |
| | | 1. Enable the hook configuration |
| | | 2. Start a new Claude Code session |
| | | 3. Send any prompt |
| | | 4. Verify you see `<self-improvement-reminder>` in the context |
| | | |
| | | ### Test Error Detector Hook |
| | | |
| | | 1. Enable PostToolUse hook for Bash |
| | | 2. Run a command that fails: `ls /nonexistent/path` |
| | | 3. Verify you see `<error-detected>` reminder |
| | | |
| | | ### Dry Run Extract Script |
| | | |
| | | ```bash |
| | | ./skills/self-improvement/scripts/extract-skill.sh test-skill --dry-run |
| | | ``` |
| | | |
| | | Expected output shows the skill scaffold that would be created. |
| | | |
| | | ## Troubleshooting |
| | | |
| | | ### Hook Not Triggering |
| | | |
| | | 1. **Check script permissions**: `chmod +x scripts/*.sh` |
| | | 2. **Verify path**: Use absolute paths or paths relative to project root |
| | | 3. **Check settings location**: Project vs user-level settings |
| | | 4. **Restart session**: Hooks are loaded at session start |
| | | |
| | | ### Permission Denied |
| | | |
| | | ```bash |
| | | chmod +x ./skills/self-improvement/scripts/activator.sh |
| | | chmod +x ./skills/self-improvement/scripts/error-detector.sh |
| | | chmod +x ./skills/self-improvement/scripts/extract-skill.sh |
| | | ``` |
| | | |
| | | ### Script Not Found |
| | | |
| | | If using relative paths, ensure you're in the correct directory or use absolute paths: |
| | | |
| | | ```json |
| | | { |
| | | "command": "/absolute/path/to/skills/self-improvement/scripts/activator.sh" |
| | | } |
| | | ``` |
| | | |
| | | ### Too Much Overhead |
| | | |
| | | If the activator feels intrusive: |
| | | |
| | | 1. **Use minimal setup**: Only UserPromptSubmit, skip PostToolUse |
| | | 2. **Add matcher filter**: Only trigger for certain prompts: |
| | | |
| | | ```json |
| | | { |
| | | "matcher": "fix|debug|error|issue", |
| | | "hooks": [...] |
| | | } |
| | | ``` |
| | | |
| | | ## Hook Output Budget |
| | | |
| | | The activator is designed to be lightweight: |
| | | - **Target**: ~50-100 tokens per activation |
| | | - **Content**: Structured reminder, not verbose instructions |
| | | - **Format**: XML tags for easy parsing |
| | | |
| | | If you need to reduce overhead further, you can edit `activator.sh` to output less text. |
| | | |
| | | ## Security Considerations |
| | | |
| | | - Hook scripts run with the same permissions as Claude Code |
| | | - Scripts only output text; they don't modify files or run commands |
| | | - Error detector reads `CLAUDE_TOOL_OUTPUT` environment variable |
| | | - All scripts are opt-in (you must configure them explicitly) |
| | | |
| | | ## Disabling Hooks |
| | | |
| | | To temporarily disable without removing configuration: |
| | | |
| | | 1. **Rename the hook key** (JSON does not allow `//` comments, so "commenting out" would make the settings file unparseable):
| | | ```json
| | | {
| | | "hooks": {
| | | "UserPromptSubmit_disabled": [...]
| | | }
| | | }
| | | ```
| | | |
| | | 2. **Or delete the settings file**: Hooks won't run without configuration |
| New file |
| | |
| | | # OpenClaw Integration |
| | | |
| | | Complete setup and usage guide for integrating the self-improvement skill with OpenClaw. |
| | | |
| | | ## Overview |
| | | |
| | | OpenClaw uses workspace-based prompt injection combined with event-driven hooks. Context is injected from workspace files at session start, and hooks can trigger on lifecycle events. |
| | | |
| | | ## Workspace Structure |
| | | |
| | | ``` |
| | | ~/.openclaw/ |
| | | ├── workspace/ # Working directory |
| | | │ ├── AGENTS.md # Multi-agent coordination patterns |
| | | │ ├── SOUL.md # Behavioral guidelines and personality |
| | | │ ├── TOOLS.md # Tool capabilities and gotchas |
| | | │ ├── MEMORY.md # Long-term memory (main session only) |
| | | │ └── memory/ # Daily memory files |
| | | │ └── YYYY-MM-DD.md |
| | | ├── skills/ # Installed skills |
| | | │ └── <skill-name>/ |
| | | │ └── SKILL.md |
| | | └── hooks/ # Custom hooks |
| | | └── <hook-name>/ |
| | | ├── HOOK.md |
| | | └── handler.ts |
| | | ``` |
| | | |
| | | ## Quick Setup |
| | | |
| | | ### 1. Install the Skill |
| | | |
| | | ```bash |
| | | clawdhub install self-improving-agent |
| | | ``` |
| | | |
| | | Or copy manually: |
| | | |
| | | ```bash |
| | | cp -r self-improving-agent ~/.openclaw/skills/ |
| | | ``` |
| | | |
| | | ### 2. Install the Hook (Optional) |
| | | |
| | | Copy the hook to OpenClaw's hooks directory: |
| | | |
| | | ```bash |
| | | cp -r hooks/openclaw ~/.openclaw/hooks/self-improvement |
| | | ``` |
| | | |
| | | Enable the hook: |
| | | |
| | | ```bash |
| | | openclaw hooks enable self-improvement |
| | | ``` |
| | | |
| | | ### 3. Create Learning Files |
| | | |
| | | Create the `.learnings/` directory in your workspace: |
| | | |
| | | ```bash |
| | | mkdir -p ~/.openclaw/workspace/.learnings |
| | | ``` |
| | | |
| | | Or in the skill directory: |
| | | |
| | | ```bash |
| | | mkdir -p ~/.openclaw/skills/self-improving-agent/.learnings |
| | | ``` |
| | | |
| | | ## Injected Prompt Files |
| | | |
| | | ### AGENTS.md |
| | | |
| | | Purpose: Multi-agent workflows and delegation patterns. |
| | | |
| | | ```markdown |
| | | # Agent Coordination |
| | | |
| | | ## Delegation Rules |
| | | - Use explore agent for open-ended codebase questions |
| | | - Spawn sub-agents for long-running tasks |
| | | - Use sessions_send for cross-session communication |
| | | |
| | | ## Session Handoff |
| | | When delegating to another session: |
| | | 1. Provide full context in the handoff message |
| | | 2. Include relevant file paths |
| | | 3. Specify expected output format |
| | | ``` |
| | | |
| | | ### SOUL.md |
| | | |
| | | Purpose: Behavioral guidelines and communication style. |
| | | |
| | | ```markdown |
| | | # Behavioral Guidelines |
| | | |
| | | ## Communication Style |
| | | - Be direct and concise |
| | | - Avoid unnecessary caveats and disclaimers |
| | | - Use technical language appropriate to context |
| | | |
| | | ## Error Handling |
| | | - Admit mistakes promptly |
| | | - Provide corrected information immediately |
| | | - Log significant errors to learnings |
| | | ``` |
| | | |
| | | ### TOOLS.md |
| | | |
| | | Purpose: Tool capabilities, integration gotchas, local configuration. |
| | | |
| | | ```markdown |
| | | # Tool Knowledge |
| | | |
| | | ## Self-Improvement Skill |
| | | Log learnings to `.learnings/` for continuous improvement. |
| | | |
| | | ## Local Tools |
| | | - Document tool-specific gotchas here |
| | | - Note authentication requirements |
| | | - Track integration quirks |
| | | ``` |
| | | |
| | | ## Learning Workflow |
| | | |
| | | ### Capturing Learnings |
| | | |
| | | 1. **In-session**: Log to `.learnings/` as usual |
| | | 2. **Cross-session**: Promote to workspace files |
| | | |
| | | ### Promotion Decision Tree |
| | | |
| | | ``` |
| | | Is the learning project-specific? |
| | | ├── Yes → Keep in .learnings/ |
| | | └── No → Is it behavioral/style-related? |
| | | ├── Yes → Promote to SOUL.md |
| | | └── No → Is it tool-related? |
| | | ├── Yes → Promote to TOOLS.md |
| | | └── No → Promote to AGENTS.md (workflow) |
| | | ``` |
| | | |
| | | ### Promotion Format Examples |
| | | |
| | | **From learning:** |
| | | > Git push to GitHub fails without auth configured - triggers desktop prompt |
| | | |
| | | **To TOOLS.md:** |
| | | ```markdown |
| | | ## Git |
| | | - Don't push without confirming auth is configured |
| | | - Use `gh auth status` to check GitHub CLI auth |
| | | ``` |
| | | |
| | | ## Inter-Agent Communication |
| | | |
| | | OpenClaw provides tools for cross-session communication: |
| | | |
| | | ### sessions_list |
| | | |
| | | View active and recent sessions: |
| | | ``` |
| | | sessions_list(activeMinutes=30, messageLimit=3) |
| | | ``` |
| | | |
| | | ### sessions_history |
| | | |
| | | Read transcript from another session: |
| | | ``` |
| | | sessions_history(sessionKey="session-id", limit=50) |
| | | ``` |
| | | |
| | | ### sessions_send |
| | | |
| | | Send message to another session: |
| | | ``` |
| | | sessions_send(sessionKey="session-id", message="Learning: API requires X-Custom-Header") |
| | | ``` |
| | | |
| | | ### sessions_spawn |
| | | |
| | | Spawn a background sub-agent: |
| | | ``` |
| | | sessions_spawn(task="Research X and report back", label="research") |
| | | ``` |
| | | |
| | | ## Available Hook Events |
| | | |
| | | | Event | When It Fires | |
| | | |-------|---------------| |
| | | | `agent:bootstrap` | Before workspace files are injected |
| | | | `command:new` | When `/new` command issued | |
| | | | `command:reset` | When `/reset` command issued | |
| | | | `command:stop` | When `/stop` command issued | |
| | | | `gateway:startup` | When gateway starts | |
| | | |
| | | ## Detection Triggers |
| | | |
| | | ### Standard Triggers |
| | | - User corrections ("No, that's wrong...") |
| | | - Command failures (non-zero exit codes) |
| | | - API errors |
| | | - Knowledge gaps |
| | | |
| | | ### OpenClaw-Specific Triggers |
| | | |
| | | | Trigger | Action | |
| | | |---------|--------| |
| | | | Tool call error | Log to TOOLS.md with tool name | |
| | | | Session handoff confusion | Log to AGENTS.md with delegation pattern | |
| | | | Model behavior surprise | Log to SOUL.md with expected vs actual | |
| | | | Skill issue | Log to .learnings/ or report upstream | |
| | | |
| | | ## Verification |
| | | |
| | | Check hook is registered: |
| | | |
| | | ```bash |
| | | openclaw hooks list |
| | | ``` |
| | | |
| | | Check skill is loaded: |
| | | |
| | | ```bash |
| | | openclaw status |
| | | ``` |
| | | |
| | | ## Troubleshooting |
| | | |
| | | ### Hook not firing |
| | | |
| | | 1. Ensure hooks enabled in config |
| | | 2. Restart gateway after config changes |
| | | 3. Check gateway logs for errors |
| | | |
| | | ### Learnings not persisting |
| | | |
| | | 1. Verify `.learnings/` directory exists |
| | | 2. Check file permissions |
| | | 3. Ensure workspace path is configured correctly |
| | | |
| | | ### Skill not loading |
| | | |
| | | 1. Check skill is in skills directory |
| | | 2. Verify SKILL.md has correct frontmatter |
| | | 3. Run `openclaw status` to see loaded skills |
| New file |
| | |
#!/bin/bash
# Self-Improvement Activator Hook
# Triggers on UserPromptSubmit to remind Claude about learning capture
# Keep output minimal (~50-100 tokens) to minimize overhead

set -e

# Emit the reminder on stdout; the hook runner injects stdout into the
# model's context. The quoted 'EOF' delimiter disables all expansion,
# so the text below is reproduced verbatim.
cat << 'EOF'
<self-improvement-reminder>
After completing this task, evaluate if extractable knowledge emerged:
- Non-obvious solution discovered through investigation?
- Workaround for unexpected behavior?
- Project-specific pattern learned?
- Error required debugging to resolve?

If yes: Log to .learnings/ using the self-improvement skill format.
If high-value (recurring, broadly applicable): Consider skill extraction.
</self-improvement-reminder>
EOF
| New file |
| | |
#!/bin/bash
# Self-Improvement Error Detector Hook
# Triggers on PostToolUse for Bash to detect command failures
# Reads CLAUDE_TOOL_OUTPUT environment variable

set -e

# Tool output to scan; empty string when the variable is unset.
OUTPUT="${CLAUDE_TOOL_OUTPUT:-}"

# Lowercase the output once so matching is genuinely case-insensitive.
# (The previous version compared literally, which required duplicate
# "error:"/"Error:"/"ERROR:" entries and still missed variants such as
# "Failed" or "Fatal:".)
OUTPUT_LC="$(printf '%s' "$OUTPUT" | tr '[:upper:]' '[:lower:]')"

# Lowercase substrings that indicate a command error.
ERROR_PATTERNS=(
  "error:"
  "failed"
  "command not found"
  "no such file"
  "permission denied"
  "fatal:"
  "exception"
  "traceback"
  "npm err!"
  "modulenotfounderror"
  "syntaxerror"
  "typeerror"
  "exit code"
  "non-zero"
)

# Check if the (lowercased) output contains any error pattern.
contains_error=false
for pattern in "${ERROR_PATTERNS[@]}"; do
  if [[ "$OUTPUT_LC" == *"$pattern"* ]]; then
    contains_error=true
    break
  fi
done

# Only output the reminder if an error was detected; otherwise stay
# silent so no tokens are spent on successful commands.
if [ "$contains_error" = true ]; then
  cat << 'EOF'
<error-detected>
A command error was detected. Consider logging this to .learnings/ERRORS.md if:
- The error was unexpected or non-obvious
- It required investigation to resolve
- It might recur in similar contexts
- The solution could benefit future sessions

Use the self-improvement skill format: [ERR-YYYYMMDD-XXX]
</error-detected>
EOF
fi
| New file |
| | |
#!/bin/bash
# Skill Extraction Helper
# Creates a new skill from a learning entry
# Usage: ./extract-skill.sh <skill-name> [--dry-run]

set -e

# Configuration
# Default output root for new skills; overridable via --output-dir
# (the override is validated later to stay inside the current directory).
SKILLS_DIR="./skills"

# Colors for output (ANSI escapes, interpreted by echo -e in the loggers)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
| | | |
# Print CLI help. The heredoc delimiter is unquoted so $(basename "$0")
# expands, while \$SKILLS_DIR is escaped on purpose to display the
# literal variable name rather than its current value.
usage() {
    cat << EOF
Usage: $(basename "$0") <skill-name> [options]

Create a new skill from a learning entry.

Arguments:
  skill-name    Name of the skill (lowercase, hyphens for spaces)

Options:
  --dry-run     Show what would be created without creating files
  --output-dir  Relative output directory under current path (default: ./skills)
  -h, --help    Show this help message

Examples:
  $(basename "$0") docker-m1-fixes
  $(basename "$0") api-timeout-patterns --dry-run
  $(basename "$0") pnpm-setup --output-dir ./skills/custom

The skill will be created in: \$SKILLS_DIR/<skill-name>/
EOF
}
| | | |
# Leveled loggers. printf '%b\n' interprets the ANSI color escapes in
# its argument exactly as `echo -e` would, including a trailing newline.
log_info() {
  printf '%b\n' "${GREEN}[INFO]${NC} $1"
}

log_warn() {
  printf '%b\n' "${YELLOW}[WARN]${NC} $1"
}

log_error() {
  # Errors go to stderr so they survive stdout redirection.
  printf '%b\n' "${RED}[ERROR]${NC} $1" >&2
}
| | | |
# Parse arguments
SKILL_NAME=""
DRY_RUN=false

while [[ $# -gt 0 ]]; do
    case $1 in
        --dry-run)
            DRY_RUN=true
            shift
            ;;
        --output-dir)
            # Require a value, and reject a following option (e.g.
            # "--output-dir --dry-run") being consumed as the directory.
            if [ -z "${2:-}" ] || [[ "${2:-}" == -* ]]; then
                log_error "--output-dir requires a relative path argument"
                usage
                exit 1
            fi
            SKILLS_DIR="$2"
            shift 2
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        -*)
            log_error "Unknown option: $1"
            usage
            exit 1
            ;;
        *)
            # First bare word is the skill name; any second one is an error.
            if [ -z "$SKILL_NAME" ]; then
                SKILL_NAME="$1"
            else
                log_error "Unexpected argument: $1"
                usage
                exit 1
            fi
            shift
            ;;
    esac
done
| | | |
# Validate skill name
if [ -z "$SKILL_NAME" ]; then
    log_error "Skill name is required"
    usage
    exit 1
fi

# Validate skill name format (lowercase, hyphens, no spaces)
# Anchored regex: hyphen-separated groups of [a-z0-9]; this also forbids
# leading, trailing, and doubled hyphens.
if ! [[ "$SKILL_NAME" =~ ^[a-z0-9]+(-[a-z0-9]+)*$ ]]; then
    log_error "Invalid skill name format. Use lowercase letters, numbers, and hyphens only."
    log_error "Examples: 'docker-fixes', 'api-patterns', 'pnpm-setup'"
    exit 1
fi

# Validate output path to avoid writes outside current workspace.
if [[ "$SKILLS_DIR" = /* ]]; then
    log_error "Output directory must be a relative path under the current directory."
    exit 1
fi

# Reject any '..' segment (leading, embedded, or trailing) to prevent
# escaping the workspace via relative traversal.
if [[ "$SKILLS_DIR" =~ (^|/)\.\.(/|$) ]]; then
    log_error "Output directory cannot include '..' path segments."
    exit 1
fi

# Normalize to exactly one leading './' however the path was spelled.
SKILLS_DIR="${SKILLS_DIR#./}"
SKILLS_DIR="./$SKILLS_DIR"

SKILL_PATH="$SKILLS_DIR/$SKILL_NAME"

# Check if skill already exists
# (skipped for --dry-run so a preview still works for an existing name)
if [ -d "$SKILL_PATH" ] && [ "$DRY_RUN" = false ]; then
    log_error "Skill already exists: $SKILL_PATH"
    log_error "Use a different name or remove the existing skill first."
    exit 1
fi
| | | |
# Dry run output
# Prints the target paths and a preview of the SKILL.md template without
# touching the filesystem. The surrounding `echo "---"` lines are preview
# delimiters; the real file written below gets its own frontmatter fence.
if [ "$DRY_RUN" = true ]; then
    log_info "Dry run - would create:"
    echo " $SKILL_PATH/"
    echo " $SKILL_PATH/SKILL.md"
    echo ""
    echo "Template content would be:"
    echo "---"
    # Unquoted delimiter: $SKILL_NAME and the $(...) title-casing pipeline
    # expand when the heredoc is emitted. Must stay in sync with the
    # template written in the non-dry-run branch.
    cat << TEMPLATE
name: $SKILL_NAME
description: "[TODO: Add a concise description of what this skill does and when to use it]"
---

# $(echo "$SKILL_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++) $i=toupper(substr($i,1,1)) tolower(substr($i,2))}1')

[TODO: Brief introduction explaining the skill's purpose]

## Quick Reference

| Situation | Action |
|-----------|--------|
| [Trigger condition] | [What to do] |

## Usage

[TODO: Detailed usage instructions]

## Examples

[TODO: Add concrete examples]

## Source Learning

This skill was extracted from a learning entry.
- Learning ID: [TODO: Add original learning ID]
- Original File: .learnings/LEARNINGS.md
TEMPLATE
    echo "---"
    exit 0
fi
| | | |
# Create skill directory structure
log_info "Creating skill: $SKILL_NAME"

mkdir -p "$SKILL_PATH"

# Create SKILL.md from template
# Unquoted delimiter: $SKILL_NAME and the title-casing $(...) pipeline
# expand as the file is written. Must stay in sync with the --dry-run
# preview above.
cat > "$SKILL_PATH/SKILL.md" << TEMPLATE
---
name: $SKILL_NAME
description: "[TODO: Add a concise description of what this skill does and when to use it]"
---

# $(echo "$SKILL_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++) $i=toupper(substr($i,1,1)) tolower(substr($i,2))}1')

[TODO: Brief introduction explaining the skill's purpose]

## Quick Reference

| Situation | Action |
|-----------|--------|
| [Trigger condition] | [What to do] |

## Usage

[TODO: Detailed usage instructions]

## Examples

[TODO: Add concrete examples]

## Source Learning

This skill was extracted from a learning entry.
- Learning ID: [TODO: Add original learning ID]
- Original File: .learnings/LEARNINGS.md
TEMPLATE

log_info "Created: $SKILL_PATH/SKILL.md"

# Suggest next steps
echo ""
log_info "Skill scaffold created successfully!"
echo ""
echo "Next steps:"
echo " 1. Edit $SKILL_PATH/SKILL.md"
echo " 2. Fill in the TODO sections with content from your learning"
echo " 3. Add references/ folder if you have detailed documentation"
echo " 4. Add scripts/ folder if you have executable code"
echo " 5. Update the original learning entry with:"
echo " **Status**: promoted_to_skill"
echo " **Skill-Path**: skills/$SKILL_NAME"
| New file |
| | |
| | | --- |
| | | name: tavily-search |
| | | description: Tavily AI Search API integration for high-quality search results optimized for LLMs and RAG applications. Use when the user needs to search the web with Tavily's AI-powered search engine, which provides structured results with summaries, sources, and relevance scores. Particularly useful for research, fact-checking, and gathering up-to-date information from the web. |
| | | --- |
| | | |
| | | # Tavily Search |
| | | |
| | | Integration with [Tavily](https://tavily.com) AI Search API - a search engine built specifically for AI applications. |
| | | |
| | | ## Prerequisites |
| | | |
| | | 1. Get a Tavily API key from https://tavily.com |
| | | 2. Configure the key via **environment variable**: |
| | | ```bash |
| | | export TAVILY_API_KEY=your_key_here |
| | | ``` |
| | | Add to `~/.bashrc` or `~/.zshrc` for persistence: |
| | | ```bash |
| | | echo 'export TAVILY_API_KEY=your_key_here' >> ~/.bashrc |
| | | source ~/.bashrc |
| | | ``` |
| | | 3. Or pass `api_key` as a direct parameter when calling the function |
| | | |
| | | ## Usage |
| | | |
| | | ### Quick Search |
| | | |
| | | Use the search script for simple queries: |
| | | |
| | | ```bash |
| | | python ~/.openclaw/workspace/skills/tavily-search/scripts/tavily_search.py "your search query" |
| | | ``` |
| | | |
| | | ### Python API |
| | | |
| | | ```python |
| | | from scripts.tavily_search import tavily_search |
| | | |
| | | results = tavily_search("AI latest news", max_results=5) |
| | | for result in results: |
| | | print(f"Title: {result['title']}") |
| | | print(f"URL: {result['url']}") |
| | | print(f"Content: {result['content']}") |
| | | ``` |
| | | |
| | | ### Advanced Options |
| | | |
| | | The search supports various parameters: |
| | | - `max_results`: Number of results (default: 5) |
| | | - `search_depth`: "basic" or "advanced" |
| | | - `include_images`: Include image URLs |
| | | - `include_answer`: Include AI-generated answer |
| | | |
| | | ## Output Format |
| | | |
| | | Tavily returns structured results with: |
| | | - `query`: The search query |
| | | - `answer`: AI-generated answer (if requested) |
| | | - `results`: List of search results with title, url, content, score, and published_date |
| New file |
| | |
| | | #!/usr/bin/env python3 |
| | | """ |
| | | Tavily AI Search API Client |
| | | Usage: python tavily_search.py "your search query" [--max-results 5] [--depth basic|advanced] |
| | | """ |
| | | |
| | | import os |
| | | import sys |
| | | import json |
| | | import argparse |
| | | from typing import List, Dict, Any, Optional |
| | | |
| | | |
def get_api_key(api_key: Optional[str] = None) -> str:
    """Resolve the Tavily API key.

    An explicitly passed key takes priority; otherwise fall back to the
    TAVILY_API_KEY environment variable.

    Args:
        api_key: Optional key supplied directly by the caller.

    Returns:
        The first non-empty key found.

    Raises:
        ValueError: if no key is available from either source.
    """
    for candidate in (api_key, os.environ.get("TAVILY_API_KEY")):
        if candidate:
            return candidate

    raise ValueError(
        "Tavily API key required. Set via:\n"
        " 1. Environment variable: export TAVILY_API_KEY=your_key\n"
        " (Add to ~/.bashrc or ~/.zshrc for persistence)\n"
        " 2. Direct parameter: pass api_key when calling the function\n"
        "\nGet your API key at: https://tavily.com"
    )
| | | |
| | | |
def tavily_search(
    query: str,
    max_results: int = 5,
    search_depth: str = "basic",
    include_answer: bool = False,
    include_images: bool = False,
    api_key: Optional[str] = None
) -> Dict[str, Any]:
    """POST a search request to the Tavily API and return the decoded JSON.

    Args:
        query: Search query string
        max_results: Number of results to return (clamped into 1-20)
        search_depth: "basic" or "advanced"
        include_answer: Include AI-generated answer
        include_images: Include image URLs
        api_key: Tavily API key (optional, auto-detected from env/config)

    Returns:
        Dictionary containing search results

    Raises:
        ValueError: if no API key can be resolved.
        ImportError: if the `requests` package is not installed.
    """
    resolved_key = get_api_key(api_key)

    # `requests` is imported lazily so the module can be loaded (e.g. for
    # --help) without the dependency installed.
    try:
        import requests
    except ImportError:
        raise ImportError("requests package required. Install with: pip install requests")

    # Clamp max_results into the API-supported 1..20 range.
    bounded_results = max_results
    if bounded_results < 1:
        bounded_results = 1
    elif bounded_results > 20:
        bounded_results = 20

    response = requests.post(
        "https://api.tavily.com/search",
        json={
            "api_key": resolved_key,
            "query": query,
            "max_results": bounded_results,
            "search_depth": search_depth,
            "include_answer": include_answer,
            "include_images": include_images,
        },
        timeout=30,
    )
    response.raise_for_status()

    return response.json()
| | | |
| | | |
def format_results(results: Dict[str, Any]) -> str:
    """Render a Tavily response as human-readable text.

    Shows the optional AI answer first, then a numbered list of results
    with URL, published date, relevance score, and truncated content.
    """
    banner = "=" * 60
    lines: List[str] = []

    answer = results.get("answer")
    if "answer" in results and answer:
        lines += [banner, "AI ANSWER", banner, answer, ""]

    lines += [banner, "SEARCH RESULTS", banner, f"Query: {results.get('query', 'N/A')}", ""]

    for index, hit in enumerate(results.get("results", []), start=1):
        lines.append(f"{index}. {hit.get('title', 'No title')}")
        lines.append(f"   URL: {hit.get('url', 'N/A')}")

        published = hit.get('published_date')
        if published:
            lines.append(f"   Published: {published}")

        # NOTE: truthiness check means a score of exactly 0 is not shown,
        # matching the original behavior.
        score = hit.get('score')
        if score:
            lines.append(f"   Relevance: {score:.2f}")

        snippet = hit.get('content', '')
        if snippet:
            # Truncate long content to a fixed 300-character display width.
            if len(snippet) > 300:
                snippet = snippet[:297] + "..."
            lines.append(f"   {snippet}")

        lines.append("")

    return "\n".join(lines)
| | | |
| | | |
def main():
    """CLI entry point: parse arguments, run the search, print the outcome.

    Exits with status 1 on a configuration error (missing API key) or any
    other search failure, writing the reason to stderr.
    """
    parser = argparse.ArgumentParser(description="Tavily AI Search")
    parser.add_argument("query", help="Search query")
    parser.add_argument("--max-results", type=int, default=5, help="Number of results (1-20)")
    parser.add_argument("--depth", choices=["basic", "advanced"], default="basic", help="Search depth")
    parser.add_argument("--answer", action="store_true", help="Include AI-generated answer")
    parser.add_argument("--images", action="store_true", help="Include images")
    parser.add_argument("--json", action="store_true", help="Output raw JSON")
    opts = parser.parse_args()

    try:
        response = tavily_search(
            query=opts.query,
            max_results=opts.max_results,
            search_depth=opts.depth,
            include_answer=opts.answer,
            include_images=opts.images,
        )
        # Render inside the try so formatting errors are also reported
        # as a failure, matching the original control flow.
        rendered = (
            json.dumps(response, indent=2, ensure_ascii=False)
            if opts.json
            else format_results(response)
        )
        print(rendered)
    except ValueError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)
    except Exception as exc:
        print(f"Search failed: {exc}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()