fix(memory): support live lancedb hook enablement

This commit is contained in:
Vincent Koc
2026-04-22 23:05:48 -07:00
parent 4e259b0461
commit 3dc3bf65d2
3 changed files with 409 additions and 113 deletions

View File

@@ -103,6 +103,7 @@ Docs: https://docs.openclaw.ai
- Diffs/viewer: re-read remote viewer access policy from live runtime config on each request, so toggling `plugins.entries.diffs.config.security.allowRemoteViewer` closes proxied viewer access immediately instead of waiting for a restart. Thanks @vincentkoc.
- Diffs/tooling: re-read `viewerBaseUrl`, presentation defaults, and viewer access policy from live runtime config, and fail closed when the live `diffs` plugin entry disappears instead of reviving startup viewer settings. Thanks @vincentkoc.
- Memory/LanceDB: stop resurrecting removed live `memory-lancedb` hook config from startup snapshots, so deleting or disabling the plugin entry shuts off auto-recall and auto-capture without a restart. Thanks @vincentkoc.
- Memory/LanceDB: keep auto-recall and auto-capture hooks wired when those settings start disabled, so turning them on in live config starts recall and capture without waiting for a restart. Thanks @vincentkoc.
- Skill Workshop: keep the tool plus `before_prompt_build` / `agent_end` hooks wired while the plugin is disabled at startup, so turning the plugin back on in live config starts guidance and capture without waiting for a restart. Thanks @vincentkoc.
- Active Memory: stop reviving removed live `active-memory` config from startup snapshots, so removing the plugin entry turns the hook off immediately instead of waiting for a restart. Thanks @vincentkoc.
- Agents/subagents: drop bare `NO_REPLY` from the parent turn when the session still has pending spawned children, so direct-conversation surfaces such as Telegram DMs no longer rewrite the sentinel into visible fallback chatter while waiting for the child completion event. (#69942) Thanks @neeravmakwana.

View File

@@ -205,7 +205,7 @@ describe("memory plugin e2e", () => {
expect(on).not.toHaveBeenCalledWith("before_agent_start", expect.any(Function));
});
test("does not register before_prompt_build when auto-recall is disabled", async () => {
test("keeps before_prompt_build registered but inert when auto-recall is disabled", async () => {
const on = vi.fn();
const mockApi = {
id: "memory-lancedb",
@@ -237,11 +237,17 @@ describe("memory plugin e2e", () => {
memoryPlugin.register(mockApi as any);
expect(on).not.toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
const beforePromptBuild = on.mock.calls.find(
([hookName]) => hookName === "before_prompt_build",
)?.[1];
expect(beforePromptBuild).toBeTypeOf("function");
await expect(
beforePromptBuild?.({ prompt: "what editor should i use?", messages: [] }, {}),
).resolves.toBeUndefined();
expect(on).toHaveBeenCalledWith("agent_end", expect.any(Function));
});
test("does not register agent_end when auto-capture is disabled", async () => {
test("keeps agent_end registered but inert when auto-capture is disabled", async () => {
const on = vi.fn();
const mockApi = {
id: "memory-lancedb",
@@ -274,7 +280,17 @@ describe("memory plugin e2e", () => {
memoryPlugin.register(mockApi as any);
expect(on).toHaveBeenCalledWith("before_prompt_build", expect.any(Function));
expect(on).not.toHaveBeenCalledWith("agent_end", expect.any(Function));
const agentEnd = on.mock.calls.find(([hookName]) => hookName === "agent_end")?.[1];
expect(agentEnd).toBeTypeOf("function");
await expect(
agentEnd?.(
{
success: true,
messages: [{ role: "user", content: "I prefer Helix for editing code every day." }],
},
{},
),
).resolves.toBeUndefined();
});
test("runs auto-recall through the registered before_prompt_build hook", async () => {
@@ -388,6 +404,150 @@ describe("memory plugin e2e", () => {
}
});
// Regression test: when auto-recall starts disabled, the plugin must still
// register its before_prompt_build hook so that enabling autoRecall in the
// live runtime config (read back through runtime.config.loadConfig) activates
// recall without a process restart.
test("uses live runtime config to enable auto-recall after startup disable", async () => {
// Mocked OpenAI embeddings endpoint — always returns one fixed vector.
const embeddingsCreate = vi.fn(async () => ({
data: [{ embedding: [0.1, 0.2, 0.3] }],
}));
const ensureGlobalUndiciEnvProxyDispatcher = vi.fn();
// Mocked LanceDB vector-search result: one stored memory the hook should inject.
const toArray = vi.fn(async () => [
{
id: "memory-1",
text: "I prefer Helix for editing code.",
vector: [0.1, 0.2, 0.3],
importance: 0.8,
category: "preference",
createdAt: 1,
_distance: 0.1,
},
]);
const limit = vi.fn(() => ({ toArray }));
const vectorSearch = vi.fn(() => ({ limit }));
const openTable = vi.fn(async () => ({
vectorSearch,
countRows: vi.fn(async () => 0),
add: vi.fn(async () => undefined),
delete: vi.fn(async () => undefined),
}));
const loadLanceDbModule = vi.fn(async () => ({
connect: vi.fn(async () => ({
tableNames: vi.fn(async () => ["memories"]),
openTable,
})),
}));
// Startup config: both hooks disabled. Reassigned after register() below to
// simulate an operator editing the live config file.
let configFile: Record<string, unknown> = {
plugins: {
entries: {
"memory-lancedb": {
config: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: false,
autoRecall: false,
},
},
},
},
};
// Module mocks must be installed before ./index.js is imported, so reset the
// module registry first; the import itself happens inside the try block.
vi.resetModules();
vi.doMock("openclaw/plugin-sdk/runtime-env", () => ({
ensureGlobalUndiciEnvProxyDispatcher,
}));
vi.doMock("openai", () => ({
default: class MockOpenAI {
embeddings = { create: embeddingsCreate };
},
}));
vi.doMock("./lancedb-runtime.js", () => ({
loadLanceDbModule,
}));
try {
const { default: dynamicMemoryPlugin } = await import("./index.js");
const on = vi.fn();
const logger = {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
};
const mockApi = {
id: "memory-lancedb",
name: "Memory (LanceDB)",
source: "test",
config: {},
pluginConfig: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: false,
autoRecall: false,
},
runtime: {
config: {
// Returns the current configFile binding, so swapping the variable
// after register() is observed on the next hook invocation.
loadConfig: () => configFile,
},
},
logger,
registerTool: vi.fn(),
registerCli: vi.fn(),
registerService: vi.fn(),
on,
resolvePath: (p: string) => p,
};
dynamicMemoryPlugin.register(mockApi as any);
// Live edit: flip autoRecall on after registration has completed.
configFile = {
plugins: {
entries: {
"memory-lancedb": {
config: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: false,
autoRecall: true,
},
},
},
},
};
// The hook must have been wired even though autoRecall was false at startup.
const beforePromptBuild = on.mock.calls.find(
([hookName]) => hookName === "before_prompt_build",
)?.[1];
expect(beforePromptBuild).toBeTypeOf("function");
const result = await beforePromptBuild?.(
{ prompt: "what editor should i use?", messages: [] },
{},
);
// Recall actually ran: LanceDB was opened once, the prompt was embedded, and
// the stored memory text was prepended to the prompt context.
expect(loadLanceDbModule).toHaveBeenCalledTimes(1);
expect(embeddingsCreate).toHaveBeenCalledWith({
model: "text-embedding-3-small",
input: "what editor should i use?",
});
expect(result).toMatchObject({
prependContext: expect.stringContaining("I prefer Helix for editing code."),
});
expect(logger.info).toHaveBeenCalledWith("memory-lancedb: injecting 1 memories into context");
} finally {
// Undo module mocks so subsequent tests import the real modules.
vi.doUnmock("openclaw/plugin-sdk/runtime-env");
vi.doUnmock("openai");
vi.doUnmock("./lancedb-runtime.js");
vi.resetModules();
}
});
test("uses live runtime config to skip auto-recall after registration", async () => {
const embeddingsCreate = vi.fn(async () => ({
data: [{ embedding: [0.1, 0.2, 0.3] }],
@@ -730,6 +890,145 @@ describe("memory plugin e2e", () => {
}
});
// Regression test: the mirror of the auto-recall case above for capture —
// when auto-capture starts disabled, the agent_end hook must still be
// registered so that enabling autoCapture in the live runtime config stores
// memories without a process restart.
test("uses live runtime config to enable auto-capture after startup disable", async () => {
// Mocked OpenAI embeddings endpoint — always returns one fixed vector.
const embeddingsCreate = vi.fn(async () => ({
data: [{ embedding: [0.1, 0.2, 0.3] }],
}));
const ensureGlobalUndiciEnvProxyDispatcher = vi.fn();
// Spy on table writes; an empty search result means no duplicate is found,
// so the capture path is expected to call add().
const add = vi.fn(async () => undefined);
const toArray = vi.fn(async () => []);
const limit = vi.fn(() => ({ toArray }));
const vectorSearch = vi.fn(() => ({ limit }));
const openTable = vi.fn(async () => ({
vectorSearch,
countRows: vi.fn(async () => 0),
add,
delete: vi.fn(async () => undefined),
}));
const loadLanceDbModule = vi.fn(async () => ({
connect: vi.fn(async () => ({
tableNames: vi.fn(async () => ["memories"]),
openTable,
})),
}));
// Startup config: both hooks disabled. Reassigned after register() below to
// simulate an operator editing the live config file.
let configFile: Record<string, unknown> = {
plugins: {
entries: {
"memory-lancedb": {
config: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: false,
autoRecall: false,
},
},
},
},
};
// Module mocks must be installed before ./index.js is imported, so reset the
// module registry first; the import itself happens inside the try block.
vi.resetModules();
vi.doMock("openclaw/plugin-sdk/runtime-env", () => ({
ensureGlobalUndiciEnvProxyDispatcher,
}));
vi.doMock("openai", () => ({
default: class MockOpenAI {
embeddings = { create: embeddingsCreate };
},
}));
vi.doMock("./lancedb-runtime.js", () => ({
loadLanceDbModule,
}));
try {
const { default: dynamicMemoryPlugin } = await import("./index.js");
const on = vi.fn();
const mockApi = {
id: "memory-lancedb",
name: "Memory (LanceDB)",
source: "test",
config: {},
pluginConfig: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: false,
autoRecall: false,
},
runtime: {
config: {
// Returns the current configFile binding, so swapping the variable
// after register() is observed on the next hook invocation.
loadConfig: () => configFile,
},
},
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
registerTool: vi.fn(),
registerCli: vi.fn(),
registerService: vi.fn(),
on,
resolvePath: (p: string) => p,
};
dynamicMemoryPlugin.register(mockApi as any);
// Live edit: flip autoCapture on after registration has completed.
configFile = {
plugins: {
entries: {
"memory-lancedb": {
config: {
embedding: {
apiKey: OPENAI_API_KEY,
model: "text-embedding-3-small",
},
dbPath: getDbPath(),
autoCapture: true,
autoRecall: false,
},
},
},
},
};
// The hook must have been wired even though autoCapture was false at startup.
const agentEnd = on.mock.calls.find(([hookName]) => hookName === "agent_end")?.[1];
expect(agentEnd).toBeTypeOf("function");
await agentEnd?.(
{
success: true,
messages: [{ role: "user", content: "I prefer Helix for editing code every day." }],
},
{},
);
// Capture actually ran: LanceDB was opened once, the user message was
// embedded, and the memory row was written with the expected metadata.
expect(loadLanceDbModule).toHaveBeenCalledTimes(1);
expect(embeddingsCreate).toHaveBeenCalledWith({
model: "text-embedding-3-small",
input: "I prefer Helix for editing code every day.",
});
expect(add).toHaveBeenCalledWith([
expect.objectContaining({
text: "I prefer Helix for editing code every day.",
vector: [0.1, 0.2, 0.3],
importance: 0.7,
category: "preference",
}),
]);
} finally {
// Undo module mocks so subsequent tests import the real modules.
vi.doUnmock("openclaw/plugin-sdk/runtime-env");
vi.doUnmock("openai");
vi.doUnmock("./lancedb-runtime.js");
vi.resetModules();
}
});
test("uses live runtime config to skip auto-capture after registration", async () => {
const embeddingsCreate = vi.fn(async () => ({
data: [{ embedding: [0.1, 0.2, 0.3] }],

View File

@@ -579,127 +579,123 @@ export default definePluginEntry({
// ========================================================================
// Auto-recall: inject relevant memories during prompt build
if (cfg.autoRecall) {
api.on("before_prompt_build", async (event) => {
const currentCfg = resolveCurrentHookConfig();
if (!currentCfg.autoRecall) {
return undefined;
}
if (!event.prompt || event.prompt.length < 5) {
return undefined;
}
try {
const vector = await embeddings.embed(event.prompt);
const results = await db.search(vector, 3, 0.3);
if (results.length === 0) {
return undefined;
}
api.logger.info?.(`memory-lancedb: injecting ${results.length} memories into context`);
return {
prependContext: formatRelevantMemoriesContext(
results.map((r) => ({ category: r.entry.category, text: r.entry.text })),
),
};
} catch (err) {
api.logger.warn(`memory-lancedb: recall failed: ${String(err)}`);
}
api.on("before_prompt_build", async (event) => {
const currentCfg = resolveCurrentHookConfig();
if (!currentCfg.autoRecall) {
return undefined;
});
}
}
if (!event.prompt || event.prompt.length < 5) {
return undefined;
}
try {
const vector = await embeddings.embed(event.prompt);
const results = await db.search(vector, 3, 0.3);
if (results.length === 0) {
return undefined;
}
api.logger.info?.(`memory-lancedb: injecting ${results.length} memories into context`);
return {
prependContext: formatRelevantMemoriesContext(
results.map((r) => ({ category: r.entry.category, text: r.entry.text })),
),
};
} catch (err) {
api.logger.warn(`memory-lancedb: recall failed: ${String(err)}`);
}
return undefined;
});
// Auto-capture: analyze and store important information after agent ends
if (cfg.autoCapture) {
api.on("agent_end", async (event) => {
const currentCfg = resolveCurrentHookConfig();
if (!currentCfg.autoCapture) {
return;
}
if (!event.success || !event.messages || event.messages.length === 0) {
return;
}
api.on("agent_end", async (event) => {
const currentCfg = resolveCurrentHookConfig();
if (!currentCfg.autoCapture) {
return;
}
if (!event.success || !event.messages || event.messages.length === 0) {
return;
}
try {
// Extract text content from messages (handling unknown[] type)
const texts: string[] = [];
for (const msg of event.messages) {
// Type guard for message object
if (!msg || typeof msg !== "object") {
continue;
}
const msgObj = msg as Record<string, unknown>;
try {
// Extract text content from messages (handling unknown[] type)
const texts: string[] = [];
for (const msg of event.messages) {
// Type guard for message object
if (!msg || typeof msg !== "object") {
continue;
}
const msgObj = msg as Record<string, unknown>;
// Only process user messages to avoid self-poisoning from model output
const role = msgObj.role;
if (role !== "user") {
continue;
}
// Only process user messages to avoid self-poisoning from model output
const role = msgObj.role;
if (role !== "user") {
continue;
}
const content = msgObj.content;
const content = msgObj.content;
// Handle string content directly
if (typeof content === "string") {
texts.push(content);
continue;
}
// Handle string content directly
if (typeof content === "string") {
texts.push(content);
continue;
}
// Handle array content (content blocks)
if (Array.isArray(content)) {
for (const block of content) {
if (
block &&
typeof block === "object" &&
"type" in block &&
(block as Record<string, unknown>).type === "text" &&
"text" in block &&
typeof (block as Record<string, unknown>).text === "string"
) {
texts.push((block as Record<string, unknown>).text as string);
}
// Handle array content (content blocks)
if (Array.isArray(content)) {
for (const block of content) {
if (
block &&
typeof block === "object" &&
"type" in block &&
(block as Record<string, unknown>).type === "text" &&
"text" in block &&
typeof (block as Record<string, unknown>).text === "string"
) {
texts.push((block as Record<string, unknown>).text as string);
}
}
}
// Filter for capturable content
const toCapture = texts.filter(
(text) => text && shouldCapture(text, { maxChars: currentCfg.captureMaxChars }),
);
if (toCapture.length === 0) {
return;
}
// Store each capturable piece (limit to 3 per conversation)
let stored = 0;
for (const text of toCapture.slice(0, 3)) {
const category = detectCategory(text);
const vector = await embeddings.embed(text);
// Check for duplicates (high similarity threshold)
const existing = await db.search(vector, 1, 0.95);
if (existing.length > 0) {
continue;
}
await db.store({
text,
vector,
importance: 0.7,
category,
});
stored++;
}
if (stored > 0) {
api.logger.info(`memory-lancedb: auto-captured ${stored} memories`);
}
} catch (err) {
api.logger.warn(`memory-lancedb: capture failed: ${String(err)}`);
}
});
}
// Filter for capturable content
const toCapture = texts.filter(
(text) => text && shouldCapture(text, { maxChars: currentCfg.captureMaxChars }),
);
if (toCapture.length === 0) {
return;
}
// Store each capturable piece (limit to 3 per conversation)
let stored = 0;
for (const text of toCapture.slice(0, 3)) {
const category = detectCategory(text);
const vector = await embeddings.embed(text);
// Check for duplicates (high similarity threshold)
const existing = await db.search(vector, 1, 0.95);
if (existing.length > 0) {
continue;
}
await db.store({
text,
vector,
importance: 0.7,
category,
});
stored++;
}
if (stored > 0) {
api.logger.info(`memory-lancedb: auto-captured ${stored} memories`);
}
} catch (err) {
api.logger.warn(`memory-lancedb: capture failed: ${String(err)}`);
}
});
// ========================================================================
// Service