# @helix-agents/store-cloudflare

Cloudflare storage implementations using D1 and Durable Objects.

## Installation

```bash
npm install @helix-agents/store-cloudflare
```

## createCloudflareStore

Factory function for creating stores from bindings.

```typescript
import { createCloudflareStore } from '@helix-agents/store-cloudflare';

const { stateStore, streamManager } = createCloudflareStore({
  db: env.AGENT_DB, // D1Database binding
  streams: env.STREAMS, // DurableObjectNamespace binding
});
```

### Options
```typescript
interface CreateCloudflareStoreOptions {
  // State store options
  stateOptions?: {
    logger?: Logger;
  };

  // Stream manager options
  streamOptions?: {
    bufferSize?: number;
    flushInterval?: number;
    logger?: Logger;
  };
}
```
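If you need to pass these options, here is a minimal sketch, assuming the option groups sit alongside the bindings in the same object (check the actual `CreateCloudflareStoreOptions` shape):

```typescript
const { stateStore, streamManager } = createCloudflareStore({
  db: env.AGENT_DB,
  streams: env.STREAMS,
  stateOptions: { logger: console },
  streamOptions: {
    bufferSize: 200, // buffer more chunks per flush
    flushInterval: 50, // flush more frequently (ms)
    logger: console,
  },
});
```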
## D1StateStore

D1-backed state storage.
```typescript
import { D1StateStore } from '@helix-agents/store-cloudflare';

const stateStore = new D1StateStore({
  database: env.AGENT_DB,
  logger: console,
});
```

### Methods
```typescript
// Save state (runId is inside the state object)
await stateStore.save(state);

// Load state
const state = await stateStore.load(runId);

// Check existence
const exists = await stateStore.exists(runId);

// Update status
await stateStore.updateStatus(runId, 'completed');

// Get messages with pagination
const { messages, hasMore } = await stateStore.getMessages(runId, {
  offset: 0,
  limit: 50,
});
```
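Because `getMessages` reports `hasMore`, long histories can be walked page by page; a minimal sketch (the page size is arbitrary):

```typescript
// Walk the full message history in fixed-size pages.
const pageSize = 50;
let offset = 0;
let hasMore = true;
while (hasMore) {
  const page = await stateStore.getMessages(runId, { offset, limit: pageSize });
  for (const message of page.messages) {
    // Process each message...
  }
  hasMore = page.hasMore;
  offset += pageSize;
}
```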
### Database Schema

```sql
CREATE TABLE agent_state (
  run_id TEXT PRIMARY KEY,
  agent_type TEXT NOT NULL,
  status TEXT NOT NULL DEFAULT 'running',
  step_count INTEGER NOT NULL DEFAULT 0,
  custom_state TEXT NOT NULL DEFAULT '{}',
  messages TEXT NOT NULL DEFAULT '[]',
  output TEXT,
  error TEXT,
  stream_id TEXT,
  parent_agent_id TEXT,
  sub_agents TEXT NOT NULL DEFAULT '[]',
  aborted INTEGER DEFAULT 0,
  abort_reason TEXT,
  created_at TEXT NOT NULL DEFAULT (datetime('now')),
  updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
```
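Since the table is plain D1, you can also query it directly for needs the store API doesn't cover; a sketch listing running agents (this bypasses the store, so treat the schema as an implementation detail that may change between versions):

```typescript
// Ad-hoc D1 query against the agent_state table.
const { results } = await env.AGENT_DB
  .prepare('SELECT run_id, agent_type FROM agent_state WHERE status = ?')
  .bind('running')
  .all();
```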
## DurableObjectStreamManager

Stream management using Durable Objects.
```typescript
import { DurableObjectStreamManager } from '@helix-agents/store-cloudflare';

const streamManager = new DurableObjectStreamManager({
  streamNamespace: env.STREAMS,
  bufferSize: 100, // Chunks to buffer before flushing
  flushInterval: 100, // Flush interval (ms)
  logger: console,
});
```

### Methods
```typescript
// Create a writer (implicitly creates the stream)
const writer = await streamManager.createWriter(streamId, agentId, agentType);
await writer.write(chunk);
await writer.close();

// Create a reader
const reader = await streamManager.createReader(streamId);
for await (const chunk of reader) {
  // Process chunk
}

// End the stream
await streamManager.endStream(streamId);
await streamManager.endStream(streamId, output); // With final output

// Fail the stream
await streamManager.failStream(streamId, 'Error message');

// Get stream info
const info = await streamManager.getInfo(streamId);

// Resumable reader
const resumableReader = await streamManager.createResumableReader(streamId, {
  fromSequence: 100,
});
```
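A typical use of `createResumableReader` is picking up after a dropped connection; a sketch, assuming each chunk carries a numeric `sequence` field (not documented above, so verify against the `StreamChunk` type):

```typescript
// Resume reading from the last sequence the consumer saw.
async function readFrom(streamId: string, startSequence: number): Promise<number> {
  let lastSequence = startSequence;
  const reader = await streamManager.createResumableReader(streamId, {
    fromSequence: lastSequence,
  });
  for await (const chunk of reader) {
    lastSequence = chunk.sequence; // assumed field; see note above
    // Process chunk...
  }
  return lastSequence; // persist this to resume after the next drop
}
```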
### Events

```typescript
interface BufferOverflowEvent {
  streamId: string;
  droppedCount: number;
  bufferSize: number;
}

streamManager.on('bufferOverflow', (event: BufferOverflowEvent) => {
  console.warn('Buffer overflow:', event);
});
```

## StreamServer
PartyServer-based Durable Object class for stream coordination. Must be exported from your worker.
```typescript
import { StreamServer } from '@helix-agents/store-cloudflare';

// In worker.ts - re-export the class
export { StreamServer };
```

### Custom Logger
To attach a custom logger, derive a class via `withLogger` and export it under the expected name:
```typescript
import { StreamServer } from '@helix-agents/store-cloudflare';

const CustomStreamServer = StreamServer.withLogger(myLogger);
export { CustomStreamServer as StreamServer };
```
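The `Logger` interface isn't spelled out in this package's docs; a minimal sketch of `myLogger`, assuming the usual level methods (adjust to the actual `Logger` type):

```typescript
// Hypothetical logger; the exact Logger shape may differ.
const myLogger = {
  debug: (msg: string, ...args: unknown[]) => console.debug('[streams]', msg, ...args),
  info: (msg: string, ...args: unknown[]) => console.info('[streams]', msg, ...args),
  warn: (msg: string, ...args: unknown[]) => console.warn('[streams]', msg, ...args),
  error: (msg: string, ...args: unknown[]) => console.error('[streams]', msg, ...args),
};
```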
## Migrations

D1 schema migrations.
```typescript
import {
  runMigration,
  isMigrated,
  dropAllTables,
  getAgentsMigrationVersion,
  getAgentsTableNames,
  SCHEMA_MIGRATION_V1,
  CURRENT_SCHEMA_VERSION,
  TABLE_NAMES,
} from '@helix-agents/store-cloudflare';

// Run migration
await runMigration(env.AGENT_DB);

// Check if migrated
const migrated = await isMigrated(env.AGENT_DB);

// Get current version
const version = await getAgentsMigrationVersion(env.AGENT_DB);

// Drop all tables (for testing)
await dropAllTables(env.AGENT_DB);
```
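If you migrate from a request handler (as the complete example below does), a guard avoids repeating the work on every request; a sketch using only the functions above:

```typescript
// Run the migration once, then skip it on subsequent requests.
if (!(await isMigrated(env.AGENT_DB))) {
  await runMigration(env.AGENT_DB);
}
```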
### Migration SQL

```typescript
// Get the SQL for manual migration
console.log(SCHEMA_MIGRATION_V1);
```

## Errors
```typescript
import {
  D1StateError, // Base D1 error
  StateNotFoundError, // State not found
  SubAgentRefNotFoundError, // Sub-agent reference not found
  StreamConnectionError, // DO connection error
  StreamNotFoundError, // Stream not found
  SequenceConflictError, // Sequence mismatch
} from '@helix-agents/store-cloudflare';

try {
  const state = await stateStore.load(runId);
} catch (error) {
  if (error instanceof StateNotFoundError) {
    // Handle not found
  }
}
```

## Complete Example
### wrangler.toml
```toml
name = "my-agent-worker"
main = "src/worker.ts"
compatibility_date = "2024-12-01"
compatibility_flags = ["nodejs_compat"]

[[d1_databases]]
binding = "AGENT_DB"
database_name = "my-agents-db"
database_id = "xxx-xxx-xxx"

[[durable_objects.bindings]]
name = "STREAMS"
class_name = "StreamServer"

[[migrations]]
tag = "v1"
new_sqlite_classes = ["StreamServer"]
```

### worker.ts
```typescript
import {
  createCloudflareStore,
  StreamServer,
  runMigration,
} from '@helix-agents/store-cloudflare';

// Re-export the Durable Object
export { StreamServer };

interface Env {
  AGENT_DB: D1Database;
  STREAMS: DurableObjectNamespace;
}

export default {
  async fetch(request: Request, env: Env) {
    // Run migration on first request (or use a scheduled task)
    await runMigration(env.AGENT_DB);

    // Create stores
    const { stateStore, streamManager } = createCloudflareStore({
      db: env.AGENT_DB,
      streams: env.STREAMS,
    });

    // Use stores...
    const state = await stateStore.load('run-123');
    return Response.json({ state });
  },
};
```

### Migration Script
```bash
# Create the database
npx wrangler d1 create my-agents-db

# Run the migration (creates tables)
npx wrangler d1 execute my-agents-db --file=./migrations/0001_init.sql
```
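Rather than hand-writing `0001_init.sql`, the file can be generated from the exported migration SQL; a sketch of a local Node script, assuming `SCHEMA_MIGRATION_V1` is a plain SQL string:

```typescript
// generate-migration.ts - writes the packaged schema SQL to a migration file.
import { writeFileSync } from 'node:fs';
import { SCHEMA_MIGRATION_V1 } from '@helix-agents/store-cloudflare';

writeFileSync('./migrations/0001_init.sql', SCHEMA_MIGRATION_V1);
```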
## D1UsageStore

D1-backed usage tracking storage. Track LLM tokens, tool executions, sub-agent calls, and custom metrics in Cloudflare Workers.
```typescript
import { D1UsageStore } from '@helix-agents/store-cloudflare';

const usageStore = new D1UsageStore({
  database: env.AGENT_DB,
  tableName: 'usage_entries', // Optional, default: 'usage_entries'
});
```

### Basic Usage
Pass the store to the executor to enable usage tracking:
```typescript
const handle = await executor.execute(agent, 'Do the task', { usageStore });
await handle.result();

// Get aggregated usage
const rollup = await handle.getUsageRollup();
console.log(`Total tokens: ${rollup?.tokens.total}`);
```

### Methods
#### recordEntry
Record a usage entry (called internally by the framework).
```typescript
await usageStore.recordEntry({
  kind: 'tokens',
  runId: 'run-123',
  stepCount: 1,
  timestamp: Date.now(),
  source: { type: 'agent', name: 'my-agent' },
  model: 'gpt-4o',
  tokens: { prompt: 100, completion: 50, total: 150 },
});
```

#### getEntries
Get usage entries for a run with optional filtering.
```typescript
// All entries
const entries = await usageStore.getEntries('run-123');

// Filter by kind
const tokenEntries = await usageStore.getEntries('run-123', {
  kinds: ['tokens'],
});

// Filter by step range
const midRunEntries = await usageStore.getEntries('run-123', {
  stepRange: { min: 5, max: 10 },
});

// Pagination
const page = await usageStore.getEntries('run-123', {
  limit: 10,
  offset: 20,
});
```
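For runs with many entries, `limit` and `offset` support a simple paging loop; a sketch, assuming `getEntries` resolves to a plain array as the snippets above suggest:

```typescript
// Page through all entries without loading them at once.
const pageSize = 100;
for (let offset = 0; ; offset += pageSize) {
  const batch = await usageStore.getEntries('run-123', { limit: pageSize, offset });
  for (const entry of batch) {
    // Process entry...
  }
  if (batch.length < pageSize) break; // last page reached
}
```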
#### getRollup

Get an aggregated usage rollup.
```typescript
// This agent's usage only
const rollup = await usageStore.getRollup('run-123');

// Include sub-agent usage (lazy aggregation)
const totalRollup = await usageStore.getRollup('run-123', {
  includeSubAgents: true,
});
```

#### exists
Check if usage data exists for a run.
```typescript
const hasUsage = await usageStore.exists('run-123');
```

#### delete
Delete usage data for a run.
```typescript
await usageStore.delete('run-123');
```

#### getEntryCount
Get entry count without fetching entries.
```typescript
const count = await usageStore.getEntryCount('run-123');
```

#### findRunIds
Find all tracked run IDs with optional filters.
```typescript
// All run IDs
const runIds = await usageStore.findRunIds();

// With filters
const filteredRunIds = await usageStore.findRunIds({
  agentType: 'researcher',
  limit: 100,
});
```

#### deleteOldEntries
Delete entries older than a specified age.
```typescript
// Delete entries older than 7 days
await usageStore.deleteOldEntries(7 * 24 * 60 * 60 * 1000);
```

### Database Schema
The D1 migration creates this table:
```sql
CREATE TABLE usage_entries (
  id TEXT PRIMARY KEY,
  run_id TEXT NOT NULL,
  kind TEXT NOT NULL,
  step_count INTEGER NOT NULL,
  timestamp INTEGER NOT NULL,
  source_type TEXT NOT NULL,
  source_name TEXT NOT NULL,
  data TEXT NOT NULL,
  created_at TEXT NOT NULL DEFAULT (datetime('now'))
);

CREATE INDEX idx_usage_entries_run_id ON usage_entries(run_id);
CREATE INDEX idx_usage_entries_kind ON usage_entries(kind);
CREATE INDEX idx_usage_entries_timestamp ON usage_entries(timestamp);
```

### Migration
Run migration to create the usage table:
```typescript
import { runMigration } from '@helix-agents/store-cloudflare';

// Run on worker startup or via a scheduled task
await runMigration(env.AGENT_DB);
```

### Complete Example
```typescript
import {
  createCloudflareStore,
  D1UsageStore,
  runMigration,
} from '@helix-agents/store-cloudflare';

interface Env {
  AGENT_DB: D1Database;
  STREAMS: DurableObjectNamespace;
}

export default {
  async fetch(request: Request, env: Env) {
    await runMigration(env.AGENT_DB);

    const { stateStore, streamManager } = createCloudflareStore({
      db: env.AGENT_DB,
      streams: env.STREAMS,
    });
    const usageStore = new D1UsageStore({ database: env.AGENT_DB });

    // Execute agent with usage tracking
    // (`executor` and `agent` come from your agent setup, not shown here)
    const handle = await executor.execute(agent, 'Task', { usageStore });
    await handle.result();

    // Get usage data
    const rollup = await handle.getUsageRollup();
    return Response.json({
      tokens: rollup?.tokens.total,
      toolCalls: rollup?.toolStats.totalCalls,
    });
  },

  // Scheduled cleanup
  async scheduled(event: ScheduledEvent, env: Env) {
    const usageStore = new D1UsageStore({ database: env.AGENT_DB });

    // Delete entries older than 30 days
    await usageStore.deleteOldEntries(30 * 24 * 60 * 60 * 1000);
  },
};
```
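Note that the `scheduled` handler only fires if a cron trigger is configured; a minimal wrangler.toml addition (the schedule here is an arbitrary example):

```toml
[triggers]
crons = ["0 3 * * *"] # run the cleanup daily at 03:00 UTC
```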
## Re-exported Types

For convenience, core types are re-exported:
```typescript
import type {
  StateStore,
  StreamManager,
  StreamWriter,
  StreamReader,
  StreamChunk,
  ResumableStreamReader,
  ResumableReaderOptions,
  StreamInfo,
  ResumableStreamStatus,
  AgentState,
  Message,
  AgentStatus,
  MergeChanges,
} from '@helix-agents/store-cloudflare';
```
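These re-exports let application code depend on the abstract interfaces rather than the Cloudflare classes; a sketch of a store-agnostic helper, assuming `AgentState` exposes the `status` field implied by the schema above:

```typescript
import type { StateStore } from '@helix-agents/store-cloudflare';

// Works with any StateStore implementation, not just D1StateStore.
async function isFinished(store: StateStore, runId: string): Promise<boolean> {
  if (!(await store.exists(runId))) return false;
  const state = await store.load(runId);
  return state.status === 'completed'; // assumed field; see note above
}
```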