Create a simple command-line interface for interacting with your agent:
// src/example/cli.ts
import express from "express";
import { AgentFramework } from "../framework";
import { standardMiddleware } from "../middleware";
import { Character, InputSource, InputType } from "../types";
import { BaseAgent } from "../agent";
import readline from "readline";
// Persona definition for the CLI assistant agent.
// Keys grouped: identity, examples, presentation.
const assistant: Character = {
  agentId: "cli_assistant",
  name: "Assistant",
  system: "You are a helpful CLI assistant.",
  bio: ["A command-line AI assistant"],
  lore: ["Created to help users through the terminal"],
  topics: ["general help", "cli", "terminal"],
  adjectives: ["helpful", "efficient"],
  // One example exchange used to seed the model's conversational tone.
  messageExamples: [
    [
      { user: "user1", content: { text: "Hello!" } },
      { user: "Assistant", content: { text: "Hi! How can I help?" } }
    ]
  ],
  postExamples: [],
  style: {
    all: ["helpful", "concise"],
    chat: ["friendly"],
    post: ["clear"]
  },
  routes: []
};
// HTTP server + agent-framework wiring for the CLI example.
const app = express();
app.use(express.json());

const framework = new AgentFramework();
for (const middleware of standardMiddleware) {
  framework.use(middleware);
}

// Agent instance backing both the HTTP routes and the CLI loop.
const agent = new BaseAgent(assistant);
// Add conversation route
agent.addRoute({
name: "conversation",
description: "Handle natural conversation",
handler: async (context, req, res) => {
const response = await llmUtils.getTextFromLLM(
context,
"anthropic/claude-3-sonnet"
);
await res.send(response);
}
});
// Interactive read-eval-print loop over stdin/stdout.
async function startCLI() {
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });

  console.log("\nCLI Assistant");
  console.log("=============");

  // Ask one question, run it through the framework, then re-arm the
  // prompt — on success and on failure alike, so the loop never dies.
  const prompt = async () => {
    rl.question("\nYou: ", async (text) => {
      try {
        const input = {
          source: InputSource.NETWORK,
          userId: "cli_user",
          agentId: agent.getAgentId(),
          roomId: "cli_session",
          type: InputType.TEXT,
          text: text
        };
        const response = await framework.process(input, agent);
        console.log("\nAssistant:", response);
      } catch (error) {
        console.error("\nError:", error);
      }
      prompt();
    });
  };

  prompt();
}
// Boot the HTTP server first, then attach the interactive CLI once it
// is listening. `||` (not `??`) deliberately treats an empty PORT env
// var as unset.
const listenPort = process.env.PORT || 3000;
app.listen(listenPort, () => {
  console.log(`Server running on http://localhost:${listenPort}`);
  startCLI();
});
Create a Twitter bot that posts regularly and responds to mentions:
// src/example/twitter-bot.ts
import { TwitterClient } from "@liz/twitter-client";
import { Character } from "../types";
import { BaseAgent } from "../agent";
// Persona definition for the tech-news Twitter bot.
const twitterBot: Character = {
  agentId: "tech_news_bot",
  name: "TechNews",
  system: "You are a tech news curator sharing insights about AI and technology.",
  bio: ["AI-powered tech news curator"],
  lore: ["Passionate about sharing tech insights"],
  topics: ["AI", "technology", "programming", "tech news"],
  adjectives: ["tech-savvy", "insightful"],
  // Seed examples for conversational replies and standalone posts.
  messageExamples: [
    [
      { user: "user1", content: { text: "What's new in AI?" } },
      { user: "TechNews", content: { text: "Here are the latest developments..." } }
    ]
  ],
  postExamples: [
    "🚀 Breaking: New developments in quantum computing...",
    "💡 AI Insight of the day: Understanding large language models..."
  ],
  style: {
    all: ["informative", "engaging"],
    chat: ["helpful", "knowledgeable"],
    post: ["concise", "engaging"]
  },
  routes: []
};
// Create agent
const agent = new BaseAgent(twitterBot);
// Add tweet generation route
agent.addRoute({
name: "create_new_tweet",
description: "Generate a new tweet about tech news",
handler: async (context, req, res) => {
const tweet = await llmUtils.getTextFromLLM(
context,
"anthropic/claude-3-sonnet"
);
await res.send(tweet);
}
});
// Twitter client settings. Credentials come from the environment.
// NOTE(review): these resolve to `undefined` when the env vars are
// unset — consider validating before starting the bot.
const config = {
  // Account credentials
  username: process.env.TWITTER_USERNAME,
  password: process.env.TWITTER_PASSWORD,
  email: process.env.TWITTER_EMAIL,
  twoFactorSecret: process.env.TWITTER_2FA_SECRET,
  // Behavior
  retryLimit: 3,         // max retries on client operations
  postIntervalHours: 4,  // cadence for new scheduled tweets
  pollingInterval: 5,    // mention-polling cadence (units set by TwitterClient — confirm)
  dryRun: process.env.NODE_ENV !== "production" // never actually post outside prod
};
// Connect the agent to the Twitter client and begin its post/poll loops.
async function startBot() {
  const client = new TwitterClient(agent, config);
  await client.start();
  console.log("Twitter bot started!");
}

// Fire-and-forget boot; surface any startup failure on stderr.
startBot().catch(console.error);
Create an agent that uses conversation history for context:
// src/example/memory-agent.ts
import { AgentFramework } from "../framework";
import { standardMiddleware } from "../middleware";
import { Character, InputSource, InputType } from "../types";
import { BaseAgent } from "../agent";
import { prisma } from "../utils/db";
// Persona definition for the memory-aware mentor agent.
const memoryAgent: Character = {
  agentId: "mentor_agent",
  name: "Mentor",
  system: "You are a mentor who remembers past conversations to provide personalized guidance.",
  bio: ["An AI mentor with perfect memory"],
  lore: ["Uses conversation history to give contextual advice"],
  topics: ["mentoring", "personal growth"],
  adjectives: ["understanding", "wise"],
  messageExamples: [],
  postExamples: [],
  style: {
    all: ["personalized", "thoughtful"],
    chat: ["empathetic"],
    post: ["reflective"]
  },
  routes: []
};
const agent = new BaseAgent(memoryAgent);
// Add conversation route with memory context
agent.addRoute({
name: "conversation",
description: "Handle conversation with memory context",
handler: async (context, req, res) => {
// Get recent memories for this user
const memories = await prisma.memory.findMany({
where: {
userId: req.input.userId,
agentId: req.input.agentId
},
orderBy: {
createdAt: "desc"
},
take: 10
});
// Format memories for context
const memoryContext = memories
.map((m) => {
const content = JSON.parse(m.content);
return `[${m.createdAt}] ${content.text}`;
})
.join("\n");
// Add memory context to prompt
const promptWithMemory = `
Previous interactions:
${memoryContext}
Current conversation:
${context}`;
const response = await llmUtils.getTextFromLLM(
promptWithMemory,
"anthropic/claude-3-sonnet"
);
// Store response in memory
await prisma.memory.create({
data: {
userId: req.input.userId,
agentId: req.input.agentId,
roomId: req.input.roomId,
type: "response",
generator: "llm",
content: JSON.stringify({ text: response })
}
});
await res.send(response);
}
});
// Build the processing pipeline for this example.
const framework = new AgentFramework();
for (const mw of standardMiddleware) {
  framework.use(mw);
}
// Convenience wrapper: push one line of user text through the pipeline.
async function chat(text: string) {
  const input = {
    source: InputSource.NETWORK,
    userId: "example_user",
    agentId: agent.getAgentId(),
    roomId: "example_room",
    type: InputType.TEXT,
    text
  };
  return framework.process(input, agent);
}
Create custom middleware for specialized processing:
// src/middleware/sentiment-analysis.ts
import { z } from "zod";
import { AgentMiddleware } from "../types";
// NOTE(review): confirm which module exports LLMSize — it may live in
// "../types" instead of "../utils/llm".
import { LLMUtils, LLMSize } from "../utils/llm";
// Zod schema for the structured sentiment result requested from the LLM.
const sentimentSchema = z.object({
// Overall polarity label.
sentiment: z.enum(["positive", "negative", "neutral"]),
// Model's self-reported confidence (range not enforced here).
confidence: z.number(),
// Short natural-language justification for the label.
explanation: z.string()
});
// Middleware: classify the sentiment of the incoming text and attach
// the result to the request before passing control down the chain.
export const analyzeSentiment: AgentMiddleware = async (req, res, next) => {
  const llmUtils = new LLMUtils();
  try {
    const analysis = await llmUtils.getObjectFromLLM(
      `Analyze the sentiment of this text: "${req.input.text}"`,
      sentimentSchema,
      LLMSize.SMALL
    );
    // Make the sentiment available to downstream middleware/routes.
    req.sentiment = analysis;
    await next();
  } catch (error) {
    // BUG FIX: `error` is `unknown` in a strict-mode catch clause, so
    // reading `error.message` directly does not compile — narrow first.
    const message = error instanceof Error ? error.message : String(error);
    await res.error(new Error(`Failed to analyze sentiment: ${message}`));
  }
};
// Example pipeline: sentiment analysis runs after input validation and
// before memories load, so later stages can read req.sentiment.
const framework = new AgentFramework();
const pipeline = [validateInput, analyzeSentiment, loadMemories, wrapContext, router];
pipeline.forEach((mw) => framework.use(mw));