# Chatbot

Build AI-powered chatbots with streaming responses and conversation history.

The starter kit includes a complete chatbot system with streaming responses, conversation history, and a polished UI.
## Overview

The chatbot uses:

- **Vercel AI SDK** - for streaming responses and state management
- **tRPC** - for type-safe chat CRUD operations
- **OpenAI** - for the LLM backend (configurable; see the provider sketch below)
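The OpenAI provider reads its API key from the `OPENAI_API_KEY` environment variable by default. If you need an explicit key source or a custom base URL (for example, an OpenAI-compatible proxy), a minimal sketch uses `createOpenAI` from `@ai-sdk/openai`; the `lib/ai.ts` location is our assumption, not a file in the starter kit:

```ts
// lib/ai.ts (hypothetical location)
import { createOpenAI } from '@ai-sdk/openai';

// Explicit configuration; by default the provider already reads
// process.env.OPENAI_API_KEY, so this is only needed for overrides.
export const openai = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY
  // baseURL: 'https://my-openai-proxy.example.com/v1' // optional override
});
```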
## Streaming Endpoint

The main AI chat logic lives in an API route so tokens can be streamed to the client in real time.
```ts
// app/api/ai/chat/route.ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { getSession } from '@/lib/auth/server';
import { prisma } from '@/lib/db';

export async function POST(req: Request) {
  const session = await getSession();

  if (!session) {
    return Response.json({ error: 'Unauthorized' }, { status: 401 });
  }

  const { messages, chatId, organizationId } = await req.json();

  const result = streamText({
    model: openai('gpt-4o-mini'),
    messages,
    async onFinish({ text, usage }) {
      // Save the assistant's response to the database
      if (chatId) {
        const updatedMessages = [
          ...messages,
          { role: 'assistant', content: text }
        ];

        await prisma.aiChat.updateMany({
          where: organizationId
            ? { id: chatId, organizationId }
            : { id: chatId, userId: session.user.id, organizationId: null },
          data: { messages: JSON.stringify(updatedMessages) }
        });
      }
    }
  });

  return result.toTextStreamResponse();
}
```

## UI Components
### Main Chat Component

The `AiChat` component provides a full conversation interface with a history sidebar.
```tsx
import { AiChat } from '@/components/ai/ai-chat';
import { getSession } from '@/lib/auth/server';

export default async function AiPage() {
  const session = await getSession();
  const organizationId = session?.session.activeOrganizationId;

  if (!organizationId) {
    return <div>No active organization</div>;
  }

  return <AiChat organizationId={organizationId} />;
}
```

### Custom Hook
For more control, you can use the `useChat` hook from the Vercel AI SDK directly. Because the streaming endpoint above returns a plain text stream (`toTextStreamResponse`), the hook is configured with `streamProtocol: 'text'` instead of the default data-stream protocol.
```tsx
'use client';

import { useChat } from '@ai-sdk/react';

export function MyCustomAI() {
  const { messages, input, handleInputChange, handleSubmit, isLoading } =
    useChat({
      api: '/api/ai/chat',
      // The endpoint streams plain text (toTextStreamResponse), so tell
      // useChat not to expect the default data-stream protocol.
      streamProtocol: 'text',
      onFinish: (message) => {
        // Handle message completion
        console.log('Message finished:', message);
      }
    });

  return (
    <div>
      {messages.map((message) => (
        <div key={message.id}>
          <strong>{message.role}:</strong> {message.content}
        </div>
      ))}

      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={handleInputChange}
          placeholder="Type a message..."
          disabled={isLoading}
        />
        <button type="submit" disabled={isLoading}>
          Send
        </button>
      </form>
    </div>
  );
}
```

## Conversation History
Chats are stored in the database and can be retrieved via tRPC:
```ts
import { createTRPCRouter, protectedOrganizationProcedure } from '@/trpc/init';
import { TRPCError } from '@trpc/server';
import { z } from 'zod';
import { prisma } from '@/lib/db';

export const organizationAiRouter = createTRPCRouter({
  listChats: protectedOrganizationProcedure
    .input(
      z
        .object({
          limit: z.number().min(1).max(100).optional().default(20),
          offset: z.number().min(0).optional().default(0)
        })
        .optional()
    )
    .query(async ({ ctx, input }) => {
      const limit = input?.limit ?? 20;
      const offset = input?.offset ?? 0;

      // Uses raw SQL for efficient querying without loading full message arrays
      const chats = await prisma.$queryRaw<
        Array<{
          id: string;
          title: string | null;
          pinned: boolean;
          createdAt: Date;
          firstMessageContent: string | null;
        }>
      >`
        SELECT
          id,
          title,
          pinned,
          created_at as "createdAt",
          CASE
            WHEN messages IS NOT NULL
              AND messages::jsonb != '[]'::jsonb
            THEN (messages::jsonb->0->>'content')
            ELSE NULL
          END as "firstMessageContent"
        FROM ai_chat
        WHERE organization_id = ${ctx.organization.id}::uuid
        ORDER BY pinned DESC, created_at DESC
        LIMIT ${limit} OFFSET ${offset}
      `;

      return { chats };
    }),

  getChat: protectedOrganizationProcedure
    .input(z.object({ id: z.string().uuid() }))
    .query(async ({ ctx, input }) => {
      const chat = await prisma.aiChat.findFirst({
        where: { id: input.id, organizationId: ctx.organization.id }
      });

      if (!chat) {
        throw new TRPCError({
          code: 'NOT_FOUND',
          message: 'Chat not found'
        });
      }

      return {
        chat: {
          ...chat,
          messages: chat.messages ? JSON.parse(chat.messages) : []
        }
      };
    }),

  createChat: protectedOrganizationProcedure
    .input(z.object({ title: z.string().optional() }).optional())
    .mutation(async ({ ctx, input }) => {
      const chat = await prisma.aiChat.create({
        data: {
          organizationId: ctx.organization.id,
          title: input?.title || 'New Chat',
          messages: JSON.stringify([])
        }
      });

      return { chat };
    }),

  deleteChat: protectedOrganizationProcedure
    .input(z.object({ id: z.string().uuid() }))
    .mutation(async ({ input, ctx }) => {
      await prisma.aiChat.deleteMany({
        where: {
          id: input.id,
          organizationId: ctx.organization.id
        }
      });
    })
});
```
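On the client, these procedures are called through the app's tRPC React client. The exact wiring varies; the sketch below assumes the router is mounted as `organizationAi` and that a `@/trpc/client` module exports the React client as `api` (both assumptions):

```tsx
'use client';

// Hypothetical usage: assumes the router above is registered as
// `organizationAi` and that `@/trpc/client` exports the React client.
import { api } from '@/trpc/client';

export function ChatList() {
  const { data, isLoading } = api.organizationAi.listChats.useQuery({
    limit: 20,
    offset: 0
  });

  if (isLoading) return <div>Loading…</div>;

  return (
    <ul>
      {data?.chats.map((chat) => (
        <li key={chat.id}>{chat.title ?? chat.firstMessageContent}</li>
      ))}
    </ul>
  );
}
```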
## Tool Calling (Function Calling)

You can add tools that the AI calls to perform actions, such as searching your database or calling external APIs.
```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { z } from 'zod';
import { prisma } from '@/lib/db';

export async function POST(req: Request) {
  const { messages } = await req.json();

  const result = streamText({
    model: openai('gpt-4o-mini'),
    messages,
    tools: {
      findLeads: {
        description: 'Find leads in the database by name',
        parameters: z.object({
          query: z.string().describe('The search query')
        }),
        execute: async ({ query }) => {
          const leads = await prisma.lead.findMany({
            where: {
              name: {
                contains: query,
                mode: 'insensitive'
              }
            },
            take: 10
          });

          return leads;
        }
      },
      getWeather: {
        description: 'Get the current weather for a location',
        parameters: z.object({
          location: z.string().describe('The city name')
        }),
        execute: async ({ location }) => {
          // Call a weather API (encode the location for safe use in a URL)
          const response = await fetch(
            `https://api.weather.com/v1/current?location=${encodeURIComponent(location)}`
          );

          return await response.json();
        }
      }
    }
  });

  return result.toTextStreamResponse();
}
```
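By default, `streamText` stops after the first round of tool execution and returns the raw tool results. If you want the model to continue and phrase an answer based on those results, AI SDK 4.x exposes a `maxSteps` option; the fragment below amends the route above and should be checked against the SDK version you are on:

```ts
const result = streamText({
  model: openai('gpt-4o-mini'),
  messages,
  tools: { /* ...same tools as above... */ },
  // Allow up to 3 model/tool round-trips so the model can summarize
  // the tool output in natural language instead of stopping after
  // the first tool call (AI SDK 4.x option).
  maxSteps: 3
});
```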
## Customizing the Model

You can customize which model to use:
```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

export async function POST(req: Request) {
  const { messages, model } = await req.json();

  const result = streamText({
    model: openai(model || 'gpt-4o-mini'), // Default to gpt-4o-mini
    messages,
    temperature: 0.7, // Control randomness
    maxTokens: 1000 // Limit response length
  });

  return result.toTextStreamResponse();
}
```
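Note that this route trusts whatever model name the client sends. One possible guard, which is our assumption rather than part of the starter kit, is to check the requested model against an allowlist before using it:

```ts
// Hypothetical allowlist so clients can't select arbitrary
// (and potentially much more expensive) models.
const ALLOWED_MODELS = ['gpt-4o-mini', 'gpt-4o'];

const requestedModel = ALLOWED_MODELS.includes(model)
  ? model
  : 'gpt-4o-mini';

const result = streamText({
  model: openai(requestedModel),
  messages
});
```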
## Error Handling

Handle errors gracefully:
```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

export async function POST(req: Request) {
  try {
    const { messages } = await req.json();

    const result = streamText({
      model: openai('gpt-4o-mini'),
      messages
    });

    return result.toTextStreamResponse();
  } catch (error) {
    console.error('AI chat error:', error);
    return Response.json(
      { error: 'Failed to process chat request' },
      { status: 500 }
    );
  }
}
```

## Rate Limiting
Implement rate limiting to control costs:
```ts
import { getSession } from '@/lib/auth/server';
import { rateLimit } from '@/lib/rate-limit';

export async function POST(req: Request) {
  const session = await getSession();

  if (!session) {
    return Response.json({ error: 'Unauthorized' }, { status: 401 });
  }

  // Check the per-user rate limit before doing any model work
  const { success } = await rateLimit.limit(session.user.id);

  if (!success) {
    return Response.json({ error: 'Rate limit exceeded' }, { status: 429 });
  }

  // Process chat request
  // ...
}
```
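The `@/lib/rate-limit` module itself isn't shown here and depends on your infrastructure. A minimal sketch using Upstash (`@upstash/ratelimit` and `@upstash/redis`; an assumption about the setup, not the starter kit's actual implementation) could look like this:

```ts
// lib/rate-limit.ts (hypothetical implementation)
import { Ratelimit } from '@upstash/ratelimit';
import { Redis } from '@upstash/redis';

// Allow 20 requests per user per minute; Redis.fromEnv() reads
// UPSTASH_REDIS_REST_URL and UPSTASH_REDIS_REST_TOKEN.
export const rateLimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(20, '1 m')
});
```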
## Best Practices

- **Stream responses** - Always use streaming for better UX
- **Save conversations** - Store chat history in the database
- **Implement rate limiting** - Control API costs
- **Handle errors** - Provide user-friendly error messages
- **Use tools wisely** - Add tools for database queries and external APIs
- **Monitor usage** - Track token usage and costs (see the sketch below)
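For the last point, the `usage` object already passed to `onFinish` in the streaming endpoint is a natural hook for tracking. In this fragment, the `logTokenUsage` helper is hypothetical, and the AI SDK 4.x field names (`promptTokens`, `completionTokens`, `totalTokens`) should be verified against your SDK version:

```ts
const result = streamText({
  model: openai('gpt-4o-mini'),
  messages,
  async onFinish({ usage }) {
    // Hypothetical helper: persist token counts for cost reporting.
    await logTokenUsage({
      userId: session.user.id,
      promptTokens: usage.promptTokens,
      completionTokens: usage.completionTokens,
      totalTokens: usage.totalTokens
    });
  }
});
```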