import {
  CopilotRuntime,
  copilotRuntimeNextJSAppRouterEndpoint,
  LangChainAdapter,
  OpenAIAdapter,
  langGraphPlatformEndpoint,
} from "@copilotkit/runtime";
import { NextRequest } from "next/server";
import { ChatOpenAI } from "@langchain/openai";
import config from "../../config";

// Local chat model served by Ollama, accessed through its OpenAI-compatible API
// via LangChain's ChatOpenAI client.
const lc_ollamaModel = new ChatOpenAI({
  model: "qwen3:14b", // the local model to use
  // Ollama ignores authentication, but ChatOpenAI throws at construction if no
  // API key is resolvable — a placeholder keeps the route independent of
  // OPENAI_API_KEY. NOTE(review): confirm no real OpenAI key is needed here.
  apiKey: "ollama",
  configuration: {
    baseURL: "http://localhost:11434/v1", // the Ollama OpenAI-compat proxy URL
  },
});

const copilotKitModel = lc_ollamaModel;

// Adapter that streams CopilotKit chat requests through the LangChain model.
const langchainServiceAdapter = new LangChainAdapter({
  chainFn: async ({ messages, tools }) => {
    // The OpenAI-compatible API rejects requests with an empty `tools` array,
    // so only bind tools when at least one is present.
    const model = tools?.length ? copilotKitModel.bindTools(tools) : copilotKitModel;
    return model.stream(messages);
  },
});

// CopilotKit runtime wired to a remote LangGraph agent endpoint.
const runtime = new CopilotRuntime({
  remoteEndpoints: [
    langGraphPlatformEndpoint({
      deploymentUrl: config.agent.url,
      langsmithApiKey: process.env.LANGSMITH_API_KEY || "", // only used in LangGraph Platform (cloud) deployments
      agents: [
        {
          name: config.agent.name,
          description: config.agent.description,
        },
      ],
    }),
  ],
});

/**
 * Next.js App Router handler for POST /api/copilotkit.
 * Delegates each request to the CopilotKit runtime using the LangChain adapter.
 */
export const POST = async (req: NextRequest) => {
  const { handleRequest } = copilotRuntimeNextJSAppRouterEndpoint({
    runtime,
    serviceAdapter: langchainServiceAdapter,
    endpoint: "/api/copilotkit",
  });

  return handleRequest(req);
};