Created
August 5, 2024 12:50
-
-
Save x51xxx/4d61e8c675681d165f012a7231d06976 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| // Example of using tools with Ollama and Langchain, | |
| // rewrite the original example of https://github.com/ollama/ollama-js/blob/main/examples/tools/tools.ts | |
| // | |
| /** | |
| * Ollama now supports tool calling with popular models such as Llama 3.1, Mistral-Nemo, etc.(https://ollama.com/search?c=tools) | |
| * This enables a model to answer a given prompt using tool(s) it knows about, | |
| * making it possible for models to perform more complex tasks or interact with the outside world. | |
| * | |
| * Example tools include: | |
| * - Functions and APIs | |
| * - Web browsing | |
| * - Code interpreter | |
| * - much more! | |
| */ | |
| import {z} from 'zod'; | |
| import {ChatOllama} from '@langchain/ollama'; | |
| import {HumanMessage, ToolMessage} from '@langchain/core/messages'; | |
| import {BaseMessage} from '@langchain/core/dist/messages/base'; | |
| import {DynamicStructuredTool, tool} from "@langchain/core/tools"; | |
| // Simulates an API call to get flight times | |
| // In a real application, this would fetch data from a live database or API | |
| function getFlightTimes(departure: string, arrival: string) { | |
| console.warn('getFlightTimes', departure, arrival); | |
| const flights: { [key: string]: { departure: string; arrival: string; duration: string } } = { | |
| 'NYC-LAX': {departure: '08:00 AM', arrival: '11:30 AM', duration: '5h 30m'}, | |
| 'LAX-NYC': {departure: '02:00 PM', arrival: '10:30 PM', duration: '5h 30m'}, | |
| 'LHR-JFK': {departure: '10:00 AM', arrival: '01:00 PM', duration: '8h 00m'}, | |
| 'JFK-LHR': {departure: '09:00 PM', arrival: '09:00 AM', duration: '7h 00m'}, | |
| 'CDG-DXB': {departure: '11:00 AM', arrival: '08:00 PM', duration: '6h 00m'}, | |
| 'DXB-CDG': {departure: '03:00 AM', arrival: '07:30 AM', duration: '7h 30m'} | |
| }; | |
| const key = `${departure}-${arrival}`.toUpperCase(); | |
| return JSON.stringify(flights[key] || {error: 'Flight not found'}); | |
| } | |
| async function run(model: string) { | |
| // Initialize conversation with a user query | |
| const messages: BaseMessage[] = [ | |
| new HumanMessage({content: 'What is the flight time from New York (NYC) to Los Angeles (LAX)?'}), | |
| ]; | |
| const llm = new ChatOllama({ | |
| baseUrl: 'http://localhost:11434', | |
| model, | |
| temperature: 0, | |
| verbose: true, | |
| }); | |
| const GetFlightTimesTool = tool(({departure, arrival}) => getFlightTimes(departure, arrival), { | |
| name: "get_flight_times", | |
| description: 'Get the flight times between two cities', | |
| schema: z.object({ | |
| departure: z.string().describe('The departure city (airport code)'), | |
| arrival: z.string().describe('The arrival city (airport code)'), | |
| }).required(), | |
| }); | |
| const llmWithTools = llm.bindTools([GetFlightTimesTool]); | |
| // First API call: Send the query and function description to the model | |
| const response = await llmWithTools.invoke(messages); | |
| /** | |
| * Remember, while the name “tool calling” implies that the model is directly performing some action, | |
| * this is actually not the case! The model only generates the arguments to a tool, | |
| * and actually running the tool (or not) is up to the user. | |
| **/ | |
| // Next let’s invoke the tool functions using the args the model populated! | |
| if (response.tool_calls) { | |
| const availableFunctions: { [key: string]: DynamicStructuredTool<any> } = { | |
| get_flight_times: GetFlightTimesTool, | |
| }; | |
| for (const tool of response.tool_calls) { | |
| const functionToCall = availableFunctions[tool.name]; | |
| const functionResponse = await functionToCall.invoke( | |
| tool.args, | |
| ); | |
| // Add function response to the conversation | |
| messages.push(new ToolMessage({ | |
| content: functionResponse, | |
| name: tool.name, | |
| additional_kwargs: { | |
| function_call: { | |
| arguments: JSON.stringify(tool.args), | |
| name: tool.name, | |
| }, | |
| }, | |
| }, tool.id || ''+new Date(), tool.name)) | |
| } | |
| } | |
| // Second API call: Get final response from the model | |
| const finalResponse = await llmWithTools.invoke(messages); | |
| console.log(finalResponse.content); | |
| } | |
| run('mistral-nemo').catch(error => console.error('An error occurred:', error)); | |
| // Answer: "The flight departs at 08:00 AM and arrives at 11:30 AM, with a duration of 5 hours and 30 minutes. (Duration is local time)" |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Hi, I want to use ChatOpenAI with meta-llama3.1-70b-instruct. As soon as I switch to meta-llama it stops working — how can I use it? My function looks like the code below:
tool_output = None
tools = [run_awx_template, execute_check_host_after_repair,get_alerts,get_resource_usage]
proxy_client = get_proxy_client('gen-ai-hub')
llm = ChatOpenAI(proxy_model_name='gpt-4', proxy_client=proxy_client)
llm_with_tools = llm.bind_tools(tools)
def tools_query(query: str) -> str:
logger.info(f"Received query: {query}")