Created
August 6, 2024 18:15
-
-
Save LuisFX/4a6d8138cd19bbc786d5b6b65a220da8 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| { | |
| "nodes": [ | |
| { | |
| "width": 300, | |
| "height": 508, | |
| "id": "llmChain_0", | |
| "position": { | |
| "x": 1361.675149891282, | |
| "y": -358.73398583251014 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "llmChain_0", | |
| "label": "LLM Chain", | |
| "version": 3, | |
| "name": "llmChain", | |
| "type": "LLMChain", | |
| "baseClasses": [ | |
| "LLMChain", | |
| "BaseChain", | |
| "Runnable" | |
| ], | |
| "category": "Chains", | |
| "description": "Chain to run queries against LLMs", | |
| "inputParams": [ | |
| { | |
| "label": "Chain Name", | |
| "name": "chainName", | |
| "type": "string", | |
| "placeholder": "Name Your Chain", | |
| "optional": true, | |
| "id": "llmChain_0-input-chainName-string" | |
| } | |
| ], | |
| "inputAnchors": [ | |
| { | |
| "label": "Language Model", | |
| "name": "model", | |
| "type": "BaseLanguageModel", | |
| "id": "llmChain_0-input-model-BaseLanguageModel" | |
| }, | |
| { | |
| "label": "Prompt", | |
| "name": "prompt", | |
| "type": "BasePromptTemplate", | |
| "id": "llmChain_0-input-prompt-BasePromptTemplate" | |
| }, | |
| { | |
| "label": "Output Parser", | |
| "name": "outputParser", | |
| "type": "BaseLLMOutputParser", | |
| "optional": true, | |
| "id": "llmChain_0-input-outputParser-BaseLLMOutputParser" | |
| }, | |
| { | |
| "label": "Input Moderation", | |
| "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", | |
| "name": "inputModeration", | |
| "type": "Moderation", | |
| "optional": true, | |
| "list": true, | |
| "id": "llmChain_0-input-inputModeration-Moderation" | |
| } | |
| ], | |
| "inputs": { | |
| "model": "{{chatOpenAI_0.data.instance}}", | |
| "prompt": "{{chatPromptTemplate_0.data.instance}}", | |
| "outputParser": "{{advancedStructuredOutputParser_0.data.instance}}", | |
| "chainName": "llm_chain", | |
| "inputModeration": "" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "name": "output", | |
| "label": "Output", | |
| "type": "options", | |
| "options": [ | |
| { | |
| "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", | |
| "name": "llmChain", | |
| "label": "LLM Chain", | |
| "type": "LLMChain | BaseChain | Runnable" | |
| }, | |
| { | |
| "id": "llmChain_0-output-outputPrediction-string|json", | |
| "name": "outputPrediction", | |
| "label": "Output Prediction", | |
| "type": "string | json" | |
| } | |
| ], | |
| "default": "llmChain" | |
| } | |
| ], | |
| "outputs": { | |
| "output": "llmChain" | |
| }, | |
| "selected": false | |
| }, | |
| "positionAbsolute": { | |
| "x": 1361.675149891282, | |
| "y": -358.73398583251014 | |
| }, | |
| "selected": false, | |
| "dragging": false | |
| }, | |
| { | |
| "width": 300, | |
| "height": 690, | |
| "id": "chatPromptTemplate_0", | |
| "position": { | |
| "x": 549.2560109388875, | |
| "y": -859.5347338810358 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "chatPromptTemplate_0", | |
| "label": "Chat Prompt Template", | |
| "version": 1, | |
| "name": "chatPromptTemplate", | |
| "type": "ChatPromptTemplate", | |
| "baseClasses": [ | |
| "ChatPromptTemplate", | |
| "BaseChatPromptTemplate", | |
| "BasePromptTemplate", | |
| "Runnable" | |
| ], | |
| "category": "Prompts", | |
| "description": "Schema to represent a chat prompt", | |
| "inputParams": [ | |
| { | |
| "label": "System Message", | |
| "name": "systemMessagePrompt", | |
| "type": "string", | |
| "rows": 4, | |
| "placeholder": "You are a helpful assistant that translates {input_language} to {output_language}.", | |
| "id": "chatPromptTemplate_0-input-systemMessagePrompt-string" | |
| }, | |
| { | |
| "label": "Human Message", | |
| "name": "humanMessagePrompt", | |
| "type": "string", | |
| "rows": 4, | |
| "placeholder": "{text}", | |
| "id": "chatPromptTemplate_0-input-humanMessagePrompt-string" | |
| }, | |
| { | |
| "label": "Format Prompt Values", | |
| "name": "promptValues", | |
| "type": "json", | |
| "optional": true, | |
| "acceptVariable": true, | |
| "list": true, | |
| "id": "chatPromptTemplate_0-input-promptValues-json" | |
| } | |
| ], | |
| "inputAnchors": [], | |
| "inputs": { | |
| "systemMessagePrompt": "This AI is designed to only output information in JSON format without exception. This AI can only output JSON and will never output any other text.\n\nWhen asked to correct itself, this AI will only output the corrected JSON and never any other text.", | |
| "humanMessagePrompt": "{text}", | |
| "promptValues": "" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", | |
| "name": "chatPromptTemplate", | |
| "label": "ChatPromptTemplate", | |
| "type": "ChatPromptTemplate | BaseChatPromptTemplate | BasePromptTemplate | Runnable" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 549.2560109388875, | |
| "y": -859.5347338810358 | |
| }, | |
| "dragging": false | |
| }, | |
| { | |
| "width": 300, | |
| "height": 670, | |
| "id": "chatOpenAI_0", | |
| "position": { | |
| "x": 943.8306342452494, | |
| "y": -926.178099392445 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "chatOpenAI_0", | |
| "label": "ChatOpenAI", | |
| "version": 6, | |
| "name": "chatOpenAI", | |
| "type": "ChatOpenAI", | |
| "baseClasses": [ | |
| "ChatOpenAI", | |
| "BaseChatModel", | |
| "BaseLanguageModel", | |
| "Runnable" | |
| ], | |
| "category": "Chat Models", | |
| "description": "Wrapper around OpenAI large language models that use the Chat endpoint", | |
| "inputParams": [ | |
| { | |
| "label": "Connect Credential", | |
| "name": "credential", | |
| "type": "credential", | |
| "credentialNames": [ | |
| "openAIApi" | |
| ], | |
| "id": "chatOpenAI_0-input-credential-credential" | |
| }, | |
| { | |
| "label": "Model Name", | |
| "name": "modelName", | |
| "type": "asyncOptions", | |
| "loadMethod": "listModels", | |
| "default": "gpt-3.5-turbo", | |
| "id": "chatOpenAI_0-input-modelName-options" | |
| }, | |
| { | |
| "label": "Temperature", | |
| "name": "temperature", | |
| "type": "number", | |
| "step": 0.1, | |
| "default": 0.9, | |
| "optional": true, | |
| "id": "chatOpenAI_0-input-temperature-number" | |
| }, | |
| { | |
| "label": "Max Tokens", | |
| "name": "maxTokens", | |
| "type": "number", | |
| "step": 1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-maxTokens-number" | |
| }, | |
| { | |
| "label": "Top Probability", | |
| "name": "topP", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-topP-number" | |
| }, | |
| { | |
| "label": "Frequency Penalty", | |
| "name": "frequencyPenalty", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-frequencyPenalty-number" | |
| }, | |
| { | |
| "label": "Presence Penalty", | |
| "name": "presencePenalty", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-presencePenalty-number" | |
| }, | |
| { | |
| "label": "Timeout", | |
| "name": "timeout", | |
| "type": "number", | |
| "step": 1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-timeout-number" | |
| }, | |
| { | |
| "label": "BasePath", | |
| "name": "basepath", | |
| "type": "string", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-basepath-string" | |
| }, | |
| { | |
| "label": "BaseOptions", | |
| "name": "baseOptions", | |
| "type": "json", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-baseOptions-json" | |
| }, | |
| { | |
| "label": "Allow Image Uploads", | |
| "name": "allowImageUploads", | |
| "type": "boolean", | |
| "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", | |
| "default": false, | |
| "optional": true, | |
| "id": "chatOpenAI_0-input-allowImageUploads-boolean" | |
| }, | |
| { | |
| "label": "Image Resolution", | |
| "description": "This parameter controls the resolution in which the model views the image.", | |
| "name": "imageResolution", | |
| "type": "options", | |
| "options": [ | |
| { | |
| "label": "Low", | |
| "name": "low" | |
| }, | |
| { | |
| "label": "High", | |
| "name": "high" | |
| }, | |
| { | |
| "label": "Auto", | |
| "name": "auto" | |
| } | |
| ], | |
| "default": "low", | |
| "optional": false, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_0-input-imageResolution-options" | |
| } | |
| ], | |
| "inputAnchors": [ | |
| { | |
| "label": "Cache", | |
| "name": "cache", | |
| "type": "BaseCache", | |
| "optional": true, | |
| "id": "chatOpenAI_0-input-cache-BaseCache" | |
| } | |
| ], | |
| "inputs": { | |
| "cache": "", | |
| "modelName": "gpt-4o", | |
| "temperature": 0, | |
| "maxTokens": "", | |
| "topP": "", | |
| "frequencyPenalty": "", | |
| "presencePenalty": "", | |
| "timeout": "", | |
| "basepath": "", | |
| "baseOptions": "", | |
| "allowImageUploads": true, | |
| "imageResolution": "low" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", | |
| "name": "chatOpenAI", | |
| "label": "ChatOpenAI", | |
| "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 943.8306342452494, | |
| "y": -926.178099392445 | |
| }, | |
| "dragging": false | |
| }, | |
| { | |
| "width": 300, | |
| "height": 454, | |
| "id": "advancedStructuredOutputParser_0", | |
| "position": { | |
| "x": 929.8439354029844, | |
| "y": -130.29433599524745 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "advancedStructuredOutputParser_0", | |
| "label": "Advanced Structured Output Parser", | |
| "version": 1, | |
| "name": "advancedStructuredOutputParser", | |
| "type": "AdvancedStructuredOutputParser", | |
| "baseClasses": [ | |
| "AdvancedStructuredOutputParser", | |
| "BaseLLMOutputParser", | |
| "Runnable" | |
| ], | |
| "category": "Output Parsers", | |
| "description": "Parse the output of an LLM call into a given structure by providing a Zod schema.", | |
| "inputParams": [ | |
| { | |
| "label": "Autofix", | |
| "name": "autofixParser", | |
| "type": "boolean", | |
| "optional": true, | |
| "description": "In the event that the first call fails, will make another call to the model to fix any errors.", | |
| "id": "advancedStructuredOutputParser_0-input-autofixParser-boolean" | |
| }, | |
| { | |
| "label": "Example JSON", | |
| "name": "exampleJson", | |
| "type": "string", | |
| "description": "Zod schema for the output of the model", | |
| "rows": 10, | |
| "default": "z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n})", | |
| "id": "advancedStructuredOutputParser_0-input-exampleJson-string" | |
| } | |
| ], | |
| "inputAnchors": [], | |
| "inputs": { | |
| "autofixParser": true, | |
| "exampleJson": "z.array(z.object({\n title: z.string(), // Title of the movie as a string\n yearOfRelease: z.number().int(), // Release year as an integer number,\n genres: z.enum([\n \"Action\", \"Comedy\", \"Drama\", \"Fantasy\", \"Horror\",\n \"Mystery\", \"Romance\", \"Science Fiction\", \"Thriller\", \"Documentary\"\n ]).array().max(2), // Array of genres, max of 2 from the defined enum\n shortDescription: z.string().max(500) // Short description, max 500 characters\n}))" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", | |
| "name": "advancedStructuredOutputParser", | |
| "label": "AdvancedStructuredOutputParser", | |
| "type": "AdvancedStructuredOutputParser | BaseLLMOutputParser | Runnable" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "selected": false, | |
| "dragging": false, | |
| "positionAbsolute": { | |
| "x": 929.8439354029844, | |
| "y": -130.29433599524745 | |
| } | |
| }, | |
| { | |
| "id": "seqStart_0", | |
| "position": { | |
| "x": 1733.160515665394, | |
| "y": 1039.7112688874022 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "seqStart_0", | |
| "label": "Start", | |
| "version": 1, | |
| "name": "seqStart", | |
| "type": "Start", | |
| "baseClasses": [ | |
| "Start" | |
| ], | |
| "category": "Sequential Agents", | |
| "description": "Starting point of the conversation", | |
| "inputParams": [], | |
| "inputAnchors": [ | |
| { | |
| "label": "Chat Model", | |
| "name": "model", | |
| "type": "BaseChatModel", | |
| "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", | |
| "id": "seqStart_0-input-model-BaseChatModel" | |
| }, | |
| { | |
| "label": "Agent Memory", | |
| "name": "agentMemory", | |
| "type": "BaseCheckpointSaver", | |
| "description": "Save the state of the agent", | |
| "optional": true, | |
| "id": "seqStart_0-input-agentMemory-BaseCheckpointSaver" | |
| }, | |
| { | |
| "label": "State", | |
| "name": "state", | |
| "type": "State", | |
| "description": "State is an object that is updated by nodes in the graph, passing from one node to another. By default, state contains \"messages\" that got updated with each message sent and received.", | |
| "optional": true, | |
| "id": "seqStart_0-input-state-State" | |
| }, | |
| { | |
| "label": "Input Moderation", | |
| "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", | |
| "name": "inputModeration", | |
| "type": "Moderation", | |
| "optional": true, | |
| "list": true, | |
| "id": "seqStart_0-input-inputModeration-Moderation" | |
| } | |
| ], | |
| "inputs": { | |
| "model": "{{chatOpenAI_1.data.instance}}", | |
| "agentMemory": "{{agentMemory_0.data.instance}}", | |
| "state": "", | |
| "inputModeration": "" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "seqStart_0-output-seqStart-Start", | |
| "name": "seqStart", | |
| "label": "Start", | |
| "description": "Starting point of the conversation", | |
| "type": "Start" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 383, | |
| "positionAbsolute": { | |
| "x": 1733.160515665394, | |
| "y": 1039.7112688874022 | |
| }, | |
| "selected": false, | |
| "dragging": false | |
| }, | |
| { | |
| "id": "seqAgent_0", | |
| "position": { | |
| "x": 2170.8646432734445, | |
| "y": 652.6590369709021 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "seqAgent_0", | |
| "label": "Agent", | |
| "version": 1, | |
| "name": "seqAgent", | |
| "type": "Agent", | |
| "baseClasses": [ | |
| "Agent" | |
| ], | |
| "category": "Sequential Agents", | |
| "description": "Agent that can execute tools", | |
| "inputParams": [ | |
| { | |
| "label": "Agent Name", | |
| "name": "agentName", | |
| "type": "string", | |
| "placeholder": "Agent", | |
| "id": "seqAgent_0-input-agentName-string" | |
| }, | |
| { | |
| "label": "System Prompt", | |
| "name": "systemMessagePrompt", | |
| "type": "string", | |
| "rows": 4, | |
| "optional": true, | |
| "default": "You are a research assistant who can search for up-to-date info using search engine.", | |
| "id": "seqAgent_0-input-systemMessagePrompt-string" | |
| }, | |
| { | |
| "label": "Human Prompt", | |
| "name": "humanMessagePrompt", | |
| "type": "string", | |
| "description": "This prompt will be added at the end of the messages as human message", | |
| "rows": 4, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "seqAgent_0-input-humanMessagePrompt-string" | |
| }, | |
| { | |
| "label": "Require Approval", | |
| "name": "interrupt", | |
| "description": "Require approval before executing tools. Will proceed when tools are not called", | |
| "type": "boolean", | |
| "optional": true, | |
| "id": "seqAgent_0-input-interrupt-boolean" | |
| }, | |
| { | |
| "label": "Format Prompt Values", | |
| "name": "promptValues", | |
| "description": "Assign values to the prompt variables. You can also use $flow.state.<variable-name> to get the state value", | |
| "type": "json", | |
| "optional": true, | |
| "acceptVariable": true, | |
| "list": true, | |
| "id": "seqAgent_0-input-promptValues-json" | |
| }, | |
| { | |
| "label": "Approval Prompt", | |
| "name": "approvalPrompt", | |
| "description": "Prompt for approval. Only applicable if \"Require Approval\" is enabled", | |
| "type": "string", | |
| "default": "You are about to execute tool: {tools}. Ask if user want to proceed", | |
| "rows": 4, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "seqAgent_0-input-approvalPrompt-string" | |
| }, | |
| { | |
| "label": "Approve Button Text", | |
| "name": "approveButtonText", | |
| "description": "Text for approve button. Only applicable if \"Require Approval\" is enabled", | |
| "type": "string", | |
| "default": "Yes", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "seqAgent_0-input-approveButtonText-string" | |
| }, | |
| { | |
| "label": "Reject Button Text", | |
| "name": "rejectButtonText", | |
| "description": "Text for reject button. Only applicable if \"Require Approval\" is enabled", | |
| "type": "string", | |
| "default": "No", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "seqAgent_0-input-rejectButtonText-string" | |
| }, | |
| { | |
| "label": "Update State", | |
| "name": "updateStateMemory", | |
| "type": "tabs", | |
| "tabIdentifier": "selectedUpdateStateMemoryTab", | |
| "additionalParams": true, | |
| "default": "updateStateMemoryUI", | |
| "tabs": [ | |
| { | |
| "label": "Update State (Table)", | |
| "name": "updateStateMemoryUI", | |
| "type": "datagrid", | |
| "hint": { | |
| "label": "How to use", | |
| "value": "\n1. Key and value pair to be updated. For example: if you have the following State:\n | Key | Operation | Default Value |\n |-----------|---------------|-------------------|\n | user | Replace | |\n\n You can update the \"user\" value with the following:\n | Key | Value |\n |-----------|-----------|\n | user | john doe |\n\n2. If you want to use the agent's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"output\": \"Hello! How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can do the following:\n | Key | Value |\n |-----------|-------------------------------------------|\n | user | `$flow.output.usedTools[0].toolOutput` |\n\n3. You can get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.<variable-name>`\n\n" | |
| }, | |
| "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values", | |
| "datagrid": [ | |
| { | |
| "field": "key", | |
| "headerName": "Key", | |
| "type": "asyncSingleSelect", | |
| "loadMethod": "loadStateKeys", | |
| "flex": 0.5, | |
| "editable": true | |
| }, | |
| { | |
| "field": "value", | |
| "headerName": "Value", | |
| "type": "freeSolo", | |
| "valueOptions": [ | |
| { | |
| "label": "Agent Output (string)", | |
| "value": "$flow.output.content" | |
| }, | |
| { | |
| "label": "Used Tools (array)", | |
| "value": "$flow.output.usedTools" | |
| }, | |
| { | |
| "label": "First Tool Output (string)", | |
| "value": "$flow.output.usedTools[0].toolOutput" | |
| }, | |
| { | |
| "label": "Source Documents (array)", | |
| "value": "$flow.output.sourceDocuments" | |
| }, | |
| { | |
| "label": "Global variable (string)", | |
| "value": "$vars.<variable-name>" | |
| }, | |
| { | |
| "label": "Input Question (string)", | |
| "value": "$flow.input" | |
| }, | |
| { | |
| "label": "Session Id (string)", | |
| "value": "$flow.sessionId" | |
| }, | |
| { | |
| "label": "Chat Id (string)", | |
| "value": "$flow.chatId" | |
| }, | |
| { | |
| "label": "Chatflow Id (string)", | |
| "value": "$flow.chatflowId" | |
| } | |
| ], | |
| "editable": true, | |
| "flex": 1 | |
| } | |
| ], | |
| "optional": true, | |
| "additionalParams": true | |
| }, | |
| { | |
| "label": "Update State (Code)", | |
| "name": "updateStateMemoryCode", | |
| "type": "code", | |
| "hint": { | |
| "label": "How to use", | |
| "value": "\n1. Return the key value JSON object. For example: if you have the following State:\n ```json\n {\n \"user\": null\n }\n ```\n\n You can update the \"user\" value by returning the following:\n ```js\n return {\n \"user\": \"john doe\"\n }\n ```\n\n2. If you want to use the agent's output as the value to update state, it is available as `$flow.output` with the following structure:\n ```json\n {\n \"content\": \"Hello! How can I assist you today?\",\n \"usedTools\": [\n {\n \"tool\": \"tool-name\",\n \"toolInput\": \"{foo: var}\",\n \"toolOutput\": \"This is the tool's output\"\n }\n ],\n \"sourceDocuments\": [\n {\n \"pageContent\": \"This is the page content\",\n \"metadata\": \"{foo: var}\",\n }\n ],\n }\n ```\n\n For example, if the `toolOutput` is the value you want to update the state with, you can return the following:\n ```js\n return {\n \"user\": $flow.output.usedTools[0].toolOutput\n }\n ```\n\n3. You can also get default flow config, including the current \"state\":\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n4. You can get custom variables: `$vars.<variable-name>`\n\n" | |
| }, | |
| "description": "This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values. Must return an object representing the state", | |
| "hideCodeExecute": true, | |
| "codeExample": "const result = $flow.output;\n\n/* Suppose we have a custom State schema like this:\n* {\n aggregate: {\n value: (x, y) => x.concat(y),\n default: () => []\n }\n }\n*/\n\nreturn {\n aggregate: [result.content]\n};", | |
| "optional": true, | |
| "additionalParams": true | |
| } | |
| ], | |
| "id": "seqAgent_0-input-updateStateMemory-tabs" | |
| }, | |
| { | |
| "label": "Max Iterations", | |
| "name": "maxIterations", | |
| "type": "number", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "seqAgent_0-input-maxIterations-number" | |
| } | |
| ], | |
| "inputAnchors": [ | |
| { | |
| "label": "Tools", | |
| "name": "tools", | |
| "type": "Tool", | |
| "list": true, | |
| "id": "seqAgent_0-input-tools-Tool" | |
| }, | |
| { | |
| "label": "Start | Agent | LLM | Tool Node", | |
| "name": "sequentialNode", | |
| "type": "Start | Agent | LLMNode | ToolNode", | |
| "list": true, | |
| "id": "seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" | |
| }, | |
| { | |
| "label": "Chat Model", | |
| "name": "model", | |
| "type": "BaseChatModel", | |
| "optional": true, | |
| "description": "Overwrite model to be used for this agent", | |
| "id": "seqAgent_0-input-model-BaseChatModel" | |
| } | |
| ], | |
| "inputs": { | |
| "agentName": "Supervisor", | |
| "systemMessagePrompt": "You are a supervisor, use workers to get work done. You have access to tools:\n\n- data_parser_tool\n\nNever do the work yourself, ALWAYS use the tools at your disposal to do the actual work.", | |
| "humanMessagePrompt": "", | |
| "tools": [ | |
| "{{chainTool_0.data.instance}}" | |
| ], | |
| "sequentialNode": [ | |
| "{{seqStart_0.data.instance}}" | |
| ], | |
| "model": "", | |
| "interrupt": "", | |
| "promptValues": "", | |
| "approvalPrompt": "You are about to execute tool: {tools}. Ask if user want to proceed", | |
| "approveButtonText": "Yes", | |
| "rejectButtonText": "No", | |
| "updateStateMemory": "updateStateMemoryUI", | |
| "maxIterations": "" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "seqAgent_0-output-seqAgent-Agent", | |
| "name": "seqAgent", | |
| "label": "Agent", | |
| "description": "Agent that can execute tools", | |
| "type": "Agent" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 860, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 2170.8646432734445, | |
| "y": 652.6590369709021 | |
| }, | |
| "dragging": false | |
| }, | |
| { | |
| "id": "chainTool_0", | |
| "position": { | |
| "x": 1719.639059958494, | |
| "y": 88.19151189737309 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "chainTool_0", | |
| "label": "Chain Tool", | |
| "version": 1, | |
| "name": "chainTool", | |
| "type": "ChainTool", | |
| "baseClasses": [ | |
| "ChainTool", | |
| "DynamicTool", | |
| "Tool", | |
| "StructuredTool", | |
| "Runnable" | |
| ], | |
| "category": "Tools", | |
| "description": "Use a chain as allowed tool for agent", | |
| "inputParams": [ | |
| { | |
| "label": "Chain Name", | |
| "name": "name", | |
| "type": "string", | |
| "placeholder": "state-of-union-qa", | |
| "id": "chainTool_0-input-name-string" | |
| }, | |
| { | |
| "label": "Chain Description", | |
| "name": "description", | |
| "type": "string", | |
| "rows": 3, | |
| "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", | |
| "id": "chainTool_0-input-description-string" | |
| }, | |
| { | |
| "label": "Return Direct", | |
| "name": "returnDirect", | |
| "type": "boolean", | |
| "optional": true, | |
| "id": "chainTool_0-input-returnDirect-boolean" | |
| } | |
| ], | |
| "inputAnchors": [ | |
| { | |
| "label": "Base Chain", | |
| "name": "baseChain", | |
| "type": "BaseChain", | |
| "id": "chainTool_0-input-baseChain-BaseChain" | |
| } | |
| ], | |
| "inputs": { | |
| "name": "data_parser_tool", | |
| "description": "You are the data_parser_tool, parse the data as your schema requires", | |
| "returnDirect": "", | |
| "baseChain": "{{llmChain_0.data.instance}}" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable", | |
| "name": "chainTool", | |
| "label": "ChainTool", | |
| "description": "Use a chain as allowed tool for agent", | |
| "type": "ChainTool | DynamicTool | Tool | StructuredTool | Runnable" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 603, | |
| "selected": false, | |
| "dragging": false, | |
| "positionAbsolute": { | |
| "x": 1719.639059958494, | |
| "y": 88.19151189737309 | |
| } | |
| }, | |
| { | |
| "id": "seqEnd_0", | |
| "position": { | |
| "x": 2613.3913302678243, | |
| "y": 1247.319708197435 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "seqEnd_0", | |
| "label": "End", | |
| "version": 1, | |
| "name": "seqEnd", | |
| "type": "End", | |
| "baseClasses": [ | |
| "End" | |
| ], | |
| "category": "Sequential Agents", | |
| "description": "End conversation", | |
| "inputParams": [], | |
| "inputAnchors": [ | |
| { | |
| "label": "Start | Agent | LLM | Tool Node", | |
| "name": "sequentialNode", | |
| "type": "Start | Agent | LLMNode | ToolNode", | |
| "id": "seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" | |
| } | |
| ], | |
| "inputs": { | |
| "sequentialNode": "{{seqAgent_0.data.instance}}" | |
| }, | |
| "outputAnchors": [], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 143, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 2613.3913302678243, | |
| "y": 1247.319708197435 | |
| }, | |
| "dragging": false | |
| }, | |
| { | |
| "id": "chatOpenAI_1", | |
| "position": { | |
| "x": 1284.5539585754805, | |
| "y": 534.2848236217169 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "chatOpenAI_1", | |
| "label": "ChatOpenAI", | |
| "version": 6, | |
| "name": "chatOpenAI", | |
| "type": "ChatOpenAI", | |
| "baseClasses": [ | |
| "ChatOpenAI", | |
| "BaseChatModel", | |
| "BaseLanguageModel", | |
| "Runnable" | |
| ], | |
| "category": "Chat Models", | |
| "description": "Wrapper around OpenAI large language models that use the Chat endpoint", | |
| "inputParams": [ | |
| { | |
| "label": "Connect Credential", | |
| "name": "credential", | |
| "type": "credential", | |
| "credentialNames": [ | |
| "openAIApi" | |
| ], | |
| "id": "chatOpenAI_1-input-credential-credential" | |
| }, | |
| { | |
| "label": "Model Name", | |
| "name": "modelName", | |
| "type": "asyncOptions", | |
| "loadMethod": "listModels", | |
| "default": "gpt-3.5-turbo", | |
| "id": "chatOpenAI_1-input-modelName-asyncOptions" | |
| }, | |
| { | |
| "label": "Temperature", | |
| "name": "temperature", | |
| "type": "number", | |
| "step": 0.1, | |
| "default": 0.9, | |
| "optional": true, | |
| "id": "chatOpenAI_1-input-temperature-number" | |
| }, | |
| { | |
| "label": "Max Tokens", | |
| "name": "maxTokens", | |
| "type": "number", | |
| "step": 1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-maxTokens-number" | |
| }, | |
| { | |
| "label": "Top Probability", | |
| "name": "topP", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-topP-number" | |
| }, | |
| { | |
| "label": "Frequency Penalty", | |
| "name": "frequencyPenalty", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-frequencyPenalty-number" | |
| }, | |
| { | |
| "label": "Presence Penalty", | |
| "name": "presencePenalty", | |
| "type": "number", | |
| "step": 0.1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-presencePenalty-number" | |
| }, | |
| { | |
| "label": "Timeout", | |
| "name": "timeout", | |
| "type": "number", | |
| "step": 1, | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-timeout-number" | |
| }, | |
| { | |
| "label": "BasePath", | |
| "name": "basepath", | |
| "type": "string", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-basepath-string" | |
| }, | |
| { | |
| "label": "BaseOptions", | |
| "name": "baseOptions", | |
| "type": "json", | |
| "optional": true, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-baseOptions-json" | |
| }, | |
| { | |
| "label": "Allow Image Uploads", | |
| "name": "allowImageUploads", | |
| "type": "boolean", | |
| "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", | |
| "default": false, | |
| "optional": true, | |
| "id": "chatOpenAI_1-input-allowImageUploads-boolean" | |
| }, | |
| { | |
| "label": "Image Resolution", | |
| "description": "This parameter controls the resolution in which the model views the image.", | |
| "name": "imageResolution", | |
| "type": "options", | |
| "options": [ | |
| { | |
| "label": "Low", | |
| "name": "low" | |
| }, | |
| { | |
| "label": "High", | |
| "name": "high" | |
| }, | |
| { | |
| "label": "Auto", | |
| "name": "auto" | |
| } | |
| ], | |
| "default": "low", | |
| "optional": false, | |
| "additionalParams": true, | |
| "id": "chatOpenAI_1-input-imageResolution-options" | |
| } | |
| ], | |
| "inputAnchors": [ | |
| { | |
| "label": "Cache", | |
| "name": "cache", | |
| "type": "BaseCache", | |
| "optional": true, | |
| "id": "chatOpenAI_1-input-cache-BaseCache" | |
| } | |
| ], | |
| "inputs": { | |
| "cache": "", | |
| "modelName": "gpt-4o", | |
| "temperature": 0.9, | |
| "maxTokens": "", | |
| "topP": "", | |
| "frequencyPenalty": "", | |
| "presencePenalty": "", | |
| "timeout": "", | |
| "basepath": "", | |
| "baseOptions": "", | |
| "allowImageUploads": "", | |
| "imageResolution": "low" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", | |
| "name": "chatOpenAI", | |
| "label": "ChatOpenAI", | |
| "description": "Wrapper around OpenAI large language models that use the Chat endpoint", | |
| "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 670, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 1284.5539585754805, | |
| "y": 534.2848236217169 | |
| }, | |
| "dragging": false | |
| }, | |
| { | |
| "id": "agentMemory_0", | |
| "position": { | |
| "x": 1283.0320545258492, | |
| "y": 1222.8956334781217 | |
| }, | |
| "type": "customNode", | |
| "data": { | |
| "id": "agentMemory_0", | |
| "label": "Agent Memory", | |
| "version": 1, | |
| "name": "agentMemory", | |
| "type": "AgentMemory", | |
| "baseClasses": [ | |
| "AgentMemory", | |
| "BaseCheckpointSaver" | |
| ], | |
| "category": "Memory", | |
| "description": "Memory for agentflow to remember the state of the conversation", | |
| "inputParams": [ | |
| { | |
| "label": "Database", | |
| "name": "databaseType", | |
| "type": "options", | |
| "options": [ | |
| { | |
| "label": "SQLite", | |
| "name": "sqlite" | |
| } | |
| ], | |
| "default": "sqlite", | |
| "id": "agentMemory_0-input-databaseType-options" | |
| }, | |
| { | |
| "label": "Database File Path", | |
| "name": "databaseFilePath", | |
| "type": "string", | |
| "placeholder": "C:\\Users\\User\\.flowise\\database.sqlite", | |
| "description": "If SQLite is selected, provide the path to the SQLite database file. Leave empty to use default application database", | |
| "additionalParams": true, | |
| "optional": true, | |
| "id": "agentMemory_0-input-databaseFilePath-string" | |
| }, | |
| { | |
| "label": "Additional Connection Configuration", | |
| "name": "additionalConfig", | |
| "type": "json", | |
| "additionalParams": true, | |
| "optional": true, | |
| "id": "agentMemory_0-input-additionalConfig-json" | |
| } | |
| ], | |
| "inputAnchors": [], | |
| "inputs": { | |
| "databaseType": "sqlite", | |
| "databaseFilePath": "", | |
| "additionalConfig": "" | |
| }, | |
| "outputAnchors": [ | |
| { | |
| "id": "agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver", | |
| "name": "agentMemory", | |
| "label": "AgentMemory", | |
| "description": "Memory for agentflow to remember the state of the conversation", | |
| "type": "AgentMemory | BaseCheckpointSaver" | |
| } | |
| ], | |
| "outputs": {}, | |
| "selected": false | |
| }, | |
| "width": 300, | |
| "height": 328, | |
| "selected": false, | |
| "positionAbsolute": { | |
| "x": 1283.0320545258492, | |
| "y": 1222.8956334781217 | |
| }, | |
| "dragging": false | |
| } | |
| ], | |
| "edges": [ | |
| { | |
| "source": "chatPromptTemplate_0", | |
| "sourceHandle": "chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable", | |
| "target": "llmChain_0", | |
| "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate", | |
| "type": "buttonedge", | |
| "id": "chatPromptTemplate_0-chatPromptTemplate_0-output-chatPromptTemplate-ChatPromptTemplate|BaseChatPromptTemplate|BasePromptTemplate|Runnable-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate", | |
| "data": { | |
| "label": "" | |
| } | |
| }, | |
| { | |
| "source": "chatOpenAI_0", | |
| "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", | |
| "target": "llmChain_0", | |
| "targetHandle": "llmChain_0-input-model-BaseLanguageModel", | |
| "type": "buttonedge", | |
| "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel" | |
| }, | |
| { | |
| "source": "advancedStructuredOutputParser_0", | |
| "sourceHandle": "advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable", | |
| "target": "llmChain_0", | |
| "targetHandle": "llmChain_0-input-outputParser-BaseLLMOutputParser", | |
| "type": "buttonedge", | |
| "id": "advancedStructuredOutputParser_0-advancedStructuredOutputParser_0-output-advancedStructuredOutputParser-AdvancedStructuredOutputParser|BaseLLMOutputParser|Runnable-llmChain_0-llmChain_0-input-outputParser-BaseLLMOutputParser" | |
| }, | |
| { | |
| "source": "llmChain_0", | |
| "sourceHandle": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable", | |
| "target": "chainTool_0", | |
| "targetHandle": "chainTool_0-input-baseChain-BaseChain", | |
| "type": "buttonedge", | |
| "id": "llmChain_0-llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable-chainTool_0-chainTool_0-input-baseChain-BaseChain" | |
| }, | |
| { | |
| "source": "chainTool_0", | |
| "sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable", | |
| "target": "seqAgent_0", | |
| "targetHandle": "seqAgent_0-input-tools-Tool", | |
| "type": "buttonedge", | |
| "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|Runnable-seqAgent_0-seqAgent_0-input-tools-Tool" | |
| }, | |
| { | |
| "source": "seqStart_0", | |
| "sourceHandle": "seqStart_0-output-seqStart-Start", | |
| "target": "seqAgent_0", | |
| "targetHandle": "seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", | |
| "type": "buttonedge", | |
| "id": "seqStart_0-seqStart_0-output-seqStart-Start-seqAgent_0-seqAgent_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" | |
| }, | |
| { | |
| "source": "seqAgent_0", | |
| "sourceHandle": "seqAgent_0-output-seqAgent-Agent", | |
| "target": "seqEnd_0", | |
| "targetHandle": "seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode", | |
| "type": "buttonedge", | |
| "id": "seqAgent_0-seqAgent_0-output-seqAgent-Agent-seqEnd_0-seqEnd_0-input-sequentialNode-Start | Agent | LLMNode | ToolNode" | |
| }, | |
| { | |
| "source": "chatOpenAI_1", | |
| "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", | |
| "target": "seqStart_0", | |
| "targetHandle": "seqStart_0-input-model-BaseChatModel", | |
| "type": "buttonedge", | |
| "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-seqStart_0-seqStart_0-input-model-BaseChatModel" | |
| }, | |
| { | |
| "source": "agentMemory_0", | |
| "sourceHandle": "agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver", | |
| "target": "seqStart_0", | |
| "targetHandle": "seqStart_0-input-agentMemory-BaseCheckpointSaver", | |
| "type": "buttonedge", | |
| "id": "agentMemory_0-agentMemory_0-output-agentMemory-AgentMemory|BaseCheckpointSaver-seqStart_0-seqStart_0-input-agentMemory-BaseCheckpointSaver" | |
| } | |
| ] | |
| } |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment