
feat(runtime): support llm template syntax (#480)

Louis Young, 6 months ago
parent
commit
be398ef8f5
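
For context, these are the three value shapes the commit distinguishes, as they appear in the schemas changed below. A minimal TypeScript sketch; the interface definitions themselves live in @schema/value and are not part of this diff:

const systemPrompt: IFlowConstantValue = {
  type: 'constant',
  content: 'You are a helpful AI assistant.',
};
const temperature: IFlowRefValue = {
  type: 'ref',
  content: ['start_0', 'llm_settings', 'temperature'],
};
const prompt: IFlowTemplateValue = {
  type: 'template',
  content: '<Role>{{start_0.work.role}}</Role>\n\n<Task>\n{{start_0.work.task}}\n</Task>',
};

IFlowValue is the union of these; each {{...}} placeholder in a template is a dot-separated variable path resolved at runtime.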

+ 3 - 3
packages/runtime/interface/src/node/llm/index.ts

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: MIT
  */
 
-import { IFlowConstantRefValue } from '@schema/value';
+import { IFlowConstantRefValue, IFlowConstantValue, IFlowTemplateValue } from '@schema/value';
 import { WorkflowNodeSchema } from '@schema/node';
 import { IJsonSchema } from '@schema/json-schema';
 import { FlowGramNode } from '@node/constant';
@@ -17,8 +17,8 @@ interface LLMNodeData {
     modelType: IFlowConstantRefValue;
     baseURL: IFlowConstantRefValue;
     temperature: IFlowConstantRefValue;
-    systemPrompt: IFlowConstantRefValue;
-    prompt: IFlowConstantRefValue;
+    systemPrompt: IFlowConstantValue | IFlowTemplateValue;
+    prompt: IFlowConstantValue | IFlowTemplateValue;
   };
 }
 

+ 2 - 2
packages/runtime/interface/src/runtime/document/node.ts

@@ -3,13 +3,13 @@
  * SPDX-License-Identifier: MIT
  */
 
-import { IFlowConstantRefValue, IJsonSchema, PositionSchema } from '@schema/index';
+import { IFlowValue, IJsonSchema, PositionSchema } from '@schema/index';
 import { FlowGramNode } from '@node/constant';
 import { IPort } from './port';
 import { IEdge } from './edge';
 
 export interface NodeDeclare {
-  inputsValues?: Record<string, IFlowConstantRefValue>;
+  inputsValues?: Record<string, IFlowValue>;
   inputs?: IJsonSchema;
   outputs?: IJsonSchema;
 }

+ 3 - 2
packages/runtime/interface/src/runtime/state/index.ts

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: MIT
  */
 
-import { IFlowConstantRefValue, IFlowRefValue, WorkflowVariableType } from '@schema/index';
+import { IFlowValue, IFlowRefValue, WorkflowVariableType, IFlowTemplateValue } from '@schema/index';
 import { IVariableParseResult, IVariableStore } from '../variable';
 import { INode } from '../document';
 import { WorkflowInputs, WorkflowOutputs } from '../base';
@@ -16,8 +16,9 @@ export interface IState {
   getNodeInputs(node: INode): WorkflowInputs;
   setNodeOutputs(params: { node: INode; outputs: WorkflowOutputs }): void;
   parseRef<T = unknown>(ref: IFlowRefValue): IVariableParseResult<T> | null;
+  parseTemplate(template: IFlowTemplateValue): IVariableParseResult<string> | null;
   parseValue<T = unknown>(
-    flowValue: IFlowConstantRefValue,
+    flowValue: IFlowValue,
     type?: WorkflowVariableType
   ): IVariableParseResult<T> | null;
   isExecutedNode(node: INode): boolean;

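With the widened signature, parseValue dispatches on the value's type tag, so LLM prompts declared as templates resolve to plain strings. A hedged usage sketch (a hypothetical state: IState instance; the path and expected output are taken from the test schemas further down):

const prompt = state.parseValue<string>({
  type: 'template',
  content: 'Just give me the answer of "{{start_0.formula}}=?", just one number, no other words',
});
// => { type: WorkflowVariableType.String, value: 'Just give me the answer of "1+1=?", ...' }
//    assuming start_0.formula resolved to '1+1' via parseRef
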
+ 7 - 1
packages/runtime/interface/src/schema/index.ts

@@ -10,4 +10,10 @@ export { WorkflowNodeSchema } from './node';
 export { WorkflowSchema } from './workflow';
 export { XYSchema, PositionSchema } from './xy';
 export { WorkflowPortType, WorkflowVariableType } from './constant';
-export { IFlowConstantRefValue, IFlowConstantValue, IFlowRefValue } from './value';
+export {
+  IFlowConstantRefValue,
+  IFlowConstantValue,
+  IFlowRefValue,
+  IFlowValue,
+  IFlowTemplateValue,
+} from './value';

+ 2 - 2
packages/runtime/interface/src/schema/node.ts

@@ -3,7 +3,7 @@
  * SPDX-License-Identifier: MIT
  */
 
-import type { IFlowConstantRefValue } from './value';
+import type { IFlowValue } from './value';
 import type { WorkflowNodeMetaSchema } from './node-meta';
 import { IJsonSchema } from './json-schema';
 import type { WorkflowEdgeSchema } from './edge';
@@ -14,7 +14,7 @@ export interface WorkflowNodeSchema<T = string, D = any> {
   meta: WorkflowNodeMetaSchema;
   data: D & {
     title?: string;
-    inputsValues?: Record<string, IFlowConstantRefValue>;
+    inputsValues?: Record<string, IFlowValue>;
     inputs?: IJsonSchema;
     outputs?: IJsonSchema;
     [key: string]: any;

+ 1 - 1
packages/runtime/js-core/src/domain/__tests__/executor/llm.ts

@@ -13,7 +13,7 @@ export class MockLLMExecutor extends LLMExecutor {
     const inputs = context.inputs as LLMExecutorInputs;
     this.checkInputs(inputs);
     await delay(100); // TODO mock node run
-    const result = `Hi, I'm an AI assistant, my name is ${inputs.modelName}, temperature is ${inputs.temperature}, system prompt is "${inputs.systemPrompt}", prompt is "${inputs.prompt}"`;
+    const result = `Hi, I'm an AI model, my name is ${inputs.modelName}, temperature is ${inputs.temperature}, system prompt is "${inputs.systemPrompt}", prompt is "${inputs.prompt}"`;
     return {
       outputs: {
         result,

+ 3 - 3
packages/runtime/js-core/src/domain/__tests__/schemas/basic-llm.test.ts

@@ -37,7 +37,7 @@ describe('workflow runtime basic test', () => {
         model_name: modelName,
         api_key: apiKey,
         api_host: apiHost,
-        prompt: 'Just give me the answer of "1+1=?", just one number, no other words',
+        formula: '1+1',
       },
     });
     expect(context.statusCenter.workflow.status).toBe(WorkflowStatus.Processing);
@@ -55,7 +55,7 @@ describe('workflow runtime basic test', () => {
           model_name: modelName,
           api_key: apiKey,
           api_host: apiHost,
-          prompt: 'Just give me the answer of "1+1=?", just one number, no other words',
+          formula: '1+1',
         },
         data: {},
       },
@@ -67,7 +67,7 @@ describe('workflow runtime basic test', () => {
           apiHost: apiHost,
           temperature: 0,
           prompt: 'Just give me the answer of "1+1=?", just one number, no other words',
-          systemPrompt: 'You are a helpful AI assistant.',
+          systemPrompt: 'You are a "math formula" calculator.',
         },
         outputs: { result: '2' },
         data: {},

+ 30 - 23
packages/runtime/js-core/src/domain/__tests__/schemas/basic-llm.ts

@@ -12,8 +12,8 @@ export const basicLLMSchema: WorkflowSchema = {
       type: 'start',
       meta: {
         position: {
-          x: 0,
-          y: 0,
+          x: 180,
+          y: 152.2,
         },
       },
       data: {
@@ -24,41 +24,41 @@ export const basicLLMSchema: WorkflowSchema = {
             model_name: {
               key: 14,
               name: 'model_name',
-              type: 'string',
-              extra: {
-                index: 1,
-              },
               isPropertyRequired: true,
-            },
-            prompt: {
-              key: 5,
-              name: 'prompt',
               type: 'string',
               extra: {
-                index: 3,
+                index: 0,
               },
-              isPropertyRequired: true,
             },
             api_key: {
               key: 19,
               name: 'api_key',
+              isPropertyRequired: true,
               type: 'string',
               extra: {
-                index: 4,
+                index: 1,
               },
-              isPropertyRequired: true,
             },
             api_host: {
               key: 20,
               name: 'api_host',
+              isPropertyRequired: true,
               type: 'string',
               extra: {
-                index: 5,
+                index: 2,
               },
+            },
+            formula: {
+              key: 4,
+              name: 'formula',
               isPropertyRequired: true,
+              type: 'string',
+              extra: {
+                index: 3,
+              },
             },
           },
-          required: ['model_name', 'prompt', 'api_key', 'api_host'],
+          required: ['model_name', 'api_key', 'api_host', 'formula'],
         },
       },
     },
@@ -67,8 +67,8 @@ export const basicLLMSchema: WorkflowSchema = {
       type: 'end',
       meta: {
         position: {
-          x: 1000,
-          y: 0,
+          x: 1124.4,
+          y: 152.2,
         },
       },
       data: {
@@ -94,7 +94,7 @@ export const basicLLMSchema: WorkflowSchema = {
       type: 'llm',
       meta: {
         position: {
-          x: 500,
+          x: 652.2,
           y: 0,
         },
       },
@@ -118,12 +118,13 @@ export const basicLLMSchema: WorkflowSchema = {
             content: 0,
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'prompt'],
+            type: 'template',
+            content:
+              'Just give me the answer of "{{start_0.formula}}=?", just one number, no other words',
           },
           systemPrompt: {
-            type: 'constant',
-            content: 'You are a helpful AI assistant.',
+            type: 'template',
+            content: 'You are a "math formula" calculator.',
           },
         },
         inputs: {
@@ -144,9 +145,15 @@ export const basicLLMSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },

+ 13 - 10
packages/runtime/js-core/src/domain/__tests__/schemas/basic.test.ts

@@ -22,15 +22,18 @@ describe('WorkflowRuntime basic schema', () => {
         llm_settings: {
           temperature: 0.5,
         },
-        prompt: 'How are you?',
+        work: {
+          role: 'Chat',
+          task: 'Tell me a story about love',
+        },
       },
     });
     expect(context.statusCenter.workflow.status).toBe(WorkflowStatus.Processing);
     const result = await processing;
     expect(context.statusCenter.workflow.status).toBe(WorkflowStatus.Succeeded);
     expect(result).toStrictEqual({
-      llm_res: `Hi, I'm an AI assistant, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "How are you?"`,
-      llm_prompt: 'How are you?',
+      llm_res: `Hi, I'm an AI model, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>"`,
+      llm_task: 'Tell me a story about love',
     });
     const snapshots = snapshotsToVOData(context.snapshotCenter.exportAll());
     expect(snapshots).toStrictEqual([
@@ -40,7 +43,7 @@ describe('WorkflowRuntime basic schema', () => {
         outputs: {
           model_name: 'ai-model',
           llm_settings: { temperature: 0.5 },
-          prompt: 'How are you?',
+          work: { role: 'Chat', task: 'Tell me a story about love' },
         },
         data: {},
       },
@@ -51,12 +54,12 @@ describe('WorkflowRuntime basic schema', () => {
           apiKey: 'sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
           apiHost: 'https://mock-ai-url/api/v3',
           temperature: 0.5,
-          prompt: 'How are you?',
+          prompt: '<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>',
           systemPrompt: 'You are a helpful AI assistant.',
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "How are you?"',
+            'Hi, I\'m an AI model, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>"',
         },
         data: {},
       },
@@ -64,13 +67,13 @@ describe('WorkflowRuntime basic schema', () => {
         nodeID: 'end_0',
         inputs: {
           llm_res:
-            'Hi, I\'m an AI assistant, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "How are you?"',
-          llm_prompt: 'How are you?',
+            'Hi, I\'m an AI model, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>"',
+          llm_task: 'Tell me a story about love',
         },
         outputs: {
           llm_res:
-            'Hi, I\'m an AI assistant, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "How are you?"',
-          llm_prompt: 'How are you?',
+            'Hi, I\'m an AI model, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>"',
+          llm_task: 'Tell me a story about love',
         },
         data: {},
       },

+ 47 - 19
packages/runtime/js-core/src/domain/__tests__/schemas/basic.ts

@@ -13,7 +13,7 @@ export const basicSchema: WorkflowSchema = {
       meta: {
         position: {
           x: 180,
-          y: 69,
+          y: 171.6,
         },
       },
       data: {
@@ -24,18 +24,19 @@ export const basicSchema: WorkflowSchema = {
             model_name: {
               key: 14,
               name: 'model_name',
+              isPropertyRequired: true,
               type: 'string',
               extra: {
-                index: 1,
+                index: 0,
               },
-              isPropertyRequired: true,
             },
             llm_settings: {
               key: 17,
               name: 'llm_settings',
+              isPropertyRequired: false,
               type: 'object',
               extra: {
-                index: 2,
+                index: 1,
               },
               properties: {
                 temperature: {
@@ -49,17 +50,38 @@ export const basicSchema: WorkflowSchema = {
               },
               required: [],
             },
-            prompt: {
-              key: 19,
-              name: 'prompt',
-              type: 'string',
+            work: {
+              key: 5,
+              name: 'work',
+              isPropertyRequired: true,
+              type: 'object',
               extra: {
-                index: 3,
+                index: 2,
               },
-              isPropertyRequired: true,
+              properties: {
+                role: {
+                  key: 6,
+                  name: 'role',
+                  isPropertyRequired: true,
+                  type: 'string',
+                  extra: {
+                    index: 0,
+                  },
+                },
+                task: {
+                  key: 8,
+                  name: 'task',
+                  isPropertyRequired: true,
+                  type: 'string',
+                  extra: {
+                    index: 1,
+                  },
+                },
+              },
+              required: ['role', 'task'],
             },
           },
-          required: ['model_name', 'prompt'],
+          required: ['model_name', 'work'],
         },
       },
     },
@@ -68,8 +90,8 @@ export const basicSchema: WorkflowSchema = {
       type: 'end',
       meta: {
         position: {
-          x: 1121.3,
-          y: 69,
+          x: 1124.4,
+          y: 171.6,
         },
       },
       data: {
@@ -79,9 +101,9 @@ export const basicSchema: WorkflowSchema = {
             type: 'ref',
             content: ['llm_0', 'result'],
           },
-          llm_prompt: {
+          llm_task: {
             type: 'ref',
-            content: ['start_0', 'prompt'],
+            content: ['start_0', 'work', 'task'],
           },
         },
         inputs: {
@@ -90,7 +112,7 @@ export const basicSchema: WorkflowSchema = {
             llm_res: {
               type: 'string',
             },
-            llm_prompt: {
+            llm_task: {
               type: 'string',
             },
           },
@@ -102,7 +124,7 @@ export const basicSchema: WorkflowSchema = {
       type: 'llm',
       meta: {
         position: {
-          x: 650.65,
+          x: 652.2,
           y: 0,
         },
       },
@@ -126,8 +148,8 @@ export const basicSchema: WorkflowSchema = {
             content: ['start_0', 'llm_settings', 'temperature'],
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'prompt'],
+            type: 'template',
+            content: '<Role>{{start_0.work.role}}</Role>\n\n<Task>\n{{start_0.work.task}}\n</Task>',
           },
           systemPrompt: {
             type: 'constant',
@@ -152,9 +174,15 @@ export const basicSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },

+ 8 - 8
packages/runtime/js-core/src/domain/__tests__/schemas/branch.test.ts

@@ -26,7 +26,7 @@ describe('WorkflowRuntime branch schema', () => {
     const result = await processing;
     expect(context.statusCenter.workflow.status).toBe(WorkflowStatus.Succeeded);
     expect(result).toStrictEqual({
-      m1_res: `Hi, I'm an AI assistant, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I'm Model 1.", prompt is "Tell me a joke"`,
+      m1_res: `Hi, I'm an AI model, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I'm Model 1.", prompt is "Tell me a joke"`,
     });
     const snapshots = snapshotsToVOData(context.snapshotCenter.exportAll());
     expect(snapshots).toStrictEqual([
@@ -74,7 +74,7 @@ describe('WorkflowRuntime branch schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
         },
         data: {},
       },
@@ -82,11 +82,11 @@ describe('WorkflowRuntime branch schema', () => {
         nodeID: 'end_0',
         inputs: {
           m1_res:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
         },
         outputs: {
           m1_res:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I\'m Model 1.", prompt is "Tell me a joke"',
         },
         data: {},
       },
@@ -113,7 +113,7 @@ describe('WorkflowRuntime branch schema', () => {
     const result = await processing;
     expect(context.statusCenter.workflow.status).toBe(WorkflowStatus.Succeeded);
     expect(result).toStrictEqual({
-      m2_res: `Hi, I'm an AI assistant, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I'm Model 2.", prompt is "Tell me a story"`,
+      m2_res: `Hi, I'm an AI model, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I'm Model 2.", prompt is "Tell me a story"`,
     });
     const snapshots = snapshotsToVOData(context.snapshotCenter.exportAll());
     expect(snapshots).toStrictEqual([
@@ -161,7 +161,7 @@ describe('WorkflowRuntime branch schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
         },
         data: {},
       },
@@ -169,11 +169,11 @@ describe('WorkflowRuntime branch schema', () => {
         nodeID: 'end_0',
         inputs: {
           m2_res:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
         },
         outputs: {
           m2_res:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_2, temperature is 0.6, system prompt is "I\'m Model 2.", prompt is "Tell me a story"',
         },
         data: {},
       },

+ 16 - 4
packages/runtime/js-core/src/domain/__tests__/schemas/branch.ts

@@ -155,8 +155,8 @@ export const branchSchema: WorkflowSchema = {
             content: "I'm Model 1.",
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'prompt'],
+            type: 'template',
+            content: '{{start_0.prompt}}',
           },
         },
         inputs: {
@@ -177,9 +177,15 @@ export const branchSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },
@@ -226,8 +232,8 @@ export const branchSchema: WorkflowSchema = {
             content: "I'm Model 2.",
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'prompt'],
+            type: 'template',
+            content: '{{start_0.prompt}}',
           },
         },
         inputs: {
@@ -248,9 +254,15 @@ export const branchSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },

+ 8 - 8
packages/runtime/js-core/src/domain/__tests__/schemas/loop.test.ts

@@ -72,7 +72,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - A"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - A"',
         },
         data: {},
       },
@@ -87,7 +87,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - B"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - B"',
         },
         data: {},
       },
@@ -102,7 +102,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - C"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - C"',
         },
         data: {},
       },
@@ -117,7 +117,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - D"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - D"',
         },
         data: {},
       },
@@ -132,7 +132,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - E"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - E"',
         },
         data: {},
       },
@@ -147,7 +147,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - F"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - F"',
         },
         data: {},
       },
@@ -162,7 +162,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - G"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - G"',
         },
         data: {},
       },
@@ -177,7 +177,7 @@ describe('WorkflowRuntime loop schema', () => {
         },
         outputs: {
           result:
-            'Hi, I\'m an AI assistant, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - H"',
+            'Hi, I\'m an AI model, my name is AI_MODEL_1, temperature is 0.6, system prompt is "undefined", prompt is "TASK - H"',
         },
         data: {},
       },

+ 8 - 2
packages/runtime/js-core/src/domain/__tests__/schemas/loop.ts

@@ -115,8 +115,8 @@ export const loopSchema: WorkflowSchema = {
                 content: ['start_0', 'system_prompt'],
               },
               prompt: {
-                type: 'ref',
-                content: ['loop_0_locals', 'item'],
+                type: 'template',
+                content: '{{loop_0_locals.item}}',
               },
             },
             inputs: {
@@ -137,9 +137,15 @@ export const loopSchema: WorkflowSchema = {
                 },
                 systemPrompt: {
                   type: 'string',
+                  extra: {
+                    formComponent: 'prompt-editor',
+                  },
                 },
                 prompt: {
                   type: 'string',
+                  extra: {
+                    formComponent: 'prompt-editor',
+                  },
                 },
               },
             },

+ 16 - 4
packages/runtime/js-core/src/domain/__tests__/schemas/two-llm.ts

@@ -141,8 +141,8 @@ export const twoLLMSchema: WorkflowSchema = {
             content: ['start_0', 'num'],
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'query'],
+            type: 'template',
+            content: '{{start_0.query}}',
           },
         },
         inputs: {
@@ -163,9 +163,15 @@ export const twoLLMSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },
@@ -212,8 +218,8 @@ export const twoLLMSchema: WorkflowSchema = {
             content: 'AAAA',
           },
           prompt: {
-            type: 'ref',
-            content: ['start_0', 'query'],
+            type: 'template',
+            content: '{{start_0.query}}',
           },
         },
         inputs: {
@@ -234,9 +240,15 @@ export const twoLLMSchema: WorkflowSchema = {
             },
             systemPrompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
             prompt: {
               type: 'string',
+              extra: {
+                formComponent: 'prompt-editor',
+              },
             },
           },
         },

+ 7 - 4
packages/runtime/js-core/src/domain/engine/index.test.ts

@@ -32,13 +32,16 @@ describe('WorkflowRuntimeEngine', () => {
         llm_settings: {
           temperature: 0.5,
         },
-        prompt: 'How are you?',
+        work: {
+          role: 'Chat',
+          task: 'Tell me a story about love',
+        },
       },
     });
     const result = await processing;
     expect(result).toStrictEqual({
-      llm_res: `Hi, I'm an AI assistant, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "How are you?"`,
-      llm_prompt: 'How are you?',
+      llm_res: `Hi, I'm an AI model, my name is ai-model, temperature is 0.5, system prompt is "You are a helpful AI assistant.", prompt is "<Role>Chat</Role>\n\n<Task>\nTell me a story about love\n</Task>"`,
+      llm_task: 'Tell me a story about love',
     });
   });
 
@@ -52,7 +55,7 @@ describe('WorkflowRuntimeEngine', () => {
     });
     const result = await processing;
     expect(result).toStrictEqual({
-      m1_res: `Hi, I'm an AI assistant, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I'm Model 1.", prompt is "Tell me a joke"`,
+      m1_res: `Hi, I'm an AI model, my name is AI_MODEL_1, temperature is 0.5, system prompt is "I'm Model 1.", prompt is "Tell me a joke"`,
     });
   });
 });

+ 38 - 2
packages/runtime/js-core/src/domain/state/index.ts

@@ -6,7 +6,7 @@
 import { isNil } from 'lodash-es';
 import {
   IState,
-  IFlowConstantRefValue,
+  IFlowValue,
   IFlowRefValue,
   IVariableParseResult,
   INode,
@@ -14,6 +14,7 @@ import {
   WorkflowOutputs,
   IVariableStore,
   WorkflowVariableType,
+  IFlowTemplateValue,
 } from '@flowgram.ai/runtime-interface';
 
 import { uuid, WorkflowRuntimeType } from '@infra/utils';
@@ -105,7 +106,38 @@ export class WorkflowRuntimeState implements IState {
     return result;
   }
 
-  public parseValue<T = unknown>(flowValue: IFlowConstantRefValue): IVariableParseResult<T> | null {
+  public parseTemplate(template: IFlowTemplateValue): IVariableParseResult<string> | null {
+    if (template?.type !== 'template') {
+      throw new Error(`invalid template value: ${template}`);
+    }
+    if (!template.content) {
+      return null;
+    }
+    const parsedValue = template.content.replace(
+      /\{\{([^\}]+)\}\}/g,
+      (match: string, pattern: string): string => {
+        // Split the path into segments, e.g. 'start_0.work.role' => ['start_0', 'work', 'role']
+        const ref = pattern.trim().split('.');
+
+        const variable = this.parseRef<string>({
+          type: 'ref',
+          content: ref,
+        });
+
+        if (!variable) {
+          return '';
+        }
+
+        return variable.value;
+      }
+    );
+    return {
+      type: WorkflowVariableType.String,
+      value: parsedValue,
+    };
+  }
+
+  public parseValue<T = unknown>(flowValue: IFlowValue): IVariableParseResult<T> | null {
     if (!flowValue?.type) {
       throw new Error(`invalid flow value type: ${(flowValue as any).type}`);
     }
@@ -125,6 +157,10 @@ export class WorkflowRuntimeState implements IState {
     if (flowValue.type === 'ref') {
       return this.parseRef<T>(flowValue);
     }
+    // template
+    if (flowValue.type === 'template') {
+      return this.parseTemplate(flowValue) as IVariableParseResult<T> | null;
+    }
     // unknown type
     throw new Error(`unknown flow value type: ${(flowValue as any).type}`);
   }
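
For reference, the interpolation added above in isolation. A self-contained sketch; the resolve callback stands in for parseRef and is an assumption of this example:

const TEMPLATE_PATTERN = /\{\{([^\}]+)\}\}/g;

function renderTemplate(
  content: string,
  resolve: (path: string[]) => string | undefined
): string {
  return content.replace(TEMPLATE_PATTERN, (_match, pattern: string) => {
    // 'start_0.work.role' => ['start_0', 'work', 'role']
    const path = pattern.trim().split('.');
    // Unresolved references render as an empty string, matching parseTemplate.
    return resolve(path) ?? '';
  });
}

// renderTemplate('<Role>{{start_0.work.role}}</Role>', () => 'Chat') === '<Role>Chat</Role>'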