Skip to content

Commit d2ff395

Browse files
committed
Add support for custom behavior (system message) when prompting an LLM without an agent.
1 parent ceb9575 commit d2ff395

File tree

6 files changed

+64
-9
lines changed

6 files changed

+64
-9
lines changed

examples/01-agent-code-skill/04.1-chat-planner-coder.ts

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -265,7 +265,10 @@ async function handleUserInput(input: string, rl: readline.Interface, chat: Chat
265265
parser.flush();
266266
console.log('\n');
267267
displayTasksList(currentTasks);
268-
rl.prompt();
268+
// Wait for the parser buffer to be released before re-displaying the prompt
269+
parser.once('buffer-released', () => {
270+
rl.prompt();
271+
});
269272
});
270273

271274
streamChat.on(TLLMEvent.Error, (error) => {
@@ -276,7 +279,6 @@ async function handleUserInput(input: string, rl: readline.Interface, chat: Chat
276279
const toolCalls = {};
277280

278281
streamChat.on(TLLMEvent.ToolCall, (toolCall) => {
279-
displayTasksList(currentTasks);
280282
if (toolCall?.tool?.name.startsWith('_sre_')) {
281283
return;
282284
}
@@ -290,6 +292,8 @@ async function handleUserInput(input: string, rl: readline.Interface, chat: Chat
290292
console.log(chalk.gray('\n[Calling Tool]'), chalk.gray(toolCall?.tool?.name), chalk.gray(args));
291293
toolCalls[toolCall?.tool?.id] = { startTime: Date.now() };
292294
});
295+
296+
displayTasksList(currentTasks);
293297
});
294298

295299
streamChat.on(TLLMEvent.ToolResult, (toolResult) => {

packages/sdk/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@smythos/sdk",
3-
"version": "1.1.0",
3+
"version": "1.1.1",
44
"description": "SRE SDK",
55
"keywords": [
66
"smythos",

packages/sdk/src/Core/SDKObject.class.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,14 @@ export class SDKObject {
2727
this._readyPromise = new ControlledPromise<any>(this.init.bind(this));
2828
}
2929

30-
protected async init() {
30+
protected async init(resolveReadyPromise: boolean = true) {
3131
//if the SRE instance is not initializing, initialize it with default settings
3232
if (!SRE.initializing) SRE.init({});
3333
await SRE.ready();
3434

35+
if (resolveReadyPromise) this._readyPromise.resolve(true);
36+
}
37+
protected async initSignal() {
3538
this._readyPromise.resolve(true);
3639
}
3740

packages/sdk/src/LLM/LLMInstance.class.ts

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,20 @@ import { ChatOptions } from '../types/SDKTypes';
2020
import { findClosestModelInfo } from './Model';
2121
import * as fs from 'fs';
2222

23+
export type TLLMCommandOptions = {
24+
/**
25+
* Describes the LLM behavior; this will typically be used as the LLM system message.
26+
*/
27+
behavior?: string;
28+
29+
/**
30+
* List of file paths or URLs to be processed by the LLM.
31+
*/
32+
files?: string[];
33+
};
34+
2335
class LLMCommand {
24-
constructor(private _llm: LLMInstance, private _params: any, private _options?: any) {}
36+
constructor(private _llm: LLMInstance, private _params: any, private _options?: TLLMCommandOptions) {}
2537

2638
/**
2739
* Run the command and return the result as a promise.
@@ -141,6 +153,9 @@ export type TLLMInstanceParams = {
141153
/** the maximum output tokens that the model should generate */
142154
outputTokens?: number;
143155

156+
/** The behavior of the model, this will be typically used as LLM system message. */
157+
behavior?: string;
158+
144159
[key: string]: any;
145160
};
146161

@@ -185,7 +200,9 @@ export class LLMInstance extends SDKObject {
185200
}
186201

187202
protected async init() {
188-
await super.init();
203+
//false means we don't resolve the ready promise
204+
//we resolve it later in this init method via initSignal(), once the connector is ready
205+
await super.init(false);
189206
// const llmConnector = ConnectorService.getLLMConnector(this._providerId);
190207
// this._candidate = this._candidate || AccessCandidate.team(DEFAULT_TEAM_ID);
191208
// this._llmRequester = llmConnector.user(this._candidate);
@@ -205,6 +222,8 @@ export class LLMInstance extends SDKObject {
205222
const llmConnector = ConnectorService.getLLMConnector(this._providerId);
206223
this._llmRequester = llmConnector.user(this._candidate);
207224
this._modelSettings = adaptModelParams(this._modelSettings, this._providerId, builtInModelInfo);
225+
226+
this.initSignal(); //resolve the ready promise
208227
}
209228

210229
/**
@@ -225,8 +244,14 @@ export class LLMInstance extends SDKObject {
225244
* stream.on('data', chunk => process.stdout.write(chunk));
226245
* ```
227246
*/
228-
public prompt(prompt: string, options?: any): LLMCommand {
229-
return new LLMCommand(this, { ...this._modelSettings, messages: [{ role: 'user', content: prompt }] }, options);
247+
public prompt(prompt: string, options?: TLLMCommandOptions): LLMCommand {
248+
const messages = [];
249+
const behavior = options?.behavior || this._modelSettings?.behavior;
250+
if (behavior) {
251+
messages.push({ role: 'system', content: behavior });
252+
}
253+
messages.push({ role: 'user', content: prompt });
254+
return new LLMCommand(this, { ...this._modelSettings, messages }, options);
230255
}
231256

232257
public chat(options?: ChatOptions | string) {

packages/sdk/tests/agent.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ import { expect, describe, it } from 'vitest';
1414
describe('SDK Agent Tests', () => {
1515
it('imported agent', async () => {
1616
const agent = Agent.import('./packages/sdk/tests/data/AgentData/crypto-info-agent.smyth', {
17-
model: Model.OpenAI('gpt-4o-mini', { maxTokens: 10 }),
17+
model: Model.OpenAI('gpt-4o-mini', { maxTokens: 100 }),
1818
});
1919

2020
//const result = await agent.prompt('Hello, Who are you ?');

packages/sdk/tests/llm.test.ts

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,29 @@ describe('SDK LLM Tests', () => {
4545
expect(result).toBeDefined();
4646
expect(result).toContain('Paris');
4747
});
48+
it('LLM - Prompt with LLM behavior', async () => {
49+
const llm = LLM.OpenAI('gpt-4o-mini', {
50+
behavior: 'You start every answer with this prompt $> ',
51+
});
52+
53+
const result = await llm.prompt('What is the capital of France?');
54+
55+
expect(result).toBeDefined();
56+
expect(result).toContain('$>');
57+
});
58+
59+
it('LLM - Prompt with LLM behavior override', async () => {
60+
const llm = LLM.OpenAI('gpt-4o-mini', {
61+
behavior: 'You start every answer with this prompt $> ',
62+
});
63+
64+
const result = await llm.prompt('What is the capital of France?', {
65+
behavior: 'You start every answer with this prompt [AGENT]> ',
66+
});
67+
68+
expect(result).toBeDefined();
69+
expect(result).toContain('[AGENT]>');
70+
});
4871

4972
it('LLM - Prompt with attachments', async () => {
5073
const llm = LLM.OpenAI('gpt-4o-mini', {

0 commit comments

Comments
 (0)