@@ -20,8 +20,20 @@ import { ChatOptions } from '../types/SDKTypes';
 import { findClosestModelInfo } from './Model';
 import * as fs from 'fs';
 
+export type TLLMCommandOptions = {
+    /**
+     * Describes the LLM behavior; typically used as the LLM system message.
+     */
+    behavior?: string;
+
+    /**
+     * List of file paths or URLs to be processed by the LLM.
+     */
+    files?: string[];
+};
+
 class LLMCommand {
-    constructor(private _llm: LLMInstance, private _params: any, private _options?: any) {}
+    constructor(private _llm: LLMInstance, private _params: any, private _options?: TLLMCommandOptions) {}
 
     /**
      * Run the command and return the result as a promise.
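A quick usage sketch for the new options type; the `llm` variable and file path below are hypothetical, not part of this diff:

```ts
// Hypothetical usage of TLLMCommandOptions (`llm` is an already-constructed LLMInstance):
const options: TLLMCommandOptions = {
    behavior: 'You are a concise technical assistant.', // forwarded as the system message
    files: ['./docs/report.pdf'], // file paths or URLs for the LLM to process
};
const command = llm.prompt('Summarize the attached report.', options); // returns an LLMCommand
```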
@@ -141,6 +153,9 @@ export type TLLMInstanceParams = {
     /** the maximum output tokens that the model should generate */
     outputTokens?: number;
 
+    /** The behavior of the model; typically used as the LLM system message. */
+    behavior?: string;
+
     [key: string]: any;
 };
 
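For reference, a hedged sketch of how the new instance-level field might be set; the `model` key and all values are assumptions (the index signature `[key: string]: any` permits extra fields). A per-call `options.behavior` takes precedence over this default, as the `prompt()` change below shows.

```ts
// Hypothetical params object; the model id is an assumption:
const params: TLLMInstanceParams = {
    model: 'gpt-4o',
    outputTokens: 1024,
    behavior: 'Always answer in French.', // instance-wide default system message
};
```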
@@ -185,7 +200,9 @@ export class LLMInstance extends SDKObject {
     }
 
     protected async init() {
-        await super.init();
+        // false means we don't resolve the ready promise yet;
+        // we resolve it later in this method via initSignal()
+        await super.init(false);
         // const llmConnector = ConnectorService.getLLMConnector(this._providerId);
         // this._candidate = this._candidate || AccessCandidate.team(DEFAULT_TEAM_ID);
         // this._llmRequester = llmConnector.user(this._candidate);
@@ -205,6 +222,8 @@ export class LLMInstance extends SDKObject {
         const llmConnector = ConnectorService.getLLMConnector(this._providerId);
         this._llmRequester = llmConnector.user(this._candidate);
         this._modelSettings = adaptModelParams(this._modelSettings, this._providerId, builtInModelInfo);
+
+        this.initSignal(); // resolve the ready promise
     }
 
     /**
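Taken together, these two hunks defer the instance's ready signal until the connector and model settings are fully resolved. A minimal sketch of the assumed mechanics; only `init(false)` and `initSignal()` are confirmed by the diff, everything else about SDKObject here is an assumption:

```ts
// Sketch of the deferred-ready pattern (SDKObject internals are assumptions):
class ReadyObjectSketch {
    private _resolveReady!: () => void;
    public readonly ready: Promise<void> = new Promise((resolve) => (this._resolveReady = resolve));

    protected async init(resolveReady: boolean = true) {
        // ...base setup...
        if (resolveReady) this._resolveReady(); // default: ready right after base init
    }

    protected initSignal() {
        this._resolveReady(); // subclass signals readiness after its own async setup
    }
}
```

This lets LLMInstance keep its ready promise pending while it finishes its own async setup, instead of appearing ready as soon as the base init returns.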
@@ -225,8 +244,14 @@ export class LLMInstance extends SDKObject {
      * stream.on('data', chunk => process.stdout.write(chunk));
      * ```
      */
-    public prompt(prompt: string, options?: any): LLMCommand {
-        return new LLMCommand(this, { ...this._modelSettings, messages: [{ role: 'user', content: prompt }] }, options);
+    public prompt(prompt: string, options?: TLLMCommandOptions): LLMCommand {
+        const messages = [];
+        const behavior = options?.behavior || this._modelSettings?.behavior;
+        if (behavior) {
+            messages.push({ role: 'system', content: behavior });
+        }
+        messages.push({ role: 'user', content: prompt });
+        return new LLMCommand(this, { ...this._modelSettings, messages }, options);
     }
 
     public chat(options?: ChatOptions | string) {
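To make the new message assembly concrete, a hedged example of the array `prompt()` now builds; `llm` and the strings are hypothetical:

```ts
// Hypothetical call; `llm` is an initialized LLMInstance with no instance-level behavior set:
const cmd = llm.prompt('What is a monad?', { behavior: 'Explain like I am five.' });
// The command's messages param is now:
// [
//   { role: 'system', content: 'Explain like I am five.' },
//   { role: 'user',   content: 'What is a monad?' },
// ]
```

Because `options?.behavior` is checked first, a per-call behavior overrides any default set via `TLLMInstanceParams.behavior`.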