diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/README.md b/integrations/extensions/starter-kits/language-model-watsonx/advanced/README.md new file mode 100644 index 00000000..79f346a3 --- /dev/null +++ b/integrations/extensions/starter-kits/language-model-watsonx/advanced/README.md @@ -0,0 +1,130 @@ +# Watsonx-ai-openapi + +This is a openapi spec including all available llm endpoints. Including + +| Endpoint | Description | +| ------------------------------------------------ | -------------------------------------------------------------------------- | +| Generation | Direct generate from wx.ai | +| Generation (stream) | Direct generation with streaming | +| Generation from deployed model/template | From deployed model/template without explicitly prompt | +| Generation from deployed model/template (stream) | From deployed model/template without explicitly prompt with streaming | +| Generation from AI service | For services like RAG, agent lab, chat with document, etc. | +| Generation from AI service | For services like RAG, agent lab, chat with document, etc., with streaming | + +## Before you start + +You need to have a deployed model/prompt template, or a deployed AI service on Watsonx.ai +Read more: + +- [Deploying a prompt template](https://eu-de.dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/prompt-template-deploy.html?context=wx&locale=en&audience=wdp) +- [Deploying a tuned foundation model](https://eu-de.dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-tuning-deploy.html?context=wx&locale=en&audience=wdp) +- [Deploying AI services](https://eu-de.dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ai-services-overview.html?context=wx&locale=en) + +--- + +## Generation Streaming + +Use streaming + +1. Choose operation with _"Generation stream"_. +2. Set up streaming reponse `text` to `results[0].generated_text`. 
+ +Read more on [streaming from an extension](https://cloud.ibm.com/docs/watson-assistant?topic=watson-assistant-stream-from-extension) to configure streaming in WxA. + +--- + +## Deployed prompt template + +1. Choose operation _"Generation from a deployed prompt"_ + +2. Set up parameters: + +- `deployment-id` +- `version` +- `parameters.prompt_variables` + +Input variable `parameters.prompt_variables` is of object type. You should give variable values as an expression depending on your deployment. For example: + +```json +{ "question": "$question" } +``` + + + +Output message is in `body.results[0].generated_text` + + + +## Deployed prompt template (stream) + +1. Choose operation _"Generation from a deployed prompt (stream)"_ + +2. Set up parameters: + +- `deployment-id` +- `version` +- `parameters.prompt_variables` + +Input variable `parameters.prompt_variables` is of object type. You should give variable values as an expression depending on your deployment. For example: + +```json +{ "question": "$question" } +``` + +Set up streaming response `text` to `results[0].generated_text`. + + + +--- + +## Deployed AI service + +1. Choose operation _"Generation from a deployed AI service"_ + +2. Set up parameters: + +- `deployment-id` +- `version` +- `message` + +Input variable `message` should be given as an expression in the form of: + +```json +[ + { + "role": "user", + "content": " $user_input " + } +] +``` + + + +Output message should be `body.choices[0].message.content` + + + +## Deployed AI service (stream) + +1. Choose operation _"Generation from a deployed AI service (stream)"_ + +2. 
Set up parameters: + +- `deployment-id` +- `version` +- `message` + +Input variable `message` should be given as an expression in form of: + +```json +[ + { + "role": "user", + "content": " $user_input " + } +] +``` + +Output message should be `body.choices[0].message.delta.content` + + diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-input.png b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-input.png new file mode 100644 index 00000000..363b533d Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-input.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-output.png b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-output.png new file mode 100644 index 00000000..521a3420 Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-output.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-stream.png b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-stream.png new file mode 100644 index 00000000..3e5c1b21 Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/ai-service-stream.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-input.png b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-input.png new file mode 100644 index 00000000..0a5caece Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-input.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-output.png 
b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-output.png new file mode 100644 index 00000000..2f3e57cc Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-output.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-stream.png b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-stream.png new file mode 100644 index 00000000..160b3854 Binary files /dev/null and b/integrations/extensions/starter-kits/language-model-watsonx/advanced/assets/deployed-prompt-stream.png differ diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/watsonx-extension-all-in-one-openapi.json b/integrations/extensions/starter-kits/language-model-watsonx/advanced/watsonx-extension-all-in-one-openapi.json new file mode 100644 index 00000000..2e35ce3c --- /dev/null +++ b/integrations/extensions/starter-kits/language-model-watsonx/advanced/watsonx-extension-all-in-one-openapi.json @@ -0,0 +1,562 @@ +{ + "openapi": "3.0.3", + "info": { + "description": "Minimal spec for commonly used features in watsonx.ai /generation API endpoint. Missing a few of parameters.", + "title": "Simplified watsonx.ai generation API", + "version": "1.1.0" + }, + "servers": [ + { + "url": "https://{region}.ml.cloud.ibm.com", + "description": "watsonx.ai v1", + "variables": { + "region": { + "enum": [ + "us-south", + "eu-de", + "eu-gb", + "jp-tok" + ], + "default": "us-south", + "description": "The region where you want to access watsonx.ai" + } + } + } + ], + "security": [ + { + "oauth2": [] + } + ], + "paths": { + "/ml/v1/text/generation": { + "post": { + "description": "Generation", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. Specify dates in YYYY-MM-DD format. 
The current version is `2023-05-29`.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + }, + "/ml/v1/text/generation_stream": { + "post": { + "description": "Generation Stream", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. Specify dates in YYYY-MM-DD format. The current version is `2023-05-29`.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/TextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + }, + "/ml/v1/deployments/{deployment-id}/text/generation": { + "post": { + "description": "Generation from a deployed prompt", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. 
Specify dates in YYYY-MM-DD format.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "deployment-id", + "in": "path", + "description": "Deployment ID of the prompt deployment you want to use.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentTextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + }, + "/ml/v1/deployments/{deployment-id}/text/generation_stream": { + "post": { + "description": "Generation from a deployed prompt stream", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. Specify dates in YYYY-MM-DD format.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "deployment-id", + "in": "path", + "description": "Deployment ID of the prompt deployment you want to use.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentTextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/TextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + }, + "/ml/v4/deployments/{deployment-id}/ai_service": { + "post": { + "description": "Generation from a deployed AI service", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. 
Specify dates in YYYY-MM-DD format.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "deployment-id", + "in": "path", + "description": "Deployment ID of the prompt deployment you want to use.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentAIServiceTextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentAIServiceTextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + }, + "/ml/v4/deployments/{deployment-id}/ai_service_stream": { + "post": { + "description": "Generation from a deployed AI service stream", + "parameters": [ + { + "name": "version", + "in": "query", + "description": "Release date of the version of the API you want to use. Specify dates in YYYY-MM-DD format.", + "required": true, + "schema": { + "type": "string" + } + }, + { + "name": "deployment-id", + "in": "path", + "description": "Deployment ID of the prompt deployment you want to use.", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeploymentAIServiceTextGenRequest" + } + } + } + }, + "responses": { + "200": { + "description": "Default Response", + "content": { + "text/event-stream": { + "schema": { + "$ref": "#/components/schemas/DeploymentAIServiceTextGenResponse" + } + } + } + }, + "default": { + "description": "Unexpected error" + } + } + } + } + }, + "components": { + "securitySchemes": { + "oauth2": { + "type": "oauth2", + "flows": { + "x-apikey": { + "tokenUrl": "https://iam.cloud.ibm.com/identity/token", + "grantType": "urn:ibm:params:oauth:grant-type:apikey", + "secretKeys": [ + "apikey" + ], + "paramKeys": [], + "scopes": {} + } 
+ } + } + }, + "schemas": { + "TextGenRequest": { + "type": "object", + "required": [ + "model_id", + "input", + "project_id" + ], + "properties": { + "model_id": { + "type": "string", + "description": "The ID of the model to be used for this request. Please refer to the list of models at https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/fm-prompt-lab.html?context=wx", + "example": "google/flan-ul2" + }, + "input": { + "type": "string", + "description": "The input is the prompt to generate completions. Note: The method tokenizes the input internally. It is recommended not to leave any trailing spaces." + }, + "project_id": { + "type": "string", + "description": "id for the associated watsonx project.", + "minLength": 1, + "maxLength": 255, + "pattern": "^[a-zA-Z0-9_-]*$", + "example": "3e992422-d337-47f9-869a-0928e49a3ea6" + }, + "parameters": { + "type": "object", + "properties": { + "decoding_method": { + "type": "string", + "description": "The strategy used for picking the tokens during generation of the output text.", + "example": "greedy" + }, + "random_seed": { + "type": "integer", + "description": "The random number generator seed to use in sampling mode for experimental repeatability.", + "example": "1" + }, + "time_limit": { + "type": "integer", + "description": "The time limit in milliseconds - if not completed within this time, generation will stop. The text generated so far will be returned along with the TIME_LIMIT stop reason.", + "example": "600000" + }, + "temperature": { + "type": "number", + "description": "The value used to module the next token probabilities. The range is 0.00 to 2.00, a value set to 0.00 would make it deterministic.", + "example": "0.7" + }, + "top_k": { + "type": "integer", + "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering. Only applies for sampling mode. 
The range is 1 to 100.", + "example": "50" + }, + "top_p": { + "type": "number", + "description": "Similar to top_k except the candidates to generate the next token are the most likely tokens with probabilities that add up to at least top_p. The range is 0.0 to 1.0 . A value of 1.0 is equivalent to disabled.", + "example": "0.5" + }, + "max_new_tokens": { + "type": "number", + "description": "The maximum number of new tokens to be generated.", + "example": "150" + }, + "min_new_tokens": { + "type": "number", + "description": "The minimum number of new tokens to be generated.", + "example": "50" + }, + "repetition_penalty": { + "type": "number", + "description": "The value which represents the penalty for penalizing tokens that have already been generated or belong to the context.", + "example": "1.10" + }, + "stop_sequences": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Stop sequences are one or more strings which will cause the text generation to stop if/when they are produced as part of the output. Stop sequences encountered prior to the minimum number of tokens being generated will be ignored.", + "example": [ + "\n\n" + ] + }, + "include_stop_sequence": { + "type": "boolean", + "description": "The value to control presence of matched stop sequences from the end of the output text. 
The default is true, meaning that the output will end with the stop sequence text when matched.", + "example": "true" + } + } + } + } + }, + "TextGenResponse": { + "type": "object", + "properties": { + "model_id": { + "description": "The ID of the model to be used for this request", + "type": "string" + }, + "created_at": { + "description": "The date and time of the response", + "type": "string" + }, + "results": { + "type": "array", + "items": { + "type": "object", + "properties": { + "generated_text": { + "description": "The generated text", + "type": "string" + }, + "generated_token_count": { + "description": "The number of tokens in the output", + "type": "integer" + }, + "input_token_count": { + "description": "The number of tokens in the input", + "type": "integer" + }, + "stop_reason": { + "description": "The reason for stopping the generation. Can be NOT_FINISHED - Possibly more tokens to be streamed, MAX_TOKENS - Maximum requested tokens reached, EOS_TOKEN - End of sequence token encountered, CANCELLED - Request canceled by the client, TIME_LIMIT - Time limit reached, STOP_SEQUENCE - Stop sequence encountered, TOKEN_LIMIT - Token limit reached, ERROR - Error encountered", + "type": "string" + } + } + } + } + } + }, + "DeploymentTextGenRequest": { + "type": "object", + "properties": { + "parameters": { + "type": "object", + "properties": { + "prompt_variables": { + "type": "object" + } + } + } + } + }, + "DeploymentTextGenResponse": { + "type": "object", + "properties": { + "model_id": { + "description": "The ID of the model to be used for this request", + "type": "string" + }, + "created_at": { + "description": "The date and time of the response", + "type": "string" + }, + "results": { + "type": "array", + "items": { + "type": "object", + "properties": { + "generated_text": { + "description": "The generated text", + "type": "string" + }, + "generated_token_count": { + "description": "The number of tokens in the output", + "type": "integer" + }, + 
"input_token_count": { + "description": "The number of tokens in the input", + "type": "integer" + }, + "stop_reason": { + "description": "The reason for stopping the generation. Can be NOT_FINISHED - Possibly more tokens to be streamed, MAX_TOKENS - Maximum requested tokens reached, EOS_TOKEN - End of sequence token encountered, CANCELLED - Request canceled by the client, TIME_LIMIT - Time limit reached, STOP_SEQUENCE - Stop sequence encountered, TOKEN_LIMIT - Token limit reached, ERROR - Error encountered", + "type": "string" + } + } + }, + "description": "Outputs of the generation" + } + } + }, + "DeploymentAIServiceTextGenRequest": { + "type": "object", + "properties": { + "messages": { + "type": "array", + "items": { + "type": "object", + "properties": { + "role": { + "type": "string" + }, + "content": { + "type": "string" + } + }, + "required": [ + "role", + "content" + ] + } + } + } + }, + "DeploymentAIServiceTextGenResponse": { + "type": "object", + "properties": { + "choices": { + "type": "array", + "items": { + "type": "object", + "properties": { + "index": { + "description": "index number", + "type": "number" + }, + "message": { + "description": "response message", + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "role": { + "type": "string" + } + } + } + } + }, + "description": "Outputs of the generation" + } + } + } + } + } +} diff --git a/integrations/extensions/starter-kits/language-model-watsonx/advanced/wx-ai-advanced-action.json b/integrations/extensions/starter-kits/language-model-watsonx/advanced/wx-ai-advanced-action.json new file mode 100644 index 00000000..b746cae4 --- /dev/null +++ b/integrations/extensions/starter-kits/language-model-watsonx/advanced/wx-ai-advanced-action.json @@ -0,0 +1,1316 @@ +{ + "name": "wx-ai-advanced-action", + "type": "action", + "valid": true, + "status": "Available", + "created": "2025-03-05T11:51:36.314Z", + "updated": "2025-03-05T12:34:08.704Z", + "language": "xx", + "skill_id": 
"6d97b746-11c0-4da5-b5e8-a5145c9ad7f0", + "workspace": { + "actions": [ + { + "type": "standard", + "steps": [ + { + "step": "step_352", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "Question:" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "question": { + "free_text": true + }, + "resolver": { + "type": "continue" + }, + "variable": "step_352", + "next_step": "step_743" + }, + { + "step": "step_743", + "context": { + "variables": [ + { + "value": { + "variable": "step_352" + }, + "skill_variable": "question" + } + ] + }, + "handlers": [], + "resolver": { + "type": "callout", + "callout": { + "path": "/ml/v4/deployments/{deployment-id}/ai_service_stream", + "type": "integration_interaction", + "method": "POST", + "internal": { + "spec_hash_id": "ecd7f1e1c6fdf70cd6507c553774e5a34ae2203daaeff94d28cfa203c97d9bd6", + "match_scenario": 10, + "catalog_item_id": "54c6e0d4-6879-4144-843f-b11261bde1af" + }, + "request_mapping": { + "body": [ + { + "value": { + "expression": "[\n {\n \"role\": \"user\",\n \"content\": \"${question}\" \n }\n]" + }, + "parameter": "messages" + } + ], + "path": [ + { + "value": { + "skill_variable": "wx_deployed_id" + }, + "parameter": "deployment-id" + } + ], + "query": [ + { + "value": { + "skill_variable": "wx_api_version" + }, + "parameter": "version" + } + ] + }, + "result_variable": "step_743_result_1", + "stream_response_mapping": { + "partial_item": [ + { + "mapping": [ + { + "name": "text", + "value": "choices[0].message.delta.content" + } + ], + "response_type": "text" + } + ] + } + } + }, + "variable": "step_743" + } + ], + "title": "Invoke AI service (stream)", + "action": "action_40731", + "boosts": [], + "handlers": [], + "condition": { + "intent": "action_40731_intent_18609" + }, + "variables": [ + { + "title": "Question:", + "privacy": { + "enabled": false + }, + "variable": "step_352", + "data_type": 
"any" + }, + { + "title": "No response", + "privacy": { + "enabled": false + }, + "variable": "step_743", + "data_type": "any" + }, + { + "privacy": { + "enabled": false + }, + "variable": "step_743_result_1", + "data_type": "any" + } + ], + "launch_mode": "learning", + "next_action": "action_40731-2", + "topic_switch": { + "allowed_from": true, + "allowed_into": true, + "never_return": false + }, + "disambiguation_opt_out": false + }, + { + "type": "standard", + "steps": [ + { + "step": "step_352", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "Question:" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "question": { + "free_text": true + }, + "resolver": { + "type": "continue" + }, + "variable": "step_352", + "next_step": "step_743" + }, + { + "step": "step_743", + "context": { + "variables": [ + { + "value": { + "variable": "step_352" + }, + "skill_variable": "question" + } + ] + }, + "handlers": [], + "resolver": { + "type": "callout", + "callout": { + "path": "/ml/v4/deployments/{deployment-id}/ai_service", + "type": "integration_interaction", + "method": "POST", + "internal": { + "spec_hash_id": "30e3d2b109d6d6941414a9bdd7c0c71f95b23ee3ea752269a43a45c0890d2eb0", + "match_scenario": 10, + "catalog_item_id": "54c6e0d4-6879-4144-843f-b11261bde1af" + }, + "request_mapping": { + "body": [ + { + "value": { + "expression": "[\n {\n \"role\": \"user\",\n \"content\": \"${question}\" \n }\n]" + }, + "parameter": "messages" + } + ], + "path": [ + { + "value": { + "skill_variable": "wx_deployed_id" + }, + "parameter": "deployment-id" + } + ], + "query": [ + { + "value": { + "skill_variable": "wx_api_version" + }, + "parameter": "version" + } + ] + }, + "result_variable": "step_743_result_1" + } + }, + "variable": "step_743", + "next_step": "step_542" + }, + { + "step": "step_542", + "output": { + "generic": [ + { + "values": [ + { + 
"text_expression": { + "concat": [ + { + "scalar": "" + }, + { + "skill_variable": "response" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "context": { + "variables": [ + { + "value": { + "expression": "${step_743_result_1.body.choices}[0].message.content" + }, + "skill_variable": "response" + } + ] + }, + "handlers": [], + "resolver": { + "type": "continue" + }, + "variable": "step_542", + "condition": { + "eq": [ + { + "variable": "step_743_result_1", + "variable_path": "success" + }, + { + "scalar": true + } + ] + } + } + ], + "title": "Invoke AI service", + "action": "action_40731-2", + "boosts": [], + "handlers": [], + "condition": { + "intent": "action_40731_intent_18609-2" + }, + "variables": [ + { + "title": "Question:", + "variable": "step_352", + "data_type": "any" + }, + { + "title": "{variable}", + "privacy": { + "enabled": false + }, + "variable": "step_542", + "data_type": "any" + }, + { + "title": "No response", + "privacy": { + "enabled": false + }, + "variable": "step_743", + "data_type": "any" + }, + { + "privacy": { + "enabled": false + }, + "variable": "step_743_result_1", + "data_type": "any" + } + ], + "launch_mode": "learning", + "next_action": "action_40731-3", + "topic_switch": { + "allowed_from": true, + "allowed_into": true, + "never_return": false + }, + "disambiguation_opt_out": false + }, + { + "type": "standard", + "steps": [ + { + "step": "step_352", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "Question:" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "question": { + "free_text": true + }, + "resolver": { + "type": "continue" + }, + "variable": "step_352", + "next_step": "step_743" + }, + { + "step": "step_743", + "context": { + "variables": [ + { + "value": { + "variable": "step_352" + }, + "skill_variable": "question" + } + ] + }, + "handlers": [], + 
"resolver": { + "type": "callout", + "callout": { + "path": "/ml/v1/deployments/{deployment-id}/text/generation", + "type": "integration_interaction", + "method": "POST", + "internal": { + "spec_hash_id": "dfc4ce3500c3b3980a82872bd0da8aac99eeac8f43662d8379729a5c3f697381", + "match_scenario": 10, + "catalog_item_id": "54c6e0d4-6879-4144-843f-b11261bde1af" + }, + "request_mapping": { + "body": [ + { + "value": { + "expression": "{ \"question\": ${question} }" + }, + "parameter": "parameters.prompt_variables" + } + ], + "path": [ + { + "value": { + "skill_variable": "wx_deployed_id" + }, + "parameter": "deployment-id" + } + ], + "query": [ + { + "value": { + "skill_variable": "wx_api_version" + }, + "parameter": "version" + } + ] + }, + "result_variable": "step_743_result_1" + } + }, + "variable": "step_743", + "next_step": "step_542" + }, + { + "step": "step_542", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "" + }, + { + "skill_variable": "response" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "context": { + "variables": [ + { + "value": { + "expression": "${step_743_result_1.body.results}[0].generated_text" + }, + "skill_variable": "response" + } + ] + }, + "handlers": [], + "resolver": { + "type": "continue" + }, + "variable": "step_542", + "condition": { + "eq": [ + { + "variable": "step_743_result_1", + "variable_path": "success" + }, + { + "scalar": true + } + ] + } + } + ], + "title": "Invoke deployed template", + "action": "action_40731-3", + "boosts": [], + "handlers": [], + "condition": { + "intent": "action_40731_intent_18609-3" + }, + "variables": [ + { + "title": "Question:", + "variable": "step_352", + "data_type": "any" + }, + { + "title": "{variable}", + "variable": "step_542", + "data_type": "any" + }, + { + "title": "No response", + "privacy": { + "enabled": false + }, + "variable": "step_743", + "data_type": "any" + }, + { + "privacy": { 
+ "enabled": false + }, + "variable": "step_743_result_1", + "data_type": "any" + } + ], + "launch_mode": "learning", + "next_action": "action_40731-4", + "topic_switch": { + "allowed_from": true, + "allowed_into": true, + "never_return": false + }, + "disambiguation_opt_out": false + }, + { + "type": "standard", + "steps": [ + { + "step": "step_352", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "Question:" + } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "question": { + "free_text": true + }, + "resolver": { + "type": "continue" + }, + "variable": "step_352", + "next_step": "step_743" + }, + { + "step": "step_743", + "context": { + "variables": [ + { + "value": { + "variable": "step_352" + }, + "skill_variable": "question" + } + ] + }, + "handlers": [], + "resolver": { + "type": "callout", + "callout": { + "path": "/ml/v1/deployments/{deployment-id}/text/generation_stream", + "type": "integration_interaction", + "method": "POST", + "internal": { + "spec_hash_id": "f54a80b6c16955b5770426ea8cbf04e7e6495e8d67145ed834e1459f362450aa", + "match_scenario": 10, + "catalog_item_id": "54c6e0d4-6879-4144-843f-b11261bde1af" + }, + "request_mapping": { + "body": [ + { + "value": { + "expression": "{ \"question\": ${question} }" + }, + "parameter": "parameters.prompt_variables" + } + ], + "path": [ + { + "value": { + "skill_variable": "wx_deployed_id" + }, + "parameter": "deployment-id" + } + ], + "query": [ + { + "value": { + "skill_variable": "wx_api_version" + }, + "parameter": "version" + } + ] + }, + "result_variable": "step_743_result_1", + "stream_response_mapping": { + "partial_item": [ + { + "mapping": [ + { + "name": "text", + "value": "results[0].generated_text" + } + ], + "response_type": "text" + } + ] + } + } + }, + "variable": "step_743" + } + ], + "title": "Invoke deployed template (stream)", + "action": "action_40731-4", + "boosts": [], 
+ "handlers": [], + "condition": { + "intent": "action_40731_intent_18609-4" + }, + "variables": [ + { + "title": "Question:", + "variable": "step_352", + "data_type": "any" + }, + { + "title": "No response", + "privacy": { + "enabled": false + }, + "variable": "step_743", + "data_type": "any" + }, + { + "privacy": { + "enabled": false + }, + "variable": "step_743_result_1", + "data_type": "any" + } + ], + "launch_mode": "learning", + "next_action": "fallback", + "topic_switch": { + "allowed_from": true, + "allowed_into": true, + "never_return": false + }, + "disambiguation_opt_out": false + }, + { + "type": "standard", + "steps": [ + { + "step": "step_001", + "output": { + "generic": [ + { + "values": [ + { + "text": "I'm afraid I don't understand. Please rephrase your question." + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "end_action" + }, + "variable": "step_001", + "condition": { + "lte": [ + { + "system_variable": "no_action_matches_count" + }, + { + "scalar": 3 + } + ] + }, + "next_step": "step_002" + }, + { + "step": "step_002", + "output": { + "generic": [ + { + "values": [ + { + "text": "" + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "invoke_another_action_and_end", + "invoke_action": { + "action": "fallback", + "policy": "default", + "parameters": null, + "result_variable": "step_002_result_1" + } + }, + "variable": "step_002", + "condition": { + "gt": [ + { + "system_variable": "no_action_matches_count" + }, + { + "scalar": 3 + } + ] + } + } + ], + "title": "No matches", + "action": "anything_else", + "boosts": [], + "handlers": [], + "condition": { + "expression": "anything_else" + }, + "variables": [ + { + "title": "I am afraid I do not understand what you are asking, please re-p", + "variable": "step_001", + "data_type": "any" + }, + { + "variable": "step_002", + "data_type": "any" + }, 
+ { + "variable": "step_002_result_1", + "data_type": "any" + } + ], + "disambiguation_opt_out": true + }, + { + "type": "standard", + "steps": [ + { + "step": "step_001", + "output": { + "generic": [ + { + "values": [ + { + "text": "I'm afraid I don't understand. I can connect you to an agent." + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "connect_to_agent", + "response": { + "transfer_info": { + "target": {} + }, + "agent_available": { + "message": "Let's send you to an available agent." + }, + "agent_unavailable": { + "message": "There are no agents available at this time. When one becomes available, we'll connect you." + }, + "message_to_human_agent": "" + } + }, + "variable": "step_001", + "condition": { + "eq": [ + { + "system_variable": "fallback_reason" + }, + { + "scalar": "Step validation failed" + } + ] + }, + "next_step": "step_002" + }, + { + "step": "step_002", + "output": { + "generic": [ + { + "values": [ + { + "text": "Sorry I couldn't assist you. I will connect you to an agent right away." + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "connect_to_agent", + "response": { + "transfer_info": { + "target": {} + }, + "agent_available": { + "message": "Let's send you to an available agent." + }, + "agent_unavailable": { + "message": "There are no agents available at this time. When one becomes available, we'll connect you." + }, + "message_to_human_agent": "" + } + }, + "variable": "step_002", + "condition": { + "eq": [ + { + "system_variable": "fallback_reason" + }, + { + "scalar": "Agent requested" + } + ] + }, + "next_step": "step_003" + }, + { + "step": "step_003", + "output": { + "generic": [ + { + "values": [ + { + "text": "I am afraid I do not understand what you are asking, let me connect you to an agent." 
+ } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "connect_to_agent", + "response": { + "transfer_info": { + "target": {} + }, + "agent_available": { + "message": "Let's send you to an available agent." + }, + "agent_unavailable": { + "message": "There are no agents available at this time. When one becomes available, we'll connect you." + }, + "message_to_human_agent": "" + } + }, + "variable": "step_003", + "condition": { + "eq": [ + { + "system_variable": "fallback_reason" + }, + { + "scalar": "No action matches" + } + ] + }, + "next_step": "step_004" + }, + { + "step": "step_004", + "output": { + "generic": [ + { + "values": [ + { + "text": "It seems this conversation would be best managed by a human agent. Let me connect you to one of our agents." + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "connect_to_agent", + "response": { + "transfer_info": { + "target": {} + }, + "agent_available": { + "message": "Let's send you to an available agent." + }, + "agent_unavailable": { + "message": "There are no agents available at this time. When one becomes available, we'll connect you." + }, + "message_to_human_agent": "" + } + }, + "variable": "step_004", + "condition": { + "eq": [ + { + "system_variable": "fallback_reason" + }, + { + "scalar": "Danger word detected" + } + ] + }, + "next_step": "step_005" + }, + { + "step": "step_005", + "output": { + "generic": [ + { + "values": [ + { + "text": "It seems this conversation would be best managed by a human agent. Let me connect you to one of our agents." + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "connect_to_agent", + "response": { + "transfer_info": { + "target": {} + }, + "agent_available": { + "message": "Let's send you to an available agent." 
+ }, + "agent_unavailable": { + "message": "There are no agents available at this time. When one becomes available, we'll connect you." + }, + "message_to_human_agent": "" + } + }, + "variable": "step_005", + "condition": { + "eq": [ + { + "system_variable": "fallback_reason" + }, + { + "scalar": "Profanity detected" + } + ] + } + } + ], + "title": "Fallback", + "action": "fallback", + "boosts": [], + "handlers": [], + "condition": { + "intent": "fallback_connect_to_agent" + }, + "variables": [ + { + "title": "I'm afraid I don't understand. I can connect you to an agent.", + "variable": "step_001", + "data_type": "any" + }, + { + "title": "Sorry I couldn't assist you. I will connect you to an agent righ", + "variable": "step_002", + "data_type": "any" + }, + { + "title": "I am afraid I do not understand what you are asking, let me conn", + "variable": "step_003", + "data_type": "any" + }, + { + "title": "It seems this conversation would be best managed", + "variable": "step_004", + "data_type": "any" + }, + { + "title": "Profanity - It seems this conversation", + "variable": "step_005", + "data_type": "any" + } + ], + "next_action": "run_always", + "disambiguation_opt_out": true + }, + { + "type": "standard", + "steps": [ + { + "step": "danger_word_detected", + "title": "Connect to agent", + "handlers": [], + "resolver": { + "type": "fallback" + }, + "variable": "danger_word_detected_variable", + "condition": { + "entity": "danger_words" + }, + "next_step": "profanity_detected" + }, + { + "step": "profanity_detected", + "title": "Show warning", + "output": { + "generic": [ + { + "values": [ + { + "text_expression": { + "concat": [ + { + "scalar": "Please use appropriate language when interacting with the assistant." 
+ } + ] + } + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [ + { + "type": "max_hits", + "handler": "max_hits_handler", + "resolver": { + "type": "fallback" + } + } + ], + "max_hits": 2, + "resolver": { + "type": "end_action" + }, + "variable": "profanity_detected_variable", + "condition": { + "entity": "profane_words" + } + } + ], + "title": "Trigger word detected", + "action": "run_always", + "boosts": [], + "handlers": [], + "variables": [ + { + "title": "Profanity detected", + "variable": "danger_word_detected_variable", + "data_type": "any" + }, + { + "title": "Profane word detected", + "variable": "profanity_detected_variable", + "data_type": "any" + } + ], + "next_action": "anything_else" + }, + { + "type": "standard", + "steps": [ + { + "step": "step_001", + "output": { + "generic": [ + { + "values": [ + { + "text": "Welcome, how can I assist you?" + } + ], + "response_type": "text", + "selection_policy": "sequential" + } + ] + }, + "handlers": [], + "resolver": { + "type": "end_action" + }, + "variable": "step_001" + } + ], + "title": "Greet customer", + "action": "welcome", + "boosts": [], + "handlers": [], + "condition": { + "expression": "welcome" + }, + "variables": [ + { + "variable": "step_001", + "data_type": "any" + } + ], + "next_action": "action_40731", + "disambiguation_opt_out": true + } + ], + "intents": [ + { + "intent": "action_40731_intent_18609", + "examples": [ + { + "text": "Invoke AI service" + } + ] + }, + { + "intent": "action_40731_intent_18609-2", + "examples": [ + { + "text": "Invoke AI service" + } + ] + }, + { + "intent": "action_40731_intent_18609-3", + "examples": [ + { + "text": "Invoke deployed template" + } + ] + }, + { + "intent": "action_40731_intent_18609-4", + "examples": [ + { + "text": "Invoke deployed template (stream)" + } + ] + }, + { + "intent": "fallback_connect_to_agent", + "examples": [ + { + "text": "Agent help" + }, + { + "text": "Call agent" + }, + { + 
"text": "Can I connect to an agent?" + }, + { + "text": "I would like to speak to a human" + }, + { + "text": "I would like to speak to someone" + } + ], + "description": "Please transfer me to an agent" + } + ], + "entities": [ + { + "entity": "danger_words", + "values": [], + "fuzzy_match": false + }, + { + "entity": "profane_words", + "values": [], + "fuzzy_match": false + } + ], + "metadata": { + "api_version": { + "major_version": "v2", + "minor_version": "2018-11-08" + } + }, + "variables": [ + { + "title": "question", + "privacy": { + "enabled": false + }, + "variable": "question", + "data_type": "string", + "description": "" + }, + { + "title": "response", + "privacy": { + "enabled": false + }, + "variable": "response", + "data_type": "string", + "description": "" + }, + { + "title": "wx_api_version", + "privacy": { + "enabled": false + }, + "variable": "wx_api_version", + "data_type": "string", + "description": "", + "initial_value": { + "scalar": "2023-05-29" + } + }, + { + "title": "wx_deployed_id", + "privacy": { + "enabled": false + }, + "variable": "wx_deployed_id", + "data_type": "string", + "description": "", + "initial_value": { + "scalar": "83fad4bc-bc36-4ab4-9532-3268ec6ba557" + } + }, + { + "title": "wx_project_id", + "privacy": { + "enabled": false + }, + "variable": "wx_project_id", + "data_type": "string", + "description": "", + "initial_value": { + "scalar": "39e65882-0638-4b79-8f89-ce680924399f" + } + } + ], + "data_types": [], + "collections": [], + "counterexamples": [], + "system_settings": { + "variable": { + "format": { + "time": { + "pattern": "short" + }, + "currency": { + "fraction_digits": 2 + } + } + }, + "auto_learn": { + "apply": true + }, + "topic_switch": { + "enabled": true, + "messages": {} + }, + "disambiguation": { + "prompt": "Did you mean:", + "enabled": true, + "randomize": true, + "max_suggestions": 5, + "suggestion_text_policy": "title", + "none_of_the_above_prompt": "None of the above", + 
"use_connect_to_support_prompt": "Connect to support", + "single_answer_clarification_prompt": "Something else" + }, + "search_routing": { + "target": "conversational_search" + } + }, + "learning_opt_out": false + }, + "description": "created for assistant d6918313-5cbb-4b58-b11f-8686475aa60f", + "assistant_id": "d6918313-5cbb-4b58-b11f-8686475aa60f", + "workspace_id": "6d97b746-11c0-4da5-b5e8-a5145c9ad7f0", + "dialog_settings": {}, + "next_snapshot_version": "1", + "environment_id": "868efe14-537f-42ab-8a27-18317a42a101" +} \ No newline at end of file