import os
from syllable_sdk import SyllableSDK
with SyllableSDK(
api_key_header=os.getenv("SYLLABLESDK_API_KEY_HEADER", ""),
) as ss_client:
res = ss_client.prompts.create(request={
"name": "Weather Agent Prompt",
"description": "Prompt for a weather agent.",
"type": "prompt_v1",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"llm_config": {
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123,
},
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
})
# Handle response
print(res){
"name": "<string>",
"type": "<string>",
"llm_config": {
"provider": "azure_openai",
"model": "gpt-4o",
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123
},
"id": 123,
"last_updated": "2024-01-01T12:00:00Z",
"description": "Prompt for a weather agent.",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"session_end_enabled": false,
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
"last_updated_by": "[email protected]",
"session_end_tool": {
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
},
"agent_count": 5,
"version_number": 1,
"tools_full": [
{
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
}
]
}

Create a new prompt
import os
from syllable_sdk import SyllableSDK
with SyllableSDK(
api_key_header=os.getenv("SYLLABLESDK_API_KEY_HEADER", ""),
) as ss_client:
res = ss_client.prompts.create(request={
"name": "Weather Agent Prompt",
"description": "Prompt for a weather agent.",
"type": "prompt_v1",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"llm_config": {
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123,
},
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
})
# Handle response
print(res){
"name": "<string>",
"type": "<string>",
"llm_config": {
"provider": "azure_openai",
"model": "gpt-4o",
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123
},
"id": 123,
"last_updated": "2024-01-01T12:00:00Z",
"description": "Prompt for a weather agent.",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"session_end_enabled": false,
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
"last_updated_by": "[email protected]",
"session_end_tool": {
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
},
"agent_count": 5,
"version_number": 1,
"tools_full": [
{
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
}
]
}

Request model to create a prompt.
The prompt name
The type of the prompt
The configuration for the LLM that the prompt uses
Show child attributes
Provider of the LLM model.
azure_openai, google, openai

Name of the model. Must match the deployment name in Azure AI Studio.
Optional model version.
"2024-05-13"
Version of the provider's API.
"2024-06-01"
Temperature parameter for the model. Determines randomness of responses - higher is more random, lower is more focused. Must be between 0.0 and 2.0, inclusive.
1
Controls the reproducibility of the job. The LLM will give the same or similar responses given the same inputs in multiple conversations with the same seed.
123
The description of the prompt
"Prompt for a weather agent."
The prompt text that will be sent to the LLM at the beginning of the conversation
"You are a weather agent. Answer the user's questions about weather and nothing else."
Names of tools to which the prompt has access
Whether session end functionality is enabled for this prompt
ID of the optional session end tool associated with the prompt
1
The comments for the most recent edit to the prompt
"Updated prompt text to include requirement to not answer questions that aren't about weather."
Whether to include the default tools (hangup) in the list of tools for the prompt. If you disable this during creation, you might want to disable it during updates as well, otherwise the default tools will be added when updating the prompt.
Successful Response
Response model for prompt operations. A prompt defines the behavior of an agent by delivering instructions to the LLM about how the agent should behave. A prompt can be linked to one or more agents. A prompt can also be linked to tools to allow an agent using it to use those tools. For more information, see Console docs.
The prompt name
The type of the prompt
The configuration for the LLM that the prompt uses
Show child attributes
Provider of the LLM model.
azure_openai, google, openai

Name of the model. Must match the deployment name in Azure AI Studio.
Optional model version.
"2024-05-13"
Version of the provider's API.
"2024-06-01"
Temperature parameter for the model. Determines randomness of responses - higher is more random, lower is more focused. Must be between 0.0 and 2.0, inclusive.
1
Controls the reproducibility of the job. The LLM will give the same or similar responses given the same inputs in multiple conversations with the same seed.
123
The internal ID of the prompt
The last updated date of the prompt
"2024-01-01T12:00:00Z"
The description of the prompt
"Prompt for a weather agent."
The prompt text that will be sent to the LLM at the beginning of the conversation
"You are a weather agent. Answer the user's questions about weather and nothing else."
Names of the tools to which the prompt has access (DEPRECATED - use information from full tools field instead)
Whether session end functionality is enabled for this prompt
ID of the optional session end tool associated with the prompt
1
The comments for the most recent edit to the prompt
"Updated prompt text to include requirement to not answer questions that aren't about weather."
Email address of the user who most recently updated the prompt
The session end tool associated with the prompt
Show child attributes
The name of the tool
The definition of the tool
Show child attributes
The tool definition to be used by the OpenAI API.
Show child attributes
The tool function definition, including the JSON Schema of its parameters.
Show child attributes
The name of the function/tool call.
The description of the tool.
The JSON Schema of parameters of the function/tool call.
Always function.
"function"The action to take when the LLM calls the tool.
action, endpoint, context, log "endpoint"
The configuration for an HTTP API call.
Show child attributes
The endpoint URL of the external service to call.
The HTTP method to use for the service call.
get, post, put, delete

How to pass the arguments to the request.

body, form, path, query

The configuration for a context tool.
Show child attributes
Task implementation details
Show child attributes
A unique identifier for the task.
Show child attributes
The name of the property.
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
string, number, integer, boolean, object, array, null

"expression"

"v1alpha"

Show child attributes
The name of the property.
string, number, integer, boolean, object, array, null

Actions to execute when the configured events occur.
Show child attributes
Actions to execute on the first input from the user.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"Actions to execute when the tool/step is submitted by the LLM.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"The default values for the parameters of the function/tool call.
Parameters for the tool whose values should be set at config time (i.e., not provided by the LLM).
Show child attributes
The name of the parameter - must be unique within the tool.
Whether the parameter is required to have a value assigned.
The expected type for the parameter.
string, int, boolean, data_source_list

The description of the parameter.
"Whether the temperature information should be fetched in celsius or fahrenheit."
The default value for the parameter. If type is string, must be a string. If type is int, must be an int. If type is boolean, must be a boolean. If type is data_source_list, must be a list of strings (data source names).
"fahrenheit"
[
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
]

The optional result of the tool call.

The options for the tool, i.e., allows the tool result to be propagated to the caller via the propagate_tool_result flag.
Show child attributes
Whether the tool call result should be propagated to the caller.
Internal ID of the service to which the tool belongs
The internal ID of the tool
The timestamp of the most recent update to the tool
The email of the user who last updated the tool
Comments for the most recent edit to the tool.
"Updated to use new API endpoint"
The name of the service to which the tool belongs
The number of agents using the prompt
5
The version number of the current version of the prompt
1
Full definitions of tools to which the prompt has access
Show child attributes
The name of the tool
The definition of the tool
Show child attributes
The tool definition to be used by the OpenAI API.
Show child attributes
The tool function definition, including the JSON Schema of its parameters.
Show child attributes
Always function.
"function"The action to take when the LLM calls the tool.
action, endpoint, context, log "endpoint"
The configuration for an HTTP API call.
Show child attributes
The endpoint URL of the external service to call.
The HTTP method to use for the service call.
get, post, put, delete

How to pass the arguments to the request.

body, form, path, query

The configuration for a context tool.
Show child attributes
Task implementation details
Show child attributes
A unique identifier for the task.
Show child attributes
The name of the property.
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
string, number, integer, boolean, object, array, null

"expression"

"v1alpha"

Show child attributes
The name of the property.
string, number, integer, boolean, object, array, null

Actions to execute when the configured events occur.
Show child attributes
Actions to execute on the first input from the user.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"Actions to execute when the tool/step is submitted by the LLM.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"The default values for the parameters of the function/tool call.
Parameters for the tool whose values should be set at config time (i.e., not provided by the LLM).
Show child attributes
The name of the parameter - must be unique within the tool.
Whether the parameter is required to have a value assigned.
The expected type for the parameter.
string, int, boolean, data_source_list

The description of the parameter.
"Whether the temperature information should be fetched in celsius or fahrenheit."
The default value for the parameter. If type is string, must be a string. If type is int, must be an int. If type is boolean, must be a boolean. If type is data_source_list, must be a list of strings (data source names).
"fahrenheit"
[
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
]

The optional result of the tool call.

The options for the tool, i.e., allows the tool result to be propagated to the caller via the propagate_tool_result flag.
Show child attributes
Whether the tool call result should be propagated to the caller.
Internal ID of the service to which the tool belongs
The internal ID of the tool
The timestamp of the most recent update to the tool
The email of the user who last updated the tool
Comments for the most recent edit to the tool.
"Updated to use new API endpoint"
The name of the service to which the tool belongs