import os

from syllable_sdk import SyllableSDK, models

# Use the SDK as a context manager so the underlying HTTP client is
# closed when the block exits.
with SyllableSDK(
    api_key_header=os.getenv("SYLLABLESDK_API_KEY_HEADER", ""),
) as ss_client:
    # List prompts filtered by name within a last-updated datetime window.
    # `search_fields` and `search_field_values` correspond by index.
    res = ss_client.prompts.list(
        page=0,  # 0-based page number
        limit=25,  # maximum number of items to return
        search_fields=[models.PromptProperties.NAME],
        search_field_values=["Some Object Name"],
        start_datetime="2023-01-01T00:00:00Z",
        end_datetime="2024-01-01T00:00:00Z",
    )
    # Handle response
    print(res)
"items": [
{
"name": "<string>",
"type": "<string>",
"llm_config": {
"provider": "azure_openai",
"model": "gpt-4o",
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123
},
"id": 123,
"last_updated": "2024-01-01T12:00:00Z",
"description": "Prompt for a weather agent.",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"session_end_enabled": false,
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
"last_updated_by": "[email protected]",
"session_end_tool": {
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
},
"agent_count": 5,
"version_number": 1,
"tools_full": [
{
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
}
]
}
],
"page": 123,
"page_size": 123,
"total_pages": 4,
"total_count": 100
}

List the existing prompts
import os

from syllable_sdk import SyllableSDK, models

# Use the SDK as a context manager so the underlying HTTP client is
# closed when the block exits.
with SyllableSDK(
    api_key_header=os.getenv("SYLLABLESDK_API_KEY_HEADER", ""),
) as ss_client:
    # List prompts filtered by name within a last-updated datetime window.
    # `search_fields` and `search_field_values` correspond by index.
    res = ss_client.prompts.list(
        page=0,  # 0-based page number
        limit=25,  # maximum number of items to return
        search_fields=[models.PromptProperties.NAME],
        search_field_values=["Some Object Name"],
        start_datetime="2023-01-01T00:00:00Z",
        end_datetime="2024-01-01T00:00:00Z",
    )
    # Handle response
    print(res)
"items": [
{
"name": "<string>",
"type": "<string>",
"llm_config": {
"provider": "azure_openai",
"model": "gpt-4o",
"version": "2024-05-13",
"api_version": "2024-06-01",
"temperature": 1,
"seed": 123
},
"id": 123,
"last_updated": "2024-01-01T12:00:00Z",
"description": "Prompt for a weather agent.",
"context": "You are a weather agent. Answer the user's questions about weather and nothing else.",
"tools": [],
"session_end_enabled": false,
"session_end_tool_id": 1,
"edit_comments": "Updated prompt text to include requirement to not answer questions that aren't about weather.",
"last_updated_by": "[email protected]",
"session_end_tool": {
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
},
"agent_count": 5,
"version_number": 1,
"tools_full": [
{
"name": "<string>",
"definition": {
"tool": {
"function": {
"name": "<string>",
"description": "<string>",
"parameters": "<unknown>"
},
"type": "function"
},
"type": "endpoint",
"endpoint": {
"url": "<string>",
"method": "get",
"argument_location": "body"
},
"context": {
"task": {
"id": "<string>",
"config": {},
"variables": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
]
}
],
"metadata": {
"priority": 123,
"parent_tool_name": "<string>"
},
"tool": {
"name": "<string>",
"description": "<string>"
},
"type": "expression",
"version": "v1alpha",
"inputs": [
{
"name": "<string>",
"type": "string",
"description": "<string>",
"title": "<string>",
"format": "<string>",
"pattern": "<string>",
"enum": [
"<string>"
],
"examples": [
"<unknown>"
],
"required": true
}
],
"expression": {
"expression": "<string>",
"type": "cel"
},
"output": "<unknown>",
"on": {
"start": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
],
"submit": [
{
"name": "<string>",
"value": "<unknown>",
"value_from": {
"expression": "<string>",
"type": "cel"
},
"if": {
"expression": "<string>",
"type": "cel"
},
"action": "set"
}
]
}
}
},
"defaults": "<unknown>",
"static_parameters": [
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
],
"result": "<unknown>",
"options": {
"propagate_tool_result": false
}
},
"service_id": 123,
"id": 123,
"last_updated": "2023-11-07T05:31:56Z",
"last_updated_by": "<string>",
"last_updated_comments": "Updated to use new API endpoint",
"service_name": "<string>",
"prompts_info": [
{
"id": 123,
"name": "<string>"
}
],
"agents_info": [
{
"id": 123,
"name": "<string>"
}
]
}
]
}
],
"page": 123,
"page_size": 123,
"total_pages": 4,
"total_count": 100
}

The page number from which to start (0-based)
x >= 0
The maximum number of items to return
x >= 0

String names of fields to search. Correspond by index to search field values
Names of prompt fields supported for filtering/sorting on list endpoint.
id, name, name_exact, description, name_description, context, tools, llm_config, last_updated, last_updated_by, agent_count, session_end_enabled

Values of fields to search. Correspond by index to search fields. Unless field name contains "list", an individual search field value cannot be a list
The field whose value should be used to order the results
id, name, name_exact, description, name_description, context, tools, llm_config, last_updated, last_updated_by, agent_count, session_end_enabled

"name"
The direction in which to order the results
asc, desc

The fields to include in the response
Names of prompt fields supported for filtering/sorting on list endpoint.
id, name, name_exact, description, name_description, context, tools, llm_config, last_updated, last_updated_by, agent_count, session_end_enabled

The start datetime for filtering results
"2023-01-01T00:00:00Z"
The end datetime for filtering results
"2024-01-01T00:00:00Z"
Successful Response
List of items returned from the query
Show child attributes
The prompt name
The type of the prompt
The configuration for the LLM that the prompt uses
Show child attributes
Provider of the LLM model.
azure_openai, google, openai

Name of the model. Must match the deployment name in Azure AI Studio.
Optional model version.
"2024-05-13"
Version of the provider's API.
"2024-06-01"
Temperature parameter for the model. Determines randomness of responses - higher is more random, lower is more focused. Must be between 0.0 and 2.0, inclusive.
1
Controls the reproducibility of the job. The LLM will give the same or similar responses given the same inputs in multiple conversations with the same seed.
123
The internal ID of the prompt
The last updated date of the prompt
"2024-01-01T12:00:00Z"
The description of the prompt
"Prompt for a weather agent."
The prompt text that will be sent to the LLM at the beginning of the conversation
"You are a weather agent. Answer the user's questions about weather and nothing else."
Names of the tools to which the prompt has access (DEPRECATED - use information from full tools field instead)
Whether session end functionality is enabled for this prompt
ID of the optional session end tool associated with the prompt
1
The comments for the most recent edit to the prompt
"Updated prompt text to include requirement to not answer questions that aren't about weather."
Email address of the user who most recently updated the prompt
The session end tool associated with the prompt
Show child attributes
The name of the tool
The definition of the tool
Show child attributes
The tool definition to be used by the OpenAI API.
Show child attributes
The tool function definition, including the JSON Schema of its parameters.
Show child attributes
The name of the function/tool call.
The description of the tool.
The JSON Schema of parameters of the function/tool call.
Always function.
"function"

The action to take when the LLM calls the tool.
action, endpoint, context, log

"endpoint"
The configuration for an HTTP API call.
Show child attributes
The endpoint URL of the external service to call.
The HTTP method to use for the service call.
get, post, put, delete

How to pass the arguments to the request.
body, form, path, query

The configuration for a context tool.
Show child attributes
Task implementation details
Show child attributes
A unique identifier for the task.
Show child attributes
The name of the property.
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
string, number, integer, boolean, object, array, null

"expression"

"v1alpha"

Show child attributes
The name of the property.
string, number, integer, boolean, object, array, null

Actions to execute when the configured events occur.
Show child attributes
Actions to execute on the first input from the user.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"

Actions to execute when the tool/step is submitted by the LLM.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"

The default values for the parameters of the function/tool call.
Parameters for the tool whose values should be set at config time (i.e., not provided by the LLM).
Show child attributes
The name of the parameter - must be unique within the tool.
Whether the parameter is required to have a value assigned.
The expected type for the parameter.
string, int, boolean, data_source_list

The description of the parameter.
"Whether the temperature information should be fetched in celsius or fahrenheit."
The default value for the parameter. If type is string, must be a string. If type is int, must be an int. If type is boolean, must be a boolean. If type is data_source_list, must be a list of strings (data source names).
"fahrenheit"
[
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
]

The optional result of the tool call.
The options for the tool. Ie allows to propagate the tool result to the caller via propagate_tool_result flag.
Show child attributes
Whether the tool call result should be propagated to the caller.
Internal ID of the service to which the tool belongs
The internal ID of the tool
The timestamp of the most recent update to the tool
The email of the user who last updated the tool
Comments for the most recent edit to the tool.
"Updated to use new API endpoint"
The name of the service to which the tool belongs
The number of agents using the prompt
5
The version number of the current version of the prompt
1
Full definitions of tools to which the prompt has access
Show child attributes
The name of the tool
The definition of the tool
Show child attributes
The tool definition to be used by the OpenAI API.
Show child attributes
The tool function definition, including the JSON Schema of its parameters.
Show child attributes
The name of the function/tool call.
The description of the tool.
The JSON Schema of parameters of the function/tool call.
Always function.
"function"

The action to take when the LLM calls the tool.
action, endpoint, context, log

"endpoint"
The configuration for an HTTP API call.
Show child attributes
The endpoint URL of the external service to call.
The HTTP method to use for the service call.
get, post, put, delete

How to pass the arguments to the request.
body, form, path, query

The configuration for a context tool.
Show child attributes
Task implementation details
Show child attributes
A unique identifier for the task.
Show child attributes
The name of the property.
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
string, number, integer, boolean, object, array, null

"expression"

"v1alpha"

Show child attributes
The name of the property.
string, number, integer, boolean, object, array, null

Actions to execute when the configured events occur.
Show child attributes
Actions to execute on the first input from the user.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"

Actions to execute when the tool/step is submitted by the LLM.
Show child attributes
Destination path to mutate (e.g. output.foo).
Initial value of the variable.
Expression to compute initial value (mutually exclusive with value).
An expression that must evaluate to true for the action to be applied.
"set"

The default values for the parameters of the function/tool call.
Parameters for the tool whose values should be set at config time (i.e., not provided by the LLM).
Show child attributes
The name of the parameter - must be unique within the tool.
Whether the parameter is required to have a value assigned.
The expected type for the parameter.
string, int, boolean, data_source_list

The description of the parameter.
"Whether the temperature information should be fetched in celsius or fahrenheit."
The default value for the parameter. If type is string, must be a string. If type is int, must be an int. If type is boolean, must be a boolean. If type is data_source_list, must be a list of strings (data source names).
"fahrenheit"
[
{
"default": "fahrenheit",
"description": "Whether the temperature information should be fetched in Celsius or Fahrenheit",
"name": "temperature_unit",
"required": false,
"type": "string"
}
]

The optional result of the tool call.
The options for the tool. Ie allows to propagate the tool result to the caller via propagate_tool_result flag.
Show child attributes
Whether the tool call result should be propagated to the caller.
Internal ID of the service to which the tool belongs
The internal ID of the tool
The timestamp of the most recent update to the tool
The email of the user who last updated the tool
Comments for the most recent edit to the tool.
"Updated to use new API endpoint"
The name of the service to which the tool belongs
The page number of the results (0-based)
The number of items returned per page
The total number of pages of results given the indicated page size
4
The total number of items returned from the query
100