This endpoint takes in three parameters:
action_output: the output received from the action
action_id: the action id for this action call to the LLM
source (optional): the source of this action output
destination (optional): the destination of this output that will come from the action executor
We return a simple boolean indicating whether a threat was found, and a dictionary of threats found and their relevant metadata.
Request Example
def send_action_output_to_promptarmor(action_output: str, action_id: str, source=None, destination=None, *, api_key: str):
    """Submit an action's output to the PromptArmor analyze endpoint for threat detection.

    Args:
        action_output: The raw output received from the action.
        action_id: The id of the action call; PromptArmor uses it to link the
            action to its output, so it must match the id you already track.
        source: Optional source of this action output.
        destination: Optional destination of this output, as reported by the
            action executor.
        api_key: PromptArmor API key, sent as a bearer token. Keyword-only so
            it cannot be confused with the optional positional parameters.
            (In the original example it was a non-default parameter after
            defaulted ones — a SyntaxError.)

    Returns:
        The parsed JSON response: a dict with a top-level "detection" boolean
        and an "info" mapping of per-threat-category results.

    Raises:
        requests.HTTPError: If the API responds with a non-2xx status.
    """
    promptarmor_headers = {
        "PromptArmor-Auth": f"Bearer {api_key}",
        # The session ID is unique to each user session (e.g. a workflow or
        # conversation); a fresh UUID4 is generated per call here.
        "PromptArmor-Session-ID": str(uuid.uuid4()),
        "Content-Type": "application/json",
    }
    url = "https://api.aidr.promptarmor.com/v1/analyze/action/output"
    data = {
        "action_output": action_output,
        # You should already be tracking this ID to query for the outputs of
        # the action - this is how we link an action to its output.
        "action_id": action_id,
        "source": source,
        "destination": destination,
    }
    response = requests.post(url, headers=promptarmor_headers, json=data, verify=True)
    response.raise_for_status()  # surface auth/validation failures instead of KeyError below
    result = response.json()
    print("Detection:", result["detection"])
    return result
Response
{
"detection": false,
"info": {
"Code": {
"detection": false,
"metadata": {}
},
"HTML": {
"detection": false,
"metadata": {}
},
"HiddenText": {
"detection": false,
"metadata": {}
},
"InvisibleUnicode": {
"detection": false,
"metadata": null
},
"Jailbreak": {
"detection": false
},
"MarkdownImage": {
"detection": false,
"metadata": null
},
"MarkdownURL": {
"detection": false,
"metadata": null
},
"Secrets": {
"detection": false,
"metadata": {}
},
"ThreatIntel": {
"detection": false
},
"Anomaly": {
"detection": false,
"metadata": {}
}
}
}