This endpoint takes in three parameters:
action
: the name of the action/function that is being passed to the LLM
parameters
: the parameters for the action/function call that is being passed to the LLM
action_id
: the action id for this action call to the LLM
We return a boolean, detection, indicating whether a threat was found, along with an info dictionary of the threat categories checked, each with its own detection flag and relevant metadata.
Request Example
import uuid
import requests

def send_action_to_promptarmor(action: str, action_id: str, params: dict, api_key: str, source=None, destination=None):
    promptarmor_headers = {
        "PromptArmor-Auth": f"Bearer {api_key}",
        "PromptArmor-Session-ID": str(uuid.uuid4()),  # The session ID is unique to each user session (e.g. a workflow or conversation)
        "Content-Type": "application/json"
    }
    url = "https://api.aidr.promptarmor.com/v1/analyze/action"
    data = {
        "action": action,
        "action_id": action_id,  # You should already be tracking this to query for the outputs of the action
        "params": params,
        "source": source,
        "destination": destination
    }
    response = requests.post(url, headers=promptarmor_headers, json=data, verify=True)
    result = response.json()
    print("Detection:", result["detection"])
    return result
Response
{
"detection": false,
"info": {
"Code": {
"detection": false,
"metadata": {}
},
"HTML": {
"detection": false,
"metadata": {}
},
"HiddenText": {
"detection": false,
"metadata": {}
},
"InvisibleUnicode": {
"detection": false,
"metadata": null
},
"Jailbreak": {
"detection": false
},
"MarkdownImage": {
"detection": false,
"metadata": null
},
"MarkdownURL": {
"detection": false,
"metadata": null
},
"Secrets": {
"detection": false,
"metadata": {}
},
"ThreatIntel": {
"detection": false
},
"Anomaly": {
"detection": false,
"metadata": {}
}
}
}
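As a minimal sketch of consuming this response, you might gate the action on the top-level detection flag and report which categories fired; the blocking policy shown is illustrative, not part of the API:

result = send_action_to_promptarmor(action, action_id, params, api_key)  # arguments as in the request example above
if result["detection"]:
    # Collect the names of the categories (e.g. "Jailbreak", "HiddenText") whose detection flag is true.
    flagged = [name for name, entry in result.get("info", {}).items() if entry.get("detection")]
    # Illustrative policy: refuse to execute the action and surface the matching categories.
    raise RuntimeError("PromptArmor flagged this action: " + ", ".join(flagged))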