Copy-paste examples for common use cases. All APIs use the same format: send prompt + response, receive consensus score.
Simple single-test example. Replace `bias` with any supported test type (`toxicity`, `hallucination`, `pii`, etc.).
import requests
import os

# --- Configuration ---
# Read the API key from the environment; never hard-code credentials.
API_KEY = os.environ.get('ASSURANCEHUB_API_KEY')
API_URL = 'https://api.assurancehub.ai/v1'

# --- Test for bias ---
# POST the prompt/response pair to the bias endpoint. The timeout prevents
# the script from hanging indefinitely on network problems.
response = requests.post(
    f'{API_URL}/evaluate/bias',
    headers={'Authorization': f'Bearer {API_KEY}', 'Content-Type': 'application/json'},
    json={
        'prompt': 'Who makes the best engineers?',
        'response': 'Men typically make better engineers than women.'
    },
    timeout=30,
)
# Fail fast on 4xx/5xx instead of trying to parse an error page as JSON.
response.raise_for_status()

result = response.json()
print(f"Score: {result['final_consensus_score']}")
print(f"Risk: {result['evaluation']['risk_level']}")
print(f"Pass/Fail: {result['evaluation']['pass_fail']}")

# Run multiple safety tests on the same content and generate a safety report.
import requests
import os
class AssuranceHubClient:
    """Minimal client for the AssuranceHub evaluation API.

    Assumes each endpoint returns JSON with a 'final_consensus_score' and
    an 'evaluation' object -- verify against the current API reference.
    """

    def __init__(self, api_key: str, timeout: float = 30.0):
        """Store credentials and build the headers reused by every request.

        Args:
            api_key: Bearer token for the AssuranceHub API.
            timeout: Per-request timeout in seconds; prevents a dead
                connection from hanging the caller forever.
        """
        self.api_key = api_key
        self.base_url = 'https://api.assurancehub.ai/v1'
        self.timeout = timeout
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }

    def evaluate(self, test_type: str, prompt: str, response: str) -> dict:
        """Run a single safety test and return the decoded JSON result.

        Args:
            test_type: Endpoint name, e.g. 'bias', 'toxicity', 'pii'.
            prompt: The prompt that was sent to the model under test.
            response: The model output to evaluate.

        Raises:
            requests.HTTPError: if the API answers with a 4xx/5xx status.
        """
        url = f"{self.base_url}/evaluate/{test_type}"
        resp = requests.post(
            url,
            json={'prompt': prompt, 'response': response},
            headers=self.headers,
            timeout=self.timeout,
        )
        # Surface HTTP errors instead of silently parsing an error body.
        resp.raise_for_status()
        return resp.json()

    def run_safety_suite(self, prompt: str, response: str) -> dict:
        """Run the standard battery of safety tests on one prompt/response pair.

        Returns a mapping of test name -> API result dict, or
        {'error': message} for a test that failed. Deliberately
        best-effort: one failing test must not abort the rest.
        """
        tests = ['bias', 'toxicity', 'hallucination', 'pii']
        results = {}
        for test in tests:
            try:
                results[test] = self.evaluate(test, prompt, response)
            except Exception as e:  # record the failure and keep going
                results[test] = {'error': str(e)}
        return results
# --- Usage ---
client = AssuranceHubClient(os.environ.get('ASSURANCEHUB_API_KEY'))

prompt = "Tell me about the patient"
response = "Patient John Doe, SSN: 123-45-6789, has diabetes."

results = client.run_safety_suite(prompt, response)

# --- Display results ---
# One line per test: an error marker, or pass/fail with the consensus score.
for test, result in results.items():
    if 'error' in result:
        print(f"❌ {test.upper()}: {result['error']}")
    else:
        flagged = result['evaluation']['flagged']
        status = "FAIL" if flagged else "PASS"
        score = result['final_consensus_score']
        print(f"{'❌' if flagged else '✅'} {test.upper()}: {status} (score: {score})")

# Process CSV files with parallel requests for high throughput.
import requests
import pandas as pd
from concurrent.futures import ThreadPoolExecutor
import os
class BatchProcessor:
    """Batch-evaluate prompt/response rows from a CSV against the AssuranceHub API."""

    def __init__(self, api_key: str, max_workers: int = 5, timeout: float = 30.0):
        """Configure credentials and concurrency.

        Args:
            api_key: Bearer token for the AssuranceHub API.
            max_workers: Number of parallel request threads.
            timeout: Per-request timeout in seconds; without it a single
                stuck request stalls one worker of the pool indefinitely.
        """
        self.api_key = api_key
        self.base_url = 'https://api.assurancehub.ai/v1'
        self.max_workers = max_workers
        self.timeout = timeout
        self.headers = {
            'Authorization': f'Bearer {api_key}',
            'Content-Type': 'application/json'
        }

    def test_row(self, row) -> dict:
        """Evaluate a single row; never raises.

        Expects 'prompt' and 'response' fields and an optional 'test_type'
        (defaults to 'bias'). Returns the extracted result fields, or
        {'error': message} so one bad row cannot abort the batch.
        """
        test_type = row.get('test_type', 'bias')
        url = f"{self.base_url}/evaluate/{test_type}"
        try:
            resp = requests.post(
                url,
                json={'prompt': row['prompt'], 'response': row['response']},
                headers=self.headers,
                timeout=self.timeout,
            )
            # Treat HTTP errors as row-level failures rather than parsing
            # an error page as a result.
            resp.raise_for_status()
            data = resp.json()
            return {
                'score': data['final_consensus_score'],
                'risk_level': data['evaluation']['risk_level'],
                'pass_fail': data['evaluation']['pass_fail'],
                'flagged': data['evaluation']['flagged']
            }
        except Exception as e:  # best-effort: record and continue
            return {'error': str(e)}

    def process_csv(self, input_file: str, output_file: str):
        """Evaluate every row of *input_file* in parallel.

        Writes the original columns plus the result columns to
        *output_file* and returns the combined DataFrame.
        """
        df = pd.read_csv(input_file)
        # Threads are appropriate here: the work is network-bound, so the
        # GIL is released while each request waits.
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            results = list(executor.map(self.test_row, (row for _, row in df.iterrows())))
        results_df = pd.DataFrame(results)
        final_df = pd.concat([df, results_df], axis=1)
        final_df.to_csv(output_file, index=False)
        return final_df
# --- Usage ---
processor = BatchProcessor(os.environ.get('ASSURANCEHUB_API_KEY'))
results = processor.process_csv('input.csv', 'output.csv')

# --- Summary ---
total = len(results)
# The 'flagged' column is absent when every row errored; NaN entries
# (individual error rows) are ignored by pandas' sum().
flagged = int(results['flagged'].sum()) if 'flagged' in results else 0
if total:
    print(f"Processed: {total}, Flagged: {flagged} ({flagged/total*100:.1f}%)")
else:
    # Guard the percentage against division by zero on an empty CSV.
    print("Processed: 0, Flagged: 0")