Python SDK

Use the official OpenAI Python SDK with Glitch by configuring the base URL and adding your Glitch API key.

import os
from openai import OpenAI

# Point to the Glitch sensor instead of OpenAI directly
client = OpenAI(
    api_key=os.environ["GLITCH_API_KEY"],   # Your Glitch API key
    base_url="https://api.golabrat.ai/v1",  # Glitch sensor URL
)

# Use exactly like the standard OpenAI SDK
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)

print(response.choices[0].message.content)
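
Alternatively, recent versions of the OpenAI SDK can pick up their configuration from environment variables, so an unmodified client can be pointed at Glitch without code changes. A minimal sketch, assuming an SDK version that honours OPENAI_API_KEY and OPENAI_BASE_URL:

# Assumes the environment is set up as:
#   export OPENAI_API_KEY="<your Glitch API key>"
#   export OPENAI_BASE_URL="https://api.golabrat.ai/v1"
from openai import OpenAI

client = OpenAI()  # reads the key and base URL from the environment

response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)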

Handle Glitch security blocks gracefully:

from openai import OpenAI, APIStatusError
import os

client = OpenAI(
    api_key=os.environ["GLITCH_API_KEY"],
    base_url="https://api.golabrat.ai/v1",
)

def safe_completion(user_input: str) -> str:
    """Return the model's reply, or a fallback message if Glitch blocks the request."""
    try:
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": user_input}],
        )
        return response.choices[0].message.content
    except APIStatusError as e:
        if e.status_code == 403:
            # Security block - handle gracefully
            return "I can't process that request."
        raise
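
With this wrapper, a request that trips the sensor degrades to the fallback message instead of raising (the safe_completion helper above is the illustrative wrapper, not part of the SDK):

answer = safe_completion("Summarize today's meeting notes.")
print(answer)  # model reply, or "I can't process that request." if blocked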

The OpenAI Python SDK doesn’t expose response headers on its standard response objects. To read Glitch's security metadata, call /v1/chat/completions directly with httpx:

import httpx
import os

response = httpx.post(
    "https://api.golabrat.ai/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['GLITCH_API_KEY']}",
        "Content-Type": "application/json",
    },
    json={
        "model": "gpt-4",
        "messages": [{"role": "user", "content": "Hello"}],
    },
)

# Access security headers
blocked = response.headers.get("X-Risk-Blocked", "false") == "true"
categories = response.headers.get("X-Risk-Categories", "")
confidence = float(response.headers.get("X-Risk-Confidence", "0"))

if categories:
    print(f"Flagged: {categories} (confidence: {confidence})")
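
If you want the model output and the security metadata in one place, a small wrapper can return them together. A minimal sketch; the GlitchResult dataclass and chat_with_metadata helper are illustrative, not part of any SDK, and it assumes a blocked request carries no usable choices in the body:

import os
from dataclasses import dataclass

import httpx


@dataclass
class GlitchResult:
    content: str
    blocked: bool
    categories: list[str]
    confidence: float


def chat_with_metadata(messages: list[dict], model: str = "gpt-4") -> GlitchResult:
    """Call the Glitch sensor directly and return the reply plus security headers."""
    response = httpx.post(
        "https://api.golabrat.ai/v1/chat/completions",
        headers={"Authorization": f"Bearer {os.environ['GLITCH_API_KEY']}"},
        json={"model": model, "messages": messages},
        timeout=60.0,
    )

    blocked = response.headers.get("X-Risk-Blocked", "false") == "true"
    categories = response.headers.get("X-Risk-Categories", "")
    confidence = float(response.headers.get("X-Risk-Confidence", "0"))

    content = ""
    if not blocked and response.status_code == 200:
        content = response.json()["choices"][0]["message"]["content"]

    return GlitchResult(
        content=content,
        blocked=blocked,
        categories=[c.strip() for c in categories.split(",") if c.strip()],
        confidence=confidence,
    )


# Usage
result = chat_with_metadata([{"role": "user", "content": "Hello"}])
if result.blocked:
    print(f"Blocked: {result.categories} (confidence: {result.confidence})")
else:
    print(result.content)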

Glitch fully supports streaming responses:

from openai import OpenAI
import os

client = OpenAI(
    api_key=os.environ["GLITCH_API_KEY"],
    base_url="https://api.golabrat.ai/v1",
)

stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Write a poem"}],
    stream=True,
)

for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
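
Streaming combines naturally with the error handling shown earlier. A minimal sketch that accumulates the deltas into a single string and falls back on a 403; the stream_completion helper is illustrative, and it assumes (as in the non-streaming example) that a security block surfaces as a 403 before any tokens are sent:

from openai import OpenAI, APIStatusError
import os

client = OpenAI(
    api_key=os.environ["GLITCH_API_KEY"],
    base_url="https://api.golabrat.ai/v1",
)


def stream_completion(prompt: str) -> str:
    """Stream a completion, printing tokens as they arrive, and return the full text."""
    try:
        stream = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            stream=True,
        )
        parts = []
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                print(delta, end="", flush=True)
                parts.append(delta)
        print()
        return "".join(parts)
    except APIStatusError as e:
        if e.status_code == 403:
            # Request blocked by the Glitch sensor
            return "I can't process that request."
        raise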