LangGraph

LangGraph exposes agent workflows as graphs, making execution paths explicit—and therefore inspectable. Wrap the LLM client used by each node to gain visibility into the full execution path, including state propagation and tool calls.

import os

from langchain_openai import ChatOpenAI

# Create a LangChain ChatOpenAI client routed through Glitch
llm = ChatOpenAI(
    model="gpt-4",
    api_key=os.environ["GLITCH_API_KEY"],
    base_url="https://api.golabrat.ai/v1",
)
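
Once configured, every request the client makes is routed through the Glitch endpoint. A quick sanity check (a minimal sketch, assuming GLITCH_API_KEY is set in your environment):

# Send a one-off message to confirm requests route through the proxy
response = llm.invoke("Hello!")
print(response.content)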
import os
from typing import Annotated, TypedDict

from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode

# Configure LangChain with Glitch
llm = ChatOpenAI(
    model="gpt-4",
    api_key=os.environ["GLITCH_API_KEY"],
    base_url="https://api.golabrat.ai/v1",
)

# Bind tools to the LLM
tools = []  # your tools
llm_with_tools = llm.bind_tools(tools)

# Define graph state: add_messages appends new messages to the list
class State(TypedDict):
    messages: Annotated[list, add_messages]

# A node is a callable that receives the state and returns a state update
def agent(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}

# Build the graph
workflow = StateGraph(State)
workflow.add_node("agent", agent)
workflow.add_node("tools", ToolNode(tools))
workflow.add_edge(START, "agent")
workflow.add_edge("agent", "tools")
workflow.add_edge("tools", END)
app = workflow.compile()

# Run the graph - all LLM calls are secured
result = app.invoke({
    "messages": [{"role": "user", "content": "What's the weather in Paris?"}]
})
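
In practice the agent usually loops: after each tool call the result goes back to the model, and the graph only ends once the model stops requesting tools. A minimal sketch of that pattern, building on the llm, State, and agent defined above (the get_weather tool and its return value are placeholders, not part of the Glitch API):

from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode, tools_condition

@tool
def get_weather(city: str) -> str:
    """Return the current weather for a city."""
    return f"It is sunny in {city}."  # stub implementation

tools = [get_weather]
llm_with_tools = llm.bind_tools(tools)

workflow = StateGraph(State)
workflow.add_node("agent", agent)
workflow.add_node("tools", ToolNode(tools))
workflow.add_edge(START, "agent")
# tools_condition routes to "tools" when the model requested a tool call,
# otherwise to END
workflow.add_conditional_edges("agent", tools_condition)
workflow.add_edge("tools", "agent")
app = workflow.compile()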

Handle security blocks raised from LLM calls inside your graph:

from langchain_core.exceptions import LangChainException

async def run_graph(user_input: str):
    try:
        return await app.ainvoke(
            {"messages": [{"role": "user", "content": user_input}]}
        )
    except LangChainException as e:
        # Check if it's a 403 security block
        if "403" in str(e) or "security" in str(e).lower():
            # Handle security block
            return {"messages": [{"role": "assistant", "content": "I can't process that request."}]}
        raise
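
To keep the graph running after a block instead of surfacing an exception, you can also catch it inside the node itself. A minimal sketch, assuming the blocked call is reported through the same LangChainException and that a refusal message is an acceptable fallback for your workflow:

from langchain_core.exceptions import LangChainException
from langchain_core.messages import AIMessage

def agent(state: State):
    try:
        return {"messages": [llm_with_tools.invoke(state["messages"])]}
    except LangChainException as e:
        if "403" in str(e) or "security" in str(e).lower():
            # Replace the blocked response with a refusal so downstream nodes still run
            return {"messages": [AIMessage(content="I can't process that request.")]}
        raise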