Skip to content

LlamaIndex

LlamaIndex works with Glitch by pointing the OpenAI LLM client at the Glitch gateway (via `api_base` and the Glitch API key) before passing it to LlamaIndex. This secures all LLM calls made through LlamaIndex queries, retrievers, and agents.

import os
from llama_index.llms.openai import OpenAI

# Route all OpenAI traffic through the Glitch gateway: the Glitch API key
# authenticates the call, and api_base redirects it to the Glitch endpoint.
glitch_settings = {
    "model": "gpt-4",
    "api_key": os.environ["GLITCH_API_KEY"],
    "api_base": "https://api.golabrat.ai/v1",
}
llm = OpenAI(**glitch_settings)
import os
from llama_index.llms.openai import OpenAI
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

# Build a Glitch-secured LLM: the Glitch key plus api_base routes every
# completion request through the Glitch gateway.
llm = OpenAI(
    model="gpt-4",
    api_key=os.environ["GLITCH_API_KEY"],
    api_base="https://api.golabrat.ai/v1",
)

# Pass the secured LLM explicitly to the query engine; every LLM call the
# engine makes while answering goes through Glitch.
docs = SimpleDirectoryReader("data").load_data()
engine = VectorStoreIndex.from_documents(docs).as_query_engine(llm=llm)
print(engine.query("What is the main topic?"))
import os
from llama_index.llms.openai import OpenAI
from llama_index.core import Settings, VectorStoreIndex, Document

# Register the Glitch-secured LLM globally via Settings; anything in
# LlamaIndex that needs an LLM will pick it up automatically.
Settings.llm = OpenAI(
    model="gpt-4",
    api_key=os.environ["GLITCH_API_KEY"],
    api_base="https://api.golabrat.ai/v1",
)

# No per-engine llm argument needed — the global Settings.llm is used,
# so the query below is secured by Glitch.
docs = [Document(text="Your document content here")]
engine = VectorStoreIndex.from_documents(docs).as_query_engine()
print(engine.query("Your question here"))