跳转到主要内容

OpenAI 官方 SDK

pip install openai
from openai import OpenAI

# Point the official OpenAI SDK at the gateway instead of api.openai.com.
client = OpenAI(
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    base_url="https://crazyrouter.com/v1",
)

# Synchronous (blocking) chat-completion call.
response = client.chat.completions.create(
    model="gpt-5.4",
    temperature=0.7,
    max_tokens=1000,
    messages=[{"role": "user", "content": "你好"}],
)

# The reply text lives on the first choice's message.
print(response.choices[0].message.content)

异步调用

import asyncio

from openai import AsyncOpenAI

# Async variant of the client, pointed at the same gateway endpoint.
client = AsyncOpenAI(
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    base_url="https://crazyrouter.com/v1",
)


async def main():
    """Send a single chat message and print the model's reply."""
    reply = await client.chat.completions.create(
        model="gpt-5.4",
        messages=[{"role": "user", "content": "你好"}],
    )
    print(reply.choices[0].message.content)


# Drive the coroutine to completion on a fresh event loop.
asyncio.run(main())

LangChain 集成

pip install langchain-openai
from langchain_openai import ChatOpenAI

# LangChain chat model routed through the gateway; `base_url` overrides
# the default OpenAI endpoint.
llm = ChatOpenAI(
    model="gpt-5.4",
    temperature=0.7,
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    base_url="https://crazyrouter.com/v1",
)

# Single-shot invocation: a bare string becomes one user message.
answer = llm.invoke("用 Python 写一个快速排序")
print(answer.content)

LangChain 链式调用

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

llm = ChatOpenAI(
    model="gpt-5.4",
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    base_url="https://crazyrouter.com/v1",
)

# Prompt template with two placeholders, {role} and {input}, filled in
# by the dict passed to invoke().
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个{role}。"),
        ("user", "{input}"),
    ]
)

# LCEL pipe operator composes prompt formatting with the model call.
chain = prompt | llm
result = chain.invoke({"role": "Python 专家", "input": "解释装饰器"})
print(result.content)

LangChain Embeddings

from langchain_openai import OpenAIEmbeddings

# Embedding client routed through the same gateway endpoint.
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large",
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    base_url="https://crazyrouter.com/v1",
)

# embed_documents returns one vector per input string.
vectors = embeddings.embed_documents(["文本一", "文本二"])
print(f"向量维度: {len(vectors[0])}")

LlamaIndex 集成

pip install llama-index-llms-openai llama-index-embeddings-openai
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding

# LLM configuration — note LlamaIndex's keyword is `api_base`,
# not `base_url` as in the openai / langchain SDKs.
llm = OpenAI(
    model="gpt-5.4",
    api_key="sk-xxx",  # placeholder — substitute your real gateway key
    api_base="https://crazyrouter.com/v1",
)

# complete() performs a plain text completion; the reply is on `.text`.
response = llm.complete("什么是 RAG?")
print(response.text)

# Embedding configuration, pointed at the same gateway.
embed_model = OpenAIEmbedding(
    model="text-embedding-3-large",
    api_key="sk-xxx",
    api_base="https://crazyrouter.com/v1",
)

# get_text_embedding returns a single vector (list of floats).
vector = embed_model.get_text_embedding("测试文本")
print(f"维度: {len(vector)}")
以上聊天示例默认使用 2026 年 3 月 23 日已在生产环境实测成功的 gpt-5.4。如果你要替换模型,优先改成同样已验证的 claude-sonnet-4-6;Embedding 示例则继续使用 text-embedding-3-large。