# LangSmith tracing configuration.
# NOTE: placeholder values must be quoted — an unquoted `<your-api-key>`
# is parsed by the shell as redirection operators and is a syntax error.
export LANGCHAIN_TRACING_V2=true
export LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
export LANGCHAIN_API_KEY="<your-api-key>"
export LANGCHAIN_PROJECT="<your-project>" # if not specified, defaults to "default"
# The below examples use the OpenAI API, so you will also need an OpenAI API key.
export OPENAI_API_KEY="<your-openai-api-key>"
Run the LLM as shown in the sample code below.
# Minimal example: with tracing enabled above, this LLM call is logged
# to LangSmith automatically — no extra instrumentation needed.
from langchain_openai import ChatOpenAI

chat_model = ChatOpenAI()
chat_model.invoke("Hello, world!")
# Evaluate a chat model against a LangSmith dataset.
import langsmith
from langchain import chat_models, smith

# Replace with the chat model you want to test.
model_under_test = chat_models.ChatOpenAI(temperature=0)

# Evaluation configuration: grade answers with the built-in
# chain-of-thought QA evaluator, judged by a GPT-4 model.
evaluation_config = smith.RunEvalConfig(
    evaluators=["cot_qa"],
    custom_evaluators=[],
    eval_llm=chat_models.ChatOpenAI(model="gpt-4", temperature=0),
)

langsmith_client = langsmith.Client()
chain_results = langsmith_client.run_on_dataset(
    dataset_name="ds-flowery-switching-60",
    llm_or_chain_factory=model_under_test,
    evaluation=evaluation_config,
    project_name="test-virtual-savior-97",
    concurrency_level=5,
    verbose=True,
)