Documentation ¶
Overview ¶
Package omnillm provides Opik integration for the omnillm LLM wrapper library.
It provides two main features:
- Automatic tracing: wrap an omnillm.ChatClient so that every LLM call automatically creates a span with input, output, and usage tracking.
- Evaluation provider: use omnillm as the LLM provider for evaluation metrics that need LLM-as-judge capabilities.
Automatic Tracing ¶
Wrap your omnillm.ChatClient to automatically trace all LLM calls:
import (
    "context"
    "os"

    "github.com/agentplexus/omnillm"

    opik "github.com/agentplexus/go-opik"
    opikomnillm "github.com/agentplexus/go-opik/integrations/omnillm"
)

// Create the omnillm client.
client, _ := omnillm.NewClient(omnillm.ClientConfig{
    Provider: omnillm.ProviderNameOpenAI,
    APIKey:   os.Getenv("OPENAI_API_KEY"),
})

// Create the Opik client.
opikClient, _ := opik.NewClient()

// Wrap the client for tracing.
tracingClient := opikomnillm.NewTracingClient(client, opikClient)

// Start a trace; all wrapped calls are recorded under it.
ctx, trace, _ := opik.StartTrace(context.Background(), opikClient, "my-task")
defer trace.End(ctx)

// Each call automatically creates a span with input, output, and usage.
resp, _ := tracingClient.CreateChatCompletion(ctx, &omnillm.ChatCompletionRequest{
    Model:    "gpt-4o",
    Messages: []omnillm.Message{{Role: omnillm.RoleUser, Content: "Hello!"}},
})
Evaluation Provider ¶
Use omnillm as a provider for LLM-based evaluation metrics:
import (
    "os"

    "github.com/agentplexus/omnillm"

    "github.com/agentplexus/go-opik/evaluation/llm"
    opikomnillm "github.com/agentplexus/go-opik/integrations/omnillm"
)

// Create the omnillm client.
client, _ := omnillm.NewClient(omnillm.ClientConfig{
    Provider: omnillm.ProviderNameAnthropic,
    APIKey:   os.Getenv("ANTHROPIC_API_KEY"),
})

// Create the evaluation provider.
provider := opikomnillm.NewProvider(client,
    opikomnillm.WithModel("claude-3-opus-20240229"),
)

// Use the provider with LLM-as-judge evaluation metrics.
relevance := llm.NewAnswerRelevance(provider)
hallucination := llm.NewHallucination(provider)
Streaming Support ¶
The tracing client also supports streaming with automatic span capture:
stream, _ := tracingClient.CreateChatCompletionStream(ctx, req)
defer stream.Close()

for {
    chunk, err := stream.Recv()
    if err == io.EOF {
        break // stream finished normally
    }
    if err != nil {
        break // handle transport or provider errors
    }
    // Process chunk...
}

// The span is ended automatically, with the complete response
// attached, once the stream closes.
Index ¶
- type Option
- func WithMaxTokens
- func WithModel
- func WithTemperature
- type Provider
- func NewProvider(client *omnillm.ChatClient, opts ...Option) *Provider
- func (p *Provider) Complete(ctx context.Context, req llm.CompletionRequest) (*llm.CompletionResponse, error)
- func (p *Provider) DefaultModel
- type TracingClient
- func NewTracingClient(client *omnillm.ChatClient, opikClient *opik.Client, opts ...opik.SpanOption) *TracingClient
- func (t *TracingClient) Client() *omnillm.ChatClient
- func (t *TracingClient) Close() error
- func (t *TracingClient) CreateChatCompletion(ctx context.Context, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
- func (t *TracingClient) CreateChatCompletionStream(ctx context.Context, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
- func (t *TracingClient) CreateChatCompletionWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Option ¶
type Option func(*Provider)
Option configures the provider.
func WithMaxTokens ¶
WithMaxTokens sets the maximum number of tokens for completions.
func WithModel ¶
WithModel sets the default model used for completions.
func WithTemperature ¶
WithTemperature sets the sampling temperature for completions.
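A minimal sketch of combining options when constructing a provider. The argument types passed to WithMaxTokens and WithTemperature (int and float64) are assumptions, since those signatures are not documented above:
provider := opikomnillm.NewProvider(client,
    opikomnillm.WithModel("claude-3-opus-20240229"),
    opikomnillm.WithMaxTokens(1024),  // assumed int parameter
    opikomnillm.WithTemperature(0.0), // assumed float64 parameter
)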
type Provider ¶
type Provider struct {
// contains filtered or unexported fields
}
Provider implements llm.Provider using an omnillm.ChatClient.
func NewProvider ¶
func NewProvider(client *omnillm.ChatClient, opts ...Option) *Provider
NewProvider creates a new evaluation provider using omnillm.
func (*Provider) Complete ¶
func (p *Provider) Complete(ctx context.Context, req llm.CompletionRequest) (*llm.CompletionResponse, error)
Complete sends a chat completion request using omnillm.
func (*Provider) DefaultModel ¶
DefaultModel returns the configured default model.
type TracingClient ¶
type TracingClient struct {
// contains filtered or unexported fields
}
TracingClient wraps an omnillm.ChatClient with automatic Opik tracing.
func NewTracingClient ¶
func NewTracingClient(client *omnillm.ChatClient, opikClient *opik.Client, opts ...opik.SpanOption) *TracingClient
NewTracingClient creates a new tracing client wrapper.
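The variadic opts are opik.SpanOption values, presumably applied to each span the wrapper creates. A hypothetical sketch; opik.WithSpanTags is assumed here for illustration and may not exist under that name in go-opik:
// Hypothetical option; substitute a real opik.SpanOption
// constructor from the go-opik package.
tracingClient := opikomnillm.NewTracingClient(client, opikClient,
    opik.WithSpanTags("omnillm", "chat"),
)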
func (*TracingClient) Client ¶
func (t *TracingClient) Client() *omnillm.ChatClient
Client returns the underlying omnillm.ChatClient.
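A minimal sketch of using this as an escape hatch, assuming the underlying omnillm.ChatClient exposes the same CreateChatCompletion method as the wrapper; calls made this way are not traced:
// Bypass tracing for a one-off call (assumes omnillm.ChatClient
// has a CreateChatCompletion method mirroring the wrapper's).
raw := tracingClient.Client()
resp, err := raw.CreateChatCompletion(ctx, req)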
func (*TracingClient) Close ¶
func (t *TracingClient) Close() error
Close closes the underlying client.
func (*TracingClient) CreateChatCompletion ¶
func (t *TracingClient) CreateChatCompletion(ctx context.Context, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
CreateChatCompletion creates a chat completion with automatic tracing.
func (*TracingClient) CreateChatCompletionStream ¶
func (t *TracingClient) CreateChatCompletionStream(ctx context.Context, req *provider.ChatCompletionRequest) (provider.ChatCompletionStream, error)
CreateChatCompletionStream creates a streaming chat completion with automatic tracing.
func (*TracingClient) CreateChatCompletionWithMemory ¶
func (t *TracingClient) CreateChatCompletionWithMemory(ctx context.Context, sessionID string, req *provider.ChatCompletionRequest) (*provider.ChatCompletionResponse, error)
CreateChatCompletionWithMemory creates a chat completion using conversation memory with tracing.
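A minimal sketch of a memory-backed exchange, assuming omnillm maintains the conversation history for each session ID (the "user-42" value is illustrative):
// Both calls share the session ID, so omnillm can supply the first
// exchange as context for the second; each call is traced as usual.
resp1, _ := tracingClient.CreateChatCompletionWithMemory(ctx, "user-42", &omnillm.ChatCompletionRequest{
    Model:    "gpt-4o",
    Messages: []omnillm.Message{{Role: omnillm.RoleUser, Content: "My name is Ada."}},
})
resp2, _ := tracingClient.CreateChatCompletionWithMemory(ctx, "user-42", &omnillm.ChatCompletionRequest{
    Model:    "gpt-4o",
    Messages: []omnillm.Message{{Role: omnillm.RoleUser, Content: "What is my name?"}},
})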