Documentation ¶
Index ¶
- Variables
- type LLM
- func New(opts ...Option) (*LLM, error)
- func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)
- func (o *LLM) CreateEmbedding(ctx context.Context, inputTexts []string, model string, task string) ([][]float32, error)
- func (o *LLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, ...) (*llms.ContentResponse, error)
- type Option
Examples ¶
Constants ¶
This section is empty.
Variables ¶
var (
	ErrEmptyResponse            = errors.New("empty response")
	ErrMissingToken             = errors.New("missing the Hugging Face API token. Set it in the HF_TOKEN or HUGGINGFACEHUB_API_TOKEN environment variable, or save it to ~/.cache/huggingface/token") //nolint:lll
	ErrUnexpectedResponseLength = errors.New("unexpected length of response")
)
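These sentinel errors can be tested with errors.Is. A minimal sketch, assuming New returns ErrMissingToken when no token can be found in the environment or token file:

package main

import (
	"errors"
	"fmt"

	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	// Assumption: with no token configured, New surfaces ErrMissingToken.
	_, err := huggingface.New()
	if errors.Is(err, huggingface.ErrMissingToken) {
		fmt.Println("set HF_TOKEN or HUGGINGFACEHUB_API_TOKEN before creating the client")
	}
}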
Functions ¶
This section is empty.
Types ¶
type LLM ¶
func New ¶
func New(opts ...Option) (*LLM, error)
Example (StandardInference) ¶
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	// Create a new HuggingFace LLM with the standard inference API.
	llm, err := huggingface.New(
		huggingface.WithModel("HuggingFaceH4/zephyr-7b-beta"),
		// Token will be read from the HF_TOKEN or HUGGINGFACEHUB_API_TOKEN environment variable.
	)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Use the LLM.
	result, err := llm.Call(ctx, "Hello, how are you?",
		llms.WithTemperature(0.5),
		llms.WithMaxLength(50),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)
}
Example (WithInferenceProvider) ¶
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	// Create a new HuggingFace LLM with an inference provider.
	llm, err := huggingface.New(
		huggingface.WithModel("deepseek-ai/DeepSeek-R1-0528"),
		huggingface.WithInferenceProvider("hyperbolic"),
		// Token will be read from the HF_TOKEN or HUGGINGFACEHUB_API_TOKEN environment variable.
	)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Use the LLM.
	result, err := llm.Call(ctx, "What is the capital of France?",
		llms.WithTemperature(0.5),
		llms.WithMaxLength(50),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(result)
}
func (*LLM) CreateEmbedding ¶
func (o *LLM) CreateEmbedding(
	ctx context.Context,
	inputTexts []string,
	model string,
	task string,
) ([][]float32, error)
CreateEmbedding creates embeddings for the given input texts.
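A minimal usage sketch; the model name and task string below are illustrative assumptions, not package defaults:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	llm, err := huggingface.New()
	if err != nil {
		log.Fatal(err)
	}

	// Embed two sentences. The model and "feature-extraction" task are
	// example values; substitute any embedding model you have access to.
	vecs, err := llm.CreateEmbedding(context.Background(),
		[]string{"hello world", "goodbye world"},
		"sentence-transformers/all-MiniLM-L6-v2",
		"feature-extraction",
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("got %d embeddings of dimension %d\n", len(vecs), len(vecs[0]))
}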
func (*LLM) GenerateContent ¶
func (o *LLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error)
GenerateContent implements the Model interface.
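A short sketch of calling GenerateContent directly with chat-style messages; llms.TextParts and llms.ChatMessageTypeHuman come from the llms package:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	llm, err := huggingface.New(
		huggingface.WithModel("HuggingFaceH4/zephyr-7b-beta"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Build a single human message and generate a response.
	messages := []llms.MessageContent{
		llms.TextParts(llms.ChatMessageTypeHuman, "Name three Go web frameworks."),
	}
	resp, err := llm.GenerateContent(context.Background(), messages,
		llms.WithTemperature(0.7),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Choices[0].Content)
}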
type Option ¶
type Option func(*options)
func WithHTTPClient ¶
WithHTTPClient passes a custom HTTP client to the HuggingFace client.
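A sketch of supplying a client with a custom timeout; this assumes WithHTTPClient accepts a standard *http.Client (check the signature in your version of the package):

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/tmc/langchaingo/llms/huggingface"
)

func main() {
	// Assumption: WithHTTPClient takes a *http.Client or an interface it satisfies.
	httpClient := &http.Client{Timeout: 30 * time.Second}

	llm, err := huggingface.New(
		huggingface.WithHTTPClient(httpClient),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = llm
}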
func WithInferenceProvider ¶
WithInferenceProvider passes the inference provider to use with HuggingFace's router. When set, the client will use the router URL (https://router.huggingface.co/{provider}/v1/...) instead of the default inference API. Common providers include "hyperbolic", "nebius", etc.
func WithModel ¶
WithModel passes the HuggingFace model to the client. If not set, a default model is used.