asage

package
v1.2.4 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jul 3, 2025 License: Apache-2.0 Imports: 8 Imported by: 0

Documentation

Overview

Asage is an experimental LLM provider for the Ask Sage service. The upstream API has no support for streaming or tool calling, so some features may be limited.

Index

Constants

View Source
// Ask Sage service endpoints and chat role identifiers.
const (
	ServerBaseURL = "https://server-nginx.asksage.ai" // base URL of the main API server
	// NOTE(review): host name suggests this is the CAC/government user
	// server used for authentication (token issuance) — confirm upstream.
	AuthBaseURL   = "https://user-server-cac-gov.asksage.ai"
	RoleUser      = "me"  // presumably the role string Ask Sage uses for user-authored messages — verify against Message.User
	RoleGPT       = "gpt" // presumably the role string for model responses — verify against Message.User
)

Variables

This section is empty.

Functions

This section is empty.

Types

type Client

// Client is an HTTP client for the Ask Sage API. Construct it with
// NewClient; Login populates AuthToken from email/password credentials.
type Client struct {
	AuthToken  string       // auth token sent to the API; presumably set by Login — confirm
	HTTPClient *http.Client // underlying HTTP client used for all requests
}

func NewClient

func NewClient(authToken string, httpClient *http.Client) *Client

func (*Client) FollowUpQuestions

func (c *Client) FollowUpQuestions(params FollowUpParams) (*CompletionResponse, error)

func (*Client) GetDatasets

func (c *Client) GetDatasets() ([]Dataset, error)

func (*Client) GetPersonas

func (c *Client) GetPersonas() ([]Persona, error)

func (*Client) Login

func (c *Client) Login(params GetTokenParams) error

func (*Client) Query

func (c *Client) Query(params QueryParams) (*CompletionResponse, error)

type CompletionResponse

// CompletionResponse is the response payload returned by Query and
// FollowUpQuestions.
type CompletionResponse struct {
	Response   string `json:"response"`   // model-generated text
	Message    string `json:"message"`    // NOTE(review): unclear how this differs from Response — possibly a status/error message; confirm against API docs
	References string `json:"references"` // source references accompanying the response
}

type Dataset

type Dataset string

type FollowUpParams

// FollowUpParams is the request payload for Client.FollowUpQuestions.
type FollowUpParams struct {
	Message string `json:"message"` // message to generate follow-up questions for
}

type GetTokenParams

// GetTokenParams carries the credentials used by Client.Login to obtain
// an auth token.
type GetTokenParams struct {
	Email    string `json:"email"`    // account email address
	Password string `json:"password"` // account password
}

type Message

// Message is a single turn in a conversation, as sent in QueryParams.
type Message struct {
	User    string `json:"user"`    // speaker role; presumably RoleUser ("me") or RoleGPT ("gpt") — confirm
	Message string `json:"message"` // message text for this turn
}

type Persona

// Persona describes an Ask Sage persona, as returned by Client.GetPersonas.
type Persona struct {
	ID    int    `json:"id"`    // numeric persona identifier
	Name  string `json:"name"`  // persona name
	Label string `json:"label"` // NOTE(review): display label, presumably human-readable variant of Name — confirm
}

type Provider

// Provider implements the llm provider interface on top of the Ask Sage
// API. Construct it with New; its fields are unexported.
type Provider struct {
	// contains filtered or unexported fields
}

func New

func New(llmService llm.ServiceConfig, httpClient *http.Client) *Provider

func (*Provider) ChatCompletion

func (s *Provider) ChatCompletion(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (*llm.TextStreamResult, error)

func (*Provider) ChatCompletionNoStream

func (s *Provider) ChatCompletionNoStream(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (string, error)

func (*Provider) CountTokens

func (s *Provider) CountTokens(text string) int

TODO: Implement actual token counting. For now, the count is estimated using OpenAI's estimation heuristics.

func (*Provider) GetDefaultConfig

func (s *Provider) GetDefaultConfig() llm.LanguageModelConfig

func (*Provider) InputTokenLimit

func (s *Provider) InputTokenLimit() int

TODO: Determine the actual input token limit. For now, a conservative value is returned.

type QueryParams

// QueryParams is the request payload for Client.Query. Fields tagged
// omitempty are optional and omitted from the JSON body when zero-valued.
type QueryParams struct {
	Message         []Message `json:"message"`                    // conversation history; note the singular JSON key despite being a slice
	Persona         string    `json:"persona,omitempty"`          // persona name to respond as (see GetPersonas)
	SystemPrompt    string    `json:"system_prompt,omitempty"`    // system prompt prepended to the conversation
	Dataset         string    `json:"dataset,omitempty"`          // dataset to query against (see GetDatasets)
	LimitReferences int       `json:"limit_references,omitempty"` // maximum number of references to return
	Temperature     float64   `json:"temperature,omitempty"`      // sampling temperature
	Live            int       `json:"live,omitempty"`             // NOTE(review): semantics unclear — possibly enables live/web data; confirm against API docs
	Model           string    `json:"model,omitempty"`            // model identifier to use
}

type TokenizerParams

// TokenizerParams is a request payload for token counting.
// NOTE(review): no tokenizer endpoint is visible in this package's index —
// confirm which API call consumes this type.
type TokenizerParams struct {
	Content string `json:"content"`         // text to tokenize
	Model   string `json:"model,omitempty"` // optional model whose tokenizer should be used
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL