Documentation
¶
Overview ¶
AskSage is an experimental LLM provider. Upstream has no support for streaming or tool calling, so some features may be limited.
Index ¶
- Constants
- type Client
- func (c *Client) FollowUpQuestions(params FollowUpParams) (*CompletionResponse, error)
- func (c *Client) GetDatasets() ([]Dataset, error)
- func (c *Client) GetPersonas() ([]Persona, error)
- func (c *Client) Login(params GetTokenParams) error
- func (c *Client) Query(params QueryParams) (*CompletionResponse, error)
- type CompletionResponse
- type Dataset
- type FollowUpParams
- type GetTokenParams
- type Message
- type Persona
- type Provider
- func (s *Provider) ChatCompletion(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (*llm.TextStreamResult, error)
- func (s *Provider) ChatCompletionNoStream(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (string, error)
- func (s *Provider) CountTokens(text string) int
- func (s *Provider) GetDefaultConfig() llm.LanguageModelConfig
- func (s *Provider) InputTokenLimit() int
- type QueryParams
- type TokenizerParams
Constants ¶
View Source
const ( ServerBaseURL = "https://server-nginx.asksage.ai" AuthBaseURL = "https://user-server-cac-gov.asksage.ai" RoleUser = "me" RoleGPT = "gpt" )
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Client ¶
func (*Client) FollowUpQuestions ¶
func (c *Client) FollowUpQuestions(params FollowUpParams) (*CompletionResponse, error)
func (*Client) GetDatasets ¶
func (*Client) GetPersonas ¶
func (*Client) Login ¶
func (c *Client) Login(params GetTokenParams) error
func (*Client) Query ¶
func (c *Client) Query(params QueryParams) (*CompletionResponse, error)
type CompletionResponse ¶
type FollowUpParams ¶
type FollowUpParams struct {
Message string `json:"message"`
}
type GetTokenParams ¶
type Provider ¶
type Provider struct {
// contains filtered or unexported fields
}
func (*Provider) ChatCompletion ¶
func (s *Provider) ChatCompletion(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (*llm.TextStreamResult, error)
func (*Provider) ChatCompletionNoStream ¶
func (s *Provider) ChatCompletionNoStream(request llm.CompletionRequest, opts ...llm.LanguageModelOption) (string, error)
func (*Provider) CountTokens ¶
TODO: Implement actual token counting. For now, counts are estimated based on OpenAI's estimation heuristics.
func (*Provider) GetDefaultConfig ¶
func (s *Provider) GetDefaultConfig() llm.LanguageModelConfig
func (*Provider) InputTokenLimit ¶
TODO: Figure out what the actual token limit is. For now, use a conservative value.
type QueryParams ¶
type QueryParams struct {
Message []Message `json:"message"`
Persona string `json:"persona,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
Dataset string `json:"dataset,omitempty"`
LimitReferences int `json:"limit_references,omitempty"`
Temperature float64 `json:"temperature,omitempty"`
Live int `json:"live,omitempty"`
Model string `json:"model,omitempty"`
}
type TokenizerParams ¶
Click to show internal directories.
Click to hide internal directories.