client

package
v0.0.0-...-7951b7c
Published: Sep 30, 2025 License: BSD-3-Clause Imports: 21 Imported by: 0

Documentation

Constants

const (
	DefaultOperationQueueSize = 100 // Default size of the operation queue
	DefaultWorkerCount        = 5   // Default number of worker goroutines
)
const (
	YamlKeyBot             = "K-CLI"
	DefaultProvider        = "OpenAI"
	DefaultBaseURL         = "https://openrouter.ai/api"
	DefaultCustomAPIPath   = "/v1/chat/completions"
	DefaultAPIKey          = ""
	DefaultModel           = "deepseek/deepseek-chat-v3.1:free"
	DefaultStorageType     = "file"
	DefaultMCPSvrPath      = "~/.config/k-cli/mcp_servers.jsonl"
	DefaultPromptPath      = "~/.config/k-cli/prompts.jsonl"
	DefaultMaxTurns        = 10
	DefaultMaxTokens       = 32768
	DefaultReasoningEffort = "medium"
)
const (
	RoleUser      = "user"
	RoleAssistant = "assistant"
	RoleSystem    = "system"
	RoleTool      = "tool"

	ProviderOpenAI = "OpenAI"
	ProviderOllama = "Ollama"
	ProviderTaiji  = "Taiji"

	DefaultChatMessageSize = DefaultMaxTurns
)
const (
	DefaultMCPServerConfigName    = "todo"
	DefaultMCPServerConfigType    = "stdio"
	DefaultMCPServerConfigCommand = "uvx"
)
const (
	MCPClientName = "kMCPClient"
	MCPClientVer  = "v1.0.0"

	ServerTypeStdio          = "stdio"
	ServerTypeSSE            = "sse"
	ServerTypeStreamableHTTP = "streamableHttp"
)
const (
	DefaultPromptName             = "default"
	DefaultMCPPromptName          = "mcp"
	DefaultDeepResearchPromptName = "deep-research"
)
const (
	ModelClaude3     = "claude-3"
	ModelDeepSeekR1  = "deepseek-r1"
	ModelDeepSeekV31 = "DeepSeek-V3_1"

	DefaultTimeout         = 60 * time.Second
	DefaultStreamChunkSize = 16 // default stream chunk size
)
const DeepResearchPrompt = `` /* 1027-byte string literal not displayed */
const (
	DefaultContentType = "text"
)

Variables

var (
	// DefaultCfgPath is the default configuration file path
	DefaultCfgPath = filepath.Join(".", "config", "client.yaml")

	// DefaultMCPServerConfig is the default MCP server configuration
	DefaultMCPServerConfig = []string{"todo"}
)
var DefaultMCPServerConfigArgs = []string{"mcp-todo"}
var MCP = `` /* 1050-byte string literal not displayed */
var MCPPrompt = `
====

TOOL USE

You have access to a set of tools that are executed upon the user's approval. 
You can use one tool per message, and will receive the result of that tool use in the user's response. 
You use tools step-by-step to accomplish a given task, 
with each tool use informed by the result of the previous tool use.

# Tool Use Formatting

Tool use is formatted using XML-style tags. 
The tool name is enclosed in opening and closing tags, 
and each parameter is similarly enclosed within its own set of tags. Here's the structure:

<tool_name>
<parameter1_name>value1</parameter1_name>
<parameter2_name>value2</parameter2_name>
...
</tool_name>

For example:

<read_file>
<path>src/main.js</path>
</read_file>

Always adhere to this format for the tool use to ensure proper parsing and execution.

# Tools
## use_mcp_tool
Description: Request to use a tool provided by a connected MCP server. 
Each MCP server can provide multiple tools with different capabilities. 
Tools have defined input schemas that specify required and optional parameters.
Parameters:
- server_name: (required) The name of the MCP server providing the tool
- tool_name: (required) The name of the tool to execute
- arguments: (required) A JSON object containing the tool's input parameters, following the tool's input schema
Usage:
<use_mcp_tool>
<server_name>server name here</server_name>
<tool_name>tool name here</tool_name>
<arguments>
{
  "param1": "value1",
  "param2": "value2"
}
</arguments>
</use_mcp_tool>

## access_mcp_resource
Description: Request to access a resource provided by a connected MCP server. 
Resources represent data sources that can be used as context, such as files, API responses, or system information.
Parameters:
- server_name: (required) The name of the MCP server providing the resource
- uri: (required) The URI identifying the specific resource to access
Usage:
<access_mcp_resource>
<server_name>server name here</server_name>
<uri>resource URI here</uri>
</access_mcp_resource>

# Tool Use Examples
## Example 1: Requesting to use an MCP tool

<use_mcp_tool>
<server_name>weather-server</server_name>
<tool_name>get_forecast</tool_name>
<arguments>
{
  "city": "San Francisco",
  "days": 5
}
</arguments>
</use_mcp_tool>

## Example 2: Requesting to access an MCP resource

<access_mcp_resource>
<server_name>weather-server</server_name>
<uri>weather://san-francisco/current</uri>
</access_mcp_resource>

# Tool Use Guidelines

1. In <thinking> tags, assess what information you already have and what information you need to proceed with the task.
2. Choose the most appropriate tool based on the task and the tool descriptions provided. 
Assess if you need additional information to proceed, and which of the available tools would be most effective for 
gathering this information. For example using the list_files tool is more effective than running a command like` +
	" `ls` " + `in the terminal. 
It's critical that you think about each available tool and use the one that best fits the current step in the task.
3. If multiple actions are needed, use one tool at a time per message to accomplish the task iteratively, 
with each tool use being informed by the result of the previous tool use. Do not assume the outcome of any tool use. 
Each step must be informed by the previous step's result.
4. Formulate your tool use using the XML format specified for each tool.
5. After each tool use, the user will respond with the result of that tool use. 
This result will provide you with the necessary information to continue your task or make further decisions. 
This response may include:
  - Information about whether the tool succeeded or failed, along with any reasons for failure.
  - Linter errors that may have arisen due to the changes you made, which you'll need to address.
  - New terminal output in reaction to the changes, which you may need to consider or act upon.
  - Any other relevant feedback or information related to the tool use.
6. ALWAYS wait for user confirmation after each tool use before proceeding. 
Never assume the success of a tool use without explicit confirmation of the result from the user.

It is crucial to proceed step-by-step, 
waiting for the user's message after each tool use before moving forward with the task. This approach allows you to:
1. Confirm the success of each step before proceeding.
2. Address any issues or errors that arise immediately.
3. Adapt your approach based on new information or unexpected results.
4. Ensure that each action builds correctly on the previous ones.

By waiting for and carefully considering the user's response after each tool use, 
you can react accordingly and make informed decisions about how to proceed with the task. 
This iterative process helps ensure the overall success and accuracy of your work.

# Tool Use Is Not Always Necessary

While tools are a powerful way to interact with the system and perform tasks, they are not always required. 
You can still perform a wide range of tasks without using tools. 
However, when you need to access specific resources, perform complex operations, or interact with external systems,
tools provide a structured and efficient way to accomplish these tasks.

====

MCP SERVERS

The Model Context Protocol (MCP) enables communication between the system and locally running MCP servers 
that provide additional tools and resources to extend your capabilities.

# Connected MCP Servers

When a server is connected, you can use the server's tools via the` +
	" `use_mcp_tool` " + `tool, and access the server's resources via the ` + " `access_mcp_resource` " + ` tool.

# MCP Servers Are Not Always Necessary

While MCP servers can provide additional tools and resources, they are not always required. 
You can still perform a wide range of tasks without connecting to an MCP server. 
However, when you need additional capabilities or access to specific resources, 
connecting to an MCP server can greatly enhance your functionality.

`
var TimePrompt = func() string { return "Current Time: " + time.Now().Format("2006-01-02 15:04:05") }()
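
Note that TimePrompt is evaluated once, at package initialization (the function literal is invoked immediately), so it captures the process start time rather than the time of each message.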
var (
	ToolTags = []string{"use_mcp_tool", "access_mcp_resource"}
)

Functions

func EnsureFileExistsSync

func EnsureFileExistsSync(file string) error

EnsureFileExistsSync ensures that the data file exists, creating it if necessary

func ExpandUser

func ExpandUser(path string) (string, error)

ExpandUser expands the ~ in the beginning of a file path to the user's home directory
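
For example, a minimal sketch resolving the default prompt path (error handling abbreviated):

	path, err := ExpandUser(DefaultPromptPath)
	if err != nil {
		return err // the user's home directory could not be determined
	}
	fmt.Println(path) // e.g. /home/alice/.config/k-cli/prompts.jsonl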

func GenerateChatID

func GenerateChatID() string

GenerateChatID generates a unique 6-character ID: it generates a UUID and takes the first 6 characters of its hex representation

func GetISO8601Timestamp

func GetISO8601Timestamp() time.Time

GetISO8601Timestamp returns current timestamp in ISO8601 format with timezone offset

func GetUnixTimestamp

func GetUnixTimestamp() int64

GetUnixTimestamp returns current time as 13-digit unix timestamp (milliseconds)

func ToJSON

func ToJSON(message *Message) (string, error)

ToJSON serializes a message to a JSON string

Types

type BaseProvider

type BaseProvider struct {
	log.Logger

	Client *http.Client
}

func (*BaseProvider) CallStreamableChatCompletions

func (p *BaseProvider) CallStreamableChatCompletions(
	provider string,
	reasoningEffort string,
	messages []*Message,
	prompt *string,
	BuildRequest func(
		context.Context,
		chan StreamChunk,
		[]*Message,
		*string,
	) (*http.Request, error),
) *Message

func (*BaseProvider) DoCallStreamableChatCompletions

func (p *BaseProvider) DoCallStreamableChatCompletions(
	messages []*Message, systemPrompt *string,
	BuildRequest func(
		context.Context,
		chan StreamChunk,
		[]*Message,
		*string,
	) (*http.Request, error),
) <-chan StreamChunk

func (*BaseProvider) GenerateCurlCommand

func (*BaseProvider) GenerateCurlCommand(
	req *http.Request,
	bodyBytes []byte,
) (string, error)

GenerateCurlCommand returns a string that can be executed to make the request. ⚠️ Note: because this function does not read req.Body, the request body can still be read normally by client.Do.

func (*BaseProvider) HandleStreamableChat

func (p *BaseProvider) HandleStreamableChat(streamCh <-chan StreamChunk) LLMStreamRet

func (*BaseProvider) PrepareMessagesForCompletion

func (p *BaseProvider) PrepareMessagesForCompletion(
	model string, messages []*Message, systemPrompt *string,
) []*Message

func (*BaseProvider) ProcessStreamableResponse

func (p *BaseProvider) ProcessStreamableResponse(
	ctx context.Context,
	resp *http.Response,
	respChan chan StreamChunk,
)

type Chat

type Chat struct {
	ID         string    `json:"id"`
	CreateTime time.Time `json:"create_time"`
	UpdateTime time.Time `json:"update_time"`
	Messages   []*Message
}

func (*Chat) UpdateMessages

func (c *Chat) UpdateMessages(messages []*Message)

UpdateMessages filters out system messages and sorts the remaining ones by timestamp

type ChatRepo

type ChatRepo interface {
	ListChatsAsync(
		ctx context.Context,
		keyword, model, provider *string,
		limit int,
	) <-chan OpResp
	GetChatAsync(ctx context.Context, chatID string) <-chan OpResp
	AddChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
	UpdateChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
	DeleteChatAsync(ctx context.Context, chatID string) <-chan OpResp

	// Sync versions for convenience
	ListChats(ctx context.Context, keyword, model, provider *string, limit int) ([]*Chat, error)
	Chat(ctx context.Context, chatID string) (*Chat, error)
	AddChat(ctx context.Context, chat *Chat) (*Chat, error)
	UpdateChat(ctx context.Context, chat *Chat) (*Chat, error)
	DeleteChat(ctx context.Context, chatID string) (bool, error)

	Close() error
}

ChatRepo (Chat Repository) defines the interface for chat repository operations
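
A minimal usage sketch of the synchronous convenience methods, where repo is any ChatRepo implementation (e.g. *FileRepo) and ctx is a context.Context (error handling abbreviated):

	defer repo.Close()
	chats, err := repo.ListChats(ctx, nil, nil, nil, 10) // no keyword/model/provider filter, at most 10 chats
	if err != nil {
		return err
	}
	for _, c := range chats {
		fmt.Println(c.ID, c.UpdateTime, len(c.Messages))
	}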

type ChatSvr

type ChatSvr struct {
	log.Logger
	// contains filtered or unexported fields
}

func NewChatSvr

func NewChatSvr(repo ChatRepo, logger log.Logger) *ChatSvr

func (*ChatSvr) Chat

func (svr *ChatSvr) Chat(ctx context.Context, chatID string) (*Chat, error)

Chat returns a specific chat by ID

func (*ChatSvr) CreateChat

func (svr *ChatSvr) CreateChat(
	ctx context.Context,
	messages []*Message,
	chatID string,
) (*Chat, error)

CreateChat creates a new chat with messages and optional external ID

func (*ChatSvr) DeleteChat

func (svr *ChatSvr) DeleteChat(ctx context.Context, chatID string) (bool, error)

DeleteChat deletes a chat by ID

func (*ChatSvr) GenerateShareHTML

func (svr *ChatSvr) GenerateShareHTML(ctx context.Context, chatID string) (string, error)

TODO: Implement

func (*ChatSvr) ListChats

func (svr *ChatSvr) ListChats(
	ctx context.Context,
	keyword, model, provider *string,
	limit int,
) ([]*Chat, error)

ListChats returns a list of chats filtered by the given criteria, sorted by creation time descending

func (*ChatSvr) UpdateChat

func (svr *ChatSvr) UpdateChat(
	ctx context.Context,
	chatID string,
	messages []*Message,
) (*Chat, error)

UpdateChat updates an existing chat's messages

type Config

type Config struct {

	// Model Provider
	Provider      string `mapstructure:"provider"`
	BaseURL       string `mapstructure:"base_url"`
	CustomAPIPath string `mapstructure:"custom_api_path"`
	Model         string `mapstructure:"model"`
	APIKey        string `mapstructure:"api_key"`

	StorageType string `mapstructure:"storage_type,omitempty"`
	// MCP
	MCPSvrPath string `mapstructure:"mcp_server_path"` // Path to the MCP Server config file
	// Prompt
	PromptPath string `mapstructure:"prompt_path"` // Path to the prompt config file

	MaxTurns        uint   `mapstructure:"max_turns"`        // Maximum number of MCP Server calls
	MaxTokens       uint64 `mapstructure:"max_tokens"`       // Maximum number of tokens
	ReasoningEffort string `mapstructure:"reasoning_effort"` // Reasoning effort => high | medium | low | minimal
	Stream          bool   `mapstructure:"stream"`           // Whether to use streaming output
	// contains filtered or unexported fields
}

func NewConfigFromFile

func NewConfigFromFile(configPath string, logger log.Logger) (*Config, error)
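
For reference, a hypothetical client.yaml sketched from the mapstructure tags and default constants above (the keys come from the struct tags; the stream value is illustrative, as no default is shown):

	provider: "OpenAI"
	base_url: "https://openrouter.ai/api"
	custom_api_path: "/v1/chat/completions"
	api_key: ""
	model: "deepseek/deepseek-chat-v3.1:free"
	storage_type: "file"
	mcp_server_path: "~/.config/k-cli/mcp_servers.jsonl"
	prompt_path: "~/.config/k-cli/prompts.jsonl"
	max_turns: 10
	max_tokens: 32768
	reasoning_effort: "medium"
	stream: true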

func NewDefaultConfig

func NewDefaultConfig(logger log.Logger) (*Config, error)

NewDefaultConfig returns a new Config with default values

func (*Config) Validate

func (svr *Config) Validate() error

Validate validates the loaded configuration. TODO: to implement

type ContentPart

type ContentPart struct {
	Text string `json:"text"`
	Type string `json:"type"`

	CacheControl map[string]any `json:"cache_control,omitempty"` // Claude-3
}

type FileRepo

type FileRepo struct {
	// contains filtered or unexported fields
}

FileRepo implements ChatRepo using file storage with async operations

func NewChatFileRepository

func NewChatFileRepository(
	dataFile string,
	workerCount int,
	logger log.Logger,
) (*FileRepo, error)

NewChatFileRepository creates a new FileRepo instance with async capabilities
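
A minimal wiring sketch, assuming a log.Logger named logger; the data file path is hypothetical (error handling abbreviated):

	repo, err := NewChatFileRepository("~/.config/k-cli/chats.jsonl", DefaultWorkerCount, logger)
	if err != nil {
		return err
	}
	defer repo.Close()
	svr := NewChatSvr(repo, logger) // *FileRepo satisfies ChatRepo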

func (*FileRepo) AddChat

func (fr *FileRepo) AddChat(ctx context.Context, chat *Chat) (*Chat, error)

AddChat adds a chat to cache

func (*FileRepo) AddChatAsync

func (fr *FileRepo) AddChatAsync(ctx context.Context, chat *Chat) <-chan OpResp

AddChatAsync adds a chat to cache

func (*FileRepo) Chat

func (fr *FileRepo) Chat(ctx context.Context, chatID string) (*Chat, error)

Chat returns a chat from cache

func (*FileRepo) Close

func (fr *FileRepo) Close() error

Close shuts down the repository gracefully

func (*FileRepo) DeleteChat

func (fr *FileRepo) DeleteChat(ctx context.Context, chatID string) (bool, error)

DeleteChat deletes a chat from cache

func (*FileRepo) DeleteChatAsync

func (fr *FileRepo) DeleteChatAsync(
	ctx context.Context,
	chatID string,
) <-chan OpResp

DeleteChatAsync deletes a chat from cache

func (*FileRepo) GetChatAsync

func (fr *FileRepo) GetChatAsync(ctx context.Context, chatID string) <-chan OpResp

GetChatAsync returns a chat from cache

func (*FileRepo) ListChats

func (fr *FileRepo) ListChats(
	ctx context.Context,
	keyword, model, provider *string,
	limit int,
) ([]*Chat, error)

ListChats lists chats from cache

func (*FileRepo) ListChatsAsync

func (fr *FileRepo) ListChatsAsync(
	ctx context.Context,
	keyword, model, provider *string,
	limit int,
) <-chan OpResp

ListChatsAsync lists all chats from cache

func (*FileRepo) UpdateChat

func (fr *FileRepo) UpdateChat(ctx context.Context, chat *Chat) (*Chat, error)

UpdateChat updates a chat in cache

func (*FileRepo) UpdateChatAsync

func (fr *FileRepo) UpdateChatAsync(ctx context.Context, chat *Chat) <-chan OpResp

UpdateChatAsync updates a chat in cache

type LLMStreamRet

type LLMStreamRet struct {
	Err  error // First
	Done bool  // Second

	ID    string
	Model string

	Content  string
	StreamCh <-chan StreamChunk
}

type ListChatsOption

type ListChatsOption struct {
	// contains filtered or unexported fields
}

ListChatsOption holds parameters for list chats operation

type MCPConfigSvr

type MCPConfigSvr struct {
	log.Logger
	// contains filtered or unexported fields
}

func NewMCPSvr

func NewMCPSvr(repo MCPSvrConfigRepo, logger log.Logger) *MCPConfigSvr

func (*MCPConfigSvr) AllMCPServerConfig

func (svr *MCPConfigSvr) AllMCPServerConfig() []*MCPSvrItem

AllMCPServerConfig returns the list of all MCP Server configs

func (*MCPConfigSvr) CreateMCPServerConfig

func (svr *MCPConfigSvr) CreateMCPServerConfig(
	name, typ string,
	isActive bool,
	description, baseURL, command *string,
	args []string,
) error

func (*MCPConfigSvr) DefaultConfig

func (svr *MCPConfigSvr) DefaultConfig() *MCPSvrItem

DefaultConfig returns the default MCP server configuration

func (MCPConfigSvr) DeleteMCPServerConfigByName

func (svr MCPConfigSvr) DeleteMCPServerConfigByName(name string) error

DeleteMCPServerConfigByName deletes the mcp server config with the specified name

func (*MCPConfigSvr) MCPServerConfigByName

func (svr *MCPConfigSvr) MCPServerConfigByName(name string) *MCPSvrItem

MCPServerConfigByName returns the MCP server config with the specified name, or nil if not found

func (*MCPConfigSvr) UpdateMCPServerConfigByName

func (svr *MCPConfigSvr) UpdateMCPServerConfigByName(item *MCPSvrItem) error

UpdateMCPServerConfigByName adds or updates the mcp server config with the specified config

type MCPSvrConfigFileRepo

type MCPSvrConfigFileRepo struct {
	log.Logger
	// contains filtered or unexported fields
}

MCPSvrConfigFileRepo implements MCPSvrConfigRepo using file storage

func NewMCPSvrConfigFileRepo

func NewMCPSvrConfigFileRepo(path string, logger log.Logger) (*MCPSvrConfigFileRepo, error)

func (*MCPSvrConfigFileRepo) AllMCPServerConfigs

func (r *MCPSvrConfigFileRepo) AllMCPServerConfigs() []*MCPSvrItem

func (*MCPSvrConfigFileRepo) DeleteMCPServerConfigByName

func (r *MCPSvrConfigFileRepo) DeleteMCPServerConfigByName(name string) error

func (*MCPSvrConfigFileRepo) MCPServerConfigByName

func (r *MCPSvrConfigFileRepo) MCPServerConfigByName(name string) (*MCPSvrItem, error)

MCPServerConfigByName returns the MCP server config by name, or an error if not found

func (*MCPSvrConfigFileRepo) UpdateMCPServerConfigByName

func (r *MCPSvrConfigFileRepo) UpdateMCPServerConfigByName(item *MCPSvrItem) error

type MCPSvrConfigRepo

type MCPSvrConfigRepo interface {
	MCPServerConfigByName(name string) (*MCPSvrItem, error)
	AllMCPServerConfigs() []*MCPSvrItem
	UpdateMCPServerConfigByName(item *MCPSvrItem) error
	DeleteMCPServerConfigByName(name string) error
}

MCPSvrConfigRepo (MCP Server Config Repository) defines the interface for MCP server config repository operations

type MCPSvrItem

type MCPSvrItem struct {
	Name     string `json:"name"`
	Type     string `json:"type"` // "stdio", "sse", "streamableHttp"
	IsActive bool   `json:"isActive"`

	Description string `json:"description,omitempty"` // Description of the MCP Server

	BaseURL string `json:"baseUrl,omitempty"` // The URL endpoint for SSE / StreamableHttp server connection

	//nolint:lll
	Command string   `json:"command,omitempty"` // The command to execute the server (e.g., 'node', 'python') - used for stdio
	Args    []string `json:"args,omitempty"`    // Command line arguments for the server - used for stdio

	//nolint:lll
	AutoConfirm []string `json:"autoConfirm,omitempty"` // List of tool names that should be auto-confirmed without user prompt
}

MCPSvrItem corresponds to each server configuration in the mcpServers object
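
As a concrete sketch, the default constants above correspond to an item like this (the IsActive value is an assumption):

	item := &MCPSvrItem{
		Name:     DefaultMCPServerConfigName,    // "todo"
		Type:     DefaultMCPServerConfigType,    // "stdio"
		IsActive: true,                          // assumption
		Command:  DefaultMCPServerConfigCommand, // "uvx"
		Args:     DefaultMCPServerConfigArgs,    // []string{"mcp-todo"}
	}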

type MCPSvrManager

type MCPSvrManager struct {
	log.Logger
	// contains filtered or unexported fields
}

func NewMCPSvrManager

func NewMCPSvrManager(repo MCPSvrConfigRepo, logger log.Logger) *MCPSvrManager

NewMCPSvrManager returns a new instance of MCPSvrManager

func (*MCPSvrManager) CallTool

func (ss *MCPSvrManager) CallTool(
	ctx context.Context, toolName string, args map[string]any,
) (*mcp.CallToolResult, error)

CallTool calls a tool on a specific server according to its tool name

func (*MCPSvrManager) ClossAllSession

func (ss *MCPSvrManager) ClossAllSession()

ClossAllSession closes all sessions and clears the session state

func (*MCPSvrManager) ExtractMCPToolUse

func (ss *MCPSvrManager) ExtractMCPToolUse(content string) *MCPToolUse
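
A sketch of the extract-then-call loop this enables, assuming ss is a connected *MCPSvrManager and content is the assistant's reply:

	if use := ss.ExtractMCPToolUse(content); use != nil {
		result, err := ss.CallTool(ctx, use.ToolsName, use.Arguments)
		if err != nil {
			return err // or feed the failure back to the model as a tool message
		}
		_ = result // forward the tool result to the next model turn
	}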

func (*MCPSvrManager) FormatResourceTemplatesSection

func (ss *MCPSvrManager) FormatResourceTemplatesSection(
	ctx context.Context,
	serverName string,
) string

FormatResourceTemplatesSection formats the resource templates section

func (*MCPSvrManager) FormatResourcesSection

func (ss *MCPSvrManager) FormatResourcesSection(ctx context.Context, serverName string) string

FormatResourcesSection formats the resources section

func (*MCPSvrManager) FormatServerInfo

func (ss *MCPSvrManager) FormatServerInfo(ctx context.Context) string

FormatServerInfo formats the server info

func (*MCPSvrManager) FormatToolsSection

func (ss *MCPSvrManager) FormatToolsSection(ctx context.Context, serverName string) string

FormatToolsSection formats the tools section

func (*MCPSvrManager) MCPServerList

func (ss *MCPSvrManager) MCPServerList() []string

MCPServerList returns the list of connected MCP servers

func (*MCPSvrManager) Prompt

func (ss *MCPSvrManager) Prompt(ctx context.Context, promptSvr *PromptSvr) string

Prompt generates the complete system prompt including MCP server information

func (*MCPSvrManager) ResourceTemplatesByServerName

func (ss *MCPSvrManager) ResourceTemplatesByServerName(
	ctx context.Context, serverName string,
) ([]*mcp.ResourceTemplate, error)

ResourceTemplatesByServerName returns the list of resource templates for a specific server

func (*MCPSvrManager) ResourcesByServerName

func (ss *MCPSvrManager) ResourcesByServerName(
	ctx context.Context, serverName string,
) ([]*mcp.Resource, error)

ResourcesByServerName returns the list of resources for a specific server

func (*MCPSvrManager) ToolsByServerName

func (ss *MCPSvrManager) ToolsByServerName(
	ctx context.Context,
	serverName string,
) ([]*mcp.Tool, error)

ToolsByServerName returns the list of tools for a specific server

type MCPToolUse

type MCPToolUse struct {
	ServerName string
	ToolsName  string
	Arguments  map[string]any
}

type Manager

type Manager struct {
	log.Logger

	MCPMgr *MCPSvrManager
	// contains filtered or unexported fields
}

func NewManager

func NewManager(
	logger log.Logger,
	chatReop ChatRepo,
	mcpReop MCPSvrConfigRepo,
	promptRepo PromptRepo,
	chatID *string,
	config *Config,
) *Manager

func (*Manager) HandleUserTextInput

func (mgr *Manager) HandleUserTextInput(userInput string) (*Message, error)

HandleUserTextInput handles plain user TEXT input without any links or images
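
An end-to-end sketch, assuming the repositories and config were constructed as shown earlier; passing nil for chatID to start a fresh chat is an assumption:

	mgr := NewManager(logger, chatRepo, mcpRepo, promptRepo, nil, cfg)
	reply, err := mgr.HandleUserTextInput("Summarize my open todos")
	if err != nil {
		return err
	}
	fmt.Println(reply.Content)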

type Message

type Message struct {
	Role    string `json:"role"`
	Content any    `json:"content"` // string or []ContentPart

	Timestamp     *time.Time `json:"timestamp,omitempty"`
	UnixTimestamp int64      `json:"unix_timestamp,omitempty"`

	ReasoningContent string         `json:"reasoning_content,omitempty"`
	ReasoningEffort  string         `json:"reasoning_effort,omitempty"`
	Links            []string       `json:"links,omitempty"`
	Images           []string       `json:"images,omitempty"`
	Model            string         `json:"model,omitempty"`
	Provider         string         `json:"provider,omitempty"`
	ID               string         `json:"id,omitempty"`
	ParentID         string         `json:"parent_id,omitempty"`
	Server           string         `json:"server,omitempty"`
	Tool             string         `json:"tool,omitempty"`
	Arguments        map[string]any `json:"arguments,omitempty"`
}

func LoadMessageFromJSON

func LoadMessageFromJSON(str string) (*Message, error)

LoadMessageFromJSON loads a message from a JSON string

func NewMessage

func NewMessage(role, content string, timestamp time.Time, unixTimestamp int64) *Message

func NewMessageWithOption

func NewMessageWithOption(role, content string, opt *MessageOption) *Message

NewMessageWithOption creates a Message object with optional fields using MessageOption
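
For example, a sketch of building an assistant message tagged with its provider and model (reusing GenerateChatID for the message ID is illustrative):

	msg := NewMessageWithOption(RoleAssistant, "Hello!", &MessageOption{
		Provider: ProviderOpenAI,
		Model:    DefaultModel,
		ID:       GenerateChatID(),
	})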

type MessageOption

type MessageOption struct {
	ReasoningContent string
	ReasoningEffort  string

	Links  []string
	Images []string

	Provider  string
	Model     string
	ID        string
	ParentID  string
	Server    string
	Tool      string
	Arguments map[string]any
}

MessageOption contains optional fields for creating a message

type OllamaChatRequest

type OllamaChatRequest struct {
	Model    string           `json:"model"`
	Messages []map[string]any `json:"messages"`
	Stream   bool             `json:"stream"`
}

OllamaChatRequest is the request struct for the Ollama API

type OllamaFormatProvider

type OllamaFormatProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

func NewOllmaFormatProvider

func NewOllmaFormatProvider(config *Config, logger log.Logger) *OllamaFormatProvider

func (*OllamaFormatProvider) BuildRequest

func (p *OllamaFormatProvider) BuildRequest(
	ctx context.Context,
	respChan chan StreamChunk,
	messages []*Message,
	systemPrompt *string,
) (*http.Request, error)

func (*OllamaFormatProvider) CallStreamableChatCompletions

func (p *OllamaFormatProvider) CallStreamableChatCompletions(
	messages []*Message,
	prompt *string,
) *Message

type OllamaStreamResponse

type OllamaStreamResponse struct {
	Model     string    `json:"model"`      // Model used for this request
	CreatedAt time.Time `json:"created_at"` // UTC timestamp of response creation, e.g. 2025-08-28T03:42:30.559748Z
	Message   Message   `json:"message"`    // Object containing the model-generated content
	Done      bool      `json:"done"`       // Indicates whether generation has finished

	DoneReason         string `json:"done_reason"`          // Diagnostic field describing the final state of the generation task: stop (complete answer) / length (answer truncated)
	TotalDuration      int64  `json:"total_duration"`       // Total time (ns) spent on the whole request, from receipt to the last generated token
	LoadDuration       int64  `json:"load_duration"`        // Time (ns) spent loading the model into memory
	PromptEvalCount    int    `json:"prompt_eval_count"`    // Number of tokens the model evaluated (processed) while handling the user input (prompt)
	PromptEvalDuration int64  `json:"prompt_eval_duration"` // Time (ns) spent processing the user input (prompt), i.e. how long the model took to "understand the question"
	EvalCount          int    `json:"eval_count"`           // Total number of tokens evaluated (generated) for the answer => the length of the model's output
	EvalDuration       int64  `json:"eval_duration"`        // Total time (ns) spent generating all answer tokens, i.e. how long the model took to "think and write the answer"

OllamaStreamResponse is the struct used to decode each JSON object in the Ollama /api/chat streaming response

type OpResp

type OpResp struct {
	Data  any
	Error error
}

OpResp (operation response) represents the result of an async operation
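
A sketch of consuming an async result; the concrete type behind Data is an assumption here (*Chat for GetChatAsync), so assert accordingly:

	resp := <-repo.GetChatAsync(ctx, chatID)
	if resp.Error != nil {
		return resp.Error
	}
	chat, _ := resp.Data.(*Chat) // assumption: Data holds *Chat for this operation
	fmt.Println(chat.ID)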

type OpenAIChatRequest

type OpenAIChatRequest struct {
	Model    string           `json:"model"`
	Messages []map[string]any `json:"messages"`
	Stream   bool             `json:"stream"`

	IncludeReasoning bool `json:"include_reasoning,omitempty"` // deepseek-r1
	Thinking         bool `json:"thinking,omitempty"`          // deepseekv3.1

	ReasoningEffort string `json:"reasoning_effort,omitempty"`
	MaxTokens       uint64 `json:"max_tokens,omitempty"`
}

OpenAIChatRequest is the struct used to send OpenAI /v1/chat/completions requests

type OpenAIFormatProvider

type OpenAIFormatProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

func NewOpenAIFormatProvider

func NewOpenAIFormatProvider(config *Config, logger log.Logger) *OpenAIFormatProvider

func (*OpenAIFormatProvider) BuildRequest

func (p *OpenAIFormatProvider) BuildRequest(
	ctx context.Context,
	respChan chan StreamChunk,
	messages []*Message,
	systemPrompt *string,
) (*http.Request, error)

func (*OpenAIFormatProvider) CallStreamableChatCompletions

func (p *OpenAIFormatProvider) CallStreamableChatCompletions(
	messages []*Message,
	prompt *string,
) *Message

type OpenAIStreamChoice

type OpenAIStreamChoice struct {
	Index        int                      `json:"index"`
	Delta        *OpenAIStreamChoiceDelta `json:"delta"`
	FinishReason string                   `json:"finish_reason"` // Appears in the last data chunk
}

OpenAIStreamChoice represents one choice in the OpenAI stream

type OpenAIStreamChoiceDelta

type OpenAIStreamChoiceDelta struct {
	Content          string `json:"content"`
	ReasoningContent string `json:"reasoning_content"` // Reasoning content from the DeepSeek-R1 model
	Role             string `json:"role"`              // Usually appears only in the first data chunk
}

OpenAIStreamChoiceDelta represents an incremental change in the OpenAI stream

type OpenAIStreamResponse

type OpenAIStreamResponse struct {
	ID                string                `json:"id"`
	Object            string                `json:"object"`
	Created           int64                 `json:"created"`
	Model             string                `json:"model"`
	SystemFingerprint string                `json:"system_fingerprint"`
	Choices           []*OpenAIStreamChoice `json:"choices"`
}

OpenAIStreamResponse is the struct used to decode each JSON object in the OpenAI /v1/chat/completions streaming response

type PromptFileRepo

type PromptFileRepo struct {
	log.Logger
	// contains filtered or unexported fields
}

func NewPromptFileRepo

func NewPromptFileRepo(jsonl string, logger log.Logger) (*PromptFileRepo, error)

func (*PromptFileRepo) AllPrompts

func (r *PromptFileRepo) AllPrompts() []*PromptItem

func (*PromptFileRepo) DeletePromptByName

func (r *PromptFileRepo) DeletePromptByName(name string) error

func (*PromptFileRepo) PromptByName

func (r *PromptFileRepo) PromptByName(name string) (*PromptItem, error)

func (*PromptFileRepo) UpdatePromptByName

func (r *PromptFileRepo) UpdatePromptByName(item *PromptItem) error

type PromptItem

type PromptItem struct {
	Name        string `mapstructure:"name"`                  // Unique identifier for the prompt
	Content     string `mapstructure:"content"`               // The content of the prompt
	Description string `mapstructure:"description,omitempty"` // Optional description of the prompt's purpose
}

type PromptRepo

type PromptRepo interface {
	PromptByName(name string) (*PromptItem, error)
	AllPrompts() []*PromptItem
	UpdatePromptByName(item *PromptItem) error
	DeletePromptByName(name string) error
}

PromptRepo (Prompt Repository) defines the interface for prompt repository operations

type PromptSvr

type PromptSvr struct {
	log.Logger
	// contains filtered or unexported fields
}

PromptSvr corresponds to the entire MCP PromptSvr file structure

func NewPromptSvr

func NewPromptSvr(repo PromptRepo, logger log.Logger) *PromptSvr

func (*PromptSvr) AddPrompt

func (svr *PromptSvr) AddPrompt(prompt *PromptItem) error

AddPrompt adds a new prompt configuration or updates an existing one.
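
A sketch, assuming a PromptRepo named promptRepo and a log.Logger named logger; the prompt values are hypothetical:

	svr := NewPromptSvr(promptRepo, logger)
	if err := svr.AddPrompt(&PromptItem{
		Name:        "release-notes",
		Content:     "You write concise release notes.",
		Description: "Prompt for drafting release notes",
	}); err != nil {
		return err
	}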

func (*PromptSvr) AllPrompts

func (svr *PromptSvr) AllPrompts() []*PromptItem

AllPrompts returns all prompt configurations

func (*PromptSvr) DeletePrompt

func (svr *PromptSvr) DeletePrompt(name string) error

DeletePrompt deletes a prompt configuration by name.

func (*PromptSvr) PromptByName

func (svr *PromptSvr) PromptByName(name string) *PromptItem

PromptByName returns the PromptItem by name

type Provider

type Provider interface {
	CallStreamableChatCompletions(
		messages []*Message,
		prompt *string,
	) *Message

	BuildRequest(
		_ context.Context,
		_ chan StreamChunk,
		_ []*Message,
		_ *string,
	) (*http.Request, error)
}
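
OpenAIFormatProvider, OllamaFormatProvider, and TaijiProvider each expose these two methods and so appear to satisfy this interface.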

type StreamChunk

type StreamChunk struct {
	ID string // Every tool-call request has a unique ID => it must be attached when returning results so the model can match each result to its original request
	// Provider string
	Model string

	Content string // The content of the chunk
	Done    bool   // Whether the stream is done
	Error   error  // Any error that occurred
}

StreamChunk defines a chunk of a stream
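
A sketch of draining a stream channel such as the one returned by DoCallStreamableChatCompletions:

	for chunk := range streamCh {
		if chunk.Error != nil {
			return chunk.Error
		}
		fmt.Print(chunk.Content) // print incremental content as it arrives
		if chunk.Done {
			break
		}
	}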

type TaijiChatRequest

type TaijiChatRequest struct {
	QueryID     string     `json:"query_id"`
	Model       string     `json:"model"`
	Messages    []*Message `json:"messages"`
	Temperature string     `json:"temperature"` // Adjusts sampling probability; range (0.0, 2.0], default 1.0
	TopP        float64    `json:"top_p"`       // Threshold for cumulative sampling probability; range [0.0, 1.0], default 1.0
	MaxTokens   uint64     `json:"max_tokens"`
	Stream      bool       `json:"stream"`

	Thinking bool `json:"thinking,omitempty"` // DeepSeek-V3_1
}

TaijiChatRequest is the struct used to send OpenAI-style /v1/chat/completions requests

type TaijiProvider

type TaijiProvider struct {
	BaseProvider
	// contains filtered or unexported fields
}

func NewTaijiProvider

func NewTaijiProvider(config *Config, logger log.Logger) *TaijiProvider

func (*TaijiProvider) BuildRequest

func (p *TaijiProvider) BuildRequest(
	ctx context.Context,
	respChan chan StreamChunk,
	messages []*Message,
	systemPrompt *string,
) (*http.Request, error)

func (*TaijiProvider) CallStreamableChatCompletions

func (p *TaijiProvider) CallStreamableChatCompletions(
	messages []*Message,
	prompt *string,
) *Message
