Documentation
¶
Index ¶
- Constants
- Variables
- func EnsureFileExistsSync(file string) error
- func ExpandUser(path string) (string, error)
- func GenerateChatID() string
- func GetISO8601Timestamp() time.Time
- func GetUnixTimestamp() int64
- func ToJSON(message *Message) (string, error)
- type BaseProvider
- func (p *BaseProvider) CallStreamableChatCompletions(provider string, reasoningEffort string, messages []*Message, prompt *string, ...) *Message
- func (p *BaseProvider) DoCallStreamableChatCompletions(messages []*Message, systemPrompt *string, ...) <-chan StreamChunk
- func (*BaseProvider) GenerateCurlCommand(req *http.Request, bodyBytes []byte) (string, error)
- func (p *BaseProvider) HandleStreamableChat(streamCh <-chan StreamChunk) LLMStreamRet
- func (p *BaseProvider) PrepareMessagesForCompletion(model string, messages []*Message, systemPrompt *string) []*Message
- func (p *BaseProvider) ProcessStreamableResponse(ctx context.Context, resp *http.Response, respChan chan StreamChunk)
- type Chat
- type ChatRepo
- type ChatSvr
- func (svr *ChatSvr) Chat(ctx context.Context, chatID string) (*Chat, error)
- func (svr *ChatSvr) CreateChat(ctx context.Context, messages []*Message, chatID string) (*Chat, error)
- func (svr *ChatSvr) DeleteChat(ctx context.Context, chatID string) (bool, error)
- func (svr *ChatSvr) GenerateShareHTML(ctx context.Context, chatID string) (string, error)
- func (svr *ChatSvr) ListChats(ctx context.Context, keyword, model, provider *string, limit int) ([]*Chat, error)
- func (svr *ChatSvr) UpdateChat(ctx context.Context, chatID string, messages []*Message) (*Chat, error)
- type Config
- type ContentPart
- type FileRepo
- func (fr *FileRepo) AddChat(ctx context.Context, chat *Chat) (*Chat, error)
- func (fr *FileRepo) AddChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
- func (fr *FileRepo) Chat(ctx context.Context, chatID string) (*Chat, error)
- func (fr *FileRepo) Close() error
- func (fr *FileRepo) DeleteChat(ctx context.Context, chatID string) (bool, error)
- func (fr *FileRepo) DeleteChatAsync(ctx context.Context, chatID string) <-chan OpResp
- func (fr *FileRepo) GetChatAsync(ctx context.Context, chatID string) <-chan OpResp
- func (fr *FileRepo) ListChats(ctx context.Context, keyword, model, provider *string, limit int) ([]*Chat, error)
- func (fr *FileRepo) ListChatsAsync(ctx context.Context, keyword, model, provider *string, limit int) <-chan OpResp
- func (fr *FileRepo) UpdateChat(ctx context.Context, chat *Chat) (*Chat, error)
- func (fr *FileRepo) UpdateChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
- type LLMStreamRet
- type ListChatsOption
- type MCPConfigSvr
- func (svr *MCPConfigSvr) AllMCPServerConfig() []*MCPSvrItem
- func (svr *MCPConfigSvr) CreateMCPServerConfig(name, typ string, isActive bool, description, baseURL, command *string, ...) error
- func (svr *MCPConfigSvr) DefaultConfig() *MCPSvrItem
- func (svr MCPConfigSvr) DeleteMCPServerConfigByName(name string) error
- func (svr *MCPConfigSvr) MCPServerConfigByName(name string) *MCPSvrItem
- func (svr *MCPConfigSvr) UpdateMCPServerConfigByName(item *MCPSvrItem) error
- type MCPSvrConfigFileRepo
- func (r *MCPSvrConfigFileRepo) AllMCPServerConfigs() []*MCPSvrItem
- func (r *MCPSvrConfigFileRepo) DeleteMCPServerConfigByName(name string) error
- func (r *MCPSvrConfigFileRepo) MCPServerConfigByName(name string) (*MCPSvrItem, error)
- func (r *MCPSvrConfigFileRepo) UpdateMCPServerConfigByName(item *MCPSvrItem) error
- type MCPSvrConfigRepo
- type MCPSvrItem
- type MCPSvrManager
- func (ss *MCPSvrManager) CallTool(ctx context.Context, toolName string, args map[string]any) (*mcp.CallToolResult, error)
- func (ss *MCPSvrManager) ClossAllSession()
- func (ss *MCPSvrManager) ExtractMCPToolUse(content string) *MCPToolUse
- func (ss *MCPSvrManager) FormatResourceTemplatesSection(ctx context.Context, serverName string) string
- func (ss *MCPSvrManager) FormatResourcesSection(ctx context.Context, serverName string) string
- func (ss *MCPSvrManager) FormatServerInfo(ctx context.Context) string
- func (ss *MCPSvrManager) FormatToolsSection(ctx context.Context, serverName string) string
- func (ss *MCPSvrManager) MCPServerList() []string
- func (ss *MCPSvrManager) Prompt(ctx context.Context, promptSvr *PromptSvr) string
- func (ss *MCPSvrManager) ResourceTemplatesByServerName(ctx context.Context, serverName string) ([]*mcp.ResourceTemplate, error)
- func (ss *MCPSvrManager) ResourcesByServerName(ctx context.Context, serverName string) ([]*mcp.Resource, error)
- func (ss *MCPSvrManager) ToolsByServerName(ctx context.Context, serverName string) ([]*mcp.Tool, error)
- type MCPToolUse
- type Manager
- type Message
- type MessageOption
- type OllamaChatRequest
- type OllamaFormatProvider
- type OllamaStreamResponse
- type OpResp
- type OpenAIChatRequest
- type OpenAIFormatProvider
- type OpenAIStreamChoice
- type OpenAIStreamChoiceDelta
- type OpenAIStreamResponse
- type PromptFileRepo
- type PromptItem
- type PromptRepo
- type PromptSvr
- type Provider
- type StreamChunk
- type TaijiChatRequest
- type TaijiProvider
Constants ¶
const ( DefaultOperationQueueSize = 100 // Default size of the operation queue DefaultWorkerCount = 5 // Default number of worker goroutines )
const ( YamlKeyBot = "K-CLI" DefaultProvider = "OpenAI" DefaultBaseURL = "https://openrouter.ai/api" DefaultCustomAPIPath = "/v1/chat/completions" DefaultAPIKey = "" DefaultModel = "deepseek/deepseek-chat-v3.1:free" DefaultStorageType = "file" DefaultMCPSvrPath = "~/.config/k-cli/mcp_servers.jsonl" DefaultPromptPath = "~/.config/k-cli/prompts.jsonl" DefaultMaxTurns = 10 DefaultMaxTokens = 32768 DefaultReasoningEffort = "medium" )
const ( RoleUser = "user" RoleAssistant = "assistant" RoleSystem = "system" RoleTool = "tool" ProviderOpenAI = "OpenAI" ProviderOllama = "Ollama" ProviderTaiji = "Taiji" DefaultChatMessageSize = DefaultMaxTurns )
const ( DefaultMCPServerConfigName = "todo" DefaultMCPServerConfigType = "stdio" DefaultMCPServerConfigCommand = "uvx" )
const ( MCPClientName = "kMCPClient" MCPClientVer = "v1.0.0" ServerTypeStdio = "stdio" ServerTypeSSE = "sse" ServerTypeStreamableHTTP = "streamableHttp" )
const ( DefaultPromptName = "default" DefaultMCPPromptName = "mcp" DefaultDeepResearchPromptName = "deep-research" )
const ( ModelClaude3 = "claude-3" ModelDeepSeekR1 = "deepseek-r1" ModelDeepSeekV31 = "DeepSeek-V3_1" DefaultTimeout = 60 * time.Second DefaultStreamChunkSize = 16 // default stream chunk size )
const DeepResearchPrompt = `` /* 1027-byte string literal not displayed */
const (
DefaultContentType = "text"
)
Variables ¶
var ( // DefaultCfgPath is the default configuration file path DefaultCfgPath = filepath.Join(".", "config", "client.yaml") // DefaultMCPServerConfig 默认的 MCP 服务器配置 DefaultMCPServerConfig = []string{"todo"} )
var DefaultMCPServerConfigArgs = []string{"mcp-todo"}
var MCP = `` /* 1050-byte string literal not displayed */
var MCPPrompt = `
====
TOOL USE
You have access to a set of tools that are executed upon the user's approval.
You can use one tool per message, and will receive the result of that tool use in the user's response.
You use tools step-by-step to accomplish a given task,
with each tool use informed by the result of the previous tool use.
# Tool Use Formatting
Tool use is formatted using XML-style tags.
The tool name is enclosed in opening and closing tags,
and each parameter is similarly enclosed within its own set of tags. Here's the structure:
<tool_name>
<parameter1_name>value1</parameter1_name>
<parameter2_name>value2</parameter2_name>
...
</tool_name>
For example:
<read_file>
<path>src/main.js</path>
</read_file>
Always adhere to this format for the tool use to ensure proper parsing and execution.
# Tools
## use_mcp_tool
Description: Request to use a tool provided by a connected MCP server.
Each MCP server can provide multiple tools with different capabilities.
Tools have defined input schemas that specify required and optional parameters.
Parameters:
- server_name: (required) The name of the MCP server providing the tool
- tool_name: (required) The name of the tool to execute
- arguments: (required) A JSON object containing the tool's input parameters, following the tool's input schema
Usage:
<use_mcp_tool>
<server_name>server name here</server_name>
<tool_name>tool name here</tool_name>
<arguments>
{
"param1": "value1",
"param2": "value2"
}
</arguments>
</use_mcp_tool>
## access_mcp_resource
Description: Request to access a resource provided by a connected MCP server.
Resources represent data sources that can be used as context, such as files, API responses, or system information.
Parameters:
- server_name: (required) The name of the MCP server providing the resource
- uri: (required) The URI identifying the specific resource to access
Usage:
<access_mcp_resource>
<server_name>server name here</server_name>
<uri>resource URI here</uri>
</access_mcp_resource>
# Tool Use Examples
## Example 1: Requesting to use an MCP tool
<use_mcp_tool>
<server_name>weather-server</server_name>
<tool_name>get_forecast</tool_name>
<arguments>
{
"city": "San Francisco",
"days": 5
}
</arguments>
</use_mcp_tool>
## Example 2: Requesting to access an MCP resource
<access_mcp_resource>
<server_name>weather-server</server_name>
<uri>weather://san-francisco/current</uri>
</access_mcp_resource>
# Tool Use Guidelines
1. In <thinking> tags, assess what information you already have and what information you need to proceed with the task.
2. Choose the most appropriate tool based on the task and the tool descriptions provided.
Assess if you need additional information to proceed, and which of the available tools would be most effective for
gathering this information. For example using the list_files tool is more effective than running a command like` +
" `ls` " + `in the terminal.
It's critical that you think about each available tool and use the one that best fits the current step in the task.
3. If multiple actions are needed, use one tool at a time per message to accomplish the task iteratively,
with each tool use being informed by the result of the previous tool use. Do not assume the outcome of any tool use.
Each step must be informed by the previous step's result.
4. Formulate your tool use using the XML format specified for each tool.
5. After each tool use, the user will respond with the result of that tool use.
This result will provide you with the necessary information to continue your task or make further decisions.
This response may include:
- Information about whether the tool succeeded or failed, along with any reasons for failure.
- Linter errors that may have arisen due to the changes you made, which you'll need to address.
- New terminal output in reaction to the changes, which you may need to consider or act upon.
- Any other relevant feedback or information related to the tool use.
6. ALWAYS wait for user confirmation after each tool use before proceeding.
Never assume the success of a tool use without explicit confirmation of the result from the user.
It is crucial to proceed step-by-step,
waiting for the user's message after each tool use before moving forward with the task. This approach allows you to:
1. Confirm the success of each step before proceeding.
2. Address any issues or errors that arise immediately.
3. Adapt your approach based on new information or unexpected results.
4. Ensure that each action builds correctly on the previous ones.
By waiting for and carefully considering the user's response after each tool use,
you can react accordingly and make informed decisions about how to proceed with the task.
This iterative process helps ensure the overall success and accuracy of your work.
# Tool use Are Not Always Necessary
While tools are a powerful way to interact with the system and perform tasks, they are not always required.
You can still perform a wide range of tasks without using tools.
However, when you need to access specific resources, perform complex operations, or interact with external systems,
tools provide a structured and efficient way to accomplish these tasks.
====
MCP SERVERS
The Model Context Protocol (MCP) enables communication between the system and locally running MCP servers
that provide additional tools and resources to extend your capabilities.
# Connected MCP Servers
When a server is connected, you can use the server's tools via the` +
" `use_mcp_tool` " + `tool, and access the server's resources via the ` + " `access_mcp_resource` " + ` tool.
# MCP Servers Are Not Always Necessary
While MCP servers can provide additional tools and resources, they are not always required.
You can still perform a wide range of tasks without connecting to an MCP server.
However, when you need additional capabilities or access to specific resources,
connecting to an MCP server can greatly enhance your functionality.
`
var TimePrompt = func() string { return "Current Time: " + time.Now().Format("2006-01-02 15:04:05") }()
var (
ToolTags = []string{"use_mcp_tool", "access_mcp_resource"}
)
Functions ¶
func EnsureFileExistsSync ¶
EnsureFileExistsSync ensures that the data file exists, creating it if necessary
func ExpandUser ¶
ExpandUser expands the ~ in the beginning of a file path to the user's home directory
func GenerateChatID ¶
func GenerateChatID() string
GenerateChatID generates a unique ID (6 characters). It generates a UUID and takes the first 6 characters of the hex representation.
func GetISO8601Timestamp ¶
GetISO8601Timestamp returns current timestamp in ISO8601 format with timezone offset
func GetUnixTimestamp ¶
func GetUnixTimestamp() int64
GetUnixTimestamp returns current time as 13-digit unix timestamp (milliseconds)
Types ¶
type BaseProvider ¶
func (*BaseProvider) CallStreamableChatCompletions ¶
func (*BaseProvider) DoCallStreamableChatCompletions ¶
func (p *BaseProvider) DoCallStreamableChatCompletions( messages []*Message, systemPrompt *string, BuildRequest func( context.Context, chan StreamChunk, []*Message, *string, ) (*http.Request, error), ) <-chan StreamChunk
func (*BaseProvider) GenerateCurlCommand ¶
GenerateCurlCommand returns a string that can be executed to make the request ⚠️ 注意:因为该函数没有读取 req.Body => 请求体仍然可以被 client.Do 正常读取。
func (*BaseProvider) HandleStreamableChat ¶
func (p *BaseProvider) HandleStreamableChat(streamCh <-chan StreamChunk) LLMStreamRet
func (*BaseProvider) PrepareMessagesForCompletion ¶
func (p *BaseProvider) PrepareMessagesForCompletion( model string, messages []*Message, systemPrompt *string, ) []*Message
func (*BaseProvider) ProcessStreamableResponse ¶
func (p *BaseProvider) ProcessStreamableResponse( ctx context.Context, resp *http.Response, respChan chan StreamChunk, )
type Chat ¶
type Chat struct {
ID string `json:"id"`
CreateTime time.Time `json:"create_time"`
UpdateTime time.Time `json:"update_time"`
Messages []*Message
}
func (*Chat) UpdateMessages ¶
UpdateMessages filters out system messages and sorts the remaining ones by timestamp
type ChatRepo ¶
type ChatRepo interface {
ListChatsAsync(
ctx context.Context,
keyword, model, provider *string,
limit int,
) <-chan OpResp
GetChatAsync(ctx context.Context, chatID string) <-chan OpResp
AddChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
UpdateChatAsync(ctx context.Context, chat *Chat) <-chan OpResp
DeleteChatAsync(ctx context.Context, chatID string) <-chan OpResp
// Sync versions for convenience
ListChats(ctx context.Context, keyword, model, provider *string, limit int) ([]*Chat, error)
Chat(ctx context.Context, chatID string) (*Chat, error)
AddChat(ctx context.Context, chat *Chat) (*Chat, error)
UpdateChat(ctx context.Context, chat *Chat) (*Chat, error)
DeleteChat(ctx context.Context, chatID string) (bool, error)
Close() error
}
ChatRepo (Chat Repository) defines the interface for chat repository operations
type ChatSvr ¶
func (*ChatSvr) CreateChat ¶
func (svr *ChatSvr) CreateChat( ctx context.Context, messages []*Message, chatID string, ) (*Chat, error)
CreateChat creates a new chat with messages and optional external ID
func (*ChatSvr) DeleteChat ¶
DeleteChat deletes a chat by ID
func (*ChatSvr) GenerateShareHTML ¶
TODO: Implement
type Config ¶
type Config struct {
// Model Provider
Provider string `mapstructure:"provider"`
BaseURL string `mapstructure:"base_url"`
CustomAPIPath string `mapstructure:"custom_api_path"`
Model string `mapstructure:"model"`
APIKey string `mapstructure:"api_key"`
StorageType string `mapstructure:"storage_type,omitempty"`
// MCP
MCPSvrPath string `mapstructure:"mcp_server_path"` // MCP Server 配置文件路径
// Prompt
PromptPath string `mapstructure:"prompt_path"` // Prompt 配置文件路径
MaxTurns uint `mapstructure:"max_turns"` // 最多调用 MCP Server 的次数
MaxTokens uint64 `mapstructure:"max_tokens"` // 最大 token 数
ReasoningEffort string `mapstructure:"reasoning_effort"` // 推理努力度 => high | medium | low | minimal
Stream bool `mapstructure:"stream"` // 是否使用流式输出
// contains filtered or unexported fields
}
func NewConfigFromFile ¶
func NewDefaultConfig ¶
NewDefaultConfig returns a new Config with default values
type ContentPart ¶
type FileRepo ¶
type FileRepo struct {
// contains filtered or unexported fields
}
FileRepo implements ChatRepository using file storage with async operations
func NewChatFileRepository ¶
func NewChatFileRepository( dataFile string, workerCount int, logger log.Logger, ) (*FileRepo, error)
NewChatFileRepository creates a new FileRepository instance with async capabilities
func (*FileRepo) AddChatAsync ¶
AddChatAsync adds a chat to cache
func (*FileRepo) DeleteChat ¶
DeleteChat deletes a chat from cache
func (*FileRepo) DeleteChatAsync ¶
DeleteChatAsync deletes a chat from cache
func (*FileRepo) GetChatAsync ¶
GetChatAsync returns a chat from cache
func (*FileRepo) ListChats ¶
func (fr *FileRepo) ListChats( ctx context.Context, keyword, model, provider *string, limit int, ) ([]*Chat, error)
ListChats lists chats from cache
func (*FileRepo) ListChatsAsync ¶
func (fr *FileRepo) ListChatsAsync( ctx context.Context, keyword, model, provider *string, limit int, ) <-chan OpResp
ListChatsAsync lists all chats from cache
func (*FileRepo) UpdateChat ¶
UpdateChat updates a chat in cache
type LLMStreamRet ¶
type ListChatsOption ¶
type ListChatsOption struct {
// contains filtered or unexported fields
}
ListChatsOption holds parameters for list chats operation
type MCPConfigSvr ¶
func NewMCPSvr ¶
func NewMCPSvr(repo MCPSvrConfigRepo, logger log.Logger) *MCPConfigSvr
func (*MCPConfigSvr) AllMCPServerConfig ¶
func (svr *MCPConfigSvr) AllMCPServerConfig() []*MCPSvrItem
AllMCPServerConfig returns the list of all MCP Server config
func (*MCPConfigSvr) CreateMCPServerConfig ¶
func (*MCPConfigSvr) DefaultConfig ¶
func (svr *MCPConfigSvr) DefaultConfig() *MCPSvrItem
DefaultConfig returns the default MCP server configuration
func (MCPConfigSvr) DeleteMCPServerConfigByName ¶
func (svr MCPConfigSvr) DeleteMCPServerConfigByName(name string) error
DeleteMCPServerConfigByName deletes the mcp server config with the specified name
func (*MCPConfigSvr) MCPServerConfigByName ¶
func (svr *MCPConfigSvr) MCPServerConfigByName(name string) *MCPSvrItem
MCPServerConfigByName returns the specified mcp server config with name, otherwise return nil
func (*MCPConfigSvr) UpdateMCPServerConfigByName ¶
func (svr *MCPConfigSvr) UpdateMCPServerConfigByName(item *MCPSvrItem) error
UpdateMCPServerConfigByName adds or updates the mcp server config with the specified config
type MCPSvrConfigFileRepo ¶
MCPSvrConfigFileRepo implements MCPSvrConfigRepo using file storage
func NewMCPSvrConfigFileRepo ¶
func NewMCPSvrConfigFileRepo(path string, logger log.Logger) (*MCPSvrConfigFileRepo, error)
func (*MCPSvrConfigFileRepo) AllMCPServerConfigs ¶
func (r *MCPSvrConfigFileRepo) AllMCPServerConfigs() []*MCPSvrItem
func (*MCPSvrConfigFileRepo) DeleteMCPServerConfigByName ¶
func (r *MCPSvrConfigFileRepo) DeleteMCPServerConfigByName(name string) error
func (*MCPSvrConfigFileRepo) MCPServerConfigByName ¶
func (r *MCPSvrConfigFileRepo) MCPServerConfigByName(name string) (*MCPSvrItem, error)
MCPServerConfigByName returns the mcp server config by name, or error if not found
func (*MCPSvrConfigFileRepo) UpdateMCPServerConfigByName ¶
func (r *MCPSvrConfigFileRepo) UpdateMCPServerConfigByName(item *MCPSvrItem) error
type MCPSvrConfigRepo ¶
type MCPSvrConfigRepo interface {
MCPServerConfigByName(name string) (*MCPSvrItem, error)
AllMCPServerConfigs() []*MCPSvrItem
UpdateMCPServerConfigByName(item *MCPSvrItem) error
DeleteMCPServerConfigByName(name string) error
}
MCPSvrConfigRepo (MCP Server Config Repository) defines the interface for mcp server config repository operations
type MCPSvrItem ¶
type MCPSvrItem struct {
Name string `json:"name"`
Type string `json:"type"` // "stdio", "sse", "streamableHttp"
IsActive bool `json:"isActive"`
Description string `json:"description,omitempty"` // Description of the MCP Server
BaseURL string `json:"baseUrl,omitempty"` // The URL endpoint for SSE / StreamableHttp server connection
//nolint:lll
Command string `json:"command,omitempty"` // The command to execute the server (e.g., 'node', 'python') - used for stdio
Args []string `json:"args,omitempty"` // Command line arguments for the server - used for stdio
//nolint:lll
AutoConfirm []string `json:"autoConfirm,omitempty"` // List of tool names that should be auto-confirmed without user prompt
}
MCPSvrItem 对应 mcpServers 对象中的每一个服务器配置
type MCPSvrManager ¶
func NewMCPSvrManager ¶
func NewMCPSvrManager(repo MCPSvrConfigRepo, logger log.Logger) *MCPSvrManager
NewMCPSvrManager returns a new instance of MCPSvrManager
func (*MCPSvrManager) CallTool ¶
func (ss *MCPSvrManager) CallTool( ctx context.Context, toolName string, args map[string]any, ) (*mcp.CallToolResult, error)
CallTool calls a tool on a specific server according to its tool name
func (*MCPSvrManager) ClossAllSession ¶
func (ss *MCPSvrManager) ClossAllSession()
ClossAllSession closes all sessions and clears the session state
func (*MCPSvrManager) ExtractMCPToolUse ¶
func (ss *MCPSvrManager) ExtractMCPToolUse(content string) *MCPToolUse
func (*MCPSvrManager) FormatResourceTemplatesSection ¶
func (ss *MCPSvrManager) FormatResourceTemplatesSection( ctx context.Context, serverName string, ) string
FormatResourceTemplatesSection formats the resource templates section
func (*MCPSvrManager) FormatResourcesSection ¶
func (ss *MCPSvrManager) FormatResourcesSection(ctx context.Context, serverName string) string
FormatResourcesSection formats the resources section
func (*MCPSvrManager) FormatServerInfo ¶
func (ss *MCPSvrManager) FormatServerInfo(ctx context.Context) string
FormatServerInfo formats the server info
func (*MCPSvrManager) FormatToolsSection ¶
func (ss *MCPSvrManager) FormatToolsSection(ctx context.Context, serverName string) string
FormatToolsSection formats the tools section
func (*MCPSvrManager) MCPServerList ¶
func (ss *MCPSvrManager) MCPServerList() []string
MCPServerList returns the list of connected MCP servers
func (*MCPSvrManager) Prompt ¶
func (ss *MCPSvrManager) Prompt(ctx context.Context, promptSvr *PromptSvr) string
Prompt generates the complete system prompt including MCP server information
func (*MCPSvrManager) ResourceTemplatesByServerName ¶
func (ss *MCPSvrManager) ResourceTemplatesByServerName( ctx context.Context, serverName string, ) ([]*mcp.ResourceTemplate, error)
ResourceTemplatesByServerName returns the list of resource templates for a specific server
func (*MCPSvrManager) ResourcesByServerName ¶
func (ss *MCPSvrManager) ResourcesByServerName( ctx context.Context, serverName string, ) ([]*mcp.Resource, error)
ResourcesByServerName returns the list of resources for a specific server
func (*MCPSvrManager) ToolsByServerName ¶
func (ss *MCPSvrManager) ToolsByServerName( ctx context.Context, serverName string, ) ([]*mcp.Tool, error)
ToolsByServerName returns the list of tools for a specific server
type MCPToolUse ¶
type Manager ¶
type Manager struct {
log.Logger
MCPMgr *MCPSvrManager
// contains filtered or unexported fields
}
func NewManager ¶
func NewManager( logger log.Logger, chatReop ChatRepo, mcpReop MCPSvrConfigRepo, promptRepo PromptRepo, chatID *string, config *Config, ) *Manager
type Message ¶
type Message struct {
Role string `json:"role"`
Content any `json:"content"` // string or []ContentPart
Timestamp *time.Time `json:"timestamp,omitempty"`
UnixTimestamp int64 `json:"unix_timestamp,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
Links []string `json:"links,omitempty"`
Images []string `json:"images,omitempty"`
Model string `json:"model,omitempty"`
Provider string `json:"provider,omitempty"`
ID string `json:"id,omitempty"`
ParentID string `json:"parent_id,omitempty"`
Server string `json:"server,omitempty"`
Tool string `json:"tool,omitempty"`
Arguments map[string]any `json:"arguments,omitempty"`
}
func LoadMessageFromJSON ¶
LoadMessageFromJSON loads a message from a JSON string
func NewMessage ¶
func NewMessageWithOption ¶
func NewMessageWithOption(role, content string, opt *MessageOption) *Message
NewMessageWithOption creates a Message object with optional fields using MessageOption
type MessageOption ¶
type MessageOption struct {
ReasoningContent string
ReasoningEffort string
Links []string
Images []string
Provider string
Model string
ID string
ParentID string
Server string
Tool string
Arguments map[string]any
}
MessageOption contains optional fields for creating a message
type OllamaChatRequest ¶
type OllamaChatRequest struct {
Model string `json:"model"`
Messages []map[string]any `json:"messages"`
Stream bool `json:"stream"`
}
OllamaChatRequest 是 Ollama API 的请求结构体
type OllamaFormatProvider ¶
type OllamaFormatProvider struct {
BaseProvider
// contains filtered or unexported fields
}
func NewOllmaFormatProvider ¶
func NewOllmaFormatProvider(config *Config, logger log.Logger) *OllamaFormatProvider
func (*OllamaFormatProvider) BuildRequest ¶
func (p *OllamaFormatProvider) BuildRequest( ctx context.Context, respChan chan StreamChunk, messages []*Message, systemPrompt *string, ) (*http.Request, error)
func (*OllamaFormatProvider) CallStreamableChatCompletions ¶
func (p *OllamaFormatProvider) CallStreamableChatCompletions( messages []*Message, prompt *string, ) *Message
type OllamaStreamResponse ¶
type OllamaStreamResponse struct {
Model string `json:"model"` // 本次请求所使用的模型
CreatedAt time.Time `json:"created_at"` // 响应创建的 UTC 时间戳 2025-08-28T03:42:30.559748Z
Message Message `json:"message"` // 包含模型生成内容的对象
Done bool `json:"done"` // 用于指示生成过程是否已完成
DoneReason string `json:"done_reason"` // 诊断字段描述了 Ollama 模型生成任务的最终状态: stop(完整回答) / length(回答被截断)
TotalDuration int64 `json:"total_duration"` // 整个请求所花费的总时间(ns),从收到请求到生成完最后一个 token
LoadDuration int64 `json:"load_duration"` // 加载模型到内存所花费的时间(ns)
PromptEvalCount int `json:"prompt_eval_count"` // 在处理用户输入(Prompt)时,模型评估(处理)的 token 数量
PromptEvalDuration int64 `json:"prompt_eval_duration"` // 处理用户输入(Prompt)所花费的时间(ns), 可理解为 `模型理解问题` 花费的时间
EvalCount int `json:"eval_count"` // 模型生成回答时,总共评估(生成)的 token 数量 => 代表了模型输出的长度
EvalDuration int64 `json:"eval_duration"` // 生成所有回答 token 所花费的总时间(ns), 模型“思考并写出答案”所用的时间
}
OllamaStreamResponse 是用于解码 Ollama /api/chat 流式响应中每一个 JSON 对象的结构体
type OpenAIChatRequest ¶
type OpenAIChatRequest struct {
Model string `json:"model"`
Messages []map[string]any `json:"messages"`
Stream bool `json:"stream"`
IncludeReasoning bool `json:"include_reasoning,omitempty"` // deepseek-r1
Thinking bool `json:"thinking,omitempty"` // deepseekv3.1
ReasoningEffort string `json:"reasoning_effort,omitempty"`
MaxTokens uint64 `json:"max_tokens,omitempty"`
}
OpenAIChatRequest 是用于发送 OpenAI /v1/chat/completions 请求的结构体
type OpenAIFormatProvider ¶
type OpenAIFormatProvider struct {
BaseProvider
// contains filtered or unexported fields
}
func NewOpenAIFormatProvider ¶
func NewOpenAIFormatProvider(config *Config, logger log.Logger) *OpenAIFormatProvider
func (*OpenAIFormatProvider) BuildRequest ¶
func (p *OpenAIFormatProvider) BuildRequest( ctx context.Context, respChan chan StreamChunk, messages []*Message, systemPrompt *string, ) (*http.Request, error)
func (*OpenAIFormatProvider) CallStreamableChatCompletions ¶
func (p *OpenAIFormatProvider) CallStreamableChatCompletions( messages []*Message, prompt *string, ) *Message
type OpenAIStreamChoice ¶
type OpenAIStreamChoice struct {
Index int `json:"index"`
Delta *OpenAIStreamChoiceDelta `json:"delta"`
FinishReason string `json:"finish_reason"` // 在最后一个数据块出现
}
OpenAIStreamChoice 代表 OpenAI 流中的一个选项
type OpenAIStreamChoiceDelta ¶
type OpenAIStreamChoiceDelta struct {
Content string `json:"content"`
ReasoningContent string `json:"reasoning_content"` // DeepSeek-R1 模型的推理内容
Role string `json:"role"` // 通常只在第一个数据块出现
}
OpenAIStreamChoiceDelta 代表 OpenAI 流中的增量变化
type OpenAIStreamResponse ¶
type OpenAIStreamResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
SystemFingerprint string `json:"system_fingerprint"`
Choices []*OpenAIStreamChoice `json:"choices"`
}
OpenAIStreamResponse 是用于解码 OpenAI /v1/chat/completions 流式响应中每一个 JSON 对象的结构体
type PromptFileRepo ¶
func NewPromptFileRepo ¶
func NewPromptFileRepo(jsonl string, logger log.Logger) (*PromptFileRepo, error)
func (*PromptFileRepo) AllPrompts ¶
func (r *PromptFileRepo) AllPrompts() []*PromptItem
func (*PromptFileRepo) DeletePromptByName ¶
func (r *PromptFileRepo) DeletePromptByName(name string) error
func (*PromptFileRepo) PromptByName ¶
func (r *PromptFileRepo) PromptByName(name string) (*PromptItem, error)
func (*PromptFileRepo) UpdatePromptByName ¶
func (r *PromptFileRepo) UpdatePromptByName(item *PromptItem) error
type PromptItem ¶
type PromptRepo ¶
type PromptRepo interface {
PromptByName(name string) (*PromptItem, error)
AllPrompts() []*PromptItem
UpdatePromptByName(item *PromptItem) error
DeletePromptByName(name string) error
}
PromptRepo (Prompt Repository) defines the interface for prompt repository operations
type PromptSvr ¶
PromptSvr 对应整个 MCP PromptSvr 文件结构
func NewPromptSvr ¶
func NewPromptSvr(repo PromptRepo, logger log.Logger) *PromptSvr
func (*PromptSvr) AddPrompt ¶
func (svr *PromptSvr) AddPrompt(prompt *PromptItem) error
AddPrompt adds a new prompt configuration or update existing one.
func (*PromptSvr) AllPrompts ¶
func (svr *PromptSvr) AllPrompts() []*PromptItem
AllPrompts returns all prompt configurations
func (*PromptSvr) DeletePrompt ¶
DeletePrompt deletes a prompt configuration by name.
func (*PromptSvr) PromptByName ¶
func (svr *PromptSvr) PromptByName(name string) *PromptItem
PromptByName returns the PromptItem by name
type StreamChunk ¶
type StreamChunk struct {
ID string // 每一条工具调用请求都有一个唯一的 ID => 在返回结果时,必须将这个 ID 附上,以便模型能够准确地将返回的结果与它当初的请求对应起来
// Provider string
Model string
Content string // The content of the chunk
Done bool // Whether the stream is done
Error error // Any error that occurred
}
StreamChunk defines a chunk of a stream
type TaijiChatRequest ¶
type TaijiChatRequest struct {
QueryID string `json:"query_id"`
Model string `json:"model"`
Messages []*Message `json:"messages"`
Temperature string `json:"temperature"` // 调节概率值,取值区间为 (0.0, 2.0],默认为 1.0
TopP float64 `json:"top_p"` // 采样累积概率的阈值, 取值区间为 [0.0, 1.0],默认值 1.0
MaxTokens uint64 `json:"max_tokens"`
Stream bool `json:"stream"`
Thinking bool `json:"thinking,omitempty"` // DeepSeek-V3_1
}
TaijiChatRequest 是用于发送 Taiji chat completions 请求的结构体
type TaijiProvider ¶
type TaijiProvider struct {
BaseProvider
// contains filtered or unexported fields
}
func NewTaijiProvider ¶
func NewTaijiProvider(config *Config, logger log.Logger) *TaijiProvider
func (*TaijiProvider) BuildRequest ¶
func (p *TaijiProvider) BuildRequest( ctx context.Context, respChan chan StreamChunk, messages []*Message, systemPrompt *string, ) (*http.Request, error)
func (*TaijiProvider) CallStreamableChatCompletions ¶
func (p *TaijiProvider) CallStreamableChatCompletions( messages []*Message, prompt *string, ) *Message