Documentation
¶
Index ¶
Constants ¶
// MaiVersion is the hardcoded version string (Python: mai_version).
// The value below is a placeholder — set the real release version here.
const MaiVersion = "1.0.0-default" // Example value
MaiVersion is the hardcoded version string, equivalent to Python's mai_version. Replace the placeholder value with the actual release version of this application.
Variables ¶
This section is empty.
Functions ¶
func GetConfigDir ¶
GetConfigDir retrieves the configuration directory. Equivalent to the Python static method get_config_dir.
Types ¶
type BotConfig ¶
// BotConfig is the Go equivalent of the Python BotConfig dataclass,
// populated from a TOML configuration file (see LoadConfig).
//
// NOTE(review): several field tags reuse generic section keys — "enable"
// appears on ChineseTypoEnable and RemoteEnable (and inside keywords_reaction),
// and keys such as "error_rate", "min_freq", and "check_interval" look like
// members of nested TOML tables ([chinese_typo], [emoji], [remote]) flattened
// into this one struct. A direct toml.Unmarshal of the flat struct would
// collide on the duplicate top-level tags; presumably LoadConfig maps the
// nested sections into these fields manually — TODO confirm against LoadConfig.
type BotConfig struct {
// Versioning
InnerVersion *Version `toml:"-"` // Parsed from the "inner.version" field in TOML
MaiVersion string `toml:"-"` // Hardcoded version
// Bot settings
BotQQ string `toml:"qq"` // Mapped from bot_config.get("qq")
BotNickname *string `toml:"nickname,omitempty"`
BotAliasNames []string `toml:"alias_names,omitempty"`
// Group settings (from TOML lists, can be converted to maps for quick lookup if needed)
TalkAllowedGroups []string `toml:"talk_allowed,omitempty"`
TalkFrequencyDownGroups []string `toml:"talk_frequency_down,omitempty"`
BanUserID []string `toml:"ban_user_id,omitempty"`
TalkAllowedPrivate []string `toml:"talk_allowed_private,omitempty"` // From experimental
// Personality
PersonalityCore string `toml:"personality_core"`
PersonalitySides []string `toml:"personality_sides,omitempty"`
// Identity
IdentityDetail []string `toml:"identity_detail,omitempty"`
Height int `toml:"height"` // cm
Weight int `toml:"weight"` // kg
Age int `toml:"age"` // years
Gender string `toml:"gender"`
Appearance string `toml:"appearance"`
// Schedule
EnableScheduleGen bool `toml:"enable_schedule_gen"`
PromptScheduleGen string `toml:"prompt_schedule_gen"`
ScheduleDoingUpdateInterval int `toml:"schedule_doing_update_interval"` // seconds
ScheduleTemperature float64 `toml:"schedule_temperature"`
TimeZone string `toml:"time_zone"`
// Chat
AllowFocusMode bool `toml:"allow_focus_mode"`
BaseNormalChatNum int `toml:"base_normal_chat_num"`
BaseFocusedChatNum int `toml:"base_focused_chat_num"`
ObservationContextSize int `toml:"observation_context_size"`
MessageBuffer bool `toml:"message_buffer"`
BanWords []string `toml:"ban_words,omitempty"`
BanMsgsRegexStrings []string `toml:"ban_msgs_regex,omitempty"` // Raw regex strings
CompiledBanMsgsRegex []*regexp.Regexp `toml:"-"` // Compiled regexes; built from BanMsgsRegexStrings after loading
// Focus Chat
ReplyTriggerThreshold float64 `toml:"reply_trigger_threshold"`
DefaultDecayRatePerSecond float64 `toml:"default_decay_rate_per_second"`
ConsecutiveNoReplyThreshold int `toml:"consecutive_no_reply_threshold"`
CompressedLength int `toml:"compressed_length"`
CompressLengthLimit int `toml:"compress_length_limit"`
// Normal Chat
ModelReasoningProbability float64 `toml:"model_reasoning_probability"`
ModelNormalProbability float64 `toml:"model_normal_probability"`
EmojiChance float64 `toml:"emoji_chance"`
ThinkingTimeout int `toml:"thinking_timeout"` // seconds
WillingMode string `toml:"willing_mode"`
ResponseWillingAmplifier float64 `toml:"response_willing_amplifier"`
ResponseInterestedRateAmplifier float64 `toml:"response_interested_rate_amplifier"`
DownFrequencyRate float64 `toml:"down_frequency_rate"`
EmojiResponsePenalty float64 `toml:"emoji_response_penalty"`
MentionedBotInevitableReply bool `toml:"mentioned_bot_inevitable_reply"`
AtBotInevitableReply bool `toml:"at_bot_inevitable_reply"`
// Emoji
MaxEmojiNum int `toml:"max_emoji_num"`
MaxReachDeletion bool `toml:"max_reach_deletion"`
EmojiCheckInterval int `toml:"check_interval"` // minutes; NOTE(review): generic key — presumably from the [emoji] table, confirm
SavePic bool `toml:"save_pic"`
SaveEmoji bool `toml:"save_emoji"`
StealEmoji bool `toml:"steal_emoji"`
EmojiCheck bool `toml:"enable_check"`
EmojiCheckPrompt string `toml:"check_prompt"`
// Memory
BuildMemoryInterval int `toml:"build_memory_interval"` // seconds
MemoryBuildDistribution []float64 `toml:"memory_build_distribution,omitempty"`
BuildMemorySampleNum int `toml:"build_memory_sample_num"`
BuildMemorySampleLength int `toml:"build_memory_sample_length"`
MemoryCompressRate float64 `toml:"memory_compress_rate"`
ForgetMemoryInterval int `toml:"forget_memory_interval"` // seconds
MemoryForgetTime int `toml:"memory_forget_time"` // hours
MemoryForgetPercentage float64 `toml:"memory_forget_percentage"`
ConsolidateMemoryInterval int `toml:"consolidate_memory_interval"` // seconds
ConsolidationSimilarityThreshold float64 `toml:"consolidation_similarity_threshold"`
ConsolidateMemoryPercentage float64 `toml:"consolidate_memory_percentage"`
MemoryBanWords []string `toml:"memory_ban_words,omitempty"`
// Mood
MoodUpdateInterval float64 `toml:"mood_update_interval"` // seconds
MoodDecayRate float64 `toml:"mood_decay_rate"`
MoodIntensityFactor float64 `toml:"mood_intensity_factor"`
// Keywords Reaction
KeywordsReactionConfig struct {
Enable bool `toml:"enable"`
Rules []KeywordReactionRule `toml:"rules,omitempty"`
} `toml:"keywords_reaction"`
// Chinese Typo
// NOTE(review): the tags below ("enable", "error_rate", ...) collide with
// other sections when decoded flat — see the struct doc comment.
ChineseTypoEnable bool `toml:"enable"`
ChineseTypoErrorRate float64 `toml:"error_rate"`
ChineseTypoMinFreq int `toml:"min_freq"`
ChineseTypoToneErrorRate float64 `toml:"tone_error_rate"`
ChineseTypoWordReplaceRate float64 `toml:"word_replace_rate"`
// Response Splitter
EnableKaomojiProtection bool `toml:"enable_kaomoji_protection"`
EnableResponseSplitter bool `toml:"enable_response_splitter"`
ResponseMaxLength int `toml:"response_max_length"`
ResponseMaxSentenceNum int `toml:"response_max_sentence_num"`
ModelMaxOutputLength int `toml:"model_max_output_length"`
// Remote
RemoteEnable bool `toml:"enable"` // From remote.enable; NOTE(review): duplicates ChineseTypoEnable's tag above
// Experimental
EnableFriendChat bool `toml:"enable_friend_chat"`
EnablePFCChatting bool `toml:"pfc_chatting"` // Renamed from enable_pfc_chatting for TOML key
// Model Config (LLM, VLM, Embedding, Moderation)
// These will be pointers to allow them to be nil if not present in config
LLMReasoning *LLMConfigEntry `toml:"llm_reasoning,omitempty"`
LLMNormal *LLMConfigEntry `toml:"llm_normal,omitempty"`
LLMTopicJudge *LLMConfigEntry `toml:"llm_topic_judge,omitempty"`
LLMSummary *LLMConfigEntry `toml:"llm_summary,omitempty"`
Embedding *LLMConfigEntry `toml:"embedding,omitempty"`
VLM *LLMConfigEntry `toml:"vlm,omitempty"`
Moderation *LLMConfigEntry `toml:"moderation,omitempty"`
LLMObservation *LLMConfigEntry `toml:"llm_observation,omitempty"`
LLMSubHeartflow *LLMConfigEntry `toml:"llm_sub_heartflow,omitempty"`
LLMHeartflow *LLMConfigEntry `toml:"llm_heartflow,omitempty"`
LLMToolUse *LLMConfigEntry `toml:"llm_tool_use,omitempty"`
LLMPlan *LLMConfigEntry `toml:"llm_plan,omitempty"`
LLMPFCActionPlanner *LLMConfigEntry `toml:"llm_PFC_action_planner,omitempty"` // Added based on Python list
LLMPFCChat *LLMConfigEntry `toml:"llm_PFC_chat,omitempty"` // Added based on Python list
LLMPFCReplyChecker *LLMConfigEntry `toml:"llm_PFC_reply_checker,omitempty"` // Added based on Python list
// API URLs for platforms
APIURLs map[string]string `toml:"platforms,omitempty"` // Stores platform URLs, key is platform name
// contains filtered or unexported fields
}
BotConfig is the Go equivalent of the Python BotConfig dataclass.
func LoadConfig ¶
LoadConfig loads the BotConfig from a TOML file. The implementation below is only a skeleton: it must be fleshed out considerably to match the full Python logic, in particular the version-based conditional loading and the error handling.
func NewBotConfig ¶
func NewBotConfig() *BotConfig
NewBotConfig creates a new BotConfig with default values.
type KeywordReactionRule ¶
// KeywordReactionRule defines the structure for a single keyword-reaction
// rule loaded from TOML. RegexStrings holds the raw patterns as written in
// the config; CompiledRegex is filled in from them after loading.
type KeywordReactionRule struct {
Enable bool `toml:"enable"`
RegexStrings []string `toml:"regex"`
ResponseTexts []string `toml:"response_texts"` // Example, adjust based on actual rule structure
// Other fields like "action", "mood_change", etc.
CompiledRegex []*regexp.Regexp `toml:"-"` // To be populated after loading
}
KeywordReactionRule defines the structure for a keyword reaction rule.
type LLMConfigEntry ¶
// LLMConfigEntry represents the configuration for one LLM model entry,
// corresponding to cfg_target in the Python model-loading code.
//
// BaseURL and Key are not read from TOML: they are derived from Provider
// (e.g. SILICONFLOW -> SILICONFLOW_BASE_URL / SILICONFLOW_KEY — presumably
// environment variables; confirm against the loader).
type LLMConfigEntry struct {
Name string `toml:"name"`
Provider string `toml:"provider"` // e.g., "SILICONFLOW"
BaseURL string `toml:"-"` // Derived from provider, e.g., SILICONFLOW_BASE_URL
Key string `toml:"-"` // Derived from provider, e.g., SILICONFLOW_KEY
Stream bool `toml:"stream"`
PriIn float64 `toml:"pri_in"` // Price per 1k input tokens
PriOut float64 `toml:"pri_out"` // Price per 1k output tokens
Temp *float64 `toml:"temp,omitempty"` // Pointer to allow omission / distinguish 0.0 from not set
}
LLMConfigEntry represents the configuration for an LLM model. Corresponds to the structure of cfg_target in the Python model loading.
type SpecifierSet ¶
// SpecifierSet is an alias for semver.Constraints; it represents a set of
// version specifiers (the counterpart of Python's SpecifierSet).
type SpecifierSet = semver.Constraints
SpecifierSet is an alias for semver.Constraints for convenience. It represents a set of version specifiers.
func ConvertToSpecifierSet ¶
func ConvertToSpecifierSet(value string) (*SpecifierSet, error)
ConvertToSpecifierSet converts a version constraint string to a SpecifierSet. Equivalent to the Python class method convert_to_specifierset.
type Version ¶
Version is an alias for semver.Version for convenience.
func GetConfigVersion ¶
GetConfigVersion extracts and parses the version from the TOML data. Equivalent to the Python class method get_config_version.