Documentation
¶
Index ¶
Constants ¶
View Source
const (
	NodeTypeSearch = "search"
	NodeTypeAnd    = "and"
	NodeTypeOr     = "or"
	NodeTypeError  = "error"
)
View Source
const (
	SearchTypeExact      = "exact"
	SearchTypeExactCase  = "exactcase"
	SearchTypeRegexp     = "regexp"
	SearchTypeRegexpCase = "regexpcase"
	SearchTypeFzf        = "fzf"
	SearchTypeFzfCase    = "fzfcase"
	SearchTypeNot        = "not"
	SearchTypeTag        = "tag"
	SearchTypeUserQuery  = "userquery"
	SearchTypeMarked     = "marked"
	SearchTypeNumeric    = "numeric"
)
Variables ¶
View Source
var TagRegexp = regexp.MustCompile(`^` + utilfn.SimpleTagRegexStr + `$`)
TagRegexp is the regular expression pattern for valid tag names. It uses SimpleTagRegexStr from utilfn/util.go for consistency.
Functions ¶
func TokensToString ¶
TokensToString converts a slice of tokens to a string representation
Types ¶
type Node ¶
type Node struct {
Type string // NodeTypeAnd, NodeTypeOr, NodeTypeSearch, NodeTypeError
Position Position // Position in the source text
Children []*Node // For composite nodes (AND/OR)
SearchType string // e.g., "exact", "regexp", "fzf", etc. (only for search nodes)
SearchTerm string // The actual search text (only for search nodes)
Field string // Optional field specifier (only for search nodes)
Op string // Optional operator for numeric searches (>, <, >=, <=)
IsNot bool // Set to true if preceded by '-' (for not tokens)
ErrorMessage string // For error nodes, a simple error message
}
type Parser ¶
type Parser struct {
// contains filtered or unexported fields
}
type Token ¶
type Token struct {
Type TokenType // Type of the token
Value string // Value of the token
Position Position // Position in the source
Incomplete bool // True if the token is incomplete (e.g., unterminated string)
}
Token represents a token in the search expression
type TokenType ¶
type TokenType string
TokenType represents the type of token
const (
	// Token types for complex tokens
	TokenWord       TokenType = "WORD"    // Plain word token
	TokenDQuote     TokenType = "DQUOTE"  // Double quoted string
	TokenSQuote     TokenType = "SQUOTE"  // Single quoted string
	TokenRegexp     TokenType = "REGEXP"  // Regular expression
	TokenCRegexp    TokenType = "CREGEXP" // Case-sensitive regexp
	TokenWhitespace TokenType = "WS"      // Whitespace
	TokenEOF        TokenType = "EOF"     // End of input

	// Token types for simple characters (using the actual character)
	TokenLParen TokenType = "(" // Left parenthesis
	TokenRParen TokenType = ")" // Right parenthesis
	TokenPipe   TokenType = "|" // Pipe character
	TokenMinus  TokenType = "-" // Minus sign
	TokenDollar TokenType = "$" // Dollar sign
	TokenTilde  TokenType = "~" // Tilde
	TokenHash   TokenType = "#" // Hash
)
type Tokenizer ¶
type Tokenizer struct {
// contains filtered or unexported fields
}
Tokenizer represents a lexical analyzer for search expressions
func NewTokenizer ¶
NewTokenizer creates a new tokenizer for the given input
func (*Tokenizer) GetAllTokens ¶
GetAllTokens tokenizes the entire input and returns all tokens
Click to show internal directories.
Click to hide internal directories.