Documentation
¶
Overview ¶
Package shlex provides simple lexical analysis of strings, similar to a Unix shell.
Index ¶
- Variables
- func OriginSplit(s string, posix bool) ([]string, error)
- func Split(s string, posix bool, preserveLiteral bool, delimiter ...rune) ([]string, error)
- type DefaultTokenizer
- func (t *DefaultTokenizer) IsDelimiter(r rune, delimiter rune, delimiterSpecific bool) bool
- func (t *DefaultTokenizer) IsEscape(r rune) bool
- func (t *DefaultTokenizer) IsEscapedQuote(r rune, preserveLiteral bool) bool
- func (t *DefaultTokenizer) IsQuote(r rune) bool
- func (t *DefaultTokenizer) IsWord(r rune) bool
- type Lexer
- type Tokenizer
Examples ¶
Constants ¶
This section is empty.
Variables ¶
var (
	ErrNoClosing = errors.New("no closing quotation")
	ErrNoEscaped = errors.New("no escaped character")
)
Functions ¶
func OriginSplit ¶ added in v0.1.2
OriginSplit splits a string according to posix or non-posix rules. It is the original, unmodified Split().
func Split ¶
Split splits a string according to posix or non-posix rules. The default delimiter is whitespace (not only the space character); otherwise the first rune passed in delimiter is used.
Example ¶
package main
import (
"fmt"
"log"
shlex "github.com/chroblert/go-shlex"
)
func main() {
cmd := `cp -Rdp "file name" 'file name2' dir\ name`
// Split of cmd with POSIX mode.
words1, err := shlex.Split(cmd, true, false)
if err != nil {
log.Fatal(err)
}
// Split of cmd with Non-POSIX mode.
words2, err := shlex.Split(cmd, false, false)
if err != nil {
log.Fatal(err)
}
fmt.Println("Source command:")
fmt.Println(`cp -Rdp "file name" 'file name2' dir\ name`)
fmt.Println()
fmt.Println("POSIX mode:")
for _, word := range words1 {
fmt.Println(word)
}
fmt.Println()
fmt.Println("Non-POSIX mode:")
for _, word := range words2 {
fmt.Println(word)
}
}
Output:

Source command:
cp -Rdp "file name" 'file name2' dir\ name

POSIX mode:
cp
-Rdp
file name
file name2
dir name

Non-POSIX mode:
cp
-Rdp
"file name"
'file name2'
dir\ name
Types ¶
type DefaultTokenizer ¶
type DefaultTokenizer struct{}
DefaultTokenizer implements a simple tokenizer like Unix shell.
func (*DefaultTokenizer) IsDelimiter ¶
func (t *DefaultTokenizer) IsDelimiter(r rune, delimiter rune, delimiterSpecific bool) bool
func (*DefaultTokenizer) IsEscape ¶
func (t *DefaultTokenizer) IsEscape(r rune) bool
func (*DefaultTokenizer) IsEscapedQuote ¶
func (t *DefaultTokenizer) IsEscapedQuote(r rune, preserveLiteral bool) bool
delimiterSpecific indicates whether a delimiter was explicitly specified; if not, standard POSIX-mode rules are applied.
func (*DefaultTokenizer) IsQuote ¶
func (t *DefaultTokenizer) IsQuote(r rune) bool
func (*DefaultTokenizer) IsWord ¶
func (t *DefaultTokenizer) IsWord(r rune) bool
type Lexer ¶
type Lexer struct {
// contains filtered or unexported fields
}
Lexer represents a lexical analyzer.
func NewLexer ¶
func NewLexer(posix, whitespaceSplit bool, delimiter rune, r io.Reader, preserveLiteral bool, delimiterSpecific bool) *Lexer
NewLexer creates a new Lexer reading from io.Reader. This Lexer has a DefaultTokenizer according to posix and whitespaceSplit rules.
func NewLexerString ¶
func NewLexerString(posix, whitespaceSplit bool, delimiter rune, s string, preserveLiteral, delimiterSpecific bool) *Lexer
NewLexerString creates a new Lexer reading from a string. This Lexer has a DefaultTokenizer according to posix and whitespaceSplit rules.
func (*Lexer) SetTokenizer ¶
SetTokenizer sets a Tokenizer.