mirror of
https://github.com/status-im/nim-confutils.git
synced 2025-01-20 17:09:15 +00:00
Initial support for command-line completion
Using the Bash protocol (COMP_LINE & COMP_POINT).
This commit is contained in:
parent
14da5a0077
commit
cef93bbd95
@ -1,6 +1,6 @@
|
||||
import
|
||||
strutils, options, std_shims/macros_shim, typetraits,
|
||||
confutils/[defs, cli_parser]
|
||||
confutils/[defs, cli_parser, shell_completion]
|
||||
|
||||
export
|
||||
defs
|
||||
@ -418,6 +418,92 @@ proc load*(Configuration: type,
|
||||
activeCmds.add cmd
|
||||
rejectNextArgument = not cmd.hasArguments
|
||||
|
||||
type
  # Selects which spelling(s) of an option are matched and printed
  # during shell completion.
  ArgKindFilter = enum
    longForm   # "--name" style switches
    shortForm  # "-n" style switches
|
||||
|
||||
proc showMatchingOptions(cmd: CommandPtr, prefix: string, filterKind: set[ArgKindFilter]) =
  ## Print to stdout one completion candidate per line for every option of
  ## `cmd` whose long name or short form starts with `prefix` (the caller
  ## passes `prefix` already normalized). An empty `prefix` lists every
  ## option. `filterKind` selects which spellings (--long and/or -short)
  ## are considered for matching and printed.
  var matchingOptions: seq[OptionDesc]

  if len(prefix) > 0:
    # Filter the options according to the input prefix.
    # Fix: an option is appended at most once, even when both its long and
    # short spellings match the prefix (the original added it twice, which
    # produced a duplicated completion entry).
    for opt in cmd.options:
      let matchesLong = longForm in filterKind and
                        len(opt.name) > 0 and
                        normalize(opt.name).startsWith(prefix)
      let matchesShort = shortForm in filterKind and
                         len(opt.shortform) > 0 and
                         normalize(opt.shortform).startsWith(prefix)
      if matchesLong or matchesShort:
        matchingOptions.add(opt)
  else:
    matchingOptions = cmd.options

  for opt in matchingOptions:
    # The trailing '=' means the switch accepts an argument; boolean
    # switches take none, so they get a plain space instead.
    let trailing = if opt.typename != "bool": '=' else: ' '

    if longForm in filterKind:
      stdout.writeLine("--", opt.name, trailing)
    if shortForm in filterKind:
      stdout.writeLine('-', opt.shortform, trailing)
|
||||
|
||||
# Bash completion support: when the parent shell asks us to complete the
# command line (COMP_LINE/COMP_POINT are set), print the candidates and
# return before any regular option parsing takes place.
let completion = splitCompletionLine()
# If we're not asked to complete a command line the result is an empty list
if len(completion) != 0:
  var cmdStack = @[rootCmd]
  # Try to understand what the active chain of commands is without parsing the
  # whole command line: every non-option word that names a subcommand of the
  # innermost command found so far extends the chain.
  for tok in completion[1..^1]:
    if not tok.startsWith('-'):
      let subCmd = findSubcommand(cmdStack[^1], tok)
      if subCmd != nil: cmdStack.add(subCmd)

  # The word being completed plus up to two preceding words (needed to
  # recognize the "-switch value" and "-switch = value" shapes below).
  let cur_word = normalize(completion[^1])
  let prev_word = if len(completion) > 2: normalize(completion[^2]) else: ""
  let prev_prev_word = if len(completion) > 3: normalize(completion[^3]) else: ""

  if cur_word.startsWith('-'):
    # Show all the options matching the prefix input by the user
    let isLong = cur_word.startsWith("--")
    var option_word = cur_word
    option_word.removePrefix('-')

    # Walk the command chain innermost-first so the most specific options
    # are listed before those of the enclosing commands.
    for i in countdown(cmdStack.len - 1, 0):
      let argFilter =
        if isLong:
          {longForm}
        elif len(cur_word) > 1:
          # "-x..." — a dash followed by text: short options only
          {shortForm}
        else:
          # If the user entered a single hyphen then we show both long &
          # short variants
          {longForm, shortForm}

      showMatchingOptions(cmdStack[i], option_word, argFilter)
  elif (prev_word.startsWith('-') or
      (prev_word == "=" and prev_prev_word.startsWith('-'))):
    # Handle cases where we want to complete a switch choice
    # -switch
    # -switch=
    var option_word = if len(prev_word) == 1: prev_prev_word else: prev_word
    option_word.removePrefix('-')

    # NOTE(review): completing the value of a switch (e.g. enum choices) is
    # not implemented yet; this only reports what would be completed.
    stderr.writeLine("TODO: options for ", option_word)
  elif len(cmdStack[^1].subCommands) != 0:
    # Show all the available subcommands
    for subCmd in cmdStack[^1].subCommands:
      if startsWith(normalize(subCmd.name), cur_word):
        stdout.writeLine(subCmd.name)
  else:
    # Full options listing
    for i in countdown(cmdStack.len - 1, 0):
      showMatchingOptions(cmdStack[i], "", {longForm, shortForm})

  stdout.flushFile()

  # A completion request never falls through to normal CLI parsing.
  return
|
||||
|
||||
for kind, key, val in getopt(cmdLine):
|
||||
case kind
|
||||
of cmdLongOption, cmdShortOption:
|
||||
|
227
confutils/shell_completion.nim
Normal file
227
confutils/shell_completion.nim
Normal file
@ -0,0 +1,227 @@
|
||||
## A simple lexer meant to tokenize an input string as a shell would do.
|
||||
import lexbase
|
||||
import options
|
||||
import streams
|
||||
import os
|
||||
import strutils
|
||||
|
||||
type
  ShellLexer = object of BaseLexer
    # When true, whitespace right before EOF yields one final empty token,
    # letting callers detect whether the input ends mid-word or after a word.
    preserveTrailingWs: bool
    # Set when the previous token stopped on a word-breaking character;
    # the next getTok call then merges the whole run of such characters
    # into a single token.
    mergeWordBreaks: bool
    # Characters that terminate a word and are tokenized separately.
    wordBreakChars: string

const
  # Default word-breaking characters — presumably mirrors bash's
  # COMP_WORDBREAKS set; TODO confirm against the target bash version.
  WORDBREAKS = "\"'@><=;|&(:"
|
||||
|
||||
proc open(l: var ShellLexer, input: Stream, wordBreakChars: string = WORDBREAKS, preserveTrailingWs = true) =
  ## Prepare `l` to tokenize `input`. `wordBreakChars` is the set of
  ## characters treated as word terminators; `preserveTrailingWs` controls
  ## whether trailing whitespace produces a final empty token.
  lexbase.open(l, input)
  l.wordBreakChars = wordBreakChars
  l.preserveTrailingWs = preserveTrailingWs
  l.mergeWordBreaks = false
|
||||
|
||||
proc parseQuoted(l: var ShellLexer, pos: int, isSingle: bool, output: var string): int =
  ## Consume a quoted span starting at `pos` (just past the opening quote)
  ## and append its unquoted contents to `output`. `isSingle` selects
  ## single-quote semantics (only \' is honored as an escape) versus
  ## double-quote semantics (\$, \`, \\ and \" are escapes; anything else
  ## keeps its backslash). Returns the position just past the closing
  ## quote, or the EOF position if the quote is unterminated.
  ##
  ## NOTE(review): honoring \' inside single quotes deviates from POSIX sh,
  ## where a backslash is literal inside single quotes — presumably a
  ## deliberate choice of this lexer; confirm against the test corpus.
  var pos = pos
  while true:
    case l.buf[pos]:
    of '\c': pos = lexbase.handleCR(l, pos)
    of '\L': pos = lexbase.handleLF(l, pos)
    of lexbase.EndOfFile: break
    of '\\':
      # Consume the backslash and the following character
      # NOTE(review): a backslash immediately before EOF appends the EOF
      # sentinel character and advances past it — verify this is harmless.
      inc(pos)
      if (isSingle and l.buf[pos] in {'\''}) or
         (not isSingle and l.buf[pos] in {'$', '`', '\\', '"'}):
        # Escape the character
        output.add(l.buf[pos])
      else:
        # Rewrite the escape sequence as-is
        output.add('\\')
        output.add(l.buf[pos])
      inc(pos)
    of '\"':
      inc(pos)
      # A double quote inside a single-quoted span is literal text;
      # otherwise it closes the double-quoted span.
      if isSingle: output.add('\"')
      else: break
    of '\'':
      inc(pos)
      # A single quote closes a single-quoted span and is literal text
      # inside a double-quoted one.
      if isSingle: break
      else: output.add('\'')
    else:
      output.add(l.buf[pos])
      inc(pos)
  return pos
|
||||
|
||||
proc getTok(l: var ShellLexer): Option[string] =
  ## Return the next shell-style token of the input, or `none(string)` when
  ## the input is exhausted. Handles '#' comments, single/double quotes,
  ## backslash escapes and — via `wordBreakChars`/`mergeWordBreaks` — word-
  ## breaking characters, which are returned as their own (merged) tokens.
  var pos = l.bufpos

  # Skip the initial whitespace
  while true:
    case l.buf[pos]:
    of '\c': pos = lexbase.handleCR(l, pos)
    of '\L': pos = lexbase.handleLF(l, pos)
    of '#':
      # Skip everything until EOF/EOL
      while l.buf[pos] notin {'\c', '\L', lexbase.EndOfFile}:
        inc(pos)
    of lexbase.EndOfFile:
      # If we did eat up some whitespace return an empty token, this is needed
      # to find out if the string ends with whitespace.
      if l.preserveTrailingWs and l.bufpos != pos:
        l.bufpos = pos
        return some("")
      return none(string)
    of ' ', '\t':
      inc(pos)
    else:
      break

  var tokLit = ""
  # Parse the next token
  while true:
    case l.buf[pos]:
    of '\c': pos = lexbase.handleCR(l, pos)
    of '\L': pos = lexbase.handleLF(l, pos)
    of '\'':
      # Single-quoted string
      inc(pos)
      pos = parseQuoted(l, pos, true, tokLit)
    of '"':
      # Double-quoted string
      inc(pos)
      pos = parseQuoted(l, pos, false, tokLit)
    of '\\':
      # Escape sequence: the escaped character is taken literally
      inc(pos)
      if l.buf[pos] != lexbase.EndOfFile:
        tokLit.add(l.buf[pos])
        inc(pos)
    of '#', ' ', '\t', lexbase.EndOfFile:
      break
    else:
      let ch = l.buf[pos]
      if ch notin l.wordBreakChars:
        tokLit.add(l.buf[pos])
        inc(pos)
      # Merge together runs of adjacent word-breaking characters if requested
      # (mergeWordBreaks was armed when the previous token stopped here).
      elif l.mergeWordBreaks:
        while l.buf[pos] in l.wordBreakChars:
          tokLit.add(l.buf[pos])
          inc(pos)
        l.mergeWordBreaks = false
        break
      else:
        # End the current token and arm the merge for the next call, which
        # will then return the run of word-break characters as one token.
        l.mergeWordBreaks = true
        break

  l.bufpos = pos
  return some(tokLit)
|
||||
|
||||
proc splitCompletionLine*(): seq[string] =
  ## Split the command line that Bash passes to a completion helper through
  ## the COMP_LINE/COMP_POINT environment variables into shell-style words,
  ## considering only the part of the line up to the cursor. Returns an
  ## empty sequence when no valid completion request is present.
  let compLine = os.getEnv("COMP_LINE")
  var compPoint = parseInt(os.getEnv("COMP_POINT", "0"))

  # A cursor sitting one past the last character is pulled back onto it so
  # the inclusive slice below stays within bounds.
  if compPoint == len(compLine):
    dec compPoint

  # An out-of-range cursor means this is not a request we can handle.
  if compPoint < 0 or compPoint > len(compLine):
    return @[]

  # Tokenize only the useful part: everything up to the cursor.
  var lexer: ShellLexer
  lexer.open(newStringStream(compLine[0..compPoint]))
  while true:
    let tok = lexer.getTok()
    if tok.isNone():
      break
    result.add(tok.get())
|
||||
|
||||
# Test data lifted from python's shlex unit-tests.
# Format: one case per line, fields separated by '|':
#   <input string>|<expected token 1>|...|<expected token N>|
# Consumed by the `isMainModule` self-test below.
const data = """
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
|
||||
|
||||
when isMainModule:
  ## Self-test: run the lexer over every corpus line and compare the token
  ## stream against the expected fields.
  var corpus = newStringStream(data)
  var row = ""
  while corpus.readLine(row):
    let fields = row.split('|')
    let input = fields[0]
    let wanted = fields[1..^2]

    # Word-break handling and trailing-whitespace tokens are disabled so the
    # lexer behaves like a plain POSIX-style tokenizer for this corpus.
    var lexer: ShellLexer
    lexer.open(newStringStream(input), wordBreakChars = "", preserveTrailingWs = false)

    var tokens: seq[string]
    while true:
      let tok = lexer.getTok()
      if tok.isNone():
        break
      tokens.add(tok.get())

    if tokens != wanted:
      echo "got ", tokens
      echo "expected ", wanted
      assert(false)
|
Loading…
x
Reference in New Issue
Block a user