feat_: `LogOnPanic` linter (#5969)

* feat_: LogOnPanic linter

* fix_: add missing defer LogOnPanic

* chore_: make vendor

* fix_: tests, address pr comments

* fix_: address pr comments
Igor Sirotin 2024-10-23 21:33:05 +01:00 committed by GitHub
parent 0555331252
commit 679391999f
180 changed files with 28220 additions and 8 deletions
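
For context, the pattern this linter enforces is that every goroutine body starts with a deferred LogOnPanic call. A minimal sketch of the expected shape (the gocommon alias for github.com/status-im/status-go/common follows its usage elsewhere in this diff; startWorker is a made-up example):

package example

import gocommon "github.com/status-im/status-go/common"

// startWorker shows the shape the linter accepts: the first statement of the
// goroutine body is a deferred LogOnPanic call, so any panic raised inside
// work() passes through LogOnPanic before unwinding further.
func startWorker(work func()) {
	go func() {
		defer gocommon.LogOnPanic()
		work()
	}()
}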


@ -408,7 +408,10 @@ canary-test: node-canary
# TODO: uncomment that!
#_assets/scripts/canary_test_mailservers.sh ./config/cli/fleet-eth.prod.json
lint: generate
lint-panics: generate
go run ./cmd/lint-panics -root="$(call sh, pwd)" -skip=./cmd -test=false ./...
lint: generate lint-panics
golangci-lint run ./...
ci: generate lint canary-test test-unit test-e2e ##@tests Run all linters and tests at once


@ -0,0 +1,245 @@
package analyzer
import (
"context"
"fmt"
"go/ast"
"os"
"go.uber.org/zap"
goparser "go/parser"
gotoken "go/token"
"strings"
"github.com/pkg/errors"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
"github.com/status-im/status-go/cmd/lint-panics/gopls"
"github.com/status-im/status-go/cmd/lint-panics/utils"
)
const Pattern = "LogOnPanic"
type Analyzer struct {
logger *zap.Logger
lsp LSP
cfg *Config
}
type LSP interface {
Definition(context.Context, string, int, int) (string, int, error)
}
func New(ctx context.Context, logger *zap.Logger) (*analysis.Analyzer, error) {
cfg := Config{}
flags, err := cfg.ParseFlags()
if err != nil {
return nil, err
}
logger.Info("creating analyzer", zap.String("root", cfg.RootDir))
goplsClient := gopls.NewGoplsClient(ctx, logger, cfg.RootDir)
processor := newAnalyzer(logger, goplsClient, &cfg)
analyzer := &analysis.Analyzer{
Name: "logpanics",
Doc: fmt.Sprintf("reports missing defer call to %s", Pattern),
Flags: flags,
Requires: []*analysis.Analyzer{inspect.Analyzer},
Run: func(pass *analysis.Pass) (interface{}, error) {
return processor.Run(ctx, pass)
},
}
return analyzer, nil
}
func newAnalyzer(logger *zap.Logger, lsp LSP, cfg *Config) *Analyzer {
return &Analyzer{
logger: logger.Named("processor"),
lsp: lsp,
cfg: cfg.WithAbsolutePaths(),
}
}
func (p *Analyzer) Run(ctx context.Context, pass *analysis.Pass) (interface{}, error) {
inspected, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
if !ok {
return nil, errors.New("analyzer is not type *inspector.Inspector")
}
// Create a nodes filter for goroutines (GoStmt represents a 'go' statement)
nodeFilter := []ast.Node{
(*ast.GoStmt)(nil),
}
// Inspect go statements
inspected.Preorder(nodeFilter, func(n ast.Node) {
p.ProcessNode(ctx, pass, n)
})
return nil, nil
}
func (p *Analyzer) ProcessNode(ctx context.Context, pass *analysis.Pass, n ast.Node) {
goStmt, ok := n.(*ast.GoStmt)
if !ok {
panic("unexpected node type")
}
switch fun := goStmt.Call.Fun.(type) {
case *ast.FuncLit: // anonymous function
pos := pass.Fset.Position(fun.Pos())
logger := p.logger.With(
utils.ZapURI(pos.Filename, pos.Line),
zap.Int("column", pos.Column),
)
logger.Debug("found anonymous goroutine")
if err := p.checkGoroutine(fun.Body); err != nil {
p.logLinterError(pass, fun.Pos(), fun.Pos(), err)
}
case *ast.SelectorExpr: // method call
pos := pass.Fset.Position(fun.Sel.Pos())
p.logger.Info("found method call as goroutine",
zap.String("methodName", fun.Sel.Name),
utils.ZapURI(pos.Filename, pos.Line),
zap.Int("column", pos.Column),
)
defPos, err := p.checkGoroutineDefinition(ctx, pos, pass)
if err != nil {
p.logLinterError(pass, defPos, fun.Sel.Pos(), err)
}
case *ast.Ident: // function call
pos := pass.Fset.Position(fun.Pos())
p.logger.Info("found function call as goroutine",
zap.String("functionName", fun.Name),
utils.ZapURI(pos.Filename, pos.Line),
zap.Int("column", pos.Column),
)
defPos, err := p.checkGoroutineDefinition(ctx, pos, pass)
if err != nil {
p.logLinterError(pass, defPos, fun.Pos(), err)
}
default:
p.logger.Error("unexpected goroutine type",
zap.String("type", fmt.Sprintf("%T", fun)),
)
}
}
func (p *Analyzer) parseFile(path string, pass *analysis.Pass) (*ast.File, error) {
logger := p.logger.With(zap.String("path", path))
src, err := os.ReadFile(path)
if err != nil {
logger.Error("failed to open file", zap.Error(err))
}
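// NOTE: if the read fails, src is nil and goparser.ParseFile falls back to reading the file from path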
file, err := goparser.ParseFile(pass.Fset, path, src, 0)
if err != nil {
logger.Error("failed to parse file", zap.Error(err))
return nil, err
}
return file, nil
}
func (p *Analyzer) checkGoroutine(body *ast.BlockStmt) error {
if body == nil {
p.logger.Warn("missing function body")
return nil
}
if len(body.List) == 0 {
// empty goroutine is weird, but it never panics, so not a linter error
return nil
}
deferStatement, ok := body.List[0].(*ast.DeferStmt)
if !ok {
return errors.New("first statement is not defer")
}
selectorExpr, ok := deferStatement.Call.Fun.(*ast.SelectorExpr)
if !ok {
return errors.New("first statement call is not a selector")
}
firstLineFunName := selectorExpr.Sel.Name
if firstLineFunName != Pattern {
return errors.Errorf("first statement is not %s", Pattern)
}
return nil
}
func (p *Analyzer) getFunctionBody(node ast.Node, lineNumber int, pass *analysis.Pass) (body *ast.BlockStmt, pos gotoken.Pos) {
ast.Inspect(node, func(n ast.Node) bool {
// Check if the node is a function declaration
funcDecl, ok := n.(*ast.FuncDecl)
if !ok {
return true
}
if pass.Fset.Position(n.Pos()).Line != lineNumber {
return true
}
body = funcDecl.Body
pos = n.Pos()
return false
})
return body, pos
}
func (p *Analyzer) checkGoroutineDefinition(ctx context.Context, pos gotoken.Position, pass *analysis.Pass) (gotoken.Pos, error) {
defFilePath, defLineNumber, err := p.lsp.Definition(ctx, pos.Filename, pos.Line, pos.Column)
if err != nil {
p.logger.Error("failed to find function definition", zap.Error(err))
return 0, err
}
file, err := p.parseFile(defFilePath, pass)
if err != nil {
p.logger.Error("failed to parse file", zap.Error(err))
return 0, err
}
body, defPosition := p.getFunctionBody(file, defLineNumber, pass)
return defPosition, p.checkGoroutine(body)
}
func (p *Analyzer) logLinterError(pass *analysis.Pass, errPos gotoken.Pos, callPos gotoken.Pos, err error) {
errPosition := pass.Fset.Position(errPos)
callPosition := pass.Fset.Position(callPos)
if p.skip(errPosition.Filename) || p.skip(callPosition.Filename) {
return
}
message := fmt.Sprintf("missing %s()", Pattern)
p.logger.Warn(message,
utils.ZapURI(errPosition.Filename, errPosition.Line),
zap.String("details", err.Error()))
if callPos == errPos {
pass.Reportf(errPos, "missing defer call to %s", Pattern)
} else {
pass.Reportf(callPos, "missing defer call to %s", Pattern)
}
}
func (p *Analyzer) skip(filepath string) bool {
return p.cfg.SkipDir != "" && strings.HasPrefix(filepath, p.cfg.SkipDir)
}


@ -0,0 +1,28 @@
package analyzer
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"golang.org/x/tools/go/analysis/analysistest"
"github.com/status-im/status-go/cmd/lint-panics/utils"
)
func TestMethods(t *testing.T) {
t.Parallel()
logger := utils.BuildLogger(zap.DebugLevel)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
a, err := New(ctx, logger)
require.NoError(t, err)
analysistest.Run(t, analysistest.TestData(), a, "functions")
}


@ -0,0 +1,60 @@
package analyzer
import (
"flag"
"io"
"os"
"path"
"strings"
)
type Config struct {
RootDir string
SkipDir string
}
var workdir string
func init() {
var err error
workdir, err = os.Getwd()
if err != nil {
panic(err)
}
}
func (c *Config) ParseFlags() (flag.FlagSet, error) {
flags := flag.NewFlagSet("lint-panics", flag.ContinueOnError)
flags.SetOutput(io.Discard) // Otherwise errors are printed to stderr
flags.StringVar(&c.RootDir, "root", workdir, "root directory to run gopls")
flags.StringVar(&c.SkipDir, "skip", "", "skip paths with this suffix")
// We parse the flags here to have `rootDir` before the call to `singlechecker.Main(analyzer)`.
// For the same reason we discard the output and ignore the undefined-flag and help-requested errors.
err := flags.Parse(os.Args[1:])
if err == nil {
return *flags, nil
}
if strings.Contains(err.Error(), "flag provided but not defined") {
err = nil
} else if strings.Contains(err.Error(), "help requested") {
err = nil
}
return *flags, err
}
func (c *Config) WithAbsolutePaths() *Config {
out := *c
if !path.IsAbs(out.RootDir) {
out.RootDir = path.Join(workdir, out.RootDir)
}
if out.SkipDir != "" && !path.IsAbs(out.SkipDir) {
out.SkipDir = path.Join(out.RootDir, out.SkipDir)
}
return &out
}
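
For illustration, a short sketch of how the two directories resolve (hypothetical paths; the analyzer import path is the one added in this commit):

package main

import (
	"fmt"

	"github.com/status-im/status-go/cmd/lint-panics/analyzer"
)

// With an absolute root, a relative -skip value is joined onto it.
func main() {
	cfg := analyzer.Config{RootDir: "/home/user/status-go", SkipDir: "./cmd"}
	resolved := cfg.WithAbsolutePaths()
	fmt.Println(resolved.RootDir) // /home/user/status-go
	fmt.Println(resolved.SkipDir) // /home/user/status-go/cmd
}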


@ -0,0 +1,5 @@
package common
func LogOnPanic() {
// do nothing
}


@ -0,0 +1,24 @@
package functions
import (
"common"
"fmt"
)
func init() {
go func() {
defer common.LogOnPanic()
}()
go func() {
}()
go func() { // want "missing defer call to LogOnPanic"
fmt.Println("anon")
}()
go func() { // want "missing defer call to LogOnPanic"
common.LogOnPanic()
}()
}


@ -0,0 +1,29 @@
package functions
import (
"common"
"fmt"
)
func init() {
go ok()
go empty()
go noLogOnPanic() // want "missing defer call to LogOnPanic"
go notDefer() // want "missing defer call to LogOnPanic"
}
func ok() {
defer common.LogOnPanic()
}
func empty() {
}
func noLogOnPanic() {
defer fmt.Println("Bar")
}
func notDefer() {
common.LogOnPanic()
}


@ -0,0 +1,33 @@
package functions
import (
"common"
"fmt"
)
type Test struct {
}
func init() {
t := Test{}
go t.ok()
go t.empty()
go t.noLogOnPanic() // want "missing defer call to LogOnPanic"
go t.notDefer() // want "missing defer call to LogOnPanic"
}
func (p *Test) ok() {
defer common.LogOnPanic()
}
func (p *Test) empty() {
}
func (p *Test) noLogOnPanic() {
defer fmt.Println("FooNoLogOnPanic")
}
func (p *Test) notDefer() {
common.LogOnPanic()
}


@ -0,0 +1,21 @@
package functions
import (
"common"
)
func init() {
runAsync(ok)
runAsyncOk(ok)
}
func runAsync(fn func()) {
go fn() // want "missing defer call to LogOnPanic"
}
func runAsyncOk(fn func()) {
go func() {
defer common.LogOnPanic()
fn()
}()
}


@ -0,0 +1,81 @@
package gopls
import (
"context"
"go.lsp.dev/protocol"
"go.uber.org/zap"
)
type DummyClient struct {
logger *zap.Logger
}
func NewDummyClient(logger *zap.Logger) *DummyClient {
if logger == nil {
logger = zap.NewNop()
}
return &DummyClient{
logger: logger,
}
}
func (d *DummyClient) Progress(ctx context.Context, params *protocol.ProgressParams) (err error) {
d.logger.Debug("client: Progress", zap.Any("params", params))
return
}
func (d *DummyClient) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) (err error) {
d.logger.Debug("client: WorkDoneProgressCreate")
return nil
}
func (d *DummyClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) (err error) {
d.logger.Debug("client: LogMessage", zap.Any("message", params))
return nil
}
func (d *DummyClient) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) (err error) {
d.logger.Debug("client: PublishDiagnostics")
return nil
}
func (d *DummyClient) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) (err error) {
d.logger.Debug("client: ShowMessage", zap.Any("message", params))
return nil
}
func (d *DummyClient) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (result *protocol.MessageActionItem, err error) {
d.logger.Debug("client: ShowMessageRequest", zap.Any("message", params))
return nil, nil
}
func (d *DummyClient) Telemetry(ctx context.Context, params interface{}) (err error) {
d.logger.Debug("client: Telemetry")
return nil
}
func (d *DummyClient) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) (err error) {
d.logger.Debug("client: RegisterCapability")
return nil
}
func (d *DummyClient) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) (err error) {
d.logger.Debug("client: UnregisterCapability")
return nil
}
func (d *DummyClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (result bool, err error) {
d.logger.Debug("client: ApplyEdit")
return false, nil
}
func (d *DummyClient) Configuration(ctx context.Context, params *protocol.ConfigurationParams) (result []interface{}, err error) {
d.logger.Debug("client: Configuration")
return nil, nil
}
func (d *DummyClient) WorkspaceFolders(ctx context.Context) (result []protocol.WorkspaceFolder, err error) {
d.logger.Debug("client: WorkspaceFolders")
return nil, nil
}


@ -0,0 +1,155 @@
package gopls
import (
"os/exec"
"github.com/pkg/errors"
"context"
"go.lsp.dev/jsonrpc2"
"go.lsp.dev/protocol"
"time"
"go.lsp.dev/uri"
"go.uber.org/zap"
)
type Connection struct {
logger *zap.Logger
server protocol.Server
cmd *exec.Cmd
conn jsonrpc2.Conn
}
func NewGoplsClient(ctx context.Context, logger *zap.Logger, rootDir string) *Connection {
var err error
logger.Debug("initializing gopls client")
gopls := &Connection{
logger: logger,
}
client := NewDummyClient(logger)
// Step 1: Create a JSON-RPC connection using stdin and stdout
gopls.cmd = exec.Command("gopls", "serve")
stdin, err := gopls.cmd.StdinPipe()
if err != nil {
logger.Error("Failed to get stdin pipe", zap.Error(err))
panic(err)
}
stdout, err := gopls.cmd.StdoutPipe()
if err != nil {
logger.Error("Failed to get stdout pipe", zap.Error(err))
panic(err)
}
err = gopls.cmd.Start()
if err != nil {
logger.Error("Failed to start gopls", zap.Error(err))
panic(err)
}
stream := jsonrpc2.NewStream(&IOStream{
stdin: stdin,
stdout: stdout,
})
// Step 2: Create a client for the running gopls server
ctx, gopls.conn, gopls.server = protocol.NewClient(ctx, client, stream, logger)
// Step 3: Initialize the gopls server
initParams := protocol.InitializeParams{
RootURI: uri.From("file", "", rootDir, "", ""),
InitializationOptions: map[string]interface{}{
"symbolMatcher": "FastFuzzy",
},
}
_, err = gopls.server.Initialize(ctx, &initParams)
if err != nil {
logger.Error("Error during initialize", zap.Error(err))
panic(err)
}
// Step 4: Send 'initialized' notification
err = gopls.server.Initialized(ctx, &protocol.InitializedParams{})
if err != nil {
logger.Error("Error during initialized", zap.Error(err))
panic(err)
}
return gopls
}
func (gopls *Connection) Definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) {
// NOTE: gopls uses 0-based line and column numbers
defFile, defLine, err := gopls.definition(ctx, filePath, lineNumber-1, charPosition-1)
return defFile, defLine + 1, err
}
func (gopls *Connection) definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) {
// Define the file URI and position where the function/method is invoked
fileURI := protocol.DocumentURI("file://" + filePath) // filePath is expected to be an absolute path
line := lineNumber // Line number where the function is called
character := charPosition // Character (column) where the function is called
// Send the definition request
params := &protocol.DefinitionParams{
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: fileURI,
},
Position: protocol.Position{
Line: uint32(line),
Character: uint32(character),
},
},
}
// Create context with a timeout to avoid hanging
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
locations, err := gopls.server.Definition(ctx, params)
if err != nil {
return "", 0, errors.Wrap(err, "failed to fetch definition")
}
if len(locations) == 0 {
return "", 0, errors.New("no definition found")
}
location := locations[0]
return location.URI.Filename(), int(location.Range.Start.Line), nil
}
func (gopls *Connection) DidOpen(ctx context.Context, path string, content string, logger *zap.Logger) {
err := gopls.server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{
TextDocument: protocol.TextDocumentItem{
URI: protocol.DocumentURI(path),
LanguageID: "go",
Version: 1,
Text: content,
},
})
if err != nil {
logger.Error("failed to call DidOpen", zap.Error(err))
}
}
func (gopls *Connection) DidClose(ctx context.Context, path string, logger *zap.Logger) {
err := gopls.server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentURI(path),
},
})
if err != nil {
logger.Error("failed to call DidClose", zap.Error(err))
}
}
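
A usage sketch of the wrapper's line-number convention (assumes a gopls binary on PATH; the file path and position are hypothetical):

package main

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	"github.com/status-im/status-go/cmd/lint-panics/gopls"
)

// Definition accepts and returns 1-based line numbers; the conversion to the
// 0-based positions used by the LSP protocol happens inside the wrapper.
func main() {
	ctx := context.Background()
	client := gopls.NewGoplsClient(ctx, zap.NewNop(), "/home/user/status-go")
	file, line, err := client.Definition(ctx, "/home/user/status-go/api/backend.go", 42, 7)
	fmt.Println(file, line, err)
}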


@ -0,0 +1,29 @@
package gopls
import "io"
// IOStream combines stdin and stdout into one interface.
type IOStream struct {
stdin io.WriteCloser
stdout io.ReadCloser
}
// Write writes data to stdin.
func (c *IOStream) Write(p []byte) (n int, err error) {
return c.stdin.Write(p)
}
// Read reads data from stdout.
func (c *IOStream) Read(p []byte) (n int, err error) {
return c.stdout.Read(p)
}
// Close closes both stdin and stdout.
func (c *IOStream) Close() error {
err1 := c.stdin.Close()
err2 := c.stdout.Close()
if err1 != nil {
return err1
}
return err2
}

cmd/lint-panics/main.go

@ -0,0 +1,35 @@
package main
import (
"context"
"os"
"time"
"go.uber.org/zap"
"golang.org/x/tools/go/analysis/singlechecker"
"github.com/status-im/status-go/cmd/lint-panics/analyzer"
"github.com/status-im/status-go/cmd/lint-panics/utils"
)
/*
Run with `-root=<directory>` to specify the root directory in which to run gopls. Defaults to the current working directory.
Set `-skip=<directory>` to skip errors in certain directories. If relative, it is relative to the root directory.
If provided, `-root` and `-skip` arguments MUST go first, before any other args.
*/
func main() {
logger := utils.BuildLogger(zap.ErrorLevel)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
a, err := analyzer.New(ctx, logger)
if err != nil {
logger.Error("failed to create analyzer", zap.Error(err))
os.Exit(1)
}
singlechecker.Main(a)
}


@ -0,0 +1,39 @@
package utils
import (
"strconv"
"fmt"
"os"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func URI(path string, line int) string {
return path + ":" + strconv.Itoa(line)
}
func ZapURI(path string, line int) zap.Field {
return zap.Field{
Type: zapcore.StringType,
Key: "uri",
String: URI(path, line),
}
}
func BuildLogger(level zapcore.Level) *zap.Logger {
// Initialize logger with colors
loggerConfig := zap.NewDevelopmentConfig()
loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
loggerConfig.Level = zap.NewAtomicLevelAt(level)
loggerConfig.Development = false
loggerConfig.DisableStacktrace = true
logger, err := loggerConfig.Build()
if err != nil {
fmt.Printf("failed to initialize logger: %s", err.Error())
os.Exit(1)
}
return logger.Named("main")
}


@ -81,7 +81,7 @@ func start(p StartParams, logger *zap.SugaredLogger) (*StatusCLI, error) {
}
waku := backend.StatusNode().WakuV2Service()
telemetryClient := telemetry.NewClient(telemetryLogger, p.TelemetryURL, backend.SelectedAccountKeyID(), p.Name, "cli", telemetry.WithPeerID(waku.PeerID().String()))
go telemetryClient.Start(context.Background())
telemetryClient.Start(context.Background())
backend.StatusNode().WakuV2Service().SetStatusTelemetryClient(telemetryClient)
}
wakuAPI := wakuv2ext.NewPublicAPI(wakuService)

go.mod

@ -99,6 +99,9 @@ require (
github.com/wk8/go-ordered-map/v2 v2.1.7
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1
go.lsp.dev/jsonrpc2 v0.10.0
go.lsp.dev/protocol v0.12.0
go.lsp.dev/uri v0.3.0
go.uber.org/mock v0.4.0
go.uber.org/multierr v1.11.0
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
@ -253,6 +256,8 @@ require (
github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect
github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/segmentio/asm v1.1.3 // indirect
github.com/segmentio/encoding v0.3.4 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
@ -275,6 +280,7 @@ require (
github.com/yeqown/reedsolomon v1.0.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/dig v1.18.0 // indirect
go.uber.org/fx v1.22.2 // indirect

go.sum

@ -1936,6 +1936,10 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc=
github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg=
github.com/segmentio/encoding v0.3.4 h1:WM4IBnxH8B9TakiM2QD5LyNl9JSndh88QbHqVC+Pauc=
github.com/segmentio/encoding v0.3.4/go.mod h1:n0JeuIqEQrQoPDGsjo8UNd1iA0U8d8+oHAA4E3G3OxM=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@ -2221,6 +2225,14 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI=
go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac=
go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE=
go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2/go.mod h1:gtSHRuYfbCT0qnbLnovpie/WEmqyJ7T4n6VXiFMBtcw=
go.lsp.dev/protocol v0.12.0 h1:tNprUI9klQW5FAFVM4Sa+AbPFuVQByWhP1ttNUAjIWg=
go.lsp.dev/protocol v0.12.0/go.mod h1:Qb11/HgZQ72qQbeyPfJbu3hZBH23s1sr4st8czGeDMQ=
go.lsp.dev/uri v0.3.0 h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo=
go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@ -2677,6 +2689,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=


@ -4,6 +4,7 @@ import (
"context"
"sync"
status_common "github.com/status-im/status-go/common"
"github.com/status-im/status-go/healthmanager/aggregator"
"github.com/status-im/status-go/healthmanager/rpcstatus"
)
@ -72,6 +73,7 @@ func (b *BlockchainHealthManager) RegisterProvidersHealthManager(ctx context.Con
statusCh := phm.Subscribe()
b.wg.Add(1)
go func(phm *ProvidersHealthManager, statusCh chan struct{}, providerCtx context.Context) {
defer status_common.LogOnPanic()
defer func() {
phm.Unsubscribe(statusCh)
b.wg.Done()


@ -11,6 +11,8 @@ import (
prom "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/status-im/status-go/common"
)
// Server runs and controls a HTTP pprof interface.
@ -55,5 +57,6 @@ func Handler(reg metrics.Registry) http.Handler {
// Listen starts the HTTP server in the background.
func (p *Server) Listen() {
defer common.LogOnPanic()
log.Info("metrics server stopped", "err", p.server.ListenAndServe())
}


@ -73,6 +73,7 @@ func (p *Publisher) Stop() {
}
func (p *Publisher) tickerLoop() {
defer gocommon.LogOnPanic()
ticker := time.NewTicker(tickerInterval * time.Second)
go func() {


@ -581,7 +581,7 @@ func NewMessenger(
if c.wakuService != nil {
c.wakuService.SetStatusTelemetryClient(telemetryClient)
}
go telemetryClient.Start(ctx)
telemetryClient.Start(ctx)
}
messenger = &Messenger{
@ -916,7 +916,7 @@ func (m *Messenger) Start() (*MessengerResponse, error) {
for _, c := range controlledCommunities {
if c.Joined() && c.HasTokenPermissions() {
go m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false)
m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false)
}
}


@ -500,6 +500,8 @@ func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint32
}
func (r *storeNodeRequest) routine() {
defer gocommon.LogOnPanic()
r.manager.logger.Info("starting store node request",
zap.Any("requestID", r.requestID),
zap.String("pubsubTopic", r.pubsubTopic),


@ -188,6 +188,7 @@ func (c *Client) Stop() {
}
func (c *Client) monitorHealth(ctx context.Context, statusCh chan struct{}) {
defer appCommon.LogOnPanic()
sendFullStatusEventFunc := func() {
blockchainStatus := c.healthMgr.GetFullStatus()
encodedMessage, err := json.Marshal(blockchainStatus)


@ -18,6 +18,7 @@ import (
signercore "github.com/ethereum/go-ethereum/signer/core/apitypes"
abi_spec "github.com/status-im/status-go/abi-spec"
"github.com/status-im/status-go/account"
status_common "github.com/status-im/status-go/common"
statusErrors "github.com/status-im/status-go/errors"
"github.com/status-im/status-go/eth-node/crypto"
"github.com/status-im/status-go/eth-node/types"
@ -779,6 +780,7 @@ func (api *API) BuildTransactionsFromRoute(ctx context.Context, buildInputParams
log.Debug("[WalletAPI::BuildTransactionsFromRoute] builds transactions from the generated best route", "uuid", buildInputParams.Uuid)
go func() {
defer status_common.LogOnPanic()
api.router.StopSuggestedRoutesAsyncCalculation()
var err error
@ -841,6 +843,7 @@ func (api *API) ProceedWithTransactionsSignatures(ctx context.Context, signature
func (api *API) SendRouterTransactionsWithSignatures(ctx context.Context, sendInputParams *requests.RouterSendTransactionsParams) {
log.Debug("[WalletAPI:: SendRouterTransactionsWithSignatures] sign with signatures and send")
go func() {
defer status_common.LogOnPanic()
var (
err error
@ -927,6 +930,7 @@ func (api *API) SendRouterTransactionsWithSignatures(ctx context.Context, sendIn
chainIDs = append(chainIDs, tx.FromChain)
addresses = append(addresses, common.Address(tx.FromAddress))
go func(chainId uint64, txHash common.Hash) {
defer status_common.LogOnPanic()
err = api.s.transactionManager.WatchTransaction(context.Background(), chainId, txHash)
if err != nil {
return


@ -8,6 +8,7 @@ import (
"github.com/jellydator/ttlcache/v3"
"github.com/ethereum/go-ethereum/log"
"github.com/status-im/status-go/common"
)
var (
@ -56,7 +57,10 @@ func (c *ttlCache[K, V]) init() {
c.cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[K, V]) {
log.Debug("Evicting item from balance/nonce cache", "reason", reason, "key", item.Key, "value", item.Value)
})
go c.cache.Start() // starts automatic expired item deletion
go func() { // starts automatic expired item deletion
defer common.LogOnPanic()
c.cache.Start()
}()
}
//nolint:golint,unused // linter does not detect using it via reflect


@ -4,6 +4,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/event"
"github.com/status-im/status-go/common"
"github.com/status-im/status-go/services/wallet/walletevent"
)
@ -20,6 +21,7 @@ func NewFeedSubscription(feed *event.Feed) *FeedSubscription {
subscription := feed.Subscribe(events)
go func() {
defer common.LogOnPanic()
<-done
subscription.Unsubscribe()
close(events)

vendor/github.com/segmentio/asm/LICENSE (generated, vendored)

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Segment
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/segmentio/asm/ascii/ascii.go (generated, vendored)

@ -0,0 +1,53 @@
package ascii
import _ "github.com/segmentio/asm/cpu"
// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord
const (
hasLessConstL64 = (^uint64(0)) / 255
hasLessConstR64 = hasLessConstL64 * 128
hasLessConstL32 = (^uint32(0)) / 255
hasLessConstR32 = hasLessConstL32 * 128
hasMoreConstL64 = (^uint64(0)) / 255
hasMoreConstR64 = hasMoreConstL64 * 128
hasMoreConstL32 = (^uint32(0)) / 255
hasMoreConstR32 = hasMoreConstL32 * 128
)
func hasLess64(x, n uint64) bool {
return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0
}
func hasLess32(x, n uint32) bool {
return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0
}
func hasMore64(x, n uint64) bool {
return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0
}
func hasMore32(x, n uint32) bool {
return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0
}
var lowerCase = [256]byte{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}

vendor/github.com/segmentio/asm/ascii/equal_fold.go (generated, vendored)

@ -0,0 +1,30 @@
package ascii
import (
"github.com/segmentio/asm/internal/unsafebytes"
)
// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
// instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFold(a, b []byte) bool {
return EqualFoldString(unsafebytes.String(a), unsafebytes.String(b))
}
func HasPrefixFold(s, prefix []byte) bool {
return len(s) >= len(prefix) && EqualFold(s[:len(prefix)], prefix)
}
func HasSuffixFold(s, suffix []byte) bool {
return len(s) >= len(suffix) && EqualFold(s[len(s)-len(suffix):], suffix)
}
func HasPrefixFoldString(s, prefix string) bool {
return len(s) >= len(prefix) && EqualFoldString(s[:len(prefix)], prefix)
}
func HasSuffixFoldString(s, suffix string) bool {
return len(s) >= len(suffix) && EqualFoldString(s[len(s)-len(suffix):], suffix)
}
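
A short usage sketch of the ASCII-only case-insensitive helpers (illustrative values):

package main

import (
	"fmt"

	"github.com/segmentio/asm/ascii"
)

// Both calls print true: the comparison folds ASCII letter case only.
func main() {
	fmt.Println(ascii.EqualFoldString("Content-Type", "content-type"))
	fmt.Println(ascii.HasPrefixFold([]byte("HELLO world"), []byte("hello")))
}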


@ -0,0 +1,13 @@
// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a string, b string) bool


@ -0,0 +1,304 @@
// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func EqualFoldString(a string, b string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·EqualFoldString(SB), NOSPLIT, $0-33
MOVQ a_base+0(FP), CX
MOVQ a_len+8(FP), DX
MOVQ b_base+16(FP), BX
CMPQ DX, b_len+24(FP)
JNE done
XORQ AX, AX
CMPQ DX, $0x10
JB init_x86
BTL $0x08, github·comsegmentioasmcpu·X86+0(SB)
JCS init_avx
init_x86:
LEAQ github·comsegmentioasmascii·lowerCase+0(SB), R9
XORL SI, SI
cmp8:
CMPQ DX, $0x08
JB cmp7
MOVBLZX (CX)(AX*1), DI
MOVBLZX (BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 1(CX)(AX*1), DI
MOVBLZX 1(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 2(CX)(AX*1), DI
MOVBLZX 2(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 3(CX)(AX*1), DI
MOVBLZX 3(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 4(CX)(AX*1), DI
MOVBLZX 4(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 5(CX)(AX*1), DI
MOVBLZX 5(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 6(CX)(AX*1), DI
MOVBLZX 6(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 7(CX)(AX*1), DI
MOVBLZX 7(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
JNE done
ADDQ $0x08, AX
SUBQ $0x08, DX
JMP cmp8
cmp7:
CMPQ DX, $0x07
JB cmp6
MOVBLZX 6(CX)(AX*1), DI
MOVBLZX 6(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp6:
CMPQ DX, $0x06
JB cmp5
MOVBLZX 5(CX)(AX*1), DI
MOVBLZX 5(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp5:
CMPQ DX, $0x05
JB cmp4
MOVBLZX 4(CX)(AX*1), DI
MOVBLZX 4(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp4:
CMPQ DX, $0x04
JB cmp3
MOVBLZX 3(CX)(AX*1), DI
MOVBLZX 3(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp3:
CMPQ DX, $0x03
JB cmp2
MOVBLZX 2(CX)(AX*1), DI
MOVBLZX 2(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp2:
CMPQ DX, $0x02
JB cmp1
MOVBLZX 1(CX)(AX*1), DI
MOVBLZX 1(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp1:
CMPQ DX, $0x01
JB success
MOVBLZX (CX)(AX*1), DI
MOVBLZX (BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
done:
SETEQ ret+32(FP)
RET
success:
MOVB $0x01, ret+32(FP)
RET
init_avx:
MOVB $0x20, SI
PINSRB $0x00, SI, X12
VPBROADCASTB X12, Y12
MOVB $0x1f, SI
PINSRB $0x00, SI, X13
VPBROADCASTB X13, Y13
MOVB $0x9a, SI
PINSRB $0x00, SI, X14
VPBROADCASTB X14, Y14
MOVB $0x01, SI
PINSRB $0x00, SI, X15
VPBROADCASTB X15, Y15
cmp128:
CMPQ DX, $0x80
JB cmp64
VMOVDQU (CX)(AX*1), Y0
VMOVDQU 32(CX)(AX*1), Y1
VMOVDQU 64(CX)(AX*1), Y2
VMOVDQU 96(CX)(AX*1), Y3
VMOVDQU (BX)(AX*1), Y4
VMOVDQU 32(BX)(AX*1), Y5
VMOVDQU 64(BX)(AX*1), Y6
VMOVDQU 96(BX)(AX*1), Y7
VXORPD Y0, Y4, Y4
VPCMPEQB Y12, Y4, Y8
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y8, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y4, Y0, Y0
VXORPD Y1, Y5, Y5
VPCMPEQB Y12, Y5, Y9
VORPD Y12, Y1, Y1
VPADDB Y13, Y1, Y1
VPCMPGTB Y1, Y14, Y1
VPAND Y9, Y1, Y1
VPAND Y15, Y1, Y1
VPSLLW $0x05, Y1, Y1
VPCMPEQB Y5, Y1, Y1
VXORPD Y2, Y6, Y6
VPCMPEQB Y12, Y6, Y10
VORPD Y12, Y2, Y2
VPADDB Y13, Y2, Y2
VPCMPGTB Y2, Y14, Y2
VPAND Y10, Y2, Y2
VPAND Y15, Y2, Y2
VPSLLW $0x05, Y2, Y2
VPCMPEQB Y6, Y2, Y2
VXORPD Y3, Y7, Y7
VPCMPEQB Y12, Y7, Y11
VORPD Y12, Y3, Y3
VPADDB Y13, Y3, Y3
VPCMPGTB Y3, Y14, Y3
VPAND Y11, Y3, Y3
VPAND Y15, Y3, Y3
VPSLLW $0x05, Y3, Y3
VPCMPEQB Y7, Y3, Y3
VPAND Y1, Y0, Y0
VPAND Y3, Y2, Y2
VPAND Y2, Y0, Y0
ADDQ $0x80, AX
SUBQ $0x80, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
JMP cmp128
cmp64:
CMPQ DX, $0x40
JB cmp32
VMOVDQU (CX)(AX*1), Y0
VMOVDQU 32(CX)(AX*1), Y1
VMOVDQU (BX)(AX*1), Y2
VMOVDQU 32(BX)(AX*1), Y3
VXORPD Y0, Y2, Y2
VPCMPEQB Y12, Y2, Y4
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y4, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y2, Y0, Y0
VXORPD Y1, Y3, Y3
VPCMPEQB Y12, Y3, Y5
VORPD Y12, Y1, Y1
VPADDB Y13, Y1, Y1
VPCMPGTB Y1, Y14, Y1
VPAND Y5, Y1, Y1
VPAND Y15, Y1, Y1
VPSLLW $0x05, Y1, Y1
VPCMPEQB Y3, Y1, Y1
VPAND Y1, Y0, Y0
ADDQ $0x40, AX
SUBQ $0x40, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
cmp32:
CMPQ DX, $0x20
JB cmp16
VMOVDQU (CX)(AX*1), Y0
VMOVDQU (BX)(AX*1), Y1
VXORPD Y0, Y1, Y1
VPCMPEQB Y12, Y1, Y2
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y2, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y1, Y0, Y0
ADDQ $0x20, AX
SUBQ $0x20, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
cmp16:
CMPQ DX, $0x10
JLE cmp_tail
VMOVDQU (CX)(AX*1), X0
VMOVDQU (BX)(AX*1), X1
VXORPD X0, X1, X1
VPCMPEQB X12, X1, X2
VORPD X12, X0, X0
VPADDB X13, X0, X0
VPCMPGTB X0, X14, X0
VPAND X2, X0, X0
VPAND X15, X0, X0
VPSLLW $0x05, X0, X0
VPCMPEQB X1, X0, X0
ADDQ $0x10, AX
SUBQ $0x10, DX
VPMOVMSKB X0, SI
XORL $0x0000ffff, SI
JNE done
cmp_tail:
SUBQ $0x10, DX
ADDQ DX, AX
VMOVDQU (CX)(AX*1), X0
VMOVDQU (BX)(AX*1), X1
VXORPD X0, X1, X1
VPCMPEQB X12, X1, X2
VORPD X12, X0, X0
VPADDB X13, X0, X0
VPCMPGTB X0, X14, X0
VPAND X2, X0, X0
VPAND X15, X0, X0
VPSLLW $0x05, X0, X0
VPCMPEQB X1, X0, X0
VPMOVMSKB X0, AX
XORL $0x0000ffff, AX
JMP done


@ -0,0 +1,60 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a, b string) bool {
if len(a) != len(b) {
return false
}
var cmp byte
for len(a) >= 8 {
cmp |= lowerCase[a[0]] ^ lowerCase[b[0]]
cmp |= lowerCase[a[1]] ^ lowerCase[b[1]]
cmp |= lowerCase[a[2]] ^ lowerCase[b[2]]
cmp |= lowerCase[a[3]] ^ lowerCase[b[3]]
cmp |= lowerCase[a[4]] ^ lowerCase[b[4]]
cmp |= lowerCase[a[5]] ^ lowerCase[b[5]]
cmp |= lowerCase[a[6]] ^ lowerCase[b[6]]
cmp |= lowerCase[a[7]] ^ lowerCase[b[7]]
if cmp != 0 {
return false
}
a = a[8:]
b = b[8:]
}
switch len(a) {
case 7:
cmp |= lowerCase[a[6]] ^ lowerCase[b[6]]
fallthrough
case 6:
cmp |= lowerCase[a[5]] ^ lowerCase[b[5]]
fallthrough
case 5:
cmp |= lowerCase[a[4]] ^ lowerCase[b[4]]
fallthrough
case 4:
cmp |= lowerCase[a[3]] ^ lowerCase[b[3]]
fallthrough
case 3:
cmp |= lowerCase[a[2]] ^ lowerCase[b[2]]
fallthrough
case 2:
cmp |= lowerCase[a[1]] ^ lowerCase[b[1]]
fallthrough
case 1:
cmp |= lowerCase[a[0]] ^ lowerCase[b[0]]
}
return cmp == 0
}

vendor/github.com/segmentio/asm/ascii/valid.go (generated, vendored)

@ -0,0 +1,18 @@
package ascii
import "github.com/segmentio/asm/internal/unsafebytes"
// Valid returns true if b contains only ASCII characters.
func Valid(b []byte) bool {
return ValidString(unsafebytes.String(b))
}
// ValidByte returns true if b is an ASCII character.
func ValidByte(b byte) bool {
return b <= 0x7f
}
// ValidRune returns true if r is an ASCII character.
func ValidRune(r rune) bool {
return r <= 0x7f
}

vendor/github.com/segmentio/asm/ascii/valid_amd64.go (generated, vendored)

@ -0,0 +1,9 @@
// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool

vendor/github.com/segmentio/asm/ascii/valid_amd64.s (generated, vendored)

@ -0,0 +1,132 @@
// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func ValidString(s string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·ValidString(SB), NOSPLIT, $0-17
MOVQ s_base+0(FP), AX
MOVQ s_len+8(FP), CX
MOVQ $0x8080808080808080, DX
CMPQ CX, $0x10
JB cmp8
BTL $0x08, github·comsegmentioasmcpu·X86+0(SB)
JCS init_avx
cmp8:
CMPQ CX, $0x08
JB cmp4
TESTQ DX, (AX)
JNZ invalid
ADDQ $0x08, AX
SUBQ $0x08, CX
JMP cmp8
cmp4:
CMPQ CX, $0x04
JB cmp3
TESTL $0x80808080, (AX)
JNZ invalid
ADDQ $0x04, AX
SUBQ $0x04, CX
cmp3:
CMPQ CX, $0x03
JB cmp2
MOVWLZX (AX), CX
MOVBLZX 2(AX), AX
SHLL $0x10, AX
ORL CX, AX
TESTL $0x80808080, AX
JMP done
cmp2:
CMPQ CX, $0x02
JB cmp1
TESTW $0x8080, (AX)
JMP done
cmp1:
CMPQ CX, $0x00
JE done
TESTB $0x80, (AX)
done:
SETEQ ret+16(FP)
RET
invalid:
MOVB $0x00, ret+16(FP)
RET
init_avx:
PINSRQ $0x00, DX, X4
VPBROADCASTQ X4, Y4
cmp256:
CMPQ CX, $0x00000100
JB cmp128
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VMOVDQU 64(AX), Y1
VPOR 96(AX), Y1, Y1
VMOVDQU 128(AX), Y2
VPOR 160(AX), Y2, Y2
VMOVDQU 192(AX), Y3
VPOR 224(AX), Y3, Y3
VPOR Y1, Y0, Y0
VPOR Y3, Y2, Y2
VPOR Y2, Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x00000100, AX
SUBQ $0x00000100, CX
JMP cmp256
cmp128:
CMPQ CX, $0x80
JB cmp64
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VMOVDQU 64(AX), Y1
VPOR 96(AX), Y1, Y1
VPOR Y1, Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x80, AX
SUBQ $0x80, CX
cmp64:
CMPQ CX, $0x40
JB cmp32
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x40, AX
SUBQ $0x40, CX
cmp32:
CMPQ CX, $0x20
JB cmp16
VPTEST (AX), Y4
JNZ invalid
ADDQ $0x20, AX
SUBQ $0x20, CX
cmp16:
CMPQ CX, $0x10
JLE cmp_tail
VPTEST (AX), X4
JNZ invalid
ADDQ $0x10, AX
SUBQ $0x10, CX
cmp_tail:
SUBQ $0x10, CX
ADDQ CX, AX
VPTEST (AX), X4
JMP done

vendor/github.com/segmentio/asm/ascii/valid_default.go (generated, vendored)

@ -0,0 +1,48 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
import (
"unsafe"
)
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool {
p := *(*unsafe.Pointer)(unsafe.Pointer(&s))
i := uintptr(0)
n := uintptr(len(s))
for i+8 <= n {
if (*(*uint64)(unsafe.Pointer(uintptr(p) + i)) & 0x8080808080808080) != 0 {
return false
}
i += 8
}
if i+4 <= n {
if (*(*uint32)(unsafe.Pointer(uintptr(p) + i)) & 0x80808080) != 0 {
return false
}
i += 4
}
if i == n {
return true
}
p = unsafe.Pointer(uintptr(p) + i)
var x uint32
switch n - i {
case 3:
x = uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16
case 2:
x = uint32(*(*uint16)(p))
case 1:
x = uint32(*(*uint8)(p))
default:
return true
}
return (x & 0x80808080) == 0
}

vendor/github.com/segmentio/asm/ascii/valid_print.go (generated, vendored)

@ -0,0 +1,18 @@
package ascii
import "github.com/segmentio/asm/internal/unsafebytes"
// ValidPrint returns true if b contains only printable ASCII characters.
func ValidPrint(b []byte) bool {
return ValidPrintString(unsafebytes.String(b))
}
// ValidPrintByte returns true if b is a printable ASCII character.
func ValidPrintByte(b byte) bool {
return 0x20 <= b && b <= 0x7e
}
// ValidPrintRune returns true if r is a printable ASCII character.
func ValidPrintRune(r rune) bool {
return 0x20 <= r && r <= 0x7e
}


@ -0,0 +1,9 @@
// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// ValidPrintString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool


@ -0,0 +1,185 @@
// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func ValidPrintString(s string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·ValidPrintString(SB), NOSPLIT, $0-17
MOVQ s_base+0(FP), AX
MOVQ s_len+8(FP), CX
CMPQ CX, $0x10
JB init_x86
BTL $0x08, github·comsegmentioasmcpu·X86+0(SB)
JCS init_avx
init_x86:
CMPQ CX, $0x08
JB cmp4
MOVQ $0xdfdfdfdfdfdfdfe0, DX
MOVQ $0x0101010101010101, BX
MOVQ $0x8080808080808080, SI
cmp8:
MOVQ (AX), DI
MOVQ DI, R8
LEAQ (DI)(DX*1), R9
NOTQ R8
ANDQ R8, R9
LEAQ (DI)(BX*1), R8
ORQ R8, DI
ORQ R9, DI
ADDQ $0x08, AX
SUBQ $0x08, CX
TESTQ SI, DI
JNE done
CMPQ CX, $0x08
JB cmp4
JMP cmp8
cmp4:
CMPQ CX, $0x04
JB cmp3
MOVL (AX), DX
MOVL DX, BX
LEAL 3755991008(DX), SI
NOTL BX
ANDL BX, SI
LEAL 16843009(DX), BX
ORL BX, DX
ORL SI, DX
ADDQ $0x04, AX
SUBQ $0x04, CX
TESTL $0x80808080, DX
JNE done
cmp3:
CMPQ CX, $0x03
JB cmp2
MOVWLZX (AX), DX
MOVBLZX 2(AX), AX
SHLL $0x10, AX
ORL DX, AX
ORL $0x20000000, AX
JMP final
cmp2:
CMPQ CX, $0x02
JB cmp1
MOVWLZX (AX), AX
ORL $0x20200000, AX
JMP final
cmp1:
CMPQ CX, $0x00
JE done
MOVBLZX (AX), AX
ORL $0x20202000, AX
final:
MOVL AX, CX
LEAL 3755991008(AX), DX
NOTL CX
ANDL CX, DX
LEAL 16843009(AX), CX
ORL CX, AX
ORL DX, AX
TESTL $0x80808080, AX
done:
SETEQ ret+16(FP)
RET
init_avx:
MOVB $0x1f, DL
PINSRB $0x00, DX, X8
VPBROADCASTB X8, Y8
MOVB $0x7e, DL
PINSRB $0x00, DX, X9
VPBROADCASTB X9, Y9
cmp128:
CMPQ CX, $0x80
JB cmp64
VMOVDQU (AX), Y0
VMOVDQU 32(AX), Y1
VMOVDQU 64(AX), Y2
VMOVDQU 96(AX), Y3
VPCMPGTB Y8, Y0, Y4
VPCMPGTB Y9, Y0, Y0
VPANDN Y4, Y0, Y0
VPCMPGTB Y8, Y1, Y5
VPCMPGTB Y9, Y1, Y1
VPANDN Y5, Y1, Y1
VPCMPGTB Y8, Y2, Y6
VPCMPGTB Y9, Y2, Y2
VPANDN Y6, Y2, Y2
VPCMPGTB Y8, Y3, Y7
VPCMPGTB Y9, Y3, Y3
VPANDN Y7, Y3, Y3
VPAND Y1, Y0, Y0
VPAND Y3, Y2, Y2
VPAND Y2, Y0, Y0
ADDQ $0x80, AX
SUBQ $0x80, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
JMP cmp128
cmp64:
CMPQ CX, $0x40
JB cmp32
VMOVDQU (AX), Y0
VMOVDQU 32(AX), Y1
VPCMPGTB Y8, Y0, Y2
VPCMPGTB Y9, Y0, Y0
VPANDN Y2, Y0, Y0
VPCMPGTB Y8, Y1, Y3
VPCMPGTB Y9, Y1, Y1
VPANDN Y3, Y1, Y1
VPAND Y1, Y0, Y0
ADDQ $0x40, AX
SUBQ $0x40, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
cmp32:
CMPQ CX, $0x20
JB cmp16
VMOVDQU (AX), Y0
VPCMPGTB Y8, Y0, Y1
VPCMPGTB Y9, Y0, Y0
VPANDN Y1, Y0, Y0
ADDQ $0x20, AX
SUBQ $0x20, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
cmp16:
CMPQ CX, $0x10
JLE cmp_tail
VMOVDQU (AX), X0
VPCMPGTB X8, X0, X1
VPCMPGTB X9, X0, X0
VPANDN X1, X0, X0
ADDQ $0x10, AX
SUBQ $0x10, CX
VPMOVMSKB X0, DX
XORL $0x0000ffff, DX
JNE done
cmp_tail:
SUBQ $0x10, CX
ADDQ CX, AX
VMOVDQU (AX), X0
VPCMPGTB X8, X0, X1
VPCMPGTB X9, X0, X0
VPANDN X1, X0, X0
VPMOVMSKB X0, DX
XORL $0x0000ffff, DX
JMP done


@ -0,0 +1,46 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
import "unsafe"
// ValidPrintString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool {
p := *(*unsafe.Pointer)(unsafe.Pointer(&s))
i := uintptr(0)
n := uintptr(len(s))
for i+8 <= n {
if hasLess64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x7e) {
return false
}
i += 8
}
if i+4 <= n {
if hasLess32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x7e) {
return false
}
i += 4
}
if i == n {
return true
}
p = unsafe.Pointer(uintptr(p) + i)
var x uint32
switch n - i {
case 3:
x = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16
case 2:
x = 0x20200000 | uint32(*(*uint16)(p))
case 1:
x = 0x20202000 | uint32(*(*uint8)(p))
default:
return true
}
return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e))
}

vendor/github.com/segmentio/asm/base64/base64.go (generated, vendored)

@ -0,0 +1,67 @@
package base64
import (
"encoding/base64"
)
const (
StdPadding rune = base64.StdPadding
NoPadding rune = base64.NoPadding
encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"
letterRange = int8('Z' - 'A' + 1)
)
// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
var StdEncoding = NewEncoding(encodeStd)
// URLEncoding is the alternate base64 encoding defined in RFC 4648.
// It is typically used in URLs and file names.
var URLEncoding = NewEncoding(encodeURL)
// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2.
// This is the same as StdEncoding but omits padding characters.
var RawStdEncoding = StdEncoding.WithPadding(NoPadding)
// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
// This is the same as URLEncoding but omits padding characters.
var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
// NewEncoding returns a new padded Encoding defined by the given alphabet,
// which must be a 64-byte string that does not contain the padding character
// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet
// cannot be arbitrary, and it must follow one of the known standard encoding
// variants.
//
// Required alphabet values:
// * [0,26): characters 'A'..'Z'
// * [26,52): characters 'a'..'z'
// * [52,62): characters '0'..'9'
// Flexible alphabet value options:
// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/'
// * RFC 4648 URI: '-' and '_'
// * RFC 3501: '+' and ','
//
// The resulting Encoding uses the default padding character ('='), which may
// be changed or disabled via WithPadding. The padding character is unrestricted,
// but it must be a character outside of the encoder alphabet.
func NewEncoding(encoder string) *Encoding {
if len(encoder) != 64 {
panic("encoding alphabet is not 64-bytes long")
}
if _, ok := allowedEncoding[encoder]; !ok {
panic("non-standard encoding alphabets are not supported")
}
return newEncoding(encoder)
}
var allowedEncoding = map[string]struct{}{
encodeStd: {},
encodeURL: {},
encodeIMAP: {},
}
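
A brief usage sketch of the vendored encodings, which mirror encoding/base64 for the standard alphabets (illustrative input):

package main

import (
	"fmt"

	"github.com/segmentio/asm/base64"
)

// Both lines print "c3RhdHVzLWdv": the 9-byte input needs no padding, so the
// padded standard and raw URL-safe encodings agree on this value.
func main() {
	fmt.Println(base64.StdEncoding.EncodeToString([]byte("status-go")))
	fmt.Println(base64.RawURLEncoding.EncodeToString([]byte("status-go")))
}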

vendor/github.com/segmentio/asm/base64/base64_amd64.go (generated, vendored)

@ -0,0 +1,160 @@
//go:build amd64 && !purego
// +build amd64,!purego
package base64
import (
"encoding/base64"
"github.com/segmentio/asm/cpu"
"github.com/segmentio/asm/cpu/x86"
"github.com/segmentio/asm/internal/unsafebytes"
)
// An Encoding is a radix 64 encoding/decoding scheme, defined by a
// 64-character alphabet.
type Encoding struct {
enc func(dst []byte, src []byte, lut *int8) (int, int)
enclut [32]int8
dec func(dst []byte, src []byte, lut *int8) (int, int)
declut [48]int8
base *base64.Encoding
}
const (
minEncodeLen = 28
minDecodeLen = 45
)
func newEncoding(encoder string) *Encoding {
e := &Encoding{base: base64.NewEncoding(encoder)}
if cpu.X86.Has(x86.AVX2) {
e.enableEncodeAVX2(encoder)
e.enableDecodeAVX2(encoder)
}
return e
}
func (e *Encoding) enableEncodeAVX2(encoder string) {
// Translate values 0..63 to the Base64 alphabet. There are five sets:
//
// From To Add Index Example
// [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ
// [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz
// [52..61] [48..57] -4 [2..11] 0123456789
// [62] [43] -19 12 +
// [63] [47] -16 13 /
tab := [32]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange}
for i, ch := range encoder[2*letterRange:] {
tab[2+i] = int8(ch) - 2*letterRange - int8(i)
}
e.enc = encodeAVX2
e.enclut = tab
}
func (e *Encoding) enableDecodeAVX2(encoder string) {
c62, c63 := int8(encoder[62]), int8(encoder[63])
url := c63 == '_'
if url {
c63 = '/'
}
// Translate values from the Base64 alphabet using five sets. Values outside
// of these ranges are considered invalid:
//
// From To Add Index Example
// [47] [63] +16 1 /
// [43] [62] +19 2 +
// [48..57] [52..61] +4 3 0123456789
// [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ
// [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz
tab := [48]int8{
0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B,
}
tab[(c62&15)+16] = 0x1A
tab[(c63&15)+16] = 0x1A
if url {
e.dec = decodeAVX2URI
} else {
e.dec = decodeAVX2
}
e.declut = tab
}
// WithPadding creates a duplicate Encoding updated with a specified padding
// character, or NoPadding to disable padding. The padding character must not
// be contained in the encoding alphabet, must not be '\r' or '\n', and must
// be no greater than '\xFF'.
func (enc Encoding) WithPadding(padding rune) *Encoding {
enc.base = enc.base.WithPadding(padding)
return &enc
}
// Strict creates a duplicate encoding updated with strict decoding enabled.
// This requires that trailing padding bits are zero.
func (enc Encoding) Strict() *Encoding {
enc.base = enc.base.Strict()
return &enc
}
// Encode encodes src using the defined encoding alphabet.
// This will write EncodedLen(len(src)) bytes to dst.
func (enc *Encoding) Encode(dst, src []byte) {
if len(src) >= minEncodeLen && enc.enc != nil {
d, s := enc.enc(dst, src, &enc.enclut[0])
dst = dst[d:]
src = src[s:]
}
enc.base.Encode(dst, src)
}
// EncodeToString returns the base64 encoding of src.
func (enc *Encoding) EncodeToString(src []byte) string {
buf := make([]byte, enc.base.EncodedLen(len(src)))
enc.Encode(buf, src)
return string(buf)
}
// EncodedLen calculates the base64-encoded byte length for a message
// of length n.
func (enc *Encoding) EncodedLen(n int) int {
return enc.base.EncodedLen(n)
}
// Decode decodes src using the defined encoding alphabet.
// This writes at most DecodedLen(len(src)) bytes to dst and returns the number
// of bytes written.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
var d, s int
if len(src) >= minDecodeLen && enc.dec != nil {
d, s = enc.dec(dst, src, &enc.declut[0])
dst = dst[d:]
src = src[s:]
}
n, err = enc.base.Decode(dst, src)
n += d
return
}
// DecodeString decodes the base64-encoded string s and returns the decoded
// bytes.
func (enc *Encoding) DecodeString(s string) ([]byte, error) {
src := unsafebytes.BytesOf(s)
dst := make([]byte, enc.base.DecodedLen(len(s)))
n, err := enc.Decode(dst, src)
return dst[:n], err
}
// DecodedLen calculates the decoded byte length for a base64-encoded message
// of length n.
func (enc *Encoding) DecodedLen(n int) int {
return enc.base.DecodedLen(n)
}


@ -0,0 +1,14 @@
//go:build purego || !amd64
// +build purego !amd64
package base64
import "encoding/base64"
// An Encoding is a radix 64 encoding/decoding scheme, defined by a
// 64-character alphabet.
type Encoding = base64.Encoding
func newEncoding(encoder string) *Encoding {
return base64.NewEncoding(encoder)
}

vendor/github.com/segmentio/asm/base64/decode_amd64.go (generated, vendored)

@ -0,0 +1,10 @@
// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package base64
func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)

vendor/github.com/segmentio/asm/base64/decode_amd64.s (generated, vendored)

@ -0,0 +1,144 @@
// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010
DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010
DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010
DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010
GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32
DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140
GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32
DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000
GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32
DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000
DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000
GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16
DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102
DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08
DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405
DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000
GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32
// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·decodeAVX2(SB), NOSPLIT, $0-72
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x2f, CL
PINSRB $0x00, CX, X8
VPBROADCASTB X8, Y8
XORQ CX, CX
XORQ BX, BX
VPXOR Y7, Y7, Y7
VPERMQ $0x44, (SI), Y6
VPERMQ $0x44, 16(SI), Y4
VMOVDQA b64_dec_lut_hi<>+0(SB), Y5
loop:
VMOVDQU (DX)(BX*1), Y0
VPSRLD $0x04, Y0, Y2
VPAND Y8, Y0, Y3
VPSHUFB Y3, Y4, Y3
VPAND Y8, Y2, Y2
VPSHUFB Y2, Y5, Y9
VPTEST Y9, Y3
JNE done
VPCMPEQB Y8, Y0, Y3
VPADDB Y3, Y2, Y2
VPSHUFB Y2, Y6, Y2
VPADDB Y0, Y2, Y0
VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0
VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0
VEXTRACTI128 $0x01, Y0, X1
VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1
VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0
VPBLENDD $0x08, Y1, Y0, Y1
VPBLENDD $0xc0, Y7, Y1, Y1
VMOVDQU Y1, (AX)(CX*1)
ADDQ $0x18, CX
ADDQ $0x20, BX
SUBQ $0x20, DI
CMPQ DI, $0x2d
JB done
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET
// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72
MOVB $0x2f, AL
PINSRB $0x00, AX, X0
VPBROADCASTB X0, Y0
MOVB $0x5f, AL
PINSRB $0x00, AX, X1
VPBROADCASTB X1, Y1
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x2f, CL
PINSRB $0x00, CX, X10
VPBROADCASTB X10, Y10
XORQ CX, CX
XORQ BX, BX
VPXOR Y9, Y9, Y9
VPERMQ $0x44, (SI), Y8
VPERMQ $0x44, 16(SI), Y6
VMOVDQA b64_dec_lut_hi<>+0(SB), Y7
loop:
VMOVDQU (DX)(BX*1), Y2
VPCMPEQB Y2, Y1, Y4
VPBLENDVB Y4, Y0, Y2, Y2
VPSRLD $0x04, Y2, Y4
VPAND Y10, Y2, Y5
VPSHUFB Y5, Y6, Y5
VPAND Y10, Y4, Y4
VPSHUFB Y4, Y7, Y11
VPTEST Y11, Y5
JNE done
VPCMPEQB Y10, Y2, Y5
VPADDB Y5, Y4, Y4
VPSHUFB Y4, Y8, Y4
VPADDB Y2, Y4, Y2
VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2
VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2
VEXTRACTI128 $0x01, Y2, X3
VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3
VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2
VPBLENDD $0x08, Y3, Y2, Y3
VPBLENDD $0xc0, Y9, Y3, Y3
VMOVDQU Y3, (AX)(CX*1)
ADDQ $0x18, CX
ADDQ $0x20, BX
SUBQ $0x20, DI
CMPQ DI, $0x2d
JB done
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET

View File

@ -0,0 +1,8 @@
// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package base64
func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)

88
vendor/github.com/segmentio/asm/base64/encode_amd64.s generated vendored Normal file
View File

@ -0,0 +1,88 @@
// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·encodeAVX2(SB), NOSPLIT, $0-72
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x33, CL
PINSRB $0x00, CX, X4
VPBROADCASTB X4, Y4
MOVB $0x19, CL
PINSRB $0x00, CX, X5
VPBROADCASTB X5, Y5
XORQ CX, CX
XORQ BX, BX
// Load the 16-byte LUT into both lanes of the register
VPERMQ $0x44, (SI), Y3
// Load the first block using a mask to avoid potential fault
VMOVDQU b64_enc_load<>+0(SB), Y0
VPMASKMOVD -4(DX)(BX*1), Y0, Y0
loop:
VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0
VPAND b64_enc_mask1<>+0(SB), Y0, Y1
VPSLLW $0x08, Y1, Y2
VPSLLW $0x04, Y1, Y1
VPBLENDW $0xaa, Y2, Y1, Y2
VPAND b64_enc_mask2<>+0(SB), Y0, Y1
VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0
VPOR Y0, Y2, Y0
VPSUBUSB Y4, Y0, Y1
VPCMPGTB Y5, Y0, Y2
VPSUBB Y2, Y1, Y1
VPSHUFB Y1, Y3, Y1
VPADDB Y0, Y1, Y0
VMOVDQU Y0, (AX)(CX*1)
ADDQ $0x20, CX
ADDQ $0x18, BX
SUBQ $0x18, DI
CMPQ DI, $0x20
JB done
VMOVDQU -4(DX)(BX*1), Y0
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET
DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000
DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000
DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000
DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000
GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32
DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405
DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b
DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001
DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607
GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0
GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00
GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040
GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32

80
vendor/github.com/segmentio/asm/cpu/arm/arm.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package arm
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
SWP Feature = 1 << iota // SWP instruction support
HALF // Half-word load and store support
THUMB // ARM Thumb instruction set
BIT26 // Address space limited to 26-bits
FASTMUL // 32-bit operand, 64-bit result multiplication support
FPA // Floating point arithmetic support
VFP // Vector floating point support
EDSP // DSP Extensions support
JAVA // Java instruction set
IWMMXT // Intel Wireless MMX technology support
CRUNCH // MaverickCrunch context switching and handling
THUMBEE // Thumb EE instruction set
NEON // NEON instruction set
VFPv3 // Vector floating point version 3 support
VFPv3D16 // Vector floating point version 3 D8-D15
TLS // Thread local storage support
VFPv4 // Vector floating point version 4 support
IDIVA // Integer divide instruction support in ARM mode
IDIVT // Integer divide instruction support in Thumb mode
VFPD32 // Vector floating point version 3 D15-D31
LPAE // Large Physical Address Extensions
EVTSTRM // Event stream support
AES // AES hardware implementation
PMULL // Polynomial multiplication instruction set
SHA1 // SHA1 hardware implementation
SHA2 // SHA2 hardware implementation
CRC32 // CRC32 hardware implementation
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(SWP, ARM.HasSWP)
cpu.set(HALF, ARM.HasHALF)
cpu.set(THUMB, ARM.HasTHUMB)
cpu.set(BIT26, ARM.Has26BIT)
cpu.set(FASTMUL, ARM.HasFASTMUL)
cpu.set(FPA, ARM.HasFPA)
cpu.set(VFP, ARM.HasVFP)
cpu.set(EDSP, ARM.HasEDSP)
cpu.set(JAVA, ARM.HasJAVA)
cpu.set(IWMMXT, ARM.HasIWMMXT)
cpu.set(CRUNCH, ARM.HasCRUNCH)
cpu.set(THUMBEE, ARM.HasTHUMBEE)
cpu.set(NEON, ARM.HasNEON)
cpu.set(VFPv3, ARM.HasVFPv3)
cpu.set(VFPv3D16, ARM.HasVFPv3D16)
cpu.set(TLS, ARM.HasTLS)
cpu.set(VFPv4, ARM.HasVFPv4)
cpu.set(IDIVA, ARM.HasIDIVA)
cpu.set(IDIVT, ARM.HasIDIVT)
cpu.set(VFPD32, ARM.HasVFPD32)
cpu.set(LPAE, ARM.HasLPAE)
cpu.set(EVTSTRM, ARM.HasEVTSTRM)
cpu.set(AES, ARM.HasAES)
cpu.set(PMULL, ARM.HasPMULL)
cpu.set(SHA1, ARM.HasSHA1)
cpu.set(SHA2, ARM.HasSHA2)
cpu.set(CRC32, ARM.HasCRC32)
return cpu
}

74
vendor/github.com/segmentio/asm/cpu/arm64/arm64.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package arm64
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
FP Feature = 1 << iota // Floating-point instruction set (always available)
ASIMD // Advanced SIMD (always available)
EVTSTRM // Event stream support
AES // AES hardware implementation
PMULL // Polynomial multiplication instruction set
SHA1 // SHA1 hardware implementation
SHA2 // SHA2 hardware implementation
CRC32 // CRC32 hardware implementation
ATOMICS // Atomic memory operation instruction set
FPHP // Half precision floating-point instruction set
ASIMDHP // Advanced SIMD half precision instruction set
CPUID // CPUID identification scheme registers
ASIMDRDM // Rounding double multiply add/subtract instruction set
JSCVT // Javascript conversion from floating-point to integer
FCMA // Floating-point multiplication and addition of complex numbers
LRCPC // Release Consistent processor consistent support
DCPOP // Persistent memory support
SHA3 // SHA3 hardware implementation
SM3 // SM3 hardware implementation
SM4 // SM4 hardware implementation
ASIMDDP // Advanced SIMD double precision instruction set
SHA512 // SHA512 hardware implementation
SVE // Scalable Vector Extensions
ASIMDFHM // Advanced SIMD multiplication FP16 to FP32
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(FP, ARM64.HasFP)
cpu.set(ASIMD, ARM64.HasASIMD)
cpu.set(EVTSTRM, ARM64.HasEVTSTRM)
cpu.set(AES, ARM64.HasAES)
cpu.set(PMULL, ARM64.HasPMULL)
cpu.set(SHA1, ARM64.HasSHA1)
cpu.set(SHA2, ARM64.HasSHA2)
cpu.set(CRC32, ARM64.HasCRC32)
cpu.set(ATOMICS, ARM64.HasATOMICS)
cpu.set(FPHP, ARM64.HasFPHP)
cpu.set(ASIMDHP, ARM64.HasASIMDHP)
cpu.set(CPUID, ARM64.HasCPUID)
cpu.set(ASIMDRDM, ARM64.HasASIMDRDM)
cpu.set(JSCVT, ARM64.HasJSCVT)
cpu.set(FCMA, ARM64.HasFCMA)
cpu.set(LRCPC, ARM64.HasLRCPC)
cpu.set(DCPOP, ARM64.HasDCPOP)
cpu.set(SHA3, ARM64.HasSHA3)
cpu.set(SM3, ARM64.HasSM3)
cpu.set(SM4, ARM64.HasSM4)
cpu.set(ASIMDDP, ARM64.HasASIMDDP)
cpu.set(SHA512, ARM64.HasSHA512)
cpu.set(SVE, ARM64.HasSVE)
cpu.set(ASIMDFHM, ARM64.HasASIMDFHM)
return cpu
}

22
vendor/github.com/segmentio/asm/cpu/cpu.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// Package cpu provides APIs to detect CPU features available at runtime.
package cpu
import (
"github.com/segmentio/asm/cpu/arm"
"github.com/segmentio/asm/cpu/arm64"
"github.com/segmentio/asm/cpu/x86"
)
var (
// X86 is the bitset representing which parts of the x86 instruction sets
// are supported by the CPU.
X86 = x86.ABI()
// ARM is the bitset representing which parts of the arm instruction sets
// are supported by the CPU.
ARM = arm.ABI()
// ARM64 is the bitset representing which parts of the arm64 instruction
// sets are supported by the CPU.
ARM64 = arm64.ABI()
)
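A small usage sketch of these bitsets: callers typically branch on a feature bit to select an implementation at runtime, which is the same pattern `keyset.New` uses further down in this diff.

```go
package main

import (
	"fmt"

	"github.com/segmentio/asm/cpu"
	"github.com/segmentio/asm/cpu/x86"
)

func main() {
	// Pick a code path based on the CPU features detected at startup.
	if cpu.X86.Has(x86.AVX2) {
		fmt.Println("AVX2 kernels available")
	} else {
		fmt.Println("using the portable fallback")
	}
}
```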

32
vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Package cpuid provides generic types used to represent CPU features supported
// by the architecture.
package cpuid
// CPU is a bitset of feature flags representing the capabilities of various CPU
// architectures that this package provides optimized assembly routines for.
//
// The intent is to provide a stable ABI between the Go code that generates the
// assembly, and the program that uses the library functions.
type CPU uint64
// Feature represents a single CPU feature.
type Feature uint64
const (
// None is a Feature value that has no CPU features enabled.
None Feature = 0
// All is a Feature value that has all CPU features enabled.
All Feature = 0xFFFFFFFFFFFFFFFF
)
func (cpu CPU) Has(feature Feature) bool {
return (Feature(cpu) & feature) == feature
}
func (cpu *CPU) Set(feature Feature, enabled bool) {
if enabled {
*cpu |= CPU(feature)
} else {
*cpu &= ^CPU(feature)
}
}

76
vendor/github.com/segmentio/asm/cpu/x86/x86.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
package x86
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
SSE Feature = 1 << iota // SSE functions
SSE2 // P4 SSE functions
SSE3 // Prescott SSE3 functions
SSE41 // Penryn SSE4.1 functions
SSE42 // Nehalem SSE4.2 functions
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
SSSE3 // Conroe SSSE3 functions
AVX // AVX functions
AVX2 // AVX2 functions
AVX512BF16 // AVX-512 BFLOAT16 Instructions
AVX512BITALG // AVX-512 Bit Algorithms
AVX512BW // AVX-512 Byte and Word Instructions
AVX512CD // AVX-512 Conflict Detection Instructions
AVX512DQ // AVX-512 Doubleword and Quadword Instructions
AVX512ER // AVX-512 Exponential and Reciprocal Instructions
AVX512F // AVX-512 Foundation
AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
AVX512PF // AVX-512 Prefetch Instructions
AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
AVX512VL // AVX-512 Vector Length Extensions
AVX512VNNI // AVX-512 Vector Neural Network Instructions
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
CMOV // Conditional move
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SSE?
cpu.set(SSE2, X86.HasSSE2)
cpu.set(SSE3, X86.HasSSE3)
cpu.set(SSE41, X86.HasSSE41)
cpu.set(SSE42, X86.HasSSE42)
cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
cpu.set(SSSE3, X86.HasSSSE3)
cpu.set(AVX, X86.HasAVX)
cpu.set(AVX2, X86.HasAVX2)
cpu.set(AVX512BF16, X86.HasAVX512BF16)
cpu.set(AVX512BITALG, X86.HasAVX512BITALG)
cpu.set(AVX512BW, X86.HasAVX512BW)
cpu.set(AVX512CD, X86.HasAVX512CD)
cpu.set(AVX512DQ, X86.HasAVX512DQ)
cpu.set(AVX512ER, X86.HasAVX512ER)
cpu.set(AVX512F, X86.HasAVX512F)
cpu.set(AVX512IFMA, X86.HasAVX512IFMA)
cpu.set(AVX512PF, X86.HasAVX512PF)
cpu.set(AVX512VBMI, X86.HasAVX512VBMI)
cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2)
cpu.set(AVX512VL, X86.HasAVX512VL)
cpu.set(AVX512VNNI, X86.HasAVX512VNNI)
cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu?
cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ)
cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV?
return cpu
}

View File

@ -0,0 +1,20 @@
package unsafebytes
import "unsafe"
func Pointer(b []byte) *byte {
return *(**byte)(unsafe.Pointer(&b))
}
func String(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
func BytesOf(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)}))
}
type sliceHeader struct {
str string
cap int
}

40
vendor/github.com/segmentio/asm/keyset/keyset.go generated vendored Normal file
View File

@ -0,0 +1,40 @@
package keyset
import (
"bytes"
"github.com/segmentio/asm/cpu"
"github.com/segmentio/asm/cpu/arm64"
"github.com/segmentio/asm/cpu/x86"
)
// New prepares a set of keys for use with Lookup.
//
// An optimized routine is used if the processor supports AVX instructions and
// the maximum length of any of the keys is less than or equal to 16. If New
// returns nil, this indicates that an optimized routine is not available, and
// the caller should use a fallback.
func New(keys [][]byte) []byte {
maxWidth, hasNullByte := checkKeys(keys)
if hasNullByte || maxWidth > 16 || !(cpu.X86.Has(x86.AVX) || cpu.ARM64.Has(arm64.ASIMD)) {
return nil
}
set := make([]byte, len(keys)*16)
for i, k := range keys {
copy(set[i*16:], k)
}
return set
}
func checkKeys(keys [][]byte) (maxWidth int, hasNullByte bool) {
for _, k := range keys {
if len(k) > maxWidth {
maxWidth = len(k)
}
if bytes.IndexByte(k, 0) >= 0 {
hasNullByte = true
}
}
return
}
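The fallback contract described above can be illustrated with a hedged sketch that pairs New with the Lookup routine declared in the files that follow; the `index` helper and its linear scan are illustrative, not part of the package.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/segmentio/asm/keyset"
)

// index returns the position of key in keys, using the vectorized lookup
// when keyset.New succeeded and a plain scan otherwise.
func index(set []byte, keys [][]byte, key []byte) int {
	if set != nil {
		return keyset.Lookup(set, key)
	}
	for i, k := range keys {
		if bytes.Equal(k, key) {
			return i
		}
	}
	return len(keys) // not found, mirroring Lookup's contract
}

func main() {
	keys := [][]byte{[]byte("alpha"), []byte("beta"), []byte("gamma")}
	set := keyset.New(keys) // nil when no optimized routine is available

	fmt.Println(index(set, keys, []byte("beta")))  // 1
	fmt.Println(index(set, keys, []byte("delta"))) // 3 (== len(keys), not found)
}
```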

10
vendor/github.com/segmentio/asm/keyset/keyset_amd64.go generated vendored Normal file
View File

@ -0,0 +1,10 @@
// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package keyset
// Lookup searches for a key in a set of keys, returning its index if
// found. If the key cannot be found, the number of keys is returned.
func Lookup(keyset []byte, key []byte) int

108
vendor/github.com/segmentio/asm/keyset/keyset_amd64.s generated vendored Normal file
View File

@ -0,0 +1,108 @@
// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func Lookup(keyset []byte, key []byte) int
// Requires: AVX
TEXT ·Lookup(SB), NOSPLIT, $0-56
MOVQ keyset_base+0(FP), AX
MOVQ keyset_len+8(FP), CX
SHRQ $0x04, CX
MOVQ key_base+24(FP), DX
MOVQ key_len+32(FP), BX
MOVQ key_cap+40(FP), SI
CMPQ BX, $0x10
JA not_found
CMPQ SI, $0x10
JB safe_load
load:
VMOVUPS (DX), X0
prepare:
VPXOR X2, X2, X2
VPCMPEQB X1, X1, X1
LEAQ blend_masks<>+16(SB), DX
SUBQ BX, DX
VMOVUPS (DX), X3
VPBLENDVB X3, X0, X2, X0
XORQ DX, DX
MOVQ CX, BX
SHRQ $0x02, BX
SHLQ $0x02, BX
bigloop:
CMPQ DX, BX
JE loop
VPCMPEQB (AX), X0, X8
VPTEST X1, X8
JCS done
VPCMPEQB 16(AX), X0, X9
VPTEST X1, X9
JCS found1
VPCMPEQB 32(AX), X0, X10
VPTEST X1, X10
JCS found2
VPCMPEQB 48(AX), X0, X11
VPTEST X1, X11
JCS found3
ADDQ $0x04, DX
ADDQ $0x40, AX
JMP bigloop
loop:
CMPQ DX, CX
JE done
VPCMPEQB (AX), X0, X2
VPTEST X1, X2
JCS done
INCQ DX
ADDQ $0x10, AX
JMP loop
JMP done
found3:
INCQ DX
found2:
INCQ DX
found1:
INCQ DX
done:
MOVQ DX, ret+48(FP)
RET
not_found:
MOVQ CX, ret+48(FP)
RET
safe_load:
MOVQ DX, SI
ANDQ $0x00000fff, SI
CMPQ SI, $0x00000ff0
JBE load
MOVQ $0xfffffffffffffff0, SI
ADDQ BX, SI
VMOVUPS (DX)(SI*1), X0
LEAQ shuffle_masks<>+16(SB), DX
SUBQ BX, DX
VMOVUPS (DX), X1
VPSHUFB X1, X0, X0
JMP prepare
DATA blend_masks<>+0(SB)/8, $0xffffffffffffffff
DATA blend_masks<>+8(SB)/8, $0xffffffffffffffff
DATA blend_masks<>+16(SB)/8, $0x0000000000000000
DATA blend_masks<>+24(SB)/8, $0x0000000000000000
GLOBL blend_masks<>(SB), RODATA|NOPTR, $32
DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+8(SB)/8, $0x0f0e0d0c0b0a0908
DATA shuffle_masks<>+16(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+24(SB)/8, $0x0f0e0d0c0b0a0908
GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32

View File

@ -0,0 +1,8 @@
//go:build !purego
// +build !purego
package keyset
// Lookup searches for a key in a set of keys, returning its index if
// found. If the key cannot be found, the number of keys is returned.
func Lookup(keyset []byte, key []byte) int

143
vendor/github.com/segmentio/asm/keyset/keyset_arm64.s generated vendored Normal file
View File

@ -0,0 +1,143 @@
//go:build !purego
// +build !purego
#include "textflag.h"
// func Lookup(keyset []byte, key []byte) int
TEXT ·Lookup(SB), NOSPLIT, $0-56
MOVD keyset+0(FP), R0
MOVD keyset_len+8(FP), R1
MOVD key+24(FP), R2
MOVD key_len+32(FP), R3
MOVD key_cap+40(FP), R4
// None of the keys in the set are longer than 16 bytes, so if the input
// key is, we can jump straight to not found.
CMP $16, R3
BHI notfound
// We'll be moving the keyset pointer (R0) forward as we compare keys, so
// make a copy of the starting point (R6). Also add the byte length (R1) to
// obtain a pointer to the end of the keyset (R5).
MOVD R0, R6
ADD R0, R1, R5
// Prepare a 64-bit mask of all ones.
MOVD $-1, R7
// Prepare a vector of all zeroes.
VMOV ZR, V1.B16
// Check that it's safe to load 16 bytes of input. If cap(input)<16, jump
// to a check that determines whether a tail load is necessary (to avoid a
// page fault).
CMP $16, R4
BLO safeload
load:
// Load the input key (V0) and pad with zero bytes (V1). To blend the two
// vectors, we load a mask for the particular key length and then use TBL
// to select bytes from either V0 or V1.
VLD1 (R2), [V0.B16]
MOVD $blend_masks<>(SB), R10
ADD R3<<4, R10, R10
VLD1 (R10), [V2.B16]
VTBL V2.B16, [V0.B16, V1.B16], V3.B16
loop:
// Loop through each 16 byte key in the keyset.
CMP R0, R5
BEQ notfound
// Load and compare the next key.
VLD1.P 16(R0), [V4.B16]
VCMEQ V3.B16, V4.B16, V5.B16
VMOV V5.D[0], R8
VMOV V5.D[1], R9
AND R8, R9, R9
// If the masks match, we found the key.
CMP R9, R7
BEQ found
JMP loop
found:
// If the key was found, take the position in the keyset and convert it
// to an index. The keyset pointer (R0) will be 1 key past the match, so
// subtract the starting pointer (R6), divide by 16 to convert from byte
// length to an index, and then subtract one.
SUB R6, R0, R0
ADD R0>>4, ZR, R0
SUB $1, R0, R0
MOVD R0, ret+48(FP)
RET
notfound:
// Return the number of keys in the keyset, which is the byte length (R1)
// divided by 16.
ADD R1>>4, ZR, R1
MOVD R1, ret+48(FP)
RET
safeload:
// Check if the input crosses a page boundary. If not, jump back.
AND $4095, R2, R12
CMP $4080, R12
BLS load
// If it does cross a page boundary, we must assume that loading 16 bytes
// will cause a fault. Instead, we load the 16 bytes up to and including the
// key and then shuffle the key forward in the register. We can shuffle and
// pad with zeroes at the same time to avoid having to also blend (as load
// does).
MOVD $16, R12
SUB R3, R12, R12
SUB R12, R2, R2
VLD1 (R2), [V0.B16]
MOVD $shuffle_masks<>(SB), R10
ADD R12, R10, R10
VLD1 (R10), [V2.B16]
VTBL V2.B16, [V0.B16, V1.B16], V3.B16
JMP loop
DATA blend_masks<>+0(SB)/8, $0x1010101010101010
DATA blend_masks<>+8(SB)/8, $0x1010101010101010
DATA blend_masks<>+16(SB)/8, $0x1010101010101000
DATA blend_masks<>+24(SB)/8, $0x1010101010101010
DATA blend_masks<>+32(SB)/8, $0x1010101010100100
DATA blend_masks<>+40(SB)/8, $0x1010101010101010
DATA blend_masks<>+48(SB)/8, $0x1010101010020100
DATA blend_masks<>+56(SB)/8, $0x1010101010101010
DATA blend_masks<>+64(SB)/8, $0x1010101003020100
DATA blend_masks<>+72(SB)/8, $0x1010101010101010
DATA blend_masks<>+80(SB)/8, $0x1010100403020100
DATA blend_masks<>+88(SB)/8, $0x1010101010101010
DATA blend_masks<>+96(SB)/8, $0x1010050403020100
DATA blend_masks<>+104(SB)/8, $0x1010101010101010
DATA blend_masks<>+112(SB)/8, $0x1006050403020100
DATA blend_masks<>+120(SB)/8, $0x1010101010101010
DATA blend_masks<>+128(SB)/8, $0x0706050403020100
DATA blend_masks<>+136(SB)/8, $0x1010101010101010
DATA blend_masks<>+144(SB)/8, $0x0706050403020100
DATA blend_masks<>+152(SB)/8, $0x1010101010101008
DATA blend_masks<>+160(SB)/8, $0x0706050403020100
DATA blend_masks<>+168(SB)/8, $0x1010101010100908
DATA blend_masks<>+176(SB)/8, $0x0706050403020100
DATA blend_masks<>+184(SB)/8, $0x10101010100A0908
DATA blend_masks<>+192(SB)/8, $0x0706050403020100
DATA blend_masks<>+200(SB)/8, $0x101010100B0A0908
DATA blend_masks<>+208(SB)/8, $0x0706050403020100
DATA blend_masks<>+216(SB)/8, $0x1010100C0B0A0908
DATA blend_masks<>+224(SB)/8, $0x0706050403020100
DATA blend_masks<>+232(SB)/8, $0x10100D0C0B0A0908
DATA blend_masks<>+240(SB)/8, $0x0706050403020100
DATA blend_masks<>+248(SB)/8, $0x100E0D0C0B0A0908
DATA blend_masks<>+256(SB)/8, $0x0706050403020100
DATA blend_masks<>+264(SB)/8, $0x0F0E0D0C0B0A0908
GLOBL blend_masks<>(SB), RODATA|NOPTR, $272
DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+8(SB)/8, $0x0F0E0D0C0B0A0908
DATA shuffle_masks<>+16(SB)/8, $0x1010101010101010
DATA shuffle_masks<>+24(SB)/8, $0x1010101010101010
GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32

View File

@ -0,0 +1,19 @@
//go:build purego || !(amd64 || arm64)
// +build purego !amd64,!arm64
package keyset
func Lookup(keyset []byte, key []byte) int {
if len(key) > 16 {
return len(keyset) / 16
}
var padded [16]byte
copy(padded[:], key)
for i := 0; i < len(keyset); i += 16 {
if string(padded[:]) == string(keyset[i:i+16]) {
return i / 16
}
}
return len(keyset) / 16
}

21
vendor/github.com/segmentio/encoding/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Segment.io, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,40 @@
//go:generate go run equal_fold_asm.go -out equal_fold_amd64.s -stubs equal_fold_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
// instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFold(a, b []byte) bool {
return ascii.EqualFold(a, b)
}
func HasPrefixFold(s, prefix []byte) bool {
return ascii.HasPrefixFold(s, prefix)
}
func HasSuffixFold(s, suffix []byte) bool {
return ascii.HasSuffixFold(s, suffix)
}
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a, b string) bool {
return ascii.EqualFoldString(a, b)
}
func HasPrefixFoldString(s, prefix string) bool {
return ascii.HasPrefixFoldString(s, prefix)
}
func HasSuffixFoldString(s, suffix string) bool {
return ascii.HasSuffixFoldString(s, suffix)
}
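A brief, hedged usage sketch of these wrappers for ASCII-only, case-insensitive comparisons:

```go
package main

import (
	"fmt"

	"github.com/segmentio/encoding/ascii"
)

func main() {
	// Faster than the UTF-8 aware strings.EqualFold when the inputs are
	// known to be plain ASCII, e.g. HTTP header names.
	fmt.Println(ascii.EqualFoldString("Content-Type", "content-type"))  // true
	fmt.Println(ascii.HasPrefixFoldString("X-Request-Id", "x-request")) // true
}
```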

26
vendor/github.com/segmentio/encoding/ascii/valid.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
//go:generate go run valid_asm.go -out valid_amd64.s -stubs valid_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// Valid returns true if b contains only ASCII characters.
func Valid(b []byte) bool {
return ascii.Valid(b)
}
// ValidByte returns true if b is an ASCII character.
func ValidByte(b byte) bool {
return ascii.ValidByte(b)
}
// ValidRune returns true if r is an ASCII character.
func ValidRune(r rune) bool {
return ascii.ValidRune(r)
}
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool {
return ascii.ValidString(s)
}

View File

@ -0,0 +1,26 @@
//go:generate go run valid_print_asm.go -out valid_print_amd64.s -stubs valid_print_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// ValidPrint returns true if b contains only printable ASCII characters.
func ValidPrint(b []byte) bool {
return ascii.ValidPrint(b)
}
// ValidPrintByte returns true if b is a printable ASCII character.
func ValidPrintByte(b byte) bool {
return ascii.ValidPrintByte(b)
}
// ValidPrintRune returns true if r is a printable ASCII character.
func ValidPrintRune(r rune) bool {
return ascii.ValidPrintRune(r)
}
// ValidPrintString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool {
return ascii.ValidPrintString(s)
}

185
vendor/github.com/segmentio/encoding/iso8601/parse.go generated vendored Normal file
View File

@ -0,0 +1,185 @@
package iso8601
import (
"encoding/binary"
"errors"
"time"
"unsafe"
)
var (
errInvalidTimestamp = errors.New("invalid ISO8601 timestamp")
errMonthOutOfRange = errors.New("month out of range")
errDayOutOfRange = errors.New("day out of range")
errHourOutOfRange = errors.New("hour out of range")
errMinuteOutOfRange = errors.New("minute out of range")
errSecondOutOfRange = errors.New("second out of range")
)
// Parse parses an ISO8601 timestamp, e.g. "2021-03-25T21:36:12Z".
func Parse(input string) (time.Time, error) {
b := unsafeStringToBytes(input)
if len(b) >= 20 && len(b) <= 30 && b[len(b)-1] == 'Z' {
if len(b) == 21 || (len(b) > 21 && b[19] != '.') {
return time.Time{}, errInvalidTimestamp
}
t1 := binary.LittleEndian.Uint64(b)
t2 := binary.LittleEndian.Uint64(b[8:16])
t3 := uint64(b[16]) | uint64(b[17])<<8 | uint64(b[18])<<16 | uint64('Z')<<24
// Check for valid separators by masking input with " - - T : : Z".
// If separators are all valid, replace them with a '0' (0x30) byte and
// check all bytes are now numeric.
if !match(t1, mask1) || !match(t2, mask2) || !match(t3, mask3) {
return time.Time{}, errInvalidTimestamp
}
t1 ^= replace1
t2 ^= replace2
t3 ^= replace3
if (nonNumeric(t1) | nonNumeric(t2) | nonNumeric(t3)) != 0 {
return time.Time{}, errInvalidTimestamp
}
t1 -= zero
t2 -= zero
t3 -= zero
year := (t1&0xF)*1000 + (t1>>8&0xF)*100 + (t1>>16&0xF)*10 + (t1 >> 24 & 0xF)
month := (t1>>40&0xF)*10 + (t1 >> 48 & 0xF)
day := (t2&0xF)*10 + (t2 >> 8 & 0xF)
hour := (t2>>24&0xF)*10 + (t2 >> 32 & 0xF)
minute := (t2>>48&0xF)*10 + (t2 >> 56)
second := (t3>>8&0xF)*10 + (t3 >> 16)
nanos := int64(0)
if len(b) > 20 {
for _, c := range b[20 : len(b)-1] {
if c < '0' || c > '9' {
return time.Time{}, errInvalidTimestamp
}
nanos = (nanos * 10) + int64(c-'0')
}
nanos *= pow10[30-len(b)]
}
if err := validate(year, month, day, hour, minute, second); err != nil {
return time.Time{}, err
}
unixSeconds := int64(daysSinceEpoch(year, month, day))*86400 + int64(hour*3600+minute*60+second)
return time.Unix(unixSeconds, nanos).UTC(), nil
}
// Fallback to using time.Parse().
t, err := time.Parse(time.RFC3339Nano, input)
if err != nil {
// Override (and don't wrap) the error here. The error returned by
// time.Parse() is dynamic, and includes a reference to the input
// string. By overriding the error, we guarantee that the input string
// doesn't escape.
return time.Time{}, errInvalidTimestamp
}
return t, nil
}
var pow10 = []int64{1, 10, 100, 1000, 1e4, 1e5, 1e6, 1e7, 1e8}
const (
mask1 = 0x2d00002d00000000 // YYYY-MM-
mask2 = 0x00003a0000540000 // DDTHH:MM
mask3 = 0x000000005a00003a // :SSZ____
// Generate masks that replace the separators with a numeric byte.
// The input must have valid separators. XOR with the separator bytes
// to zero them out and then XOR with 0x30 to replace them with '0'.
replace1 = mask1 ^ 0x3000003000000000
replace2 = mask2 ^ 0x0000300000300000
replace3 = mask3 ^ 0x3030303030000030
lsb = ^uint64(0) / 255
msb = lsb * 0x80
zero = lsb * '0'
nine = lsb * '9'
)
func validate(year, month, day, hour, minute, second uint64) error {
if day == 0 || day > 31 {
return errDayOutOfRange
}
if month == 0 || month > 12 {
return errMonthOutOfRange
}
if hour >= 24 {
return errHourOutOfRange
}
if minute >= 60 {
return errMinuteOutOfRange
}
if second >= 60 {
return errSecondOutOfRange
}
if month == 2 && (day > 29 || (day == 29 && !isLeapYear(year))) {
return errDayOutOfRange
}
if day == 31 {
switch month {
case 4, 6, 9, 11:
return errDayOutOfRange
}
}
return nil
}
func match(u, mask uint64) bool {
return (u & mask) == mask
}
func nonNumeric(u uint64) uint64 {
// Derived from https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord.
// Subtract '0' (0x30) from each byte so that the MSB is set in each byte
// if there's a byte less than '0' (0x30). Add 0x46 (0x7F-'9') so that the
// MSB is set if there's a byte greater than '9' (0x39). To handle overflow
// when adding 0x46, include the MSB from the input bytes in the final mask.
// Remove all but the MSBs and then you're left with a mask where each
// non-numeric byte from the input has its MSB set in the output.
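// For example, the byte ':' (0x3A) is flagged because 0x3A + 0x46 = 0x80 sets
// its MSB, while a digit such as '7' (0x37) keeps the MSB clear in all three terms.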
return ((u - zero) | (u + (^msb - nine)) | u) & msb
}
func daysSinceEpoch(year, month, day uint64) uint64 {
// Derived from https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html.
monthAdjusted := month - 3
var carry uint64
if monthAdjusted > month {
carry = 1
}
var adjust uint64
if carry == 1 {
adjust = 12
}
yearAdjusted := year + 4800 - carry
monthDays := ((monthAdjusted+adjust)*62719 + 769) / 2048
leapDays := yearAdjusted/4 - yearAdjusted/100 + yearAdjusted/400
return yearAdjusted*365 + leapDays + monthDays + (day - 1) - 2472632
}
func isLeapYear(y uint64) bool {
return (y%4) == 0 && ((y%100) != 0 || (y%400) == 0)
}
func unsafeStringToBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&sliceHeader{
Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
Len: len(s),
Cap: len(s),
}))
}
// sliceHeader is like reflect.SliceHeader but the Data field is an
// unsafe.Pointer instead of being a uintptr to avoid invalid
// conversions from uintptr to unsafe.Pointer.
type sliceHeader struct {
Data unsafe.Pointer
Len int
Cap int
}
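A short, hedged example of calling Parse; the expected result follows from the fast path above (sub-second digits scaled to nanoseconds, UTC output).

```go
package main

import (
	"fmt"

	"github.com/segmentio/encoding/iso8601"
)

func main() {
	// Timestamps shaped like YYYY-MM-DDTHH:MM:SS[.fff...]Z take the fast
	// path; anything else falls back to time.Parse(time.RFC3339Nano).
	t, err := iso8601.Parse("2021-03-25T21:36:12.123Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(t) // 2021-03-25 21:36:12.123 +0000 UTC
}
```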

179
vendor/github.com/segmentio/encoding/iso8601/valid.go generated vendored Normal file
View File

@ -0,0 +1,179 @@
package iso8601
// ValidFlags is a bitset type used to configure the behavior of the Valid
// function.
type ValidFlags int
const (
// Strict is a validation flag used to represent a strict iso8601 validation
// (this is the default).
Strict ValidFlags = 0
// AllowSpaceSeparator allows the presence of a space instead of a 'T' as
// separator between the date and time.
AllowSpaceSeparator ValidFlags = 1 << iota
// AllowMissingTime allows the value to contain only a date.
AllowMissingTime
// AllowMissingSubsecond allows the value to contain only a date and time.
AllowMissingSubsecond
// AllowMissingTimezone allows the value to be missing the timezone
// information.
AllowMissingTimezone
// AllowNumericTimezone allows the value to represent timezones in their
// numeric form.
AllowNumericTimezone
// Flexible is a combination of all validation flags that allow for
// non-strict checking of the input value.
Flexible = AllowSpaceSeparator | AllowMissingTime | AllowMissingSubsecond | AllowMissingTimezone | AllowNumericTimezone
)
// Valid checks value to verify whether or not it is a valid iso8601 time
// representation.
func Valid(value string, flags ValidFlags) bool {
var ok bool
// year
if value, ok = readDigits(value, 4, 4); !ok {
return false
}
if value, ok = readByte(value, '-'); !ok {
return false
}
// month
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, '-'); !ok {
return false
}
// day
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if len(value) == 0 && (flags&AllowMissingTime) != 0 {
return true // date only
}
// separator
if value, ok = readByte(value, 'T'); !ok {
if (flags & AllowSpaceSeparator) == 0 {
return false
}
if value, ok = readByte(value, ' '); !ok {
return false
}
}
// hour
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
return false
}
// minute
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
return false
}
// second
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
// microsecond
if value, ok = readByte(value, '.'); !ok {
if (flags & AllowMissingSubsecond) == 0 {
return false
}
} else {
if value, ok = readDigits(value, 1, 9); !ok {
return false
}
}
if len(value) == 0 && (flags&AllowMissingTimezone) != 0 {
return true // date and time
}
// timezone
if value, ok = readByte(value, 'Z'); ok {
return len(value) == 0
}
if (flags & AllowSpaceSeparator) != 0 {
value, _ = readByte(value, ' ')
}
if value, ok = readByte(value, '+'); !ok {
if value, ok = readByte(value, '-'); !ok {
return false
}
}
// timezone hour
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
if (flags & AllowNumericTimezone) == 0 {
return false
}
}
// timezone minute
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
return len(value) == 0
}
func readDigits(value string, min, max int) (string, bool) {
if len(value) < min {
return value, false
}
i := 0
for i < max && i < len(value) && isDigit(value[i]) {
i++
}
if i < max && i < min {
return value, false
}
return value[i:], true
}
func readByte(value string, c byte) (string, bool) {
if len(value) == 0 {
return value, false
}
if value[0] != c {
return value, false
}
return value[1:], true
}
func isDigit(c byte) bool {
return '0' <= c && c <= '9'
}
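To make the flag semantics concrete, here is a hedged example using the flags defined above; the results follow from the Valid implementation shown in this file.

```go
package main

import (
	"fmt"

	"github.com/segmentio/encoding/iso8601"
)

func main() {
	// Strict requires the full shape, including a fractional second and a zone.
	fmt.Println(iso8601.Valid("2021-03-25T21:36:12.000Z", iso8601.Strict)) // true
	// A space separator and missing sub-second/timezone parts only pass with
	// the corresponding Allow* flags, or the Flexible combination.
	fmt.Println(iso8601.Valid("2021-03-25 21:36:12", iso8601.Strict))   // false
	fmt.Println(iso8601.Valid("2021-03-25 21:36:12", iso8601.Flexible)) // true
}
```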

76
vendor/github.com/segmentio/encoding/json/README.md generated vendored Normal file
View File

@ -0,0 +1,76 @@
# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json)
Go package offering a replacement implementation of the standard library's
[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much
better performance.
## Usage
The exported API of this package mirrors the standard library's
[`encoding/json`](https://golang.org/pkg/encoding/json/) package, the only
change needed to take advantage of the performance improvements is the import
path of the `json` package, from:
```go
import (
"encoding/json"
)
```
to
```go
import (
"github.com/segmentio/encoding/json"
)
```
One way to gain higher encoding throughput is to disable HTML escaping.
It allows the string encoding to use a much more efficient code path which
does not require parsing UTF-8 runes most of the time.
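For illustration, and assuming the stdlib-compatible `Encoder.SetEscapeHTML` is available here as the mirrored API suggests, disabling HTML escaping looks like this:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/segmentio/encoding/json"
)

func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	// Emit '<', '>' and '&' verbatim instead of \u003c escapes, skipping
	// the HTML-escaping pass on strings.
	enc.SetEscapeHTML(false)
	if err := enc.Encode(map[string]string{"html": "<b>hi</b>"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"html":"<b>hi</b>"}
}
```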
## Performance Improvements
The internal implementation uses a fair amount of unsafe operations (untyped
code, pointer arithmetic, etc...) to avoid using reflection as much as possible,
which is often the reason why serialization code has a large CPU and memory
footprint.
The package aims for zero unnecessary dynamic memory allocations and hot code
paths that are mostly free from calls into the reflect package.
## Compatibility with encoding/json
This package aims to be a drop-in replacement; therefore, it is tested to behave
exactly like the standard library's package. However, there are still a few
missing features that have not been ported yet:
- Streaming decoder: currently the `Decoder` implementation offered by the
package does not support progressively reading values from a JSON array (unlike
the standard library). In our experience this is a very rare use case; if you
need it you're better off sticking to the standard library, or spending a bit of
time implementing it here ;)
Note that none of those features would result in performance degradations if
they were implemented in the package, and we welcome contributions!
## Trade-offs
As one would expect, we had to make a couple of trade-offs to achieve greater
performance than the standard library, but there were also features that we
did not want to give away.
Other open-source packages offering a reduced CPU and memory footprint usually
do so by designing a different API or by requiring code generation (therefore adding
complexity to the build process). These were not acceptable conditions for us,
as we were not willing to trade off developer productivity for better runtime
performance. To achieve this, we chose to exactly replicate the standard
library interfaces and behavior, which meant the package implementation was the
only area that we were able to work with. The internals of this package make
heavy use of unsafe pointer arithmetic and other performance optimizations,
and therefore are not as approachable as typical Go programs. Basically, we put
a bigger burden on maintainers to achieve better runtime cost without
sacrificing developer productivity.
For these reasons, we also don't believe that this code should be ported upstream
to the standard `encoding/json` package. The standard library has to remain
readable and approachable to maximize stability and maintainability, and make
projects like this one possible because a high quality reference implementation
already exists.

1232
vendor/github.com/segmentio/encoding/json/codec.go generated vendored Normal file

File diff suppressed because it is too large

1462
vendor/github.com/segmentio/encoding/json/decode.go generated vendored Normal file

File diff suppressed because it is too large

990
vendor/github.com/segmentio/encoding/json/encode.go generated vendored Normal file
View File

@ -0,0 +1,990 @@
package json
import (
"encoding"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"sync"
"time"
"unicode/utf8"
"unsafe"
"github.com/segmentio/asm/base64"
)
const hex = "0123456789abcdef"
func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) {
return append(b, "null"...), nil
}
func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) {
if *(*bool)(p) {
return append(b, "true"...), nil
}
return append(b, "false"...), nil
}
func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int)(p))), nil
}
func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int8)(p))), nil
}
func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int16)(p))), nil
}
func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int32)(p))), nil
}
func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, *(*int64)(p)), nil
}
func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint)(p))), nil
}
func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uintptr)(p))), nil
}
func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint8)(p))), nil
}
func (e encoder) encodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint16)(p))), nil
}
func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint32)(p))), nil
}
func (e encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, *(*uint64)(p)), nil
}
func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) {
return e.encodeFloat(b, float64(*(*float32)(p)), 32)
}
func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) {
return e.encodeFloat(b, *(*float64)(p), 64)
}
func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) {
switch {
case math.IsNaN(f):
return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"}
case math.IsInf(f, 0):
return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"}
}
// Convert as if by ES6 number to string conversion.
// This matches most other JSON generators.
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
abs := math.Abs(f)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs != 0 {
if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
fmt = 'e'
}
}
b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
if fmt == 'e' {
// clean up e-09 to e-9
n := len(b)
if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
b[n-2] = b[n-1]
b = b[:n-1]
}
}
return b, nil
}
func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) {
n := *(*Number)(p)
if n == "" {
n = "0"
}
d := decoder{}
_, _, _, err := d.parseNumber(stringToBytes(string(n)))
if err != nil {
return b, err
}
return append(b, n...), nil
}
func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) {
s := *(*string)(p)
if len(s) == 0 {
return append(b, `""`...), nil
}
i := 0
j := 0
escapeHTML := (e.flags & EscapeHTML) != 0
b = append(b, '"')
if len(s) >= 8 {
if j = escapeIndex(s, escapeHTML); j < 0 {
return append(append(b, s...), '"'), nil
}
}
for j < len(s) {
c := s[j]
if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) {
// fast path: most of the time, printable ascii characters are used
j++
continue
}
switch c {
case '\\', '"':
b = append(b, s[i:j]...)
b = append(b, '\\', c)
i = j + 1
j = j + 1
continue
case '\n':
b = append(b, s[i:j]...)
b = append(b, '\\', 'n')
i = j + 1
j = j + 1
continue
case '\r':
b = append(b, s[i:j]...)
b = append(b, '\\', 'r')
i = j + 1
j = j + 1
continue
case '\t':
b = append(b, s[i:j]...)
b = append(b, '\\', 't')
i = j + 1
j = j + 1
continue
case '<', '>', '&':
b = append(b, s[i:j]...)
b = append(b, `\u00`...)
b = append(b, hex[c>>4], hex[c&0xF])
i = j + 1
j = j + 1
continue
}
// This encodes bytes < 0x20 except for \t, \n and \r.
if c < 0x20 {
b = append(b, s[i:j]...)
b = append(b, `\u00`...)
b = append(b, hex[c>>4], hex[c&0xF])
i = j + 1
j = j + 1
continue
}
r, size := utf8.DecodeRuneInString(s[j:])
if r == utf8.RuneError && size == 1 {
b = append(b, s[i:j]...)
b = append(b, `\ufffd`...)
i = j + size
j = j + size
continue
}
switch r {
case '\u2028', '\u2029':
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
b = append(b, s[i:j]...)
b = append(b, `\u202`...)
b = append(b, hex[r&0xF])
i = j + size
j = j + size
continue
}
j += size
}
b = append(b, s[i:]...)
b = append(b, '"')
return b, nil
}
func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) {
i := len(b)
b, err := encode(e, b, p)
if err != nil {
return b, err
}
j := len(b)
s := b[i:]
if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil {
return b, err
}
n := copy(b[i:], b[j:])
return b[:i+n], nil
}
func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) {
v := *(*[]byte)(p)
if v == nil {
return append(b, "null"...), nil
}
n := base64.StdEncoding.EncodedLen(len(v)) + 2
if avail := cap(b) - len(b); avail < n {
newB := make([]byte, cap(b)+(n-avail))
copy(newB, b)
b = newB[:len(b)]
}
i := len(b)
j := len(b) + n
b = b[:j]
b[i] = '"'
base64.StdEncoding.Encode(b[i+1:j-1], v)
b[j-1] = '"'
return b, nil
}
func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) {
b = append(b, '"')
b = appendDuration(b, *(*time.Duration)(p))
b = append(b, '"')
return b, nil
}
func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) {
t := *(*time.Time)(p)
b = append(b, '"')
b = t.AppendFormat(b, time.RFC3339Nano)
b = append(b, '"')
return b, nil
}
func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) {
var start = len(b)
var err error
b = append(b, '[')
for i := 0; i < n; i++ {
if i != 0 {
b = append(b, ',')
}
if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil {
return b[:start], err
}
}
b = append(b, ']')
return b, nil
}
func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) {
s := (*slice)(p)
if s.data == nil && s.len == 0 && s.cap == 0 {
return append(b, "null"...), nil
}
return e.encodeArray(b, s.data, s.len, size, t, encode)
}
func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) {
m := reflect.NewAt(t, p).Elem()
if m.IsNil() {
return append(b, "null"...), nil
}
keys := m.MapKeys()
if sortKeys != nil && (e.flags&SortMapKeys) != 0 {
sortKeys(keys)
}
var start = len(b)
var err error
b = append(b, '{')
for i, k := range keys {
v := m.MapIndex(k)
if i != 0 {
b = append(b, ',')
}
if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil {
return b[:start], err
}
b = append(b, ':')
if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil {
return b[:start], err
}
}
b = append(b, '}')
return b, nil
}
type element struct {
key string
val interface{}
raw RawMessage
}
type mapslice struct {
elements []element
}
func (m *mapslice) Len() int { return len(m.elements) }
func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key }
func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] }
var mapslicePool = sync.Pool{
New: func() interface{} { return new(mapslice) },
}
func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]interface{})(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = Append(b, v, e.flags)
if err != nil {
return b, err
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
s.elements = append(s.elements, element{key: key, val: val})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = Append(b, elem.val, e.flags)
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]RawMessage)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString doesn't return errors so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = e.encodeRawMessage(b, unsafe.Pointer(&v))
if err != nil {
break
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, raw := range m {
s.elements = append(s.elements, element{key: key, raw: raw})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw))
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]string)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, _ = e.encodeString(b, unsafe.Pointer(&v))
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
v := val
s.elements = append(s.elements, element{key: key, val: &v})
}
sort.Sort(s)
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, _ = e.encodeString(b, unsafe.Pointer(elem.val.(*string)))
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string][]string)(p)
if m == nil {
return append(b, "null"...), nil
}
var stringSize = unsafe.Sizeof("")
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = e.encodeSlice(b, unsafe.Pointer(&v), stringSize, sliceStringType, encoder.encodeString)
if err != nil {
return b, err
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
v := val
s.elements = append(s.elements, element{key: key, val: &v})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = e.encodeSlice(b, unsafe.Pointer(elem.val.(*[]string)), stringSize, sliceStringType, encoder.encodeString)
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]bool)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
if v {
b = append(b, ":true"...)
} else {
b = append(b, ":false"...)
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
s.elements = append(s.elements, element{key: key, val: val})
}
sort.Sort(s)
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
if elem.val.(bool) {
b = append(b, ":true"...)
} else {
b = append(b, ":false"...)
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
b = append(b, '}')
return b, nil
}
func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) {
var start = len(b)
var err error
var k string
var n int
b = append(b, '{')
escapeHTML := (e.flags & EscapeHTML) != 0
for i := range st.fields {
f := &st.fields[i]
v := unsafe.Pointer(uintptr(p) + f.offset)
if f.omitempty && f.empty(v) {
continue
}
if escapeHTML {
k = f.html
} else {
k = f.json
}
lengthBeforeKey := len(b)
if n != 0 {
b = append(b, k...)
} else {
b = append(b, k[1:]...)
}
if b, err = f.codec.encode(e, b, v); err != nil {
if err == (rollback{}) {
b = b[:lengthBeforeKey]
continue
}
return b[:start], err
}
n++
}
b = append(b, '}')
return b, nil
}
type rollback struct{}
func (rollback) Error() string { return "rollback" }
func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) {
p = *(*unsafe.Pointer)(p)
if p == nil {
return b, rollback{}
}
return encode(e, b, unsafe.Pointer(uintptr(p)+offset))
}
func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) {
if p = *(*unsafe.Pointer)(p); p != nil {
if e.ptrDepth++; e.ptrDepth >= startDetectingCyclesAfter {
if _, seen := e.ptrSeen[p]; seen {
// TODO: reconstruct the reflect.Value from p + t so we can set
// the error's Value field?
return b, &UnsupportedValueError{Str: fmt.Sprintf("encountered a cycle via %s", t)}
}
if e.ptrSeen == nil {
e.ptrSeen = make(map[unsafe.Pointer]struct{})
}
e.ptrSeen[p] = struct{}{}
defer delete(e.ptrSeen, p)
}
return encode(e, b, p)
}
return e.encodeNull(b, nil)
}
func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
return Append(b, *(*interface{})(p), e.flags)
}
func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags)
}
func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
return b, &UnsupportedTypeError{Type: t}
}
func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) {
v := *(*RawMessage)(p)
if v == nil {
return append(b, "null"...), nil
}
var s []byte
if (e.flags & TrustRawMessage) != 0 {
s = v
} else {
var err error
d := decoder{}
s, _, _, err = d.parseValue(v)
if err != nil {
return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()}
}
}
if (e.flags & EscapeHTML) != 0 {
return appendCompactEscapeHTML(b, s), nil
}
return append(b, s...), nil
}
func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) {
v := reflect.NewAt(t, p)
if !pointer {
v = v.Elem()
}
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return append(b, "null"...), nil
}
}
j, err := v.Interface().(Marshaler).MarshalJSON()
if err != nil {
return b, err
}
d := decoder{}
s, _, _, err := d.parseValue(j)
if err != nil {
return b, &MarshalerError{Type: t, Err: err}
}
if (e.flags & EscapeHTML) != 0 {
return appendCompactEscapeHTML(b, s), nil
}
return append(b, s...), nil
}
func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) {
v := reflect.NewAt(t, p)
if !pointer {
v = v.Elem()
}
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return append(b, `null`...), nil
}
}
s, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return b, err
}
return e.encodeString(b, unsafe.Pointer(&s))
}
func appendCompactEscapeHTML(dst []byte, src []byte) []byte {
start := 0
escape := false
inString := false
for i, c := range src {
if !inString {
switch c {
case '"': // enter string
inString = true
case ' ', '\n', '\r', '\t': // skip space
if start < i {
dst = append(dst, src[start:i]...)
}
start = i + 1
}
continue
}
if escape {
escape = false
continue
}
if c == '\\' {
escape = true
continue
}
if c == '"' {
inString = false
continue
}
if c == '<' || c == '>' || c == '&' {
if start < i {
dst = append(dst, src[start:i]...)
}
dst = append(dst, `\u00`...)
dst = append(dst, hex[c>>4], hex[c&0xF])
start = i + 1
continue
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst = append(dst, src[start:i]...)
}
dst = append(dst, `\u202`...)
dst = append(dst, hex[src[i+2]&0xF])
start = i + 3
continue
}
}
if start < len(src) {
dst = append(dst, src[start:]...)
}
return dst
}
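The tail of encode.go above ends with pointer handling that guards against reference cycles (encodePointer with ptrSeen, ptrDepth and startDetectingCyclesAfter). Below is a rough, self-contained sketch of that guard only; the node and walker names are invented for this illustration and are not part of the package.
package main
import (
    "fmt"
    "unsafe"
)
// node is an illustrative self-referencing type.
type node struct {
    next *node
}
// startDetectingCyclesAfter mirrors the threshold used by encodePointer above:
// the cycle check is only armed once the walk is suspiciously deep.
const startDetectingCyclesAfter = 1000
// walker keeps the same bookkeeping as encoder.ptrSeen/ptrDepth: pointers that
// are currently on the stack are remembered, and a repeat means a cycle.
type walker struct {
    depth int
    seen  map[unsafe.Pointer]struct{}
}
func (w *walker) walk(p *node) error {
    if p == nil {
        return nil
    }
    ptr := unsafe.Pointer(p)
    if w.depth++; w.depth >= startDetectingCyclesAfter {
        if _, ok := w.seen[ptr]; ok {
            return fmt.Errorf("encountered a cycle via %p", p)
        }
        if w.seen == nil {
            w.seen = make(map[unsafe.Pointer]struct{})
        }
        w.seen[ptr] = struct{}{}
        defer delete(w.seen, ptr)
    }
    return w.walk(p.next)
}
func main() {
    a := &node{}
    a.next = a // deliberate cycle
    fmt.Println((&walker{}).walk(a))
}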

98
vendor/github.com/segmentio/encoding/json/int.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package json
import (
"unsafe"
)
var endianness int
func init() {
var b [2]byte
*(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD)
switch b[0] {
case 0xCD:
endianness = 0 // LE
case 0xAB:
endianness = 1 // BE
default:
panic("could not determine endianness")
}
}
// "00010203...96979899" cast to []uint16
var intLELookup = [100]uint16{
0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939,
}
var intBELookup = [100]uint16{
0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039,
0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139,
0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239,
0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339,
0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439,
0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539,
0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639,
0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739,
0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839,
0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939,
}
var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup}
func appendInt(b []byte, n int64) []byte {
return formatInteger(b, uint64(n), n < 0)
}
func appendUint(b []byte, n uint64) []byte {
return formatInteger(b, n, false)
}
func formatInteger(out []byte, n uint64, negative bool) []byte {
if !negative {
if n < 10 {
return append(out, byte(n+'0'))
} else if n < 100 {
u := intLELookup[n]
return append(out, byte(u), byte(u>>8))
}
} else {
n = -n
}
lookup := intLookup[endianness]
var b [22]byte
u := (*[11]uint16)(unsafe.Pointer(&b))
i := 11
for n >= 100 {
j := n % 100
n /= 100
i--
u[i] = lookup[j]
}
i--
u[i] = lookup[n]
i *= 2 // convert to byte index
if n < 10 {
i++ // remove leading zero
}
if negative {
i--
b[i] = '-'
}
return append(out, b[i:]...)
}
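formatInteger above emits two decimal digits per store through an endianness-selected uint16 table. The following is a minimal, portable sketch of the same idea using plain byte pairs; digitPairs and formatUint exist only in this illustration.
package main
import "fmt"
// digitPairs is "000102...9899" laid out as byte pairs, the portable
// equivalent of the intLELookup/intBELookup tables above.
var digitPairs = func() [200]byte {
    var p [200]byte
    for i := 0; i < 100; i++ {
        p[2*i] = byte('0' + i/10)
        p[2*i+1] = byte('0' + i%10)
    }
    return p
}()
// formatUint appends the decimal form of n to out, writing two digits per
// iteration like the vendored formatInteger.
func formatUint(out []byte, n uint64) []byte {
    var buf [20]byte
    i := len(buf)
    for n >= 100 {
        j := (n % 100) * 2
        n /= 100
        i -= 2
        buf[i], buf[i+1] = digitPairs[j], digitPairs[j+1]
    }
    j := n * 2
    i -= 2
    buf[i], buf[i+1] = digitPairs[j], digitPairs[j+1]
    if n < 10 {
        i++ // drop the leading zero of the final pair
    }
    return append(out, buf[i:]...)
}
func main() {
    fmt.Println(string(formatUint(nil, 0)))       // 0
    fmt.Println(string(formatUint(nil, 7)))       // 7
    fmt.Println(string(formatUint(nil, 1234567))) // 1234567
}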

582
vendor/github.com/segmentio/encoding/json/json.go generated vendored Normal file
View File

@ -0,0 +1,582 @@
package json
import (
"bytes"
"encoding/json"
"io"
"math/bits"
"reflect"
"runtime"
"sync"
"unsafe"
)
// Delim is documented at https://golang.org/pkg/encoding/json/#Delim
type Delim = json.Delim
// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error
type InvalidUTF8Error = json.InvalidUTF8Error
// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError
type InvalidUnmarshalError = json.InvalidUnmarshalError
// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler
type Marshaler = json.Marshaler
// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError
type MarshalerError = json.MarshalerError
// Number is documented at https://golang.org/pkg/encoding/json/#Number
type Number = json.Number
// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage
type RawMessage = json.RawMessage
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError = json.SyntaxError
// Token is documented at https://golang.org/pkg/encoding/json/#Token
type Token = json.Token
// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError
type UnmarshalFieldError = json.UnmarshalFieldError
// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError
type UnmarshalTypeError = json.UnmarshalTypeError
// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler
type Unmarshaler = json.Unmarshaler
// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError
type UnsupportedTypeError = json.UnsupportedTypeError
// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError
type UnsupportedValueError = json.UnsupportedValueError
// AppendFlags is a type used to represent configuration options that can be
// applied when formatting json output.
type AppendFlags uint32
const (
// EscapeHTML is a formatting flag used to escape HTML in json strings.
EscapeHTML AppendFlags = 1 << iota
// SortMapKeys is a formatting flag used to enable sorting of map keys when
// encoding JSON (this matches the behavior of the standard encoding/json
// package).
SortMapKeys
// TrustRawMessage is a performance optimization flag to skip value
// checking of raw messages. It should only be used if the values are
// known to be valid json (e.g., they were created by json.Unmarshal).
TrustRawMessage
// appendNewline is a formatting flag to enable the addition of a newline
// in Encode (this matches the behavior of the standard encoding/json
// package).
appendNewline
)
// ParseFlags is a type used to represent configuration options that can be
// applied when parsing json input.
type ParseFlags uint32
func (flags ParseFlags) has(f ParseFlags) bool {
return (flags & f) != 0
}
func (f ParseFlags) kind() Kind {
return Kind((f >> kindOffset) & 0xFF)
}
func (f ParseFlags) withKind(kind Kind) ParseFlags {
return (f & ^(ParseFlags(0xFF) << kindOffset)) | (ParseFlags(kind) << kindOffset)
}
const (
// DisallowUnknownFields is a parsing flag used to prevent decoding of
// objects to Go struct values when a field of the input does not match
// with any of the struct fields.
DisallowUnknownFields ParseFlags = 1 << iota
// UseNumber is a parsing flag used to load numeric values as Number
// instead of float64.
UseNumber
// DontCopyString is a parsing flag used to provide zero-copy support when
// loading string values from a json payload. It is not always possible to
// avoid dynamic memory allocations, for example when a string is escaped in
// the json data a new buffer has to be allocated, but when the `wire` value
// can be used as content of a Go value the decoder will simply point into
// the input buffer.
DontCopyString
// DontCopyNumber is a parsing flag used to provide zero-copy support when
// loading Number values (see DontCopyString and DontCopyRawMessage).
DontCopyNumber
// DontCopyRawMessage is a parsing flag used to provide zero-copy support
// when loading RawMessage values from a json payload. When used, the
// RawMessage values will not be allocated into new memory buffers and
// will instead point directly to the area of the input buffer where the
// value was found.
DontCopyRawMessage
// DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent
// matching fields in a case-insensitive way. This can prevent degrading
// performance on case conversions, and can also act as a stricter decoding
// mode.
DontMatchCaseInsensitiveStructFields
// ZeroCopy is a parsing flag that combines all the copy optimizations
// available in the package.
//
// The zero-copy optimizations are better used in request-handler style
// code where none of the values are retained after the handler returns.
ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage
// validAsciiPrint is an internal flag indicating that the input contains
// only valid ASCII print chars (0x20 <= c <= 0x7E). If the flag is unset,
// it's unknown whether the input is valid ASCII print.
validAsciiPrint ParseFlags = 1 << 28
// noBackslash is an internal flag indicating that the input does not
// contain a backslash. If the flag is unset, it's unknown whether the
// input contains a backslash.
noBackslash ParseFlags = 1 << 29
// Bit offset where the kind of the json value is stored.
//
// See Kind in token.go for the enum.
kindOffset ParseFlags = 16
)
// Kind represents the different kinds of value that exist in JSON.
type Kind uint
const (
Undefined Kind = 0
Null Kind = 1 // Null is not zero, so we keep zero for "undefined".
Bool Kind = 2 // Bit two is set to 1, means it's a boolean.
False Kind = 2 // Bool + 0
True Kind = 3 // Bool + 1
Num Kind = 4 // Bit three is set to 1, means it's a number.
Uint Kind = 5 // Num + 1
Int Kind = 6 // Num + 2
Float Kind = 7 // Num + 3
String Kind = 8 // Bit four is set to 1, means it's a string.
Unescaped Kind = 9 // String + 1
Array Kind = 16 // Equivalent to Delim == '['
Object Kind = 32 // Equivalent to Delim == '{'
)
// Class returns the class of k.
func (k Kind) Class() Kind { return Kind(1 << uint(bits.Len(uint(k))-1)) }
// Append acts like Marshal but appends the json representation to b instead of
// always reallocating a new slice.
func Append(b []byte, x interface{}, flags AppendFlags) ([]byte, error) {
if x == nil {
// Special case for nil values because it makes the rest of the code
// simpler to assume that it won't be seeing nil pointers.
return append(b, "null"...), nil
}
t := reflect.TypeOf(x)
p := (*iface)(unsafe.Pointer(&x)).ptr
cache := cacheLoad()
c, found := cache[typeid(t)]
if !found {
c = constructCachedCodec(t, cache)
}
b, err := c.encode(encoder{flags: flags}, b, p)
runtime.KeepAlive(x)
return b, err
}
// Escape is a convenience helper to construct an escaped JSON string from s.
// The function escapes HTML characters; for more control over the escape
// behavior and to write to a pre-allocated buffer, use AppendEscape.
func Escape(s string) []byte {
// +10 for extra escape characters, maybe not enough and the buffer will
// be reallocated.
b := make([]byte, 0, len(s)+10)
return AppendEscape(b, s, EscapeHTML)
}
// AppendEscape appends s to b with the string escaped as a JSON value.
// This will include the starting and ending quote characters, and the
// appropriate characters will be escaped correctly for JSON encoding.
func AppendEscape(b []byte, s string, flags AppendFlags) []byte {
e := encoder{flags: flags}
b, _ = e.encodeString(b, unsafe.Pointer(&s))
return b
}
// Unescape is a convenience helper to unescape a JSON value.
// For more control over the unescape behavior and
// to write to a pre-allocated buffer, use AppendUnescape.
func Unescape(s []byte) []byte {
b := make([]byte, 0, len(s))
return AppendUnescape(b, s, ParseFlags(0))
}
// AppendUnescape appends s to b with the string unescaped as a JSON value.
// This will remove starting and ending quote characters, and the
// appropriate escape sequences will be decoded, as in standard JSON decoding.
// New space will be reallocated if more space is needed.
func AppendUnescape(b []byte, s []byte, flags ParseFlags) []byte {
d := decoder{flags: flags}
buf := new(string)
d.decodeString(s, unsafe.Pointer(buf))
return append(b, *buf...)
}
// Compact is documented at https://golang.org/pkg/encoding/json/#Compact
func Compact(dst *bytes.Buffer, src []byte) error {
return json.Compact(dst, src)
}
// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape
func HTMLEscape(dst *bytes.Buffer, src []byte) {
json.HTMLEscape(dst, src)
}
// Indent is documented at https://golang.org/pkg/encoding/json/#Indent
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
return json.Indent(dst, src, prefix, indent)
}
// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal
func Marshal(x interface{}) ([]byte, error) {
var err error
var buf = encoderBufferPool.Get().(*encoderBuffer)
if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil {
return nil, err
}
b := make([]byte, len(buf.data))
copy(b, buf.data)
encoderBufferPool.Put(buf)
return b, nil
}
// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent
func MarshalIndent(x interface{}, prefix, indent string) ([]byte, error) {
b, err := Marshal(x)
if err == nil {
tmp := &bytes.Buffer{}
tmp.Grow(2 * len(b))
Indent(tmp, b, prefix, indent)
b = tmp.Bytes()
}
return b, err
}
// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal
func Unmarshal(b []byte, x interface{}) error {
r, err := Parse(b, x, 0)
if len(r) != 0 {
if _, ok := err.(*SyntaxError); !ok {
// The encoding/json package prioritizes reporting errors caused by
// unexpected trailing bytes over other issues; here we emulate this
// behavior by overriding the error.
err = syntaxError(r, "invalid character '%c' after top-level value", r[0])
}
}
return err
}
// Parse behaves like Unmarshal but the caller can pass a set of flags to
// configure the parsing behavior.
func Parse(b []byte, x interface{}, flags ParseFlags) ([]byte, error) {
t := reflect.TypeOf(x)
p := (*iface)(unsafe.Pointer(&x)).ptr
d := decoder{flags: flags | internalParseFlags(b)}
b = skipSpaces(b)
if t == nil || p == nil || t.Kind() != reflect.Ptr {
_, r, _, err := d.parseValue(b)
r = skipSpaces(r)
if err != nil {
return r, err
}
return r, &InvalidUnmarshalError{Type: t}
}
t = t.Elem()
cache := cacheLoad()
c, found := cache[typeid(t)]
if !found {
c = constructCachedCodec(t, cache)
}
r, err := c.decode(d, b, p)
return skipSpaces(r), err
}
// Valid is documented at https://golang.org/pkg/encoding/json/#Valid
func Valid(data []byte) bool {
data = skipSpaces(data)
d := decoder{flags: internalParseFlags(data)}
_, data, _, err := d.parseValue(data)
if err != nil {
return false
}
return len(skipSpaces(data)) == 0
}
// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder
type Decoder struct {
reader io.Reader
buffer []byte
remain []byte
inputOffset int64
err error
flags ParseFlags
}
// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder
func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} }
// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.remain)
}
// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode
func (dec *Decoder) Decode(v interface{}) error {
raw, err := dec.readValue()
if err != nil {
return err
}
_, err = Parse(raw, v, dec.flags)
return err
}
const (
minBufferSize = 32768
minReadSize = 4096
)
// readValue reads one JSON value from the buffer and returns its raw bytes. It
// is optimized for the "one JSON value per line" case.
func (dec *Decoder) readValue() (v []byte, err error) {
var n int
var r []byte
d := decoder{flags: dec.flags}
for {
if len(dec.remain) != 0 {
v, r, _, err = d.parseValue(dec.remain)
if err == nil {
dec.remain, n = skipSpacesN(r)
dec.inputOffset += int64(len(v) + n)
return
}
if len(r) != 0 {
// Parsing of the next JSON value stopped at a position other
// than the end of the input buffer, which indicates that a
// syntax error was encountered.
return
}
}
if err = dec.err; err != nil {
if len(dec.remain) != 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
if dec.buffer == nil {
dec.buffer = make([]byte, 0, minBufferSize)
} else {
dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)]
dec.remain = nil
}
if (cap(dec.buffer) - len(dec.buffer)) < minReadSize {
buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer))
copy(buf, dec.buffer)
dec.buffer = buf
}
n, err = io.ReadFull(dec.reader, dec.buffer[len(dec.buffer):cap(dec.buffer)])
if n > 0 {
dec.buffer = dec.buffer[:len(dec.buffer)+n]
if err != nil {
err = nil
}
} else if err == io.ErrUnexpectedEOF {
err = io.EOF
}
dec.remain, n = skipSpacesN(dec.buffer)
d.flags = dec.flags | internalParseFlags(dec.remain)
dec.inputOffset += int64(n)
dec.err = err
}
}
// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields
func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields }
// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber
func (dec *Decoder) UseNumber() { dec.flags |= UseNumber }
// DontCopyString is an extension to the standard encoding/json package
// which instructs the decoder to not copy strings loaded from the json
// payloads when possible.
func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString }
// DontCopyNumber is an extension to the standard encoding/json package
// which instructs the decoder to not copy numbers loaded from the json
// payloads.
func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber }
// DontCopyRawMessage is an extension to the standard encoding/json package
// which instructs the decoder to not allocate RawMessage values in separate
// memory buffers (see the documentation of the DontCopyRawMessage flag for
// more details).
func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage }
// DontMatchCaseInsensitiveStructFields is an extension to the standard
// encoding/json package which instructs the decoder to not match object fields
// against struct fields in a case-insensitive way, the field names have to
// match exactly to be decoded into the struct field values.
func (dec *Decoder) DontMatchCaseInsensitiveStructFields() {
dec.flags |= DontMatchCaseInsensitiveStructFields
}
// ZeroCopy is an extension to the standard encoding/json package which enables
// all the copy optimizations of the decoder.
func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy }
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
return dec.inputOffset
}
// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder
type Encoder struct {
writer io.Writer
prefix string
indent string
buffer *bytes.Buffer
err error
flags AppendFlags
}
// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys | appendNewline}
}
// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
var err error
var buf = encoderBufferPool.Get().(*encoderBuffer)
buf.data, err = Append(buf.data[:0], v, enc.flags)
if err != nil {
encoderBufferPool.Put(buf)
return err
}
if (enc.flags & appendNewline) != 0 {
buf.data = append(buf.data, '\n')
}
b := buf.data
if enc.prefix != "" || enc.indent != "" {
if enc.buffer == nil {
enc.buffer = new(bytes.Buffer)
enc.buffer.Grow(2 * len(buf.data))
} else {
enc.buffer.Reset()
}
Indent(enc.buffer, buf.data, enc.prefix, enc.indent)
b = enc.buffer.Bytes()
}
if _, err := enc.writer.Write(b); err != nil {
enc.err = err
}
encoderBufferPool.Put(buf)
return err
}
// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML
func (enc *Encoder) SetEscapeHTML(on bool) {
if on {
enc.flags |= EscapeHTML
} else {
enc.flags &= ^EscapeHTML
}
}
// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent
func (enc *Encoder) SetIndent(prefix, indent string) {
enc.prefix = prefix
enc.indent = indent
}
// SetSortMapKeys is an extension to the standard encoding/json package which
// allows the program to toggle sorting of map keys on and off.
func (enc *Encoder) SetSortMapKeys(on bool) {
if on {
enc.flags |= SortMapKeys
} else {
enc.flags &= ^SortMapKeys
}
}
// SetTrustRawMessage skips value checking when encoding a raw json message. It should only
// be used if the values are known to be valid json, e.g. because they were originally created
// by json.Unmarshal.
func (enc *Encoder) SetTrustRawMessage(on bool) {
if on {
enc.flags |= TrustRawMessage
} else {
enc.flags &= ^TrustRawMessage
}
}
// SetAppendNewline is an extension to the standard encoding/json package which
// allows the program to toggle the addition of a newline in Encode on or off.
func (enc *Encoder) SetAppendNewline(on bool) {
if on {
enc.flags |= appendNewline
} else {
enc.flags &= ^appendNewline
}
}
var encoderBufferPool = sync.Pool{
New: func() interface{} { return &encoderBuffer{data: make([]byte, 0, 4096)} },
}
type encoderBuffer struct{ data []byte }
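For orientation, here is a short usage sketch of the drop-in API declared in this file. It only uses functions and flags defined above (Marshal, Unmarshal, Append, NewEncoder, EscapeHTML, SortMapKeys); the Point type and the buffer-reuse pattern are illustrative, not part of the package.
package main
import (
    "fmt"
    "os"
    "github.com/segmentio/encoding/json"
)
type Point struct {
    X int `json:"x"`
    Y int `json:"y"`
}
func main() {
    // Marshal/Unmarshal behave like encoding/json.
    b, err := json.Marshal(map[string]Point{"b": {1, 2}, "a": {3, 4}})
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // keys sorted, since Marshal passes SortMapKeys to Append
    // Append writes into a caller-provided buffer, which is how the
    // encoderBufferPool above avoids allocations.
    buf := make([]byte, 0, 64)
    buf, err = json.Append(buf, Point{5, 6}, json.EscapeHTML|json.SortMapKeys)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(buf))
    var p Point
    if err := json.Unmarshal(buf, &p); err != nil {
        panic(err)
    }
    fmt.Println(p)
    // The Encoder mirrors encoding/json's, with extra toggles such as
    // SetSortMapKeys and SetTrustRawMessage defined above.
    enc := json.NewEncoder(os.Stdout)
    enc.SetSortMapKeys(false)
    _ = enc.Encode(map[string]int{"n": 1})
}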

787
vendor/github.com/segmentio/encoding/json/parse.go generated vendored Normal file
View File

@ -0,0 +1,787 @@
package json
import (
"bytes"
"encoding/binary"
"math"
"math/bits"
"reflect"
"unicode"
"unicode/utf16"
"unicode/utf8"
"github.com/segmentio/encoding/ascii"
)
// All spaces characters defined in the json specification.
const (
sp = ' '
ht = '\t'
nl = '\n'
cr = '\r'
)
const (
escape = '\\'
quote = '"'
)
func internalParseFlags(b []byte) (flags ParseFlags) {
// Don't consider surrounding whitespace
b = skipSpaces(b)
b = trimTrailingSpaces(b)
if ascii.ValidPrint(b) {
flags |= validAsciiPrint
}
if bytes.IndexByte(b, '\\') == -1 {
flags |= noBackslash
}
return
}
func skipSpaces(b []byte) []byte {
if len(b) > 0 && b[0] <= 0x20 {
b, _ = skipSpacesN(b)
}
return b
}
func skipSpacesN(b []byte) ([]byte, int) {
for i := range b {
switch b[i] {
case sp, ht, nl, cr:
default:
return b[i:], i
}
}
return nil, 0
}
func trimTrailingSpaces(b []byte) []byte {
if len(b) > 0 && b[len(b)-1] <= 0x20 {
b = trimTrailingSpacesN(b)
}
return b
}
func trimTrailingSpacesN(b []byte) []byte {
i := len(b) - 1
loop:
for ; i >= 0; i-- {
switch b[i] {
case sp, ht, nl, cr:
default:
break loop
}
}
return b[:i+1]
}
// parseInt parses a decimal representation of an int64 from b.
//
// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but
// it prevents Go from making a memory allocation for converting a byte slice to
// a string (escape analysis fails due to the error returned by strconv.ParseInt).
//
// Because it only works with base 10 the function is also significantly faster
// than strconv.ParseInt.
func (d decoder) parseInt(b []byte, t reflect.Type) (int64, []byte, error) {
var value int64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode integer from an empty input")
}
if b[0] == '-' {
const max = math.MinInt64
const lim = max / 10
if len(b) == 1 {
return 0, b, syntaxError(b, "cannot decode integer from '-'")
}
if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for _, c := range b[1:] {
if !(c >= '0' && c <= '9') {
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
break
}
if value < lim {
return 0, b, unmarshalOverflow(b, t)
}
value *= 10
x := int64(c - '0')
if value < (max + x) {
return 0, b, unmarshalOverflow(b, t)
}
value -= x
count++
}
count++
} else {
if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
x := int64(b[count] - '0')
next := value*10 + x
if next < value {
return 0, b, unmarshalOverflow(b, t)
}
value = next
}
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
}
if count < len(b) {
switch b[count] {
case '.', 'e', 'E': // was this actually a float?
v, r, _, err := d.parseNumber(b)
if err != nil {
v, r = b[:count+1], b[count+1:]
}
return 0, r, unmarshalTypeError(v, t)
}
}
return value, b[count:], nil
}
// parseUint is like parseInt but for unsigned integers.
func (d decoder) parseUint(b []byte, t reflect.Type) (uint64, []byte, error) {
var value uint64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode integer value from an empty input")
}
if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
x := uint64(b[count] - '0')
next := value*10 + x
if next < value {
return 0, b, unmarshalOverflow(b, t)
}
value = next
}
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
if count < len(b) {
switch b[count] {
case '.', 'e', 'E': // was this actually a float?
v, r, _, err := d.parseNumber(b)
if err != nil {
v, r = b[:count+1], b[count+1:]
}
return 0, r, unmarshalTypeError(v, t)
}
}
return value, b[count:], nil
}
// parseUintHex parses a hexadecimal representation of a uint64 from b.
//
// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but
// it prevents Go from making a memory allocation for converting a byte slice to
// a string (escape analysis fails due to the error returned by strconv.ParseUint).
//
// Because it only works with base 16 the function is also significantly faster
// than strconv.ParseUint.
func (d decoder) parseUintHex(b []byte) (uint64, []byte, error) {
const max = math.MaxUint64
const lim = max / 0x10
var value uint64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input")
}
parseLoop:
for i, c := range b {
var x uint64
switch {
case c >= '0' && c <= '9':
x = uint64(c - '0')
case c >= 'A' && c <= 'F':
x = uint64(c-'A') + 0xA
case c >= 'a' && c <= 'f':
x = uint64(c-'a') + 0xA
default:
if i == 0 {
return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", c)
}
break parseLoop
}
if value > lim {
return 0, b, syntaxError(b, "hexadecimal value out of range")
}
if value *= 0x10; value > (max - x) {
return 0, b, syntaxError(b, "hexadecimal value out of range")
}
value += x
count++
}
return value, b[count:], nil
}
func (d decoder) parseNull(b []byte) ([]byte, []byte, Kind, error) {
if hasNullPrefix(b) {
return b[:4], b[4:], Null, nil
}
if len(b) < 4 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'null' but found invalid token")
}
func (d decoder) parseTrue(b []byte) ([]byte, []byte, Kind, error) {
if hasTruePrefix(b) {
return b[:4], b[4:], True, nil
}
if len(b) < 4 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'true' but found invalid token")
}
func (d decoder) parseFalse(b []byte) ([]byte, []byte, Kind, error) {
if hasFalsePrefix(b) {
return b[:5], b[5:], False, nil
}
if len(b) < 5 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'false' but found invalid token")
}
func (d decoder) parseNumber(b []byte) (v, r []byte, kind Kind, err error) {
if len(b) == 0 {
r, err = b, unexpectedEOF(b)
return
}
// Assume it's an unsigned integer at first.
kind = Uint
i := 0
// sign
if b[i] == '-' {
kind = Int
i++
}
if i == len(b) {
r, err = b[i:], syntaxError(b, "missing number value after sign")
return
}
if b[i] < '0' || b[i] > '9' {
r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i])
return
}
// integer part
if b[i] == '0' {
i++
if i == len(b) || (b[i] != '.' && b[i] != 'e' && b[i] != 'E') {
v, r = b[:i], b[i:]
return
}
if '0' <= b[i] && b[i] <= '9' {
r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character")
return
}
}
for i < len(b) && '0' <= b[i] && b[i] <= '9' {
i++
}
// decimal part
if i < len(b) && b[i] == '.' {
kind = Float
i++
decimalStart := i
for i < len(b) {
if c := b[i]; !('0' <= c && c <= '9') {
if i == decimalStart {
r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c)
return
}
break
}
i++
}
if i == decimalStart {
r, err = b[i:], syntaxError(b, "expected decimal part after '.'")
return
}
}
// exponent part
if i < len(b) && (b[i] == 'e' || b[i] == 'E') {
kind = Float
i++
if i < len(b) {
if c := b[i]; c == '+' || c == '-' {
i++
}
}
if i == len(b) {
r, err = b[i:], syntaxError(b, "missing exponent in number")
return
}
exponentStart := i
for i < len(b) {
if c := b[i]; !('0' <= c && c <= '9') {
if i == exponentStart {
err = syntaxError(b, "expected digit but found '%c'", c)
return
}
break
}
i++
}
}
v, r = b[:i], b[i:]
return
}
func (d decoder) parseUnicode(b []byte) (rune, int, error) {
if len(b) < 4 {
return 0, len(b), syntaxError(b, "unicode code point must have at least 4 characters")
}
u, r, err := d.parseUintHex(b[:4])
if err != nil {
return 0, 4, syntaxError(b, "parsing unicode code point: %s", err)
}
if len(r) != 0 {
return 0, 4, syntaxError(b, "invalid unicode code point")
}
return rune(u), 4, nil
}
func (d decoder) parseString(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '"' {
return nil, b, Undefined, syntaxError(b, "expected '\"' at the beginning of a string value")
}
var n int
if len(b) >= 9 {
// This is an optimization for short strings. We read 8/16 bytes,
// and XOR each with 0x22 (") so that these bytes (and only
// these bytes) are now zero. We use the hasless(u,1) trick
// from https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
// to determine whether any bytes are zero. Finally, we CTZ
// to find the index of that byte.
const mask1 = 0x2222222222222222
const mask2 = 0x0101010101010101
const mask3 = 0x8080808080808080
u := binary.LittleEndian.Uint64(b[1:]) ^ mask1
if mask := (u - mask2) & ^u & mask3; mask != 0 {
n = bits.TrailingZeros64(mask)/8 + 2
goto found
}
if len(b) >= 17 {
u = binary.LittleEndian.Uint64(b[9:]) ^ mask1
if mask := (u - mask2) & ^u & mask3; mask != 0 {
n = bits.TrailingZeros64(mask)/8 + 10
goto found
}
}
}
n = bytes.IndexByte(b[1:], '"') + 2
if n <= 1 {
return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value")
}
found:
if (d.flags.has(noBackslash) || bytes.IndexByte(b[1:n], '\\') < 0) &&
(d.flags.has(validAsciiPrint) || ascii.ValidPrint(b[1:n])) {
return b[:n], b[n:], Unescaped, nil
}
for i := 1; i < len(b); i++ {
switch b[i] {
case '\\':
if i++; i < len(b) {
switch b[i] {
case '"', '\\', '/', 'n', 'r', 't', 'f', 'b':
case 'u':
_, n, err := d.parseUnicode(b[i+1:])
if err != nil {
return nil, b[i+1+n:], Undefined, err
}
i += n
default:
return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i])
}
}
case '"':
return b[:i+1], b[i+1:], String, nil
default:
if b[i] < 0x20 {
return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i])
}
}
}
return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value")
}
func (d decoder) parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) {
s, b, k, err := d.parseString(b)
if err != nil {
return s, b, false, err
}
s = s[1 : len(s)-1] // trim the quotes
if k == Unescaped {
return s, b, false, nil
}
if r == nil {
r = make([]byte, 0, len(s))
}
for len(s) != 0 {
i := bytes.IndexByte(s, '\\')
if i < 0 {
r = appendCoerceInvalidUTF8(r, s)
break
}
r = appendCoerceInvalidUTF8(r, s[:i])
s = s[i+1:]
c := s[0]
switch c {
case '"', '\\', '/':
// simple escaped character
case 'n':
c = '\n'
case 'r':
c = '\r'
case 't':
c = '\t'
case 'b':
c = '\b'
case 'f':
c = '\f'
case 'u':
s = s[1:]
r1, n1, err := d.parseUnicode(s)
if err != nil {
return r, b, true, err
}
s = s[n1:]
if utf16.IsSurrogate(r1) {
if !hasPrefix(s, `\u`) {
r1 = unicode.ReplacementChar
} else {
r2, n2, err := d.parseUnicode(s[2:])
if err != nil {
return r, b, true, err
}
if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar {
s = s[2+n2:]
}
}
}
r = appendRune(r, r1)
continue
default: // not sure what this escape sequence is
return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c)
}
r = append(r, c)
s = s[1:]
}
return r, b, true, nil
}
func appendRune(b []byte, r rune) []byte {
n := len(b)
b = append(b, 0, 0, 0, 0)
return b[:n+utf8.EncodeRune(b[n:], r)]
}
func appendCoerceInvalidUTF8(b []byte, s []byte) []byte {
c := [4]byte{}
for _, r := range string(s) {
b = append(b, c[:utf8.EncodeRune(c[:], r)]...)
}
return b
}
func (d decoder) parseObject(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '{' {
return nil, b, Undefined, syntaxError(b, "expected '{' at the beginning of an object value")
}
var err error
var a = b
var n = len(b)
var i = 0
b = b[1:]
for {
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "cannot decode object from empty input")
}
if b[0] == '}' {
j := (n - len(b)) + 1
return a[:j], a[j:], Object, nil
}
if i != 0 {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field value")
}
if b[0] != ',' {
return nil, b, Undefined, syntaxError(b, "expected ',' after object field value but found '%c'", b[0])
}
b = skipSpaces(b[1:])
if len(b) == 0 {
return nil, b, Undefined, unexpectedEOF(b)
}
if b[0] == '}' {
return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field")
}
}
_, b, _, err = d.parseString(b)
if err != nil {
return nil, b, Undefined, err
}
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field key")
}
if b[0] != ':' {
return nil, b, Undefined, syntaxError(b, "expected ':' after object field key but found '%c'", b[0])
}
b = skipSpaces(b[1:])
_, b, _, err = d.parseValue(b)
if err != nil {
return nil, b, Undefined, err
}
i++
}
}
func (d decoder) parseArray(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '[' {
return nil, b, Undefined, syntaxError(b, "expected '[' at the beginning of array value")
}
var err error
var a = b
var n = len(b)
var i = 0
b = b[1:]
for {
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "missing closing ']' after array value")
}
if b[0] == ']' {
j := (n - len(b)) + 1
return a[:j], a[j:], Array, nil
}
if i != 0 {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after array element")
}
if b[0] != ',' {
return nil, b, Undefined, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
}
b = skipSpaces(b[1:])
if len(b) == 0 {
return nil, b, Undefined, unexpectedEOF(b)
}
if b[0] == ']' {
return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field")
}
}
_, b, _, err = d.parseValue(b)
if err != nil {
return nil, b, Undefined, err
}
i++
}
}
func (d decoder) parseValue(b []byte) ([]byte, []byte, Kind, error) {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected end of JSON input")
}
var v []byte
var k Kind
var err error
switch b[0] {
case '{':
v, b, k, err = d.parseObject(b)
case '[':
v, b, k, err = d.parseArray(b)
case '"':
v, b, k, err = d.parseString(b)
case 'n':
v, b, k, err = d.parseNull(b)
case 't':
v, b, k, err = d.parseTrue(b)
case 'f':
v, b, k, err = d.parseFalse(b)
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
v, b, k, err = d.parseNumber(b)
default:
err = syntaxError(b, "invalid character '%c' looking for beginning of value", b[0])
}
return v, b, k, err
}
func hasNullPrefix(b []byte) bool {
return len(b) >= 4 && string(b[:4]) == "null"
}
func hasTruePrefix(b []byte) bool {
return len(b) >= 4 && string(b[:4]) == "true"
}
func hasFalsePrefix(b []byte) bool {
return len(b) >= 5 && string(b[:5]) == "false"
}
func hasPrefix(b []byte, s string) bool {
return len(b) >= len(s) && s == string(b[:len(s)])
}
func hasLeadingSign(b []byte) bool {
return len(b) > 0 && (b[0] == '+' || b[0] == '-')
}
func hasLeadingZeroes(b []byte) bool {
if hasLeadingSign(b) {
b = b[1:]
}
return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9'
}
func appendToLower(b, s []byte) []byte {
if ascii.Valid(s) { // fast path for ascii strings
i := 0
for j := range s {
c := s[j]
if 'A' <= c && c <= 'Z' {
b = append(b, s[i:j]...)
b = append(b, c+('a'-'A'))
i = j + 1
}
}
return append(b, s[i:]...)
}
for _, r := range string(s) {
b = appendRune(b, foldRune(r))
}
return b
}
func foldRune(r rune) rune {
if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' {
r = r + ('a' - 'A')
}
return r
}
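parseString above finds the closing quote eight bytes at a time using the hasless(u, 1) zero-byte trick referenced in its comment. The following is a standalone sketch of just that step; firstQuoteIndex is a name invented here, and only the 8-byte fast path is shown.
package main
import (
    "encoding/binary"
    "fmt"
    "math/bits"
)
// firstQuoteIndex returns the index of the first '"' in the first 8 bytes of b,
// or -1 if there is none. It XORs the word with 0x22 repeated so that quote
// bytes become zero, then uses the zero-byte mask to locate the lowest one.
func firstQuoteIndex(b []byte) int {
    const mask1 = 0x2222222222222222 // '"' in every byte
    const mask2 = 0x0101010101010101
    const mask3 = 0x8080808080808080
    if len(b) < 8 {
        return -1 // this sketch only handles the aligned fast path
    }
    u := binary.LittleEndian.Uint64(b) ^ mask1
    if m := (u - mask2) & ^u & mask3; m != 0 {
        return bits.TrailingZeros64(m) / 8
    }
    return -1
}
func main() {
    fmt.Println(firstQuoteIndex([]byte(`hello"world`)))    // 5
    fmt.Println(firstQuoteIndex([]byte("no quotes here"))) // -1
}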

20
vendor/github.com/segmentio/encoding/json/reflect.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
//go:build go1.18
// +build go1.18
package json
import (
"reflect"
"unsafe"
)
func extendSlice(t reflect.Type, s *slice, n int) slice {
arrayType := reflect.ArrayOf(n, t.Elem())
arrayData := reflect.New(arrayType)
reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem())
return slice{
data: unsafe.Pointer(arrayData.Pointer()),
len: s.len,
cap: n,
}
}
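extendSlice above grows a slice by backing it with a freshly allocated array addressed through an unsafe slice header. For intuition only, here is a reflect-only analogue, under the assumption that working with reflect.Value instead of a raw pointer is acceptable; growValueSlice is a name used only in this sketch.
package main
import (
    "fmt"
    "reflect"
)
// growValueSlice returns a slice value with capacity n backed by a new [n]T
// array, copying the existing elements, mirroring what extendSlice does with
// an unsafe slice header.
func growValueSlice(s reflect.Value, n int) reflect.Value {
    arrayType := reflect.ArrayOf(n, s.Type().Elem())
    arrayData := reflect.New(arrayType) // *[n]T
    reflect.Copy(arrayData.Elem(), s)   // copy the existing elements
    return arrayData.Elem().Slice(0, s.Len())
}
func main() {
    s := reflect.ValueOf([]int{1, 2, 3})
    g := growValueSlice(s, 8)
    fmt.Println(g.Interface(), g.Cap()) // [1 2 3] 8
}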

View File

@ -0,0 +1,30 @@
//go:build !go1.18
// +build !go1.18
package json
import (
"reflect"
"unsafe"
)
//go:linkname unsafe_NewArray reflect.unsafe_NewArray
func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int
func extendSlice(t reflect.Type, s *slice, n int) slice {
elemTypeRef := t.Elem()
elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr
d := slice{
data: unsafe_NewArray(elemTypePtr, n),
len: s.len,
cap: n,
}
typedslicecopy(elemTypePtr, d, *s)
return d
}

70
vendor/github.com/segmentio/encoding/json/string.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
package json
import (
"math/bits"
"unsafe"
)
const (
lsb = 0x0101010101010101
msb = 0x8080808080808080
)
// escapeIndex finds the index of the first char in `s` that requires escaping.
// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if
// it includes a double quote or backslash. If the escapeHTML mode is enabled,
// the chars <, > and & also require escaping. If no chars in `s` require
// escaping, the return value is -1.
func escapeIndex(s string, escapeHTML bool) int {
chunks := stringToUint64(s)
for _, n := range chunks {
// combine masks before checking for the MSB of each byte. We include
// `n` in the mask to check whether any of the *input* byte MSBs were
// set (i.e. the byte was outside the ASCII range).
mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\')
if escapeHTML {
mask |= contains(n, '<') | contains(n, '>') | contains(n, '&')
}
if (mask & msb) != 0 {
return bits.TrailingZeros64(mask&msb) / 8
}
}
for i := len(chunks) * 8; i < len(s); i++ {
c := s[i]
if c < 0x20 || c > 0x7f || c == '"' || c == '\\' || (escapeHTML && (c == '<' || c == '>' || c == '&')) {
return i
}
}
return -1
}
// below returns a mask that can be used to determine if any of the bytes
// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was
// below `b`. The result is only valid if `b`, and each byte in `n`, is below
// 0x80.
func below(n uint64, b byte) uint64 {
return n - expand(b)
}
// contains returns a mask that can be used to determine if any of the
// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then
// that byte is equal to `b`. The result is only valid if `b`, and each
// byte in `n`, is below 0x80.
func contains(n uint64, b byte) uint64 {
return (n ^ expand(b)) - lsb
}
// expand puts the specified byte into each of the 8 bytes of a uint64.
func expand(b byte) uint64 {
return lsb * uint64(b)
}
func stringToUint64(s string) []uint64 {
return *(*[]uint64)(unsafe.Pointer(&sliceHeader{
Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
Len: len(s) / 8,
Cap: len(s) / 8,
}))
}
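As a small self-contained sketch of how the below/contains/expand masks above combine in escapeIndex to test eight bytes at once: needsEscape8 is invented for this illustration; as documented above, the per-byte masks are only exact when each byte is below 0x80, and folding n itself into the mask flags any non-ASCII byte.
package main
import (
    "encoding/binary"
    "fmt"
)
const (
    lsb = 0x0101010101010101
    msb = 0x8080808080808080
)
// expand, below and contains restate the helpers defined above.
func expand(b byte) uint64 { return lsb * uint64(b) }
func below(n uint64, b byte) uint64 { return n - expand(b) }
func contains(n uint64, b byte) uint64 { return (n ^ expand(b)) - lsb }
// needsEscape8 reports whether any of the first 8 bytes of s would need
// escaping in a JSON string: a control character, a double quote, a
// backslash, or a non-ASCII byte (caught by the leading n term).
func needsEscape8(s string) bool {
    n := binary.LittleEndian.Uint64([]byte(s[:8]))
    mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\')
    return mask&msb != 0
}
func main() {
    fmt.Println(needsEscape8("plaintext"))     // false
    fmt.Println(needsEscape8(`say "hi" now`))  // true
    fmt.Println(needsEscape8("tab\tseparated")) // true
}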

416
vendor/github.com/segmentio/encoding/json/token.go generated vendored Normal file
View File

@ -0,0 +1,416 @@
package json
import (
"strconv"
"sync"
"unsafe"
)
// Tokenizer is an iterator-style type which can be used to progressively parse
// through a json input.
//
// Tokenizing json is useful to build highly efficient parsing operations, for
// example when doing transformations on-the-fly as the program reads the
// input and produces the transformed json to an output buffer.
//
// Here is a common pattern to use a tokenizer:
//
// for t := json.NewTokenizer(b); t.Next(); {
// switch k := t.Kind(); k.Class() {
// case json.Null:
// ...
// case json.Bool:
// ...
// case json.Num:
// ...
// case json.String:
// ...
// case json.Array:
// ...
// case json.Object:
// ...
// }
// }
//
type Tokenizer struct {
// When the tokenizer is positioned on a json delimiter this field is not
// zero. In this case the possible values are '{', '}', '[', ']', ':', and
// ','.
Delim Delim
// This field contains the raw json token that the tokenizer is pointing at.
// When Delim is not zero, this field is a single-element byte slice
// containing the delimiter value. Otherwise, this field holds values like
// null, true, false, numbers, or quoted strings.
Value RawValue
// When the tokenizer has encountered invalid content this field is not nil.
Err error
// When the value is in an array or an object, this field contains the depth
// at which it was found.
Depth int
// When the value is in an array or an object, this field contains the
// position at which it was found.
Index int
// This field is true when the value is the key of an object.
IsKey bool
// Tells whether the next value read from the tokenizer is a key.
isKey bool
// json input for the tokenizer, pointing at data right after the last token
// that was parsed.
json []byte
// Stack used to track entering and leaving arrays, objects, and keys.
stack *stack
// Decoder used for parsing.
decoder
}
// NewTokenizer constructs a new Tokenizer which reads its json input from b.
func NewTokenizer(b []byte) *Tokenizer {
return &Tokenizer{
json: b,
decoder: decoder{flags: internalParseFlags(b)},
}
}
// Reset erases the state of t and re-initializes it with the json input from b.
func (t *Tokenizer) Reset(b []byte) {
if t.stack != nil {
releaseStack(t.stack)
}
// This code is similar to:
//
// *t = Tokenizer{json: b}
//
// However, it does not compile down to an invocation of duff-copy.
t.Delim = 0
t.Value = nil
t.Err = nil
t.Depth = 0
t.Index = 0
t.IsKey = false
t.isKey = false
t.json = b
t.stack = nil
t.decoder = decoder{flags: internalParseFlags(b)}
}
// Next returns a new tokenizer pointing at the next token, or the zero-value of
// Tokenizer if the end of the json input has been reached.
//
// If the tokenizer encounters malformed json while reading the input the method
// sets t.Err to an error describing the issue, and returns false. Once an error
// has been encountered, the tokenizer will always fail until its input is
// cleared by a call to its Reset method.
func (t *Tokenizer) Next() bool {
if t.Err != nil {
return false
}
// Inlined code of the skipSpaces function, this gives a ~15% speed boost.
i := 0
skipLoop:
for _, c := range t.json {
switch c {
case sp, ht, nl, cr:
i++
default:
break skipLoop
}
}
if i > 0 {
t.json = t.json[i:]
}
if len(t.json) == 0 {
t.Reset(nil)
return false
}
var kind Kind
switch t.json[0] {
case '"':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseString(t.json)
case 'n':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseNull(t.json)
case 't':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseTrue(t.json)
case 'f':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseFalse(t.json)
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseNumber(t.json)
case '{', '}', '[', ']', ':', ',':
t.Delim, t.Value, t.json = Delim(t.json[0]), t.json[:1], t.json[1:]
switch t.Delim {
case '{':
kind = Object
case '[':
kind = Array
}
default:
t.Delim = 0
t.Value, t.json, t.Err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0])
}
t.Depth = t.depth()
t.Index = t.index()
t.flags = t.flags.withKind(kind)
if t.Delim == 0 {
t.IsKey = t.isKey
} else {
t.IsKey = false
switch t.Delim {
case '{':
t.isKey = true
t.push(inObject)
case '[':
t.push(inArray)
case '}':
t.Err = t.pop(inObject)
t.Depth--
t.Index = t.index()
case ']':
t.Err = t.pop(inArray)
t.Depth--
t.Index = t.index()
case ':':
t.isKey = false
case ',':
if t.stack == nil || len(t.stack.state) == 0 {
t.Err = syntaxError(t.json, "found unexpected comma")
return false
}
if t.stack.is(inObject) {
t.isKey = true
}
t.stack.state[len(t.stack.state)-1].len++
}
}
return (t.Delim != 0 || len(t.Value) != 0) && t.Err == nil
}
func (t *Tokenizer) depth() int {
if t.stack == nil {
return 0
}
return t.stack.depth()
}
func (t *Tokenizer) index() int {
if t.stack == nil {
return 0
}
return t.stack.index()
}
func (t *Tokenizer) push(typ scope) {
if t.stack == nil {
t.stack = acquireStack()
}
t.stack.push(typ)
}
func (t *Tokenizer) pop(expect scope) error {
if t.stack == nil || !t.stack.pop(expect) {
return syntaxError(t.json, "found unexpected character while tokenizing json input")
}
return nil
}
// Kind returns the kind of the value that the tokenizer is currently positioned
// on.
func (t *Tokenizer) Kind() Kind { return t.flags.kind() }
// Bool returns a bool containing the value of the json boolean that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a boolean, the behavior is undefined.
func (t *Tokenizer) Bool() bool { return t.flags.kind() == True }
// Int returns a byte slice containing the value of the json number that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on an integer, the behavior is undefined.
func (t *Tokenizer) Int() int64 {
i, _, _ := t.parseInt(t.Value, int64Type)
return i
}
// Uint returns a byte slice containing the value of the json number that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a positive integer, the behavior is
// undefined.
func (t *Tokenizer) Uint() uint64 {
u, _, _ := t.parseUint(t.Value, uint64Type)
return u
}
// Float returns a byte slice containing the value of the json number that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a number, the behavior is undefined.
func (t *Tokenizer) Float() float64 {
f, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&t.Value)), 64)
return f
}
// String returns a byte slice containing the value of the json string that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// When possible, the returned byte slice references the backing array of the
// tokenizer. A new slice is only allocated if the tokenizer needed to unescape
// the json string.
//
// If the tokenizer is not positioned on a string, the behavior is undefined.
func (t *Tokenizer) String() []byte {
if t.flags.kind() == Unescaped && len(t.Value) > 1 {
return t.Value[1 : len(t.Value)-1] // unquote
}
s, _, _, _ := t.parseStringUnquote(t.Value, nil)
return s
}
// RawValue represents a raw json value, it is intended to carry null, true,
// false, number, and string values only.
type RawValue []byte
// String returns true if v contains a string value.
func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' }
// Null returns true if v contains a null value.
func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' }
// True returns true if v contains a true value.
func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' }
// False returns true if v contains a false value.
func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' }
// Number returns true if v contains a number value.
func (v RawValue) Number() bool {
if len(v) != 0 {
switch v[0] {
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
}
return false
}
// AppendUnquote writes the unquoted version of the string value in v into b.
func (v RawValue) AppendUnquote(b []byte) []byte {
d := decoder{}
s, r, _, err := d.parseStringUnquote(v, b)
if err != nil {
panic(err)
}
if len(r) != 0 {
panic(syntaxError(r, "unexpected trailing tokens after json value"))
}
return append(b, s...)
}
// Unquote returns the unquoted version of the string value in v.
func (v RawValue) Unquote() []byte {
return v.AppendUnquote(nil)
}
type scope int
const (
inArray scope = iota
inObject
)
type state struct {
typ scope
len int
}
type stack struct {
state []state
}
func (s *stack) push(typ scope) {
s.state = append(s.state, state{typ: typ, len: 1})
}
func (s *stack) pop(expect scope) bool {
i := len(s.state) - 1
if i < 0 {
return false
}
if found := s.state[i]; expect != found.typ {
return false
}
s.state = s.state[:i]
return true
}
func (s *stack) is(typ scope) bool {
return len(s.state) != 0 && s.state[len(s.state)-1].typ == typ
}
func (s *stack) depth() int {
return len(s.state)
}
func (s *stack) index() int {
if len(s.state) == 0 {
return 0
}
return s.state[len(s.state)-1].len - 1
}
func acquireStack() *stack {
s, _ := stackPool.Get().(*stack)
if s == nil {
s = &stack{state: make([]state, 0, 4)}
} else {
s.state = s.state[:0]
}
return s
}
func releaseStack(s *stack) {
stackPool.Put(s)
}
var (
stackPool sync.Pool // *stack
)
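To make the Tokenizer loop from the type's comment above concrete, here is a small runnable walk over an arbitrary input document. It uses only fields and methods defined in this file (Kind, Class, IsKey, Depth, String, Int, Value); the printed layout is illustrative.
package main
import (
    "fmt"
    "github.com/segmentio/encoding/json"
)
func main() {
    input := []byte(`{"name":"gopher","tags":["a","b"],"count":2}`)
    for t := json.NewTokenizer(input); t.Next(); {
        switch k := t.Kind(); k.Class() {
        case json.String:
            if t.IsKey {
                fmt.Printf("depth=%d key   %s\n", t.Depth, t.String())
            } else {
                fmt.Printf("depth=%d value %q\n", t.Depth, t.String())
            }
        case json.Num:
            fmt.Printf("depth=%d value %d\n", t.Depth, t.Int())
        case json.Array, json.Object:
            fmt.Printf("depth=%d begin %s\n", t.Depth, t.Value)
        }
    }
}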

38
vendor/go.lsp.dev/jsonrpc2/.codecov.yml vendored Normal file
View File

@ -0,0 +1,38 @@
codecov:
allow_coverage_offsets: true
notify:
wait_for_ci: false
coverage:
precision: 1
round: down
range: "70...100"
status:
project:
default:
target: auto
threshold: 1%
if_ci_failed: error
if_not_found: success
patch:
default:
only_pulls: true
target: 50%
threshold: 10%
if_ci_failed: error
if_not_found: failure
changes:
default:
if_ci_failed: error
if_not_found: success
only_pulls: false
branches:
- main
comment:
behavior: default
show_carryforward_flags: true
github_checks:
annotations: true

View File

@ -0,0 +1 @@
(*go.lsp.dev/jsonrpc2.Request).Reply

View File

@ -0,0 +1,11 @@
# go.lsp.dev/jsonrpc2 project gitattributes file
# https://github.com/github/linguist#using-gitattributes
# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
# To prevent CRLF breakages on Windows for fragile files, like testdata.
* -text
docs/ linguist-documentation
*.pb.go linguist-generated
*_gen.go linguist-generated
*_string.go linguist-generated

52
vendor/go.lsp.dev/jsonrpc2/.gitignore vendored Normal file
View File

@ -0,0 +1,52 @@
# go.lsp.dev/jsonrpc2 project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
# please do not open a pull request to add something created by your editor or tools
# github/gitignore/Go.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
vendor/
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
# cgo generated
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
# test generated
_testmain.go
# profile
*.pprof
# coverage
coverage.*
# tools
bin/

200
vendor/go.lsp.dev/jsonrpc2/.golangci.yml vendored Normal file
View File

@ -0,0 +1,200 @@
run:
timeout: 5m
issues-exit-code: 1
tests: true
skip-dirs: []
skip-dirs-use-default: true
skip-files: []
allow-parallel-runners: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
uniq-by-line: true
sort-results: true
linters-settings:
dupl:
threshold: 100
# errcheck:
# check-type-assertions: true
# check-blank: true
# exclude: .errcheckignore
funlen:
lines: 100
statements: 60
gocognit:
min-complexity: 20
goconst:
min-len: 3
min-occurrences: 3
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- commentedOutCode
- whyNoLint
settings:
hugeParam:
sizeThreshold: 80
rangeExprCopy:
sizeThreshold: 512
rangeValCopy:
sizeThreshold: 128
gocyclo:
min-complexity: 15
godot:
scope: declarations
capital: false
gofmt:
simplify: true
goimports:
local-prefixes: go.lsp.dev/jsonrpc2
golint:
min-confidence: 0.3
govet:
enable-all: true
check-shadowing: true
disable:
- fieldalignment
depguard:
list-type: blacklist
include-go-root: true
# packages-with-error-message:
# - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
lll:
line-length: 120
tab-width: 1
maligned:
suggest-new: true
misspell:
locale: US
ignore-words:
- cancelled
nakedret:
max-func-lines: 30
prealloc:
simple: true
range-loops: true
for-loops: true
testpackage:
skip-regexp: '.*(export)_test\.go'
unparam:
check-exported: true
algo: cha
unused:
check-exported: false
whitespace:
multi-if: true
multi-func: true
linters:
fast: false
disabled:
- deadcode # Finds unused code
- errcheck # Errcheck is a program for checking for unchecked errors in go programs
- exhaustivestruct # Checks if all struct's fields are initialized
- forbidigo # Forbids identifiers
- gci # Gci control golang package import order and make it always deterministic
- gochecknoglobals # check that no global variables exist
- gochecknoinits # Checks that no init functions are present in Go code
- godox # Tool for detection of FIXME, TODO and other comment keywords
- goerr113 # Golang linter to check the errors handling expressions
- gofumpt # Gofumpt checks whether code was gofumpt-ed
- goheader # Checks is file header matches to pattern
- golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- gomnd # An analyzer to detect magic numbers
- gomodguard # Allow and block list linter for direct Go module dependencies
- gosec # Inspects source code for security problems
- nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
- paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
- scopelint # Scopelint checks for unpinned variables in go programs
- sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed
- unparam # Reports unused function parameters
- wrapcheck # Checks that errors returned from external packages are wrapped TODO(zchee): enable
- wsl # Whitespace Linter
enable:
- asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- bodyclose # checks whether HTTP response body is closed successfully
- depguard # Go linter that checks if package imports are in a list of acceptable packages
- dogsled # Checks assignments with too many blank identifiers
- dupl # Tool for code clone detection
- errorlint # source code linter for Go software that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
- exhaustive # check exhaustiveness of enum switch statements
- exportloopref # checks for pointers to enclosing loop variables
- funlen # Tool for detection of long functions
- gocognit # Computes and checks the cognitive complexity of functions
- goconst # Finds repeated strings that could be replaced by a constant
- gocritic # The most opinionated Go source code linter
- gocyclo # Computes and checks the cyclomatic complexity of functions
- godot # Check if comments end in a period
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
- goprintffuncname # Checks that printf-like functions are named with `f` at the end
- gosimple # Linter for Go source code that specializes in simplifying a code
- govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
- ifshort # Checks that your code uses short syntax for if-statements whenever possible
- ineffassign # Detects when assignments to existing variables are not used
- lll # Reports long lines
- makezero # Finds slice declarations with non-zero initial length
- misspell # Finds commonly misspelled English words in comments
- nakedret # Finds naked returns in functions greater than a specified function length
- nestif # Reports deeply nested if statements
- noctx # noctx finds sending http request without context.Context
- nolintlint # Reports ill-formed or insufficient nolint directives
- prealloc # Finds slice declarations that could potentially be preallocated
- predeclared # find code that shadows one of Go's predeclared identifiers
- rowserrcheck # checks whether Err of rows is checked successfully
- staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
- structcheck # Finds unused struct fields
- stylecheck # Stylecheck is a replacement for golint
- testpackage # linter that makes you use a separate _test package
- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
- tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
- typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
- unconvert # Remove unnecessary type conversions
- unused # Checks Go code for unused constants, variables, functions and types
- varcheck # Finds unused global variables and constants
- whitespace # Tool for detection of leading and trailing whitespace
issues:
max-same-issues: 0
exclude-use-default: true
exclude-rules:
- path: _test\.go
linters:
- errcheck
- funlen
- gocognit
- goconst
- gocyclo
- lll
- maligned
- wrapcheck
- path: "(.*)?_example_test.go"
linters:
- gocritic
# `TestMain` function is no longer required to call `os.Exit` since Go 1.15.
# ref: https://golang.org/doc/go1.15#testing
- text: "SA3000:"
linters:
- staticcheck
# Exclude shadow checking on the variable named err
- text: "shadow: declaration of \"(err|ok)\""
linters:
- govet
# fake implements
- path: fake/fake.go
linters:
- errcheck
# future use
- path: wire.go
text: "`(codeServerErrorStart|codeServerErrorEnd)` is unused"
# goroutine
- path: handler.go
text: "Error return value of `handler` is not checked"

vendor/go.lsp.dev/jsonrpc2/LICENSE vendored Normal file

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/go.lsp.dev/jsonrpc2/Makefile vendored Normal file

@@ -0,0 +1,129 @@
# -----------------------------------------------------------------------------
# global
.DEFAULT_GOAL := test
comma := ,
empty :=
space := $(empty) $(empty)
# -----------------------------------------------------------------------------
# go
GO_PATH ?= $(shell go env GOPATH)
GO_OS ?= $(shell go env GOOS)
GO_ARCH ?= $(shell go env GOARCH)
PKG := $(subst $(GO_PATH)/src/,,$(CURDIR))
CGO_ENABLED ?= 0
GO_BUILDTAGS=osusergo netgo static
GO_LDFLAGS=-s -w "-extldflags=-static"
GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo
GO_PKGS := $(shell go list ./...)
GO_TEST ?= ${TOOLS_BIN}/gotestsum --
GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...)
GO_TEST_FLAGS ?= -race -count=1
GO_TEST_FUNC ?= .
GO_COVERAGE_OUT ?= coverage.out
GO_BENCH_FLAGS ?= -benchmem
GO_BENCH_FUNC ?= .
GO_LINT_FLAGS ?=
TOOLS := $(shell cd tools; go list -f '{{ join .Imports " " }}' -tags=tools)
TOOLS_BIN := ${CURDIR}/tools/bin
# Set build environment
JOBS := $(shell getconf _NPROCESSORS_CONF)
# -----------------------------------------------------------------------------
# defines
define target
@printf "+ $(patsubst ,$@,$(1))\\n" >&2
endef
# -----------------------------------------------------------------------------
# target
##@ test, bench, coverage
export GOTESTSUM_FORMAT=standard-verbose
.PHONY: test
test: CGO_ENABLED=1
test: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})'
test: tools/bin/gotestsum ## Runs package test including race condition.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS}
.PHONY: bench
bench: ## Take a package benchmark.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} -run='^$$' -bench=${GO_BENCH_FUNC} ${GO_BENCH_FLAGS} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS}
.PHONY: coverage
coverage: CGO_ENABLED=1
coverage: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})'
coverage: tools/bin/gotestsum ## Takes packages test coverage.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=${PKG}/... -coverprofile=${GO_COVERAGE_OUT} $(strip ${GO_FLAGS}) ${GO_PKGS}
##@ fmt, lint
.PHONY: lint
lint: fmt lint/golangci-lint ## Run all linters.
.PHONY: fmt
fmt: tools/bin/goimportz tools/bin/gofumpt ## Run goimportz and gofumpt.
$(call target)
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /jsonrpc2,,$(PKG)) -w
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w
.PHONY: lint/golangci-lint
lint/golangci-lint: tools/bin/golangci-lint .golangci.yml ## Run golangci-lint.
$(call target)
${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./...
##@ tools
.PHONY: tools
tools: tools/bin/'' ## Install tools
tools/%: tools/bin/% ## install an individual dependent tool
tools/bin/%: ${CURDIR}/tools/go.mod ${CURDIR}/tools/go.sum
@cd tools; \
for t in ${TOOLS}; do \
if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \
echo "Install $$t ..."; \
GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -v -mod=mod ${GO_FLAGS} "$${t}"; \
fi \
done
##@ clean
.PHONY: clean
clean: ## Cleanups binaries and extra files in the package.
$(call target)
@rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN}
##@ miscellaneous
.PHONY: todo
TODO: ## Print the all of (TODO|BUG|XXX|FIXME|NOTE) in packages.
@grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*')
.PHONY: env/%
env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc.
@echo $($*)
##@ help
.PHONY: help
help: ## Show this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m<target>\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

vendor/go.lsp.dev/jsonrpc2/README.md vendored Normal file

@@ -0,0 +1,19 @@
# jsonrpc2
[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga]
Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
<!-- badge links -->
[circleci]: https://app.circleci.com/pipelines/github/go-language-server/jsonrpc2
[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/jsonrpc2
[module]: https://github.com/go-language-server/jsonrpc2/releases/latest
[codecov]: https://codecov.io/gh/go-language-server/jsonrpc2
[ga]: https://github.com/go-language-server/jsonrpc2
[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/jsonrpc2/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci
[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev
[module-badge]: https://img.shields.io/github/release/go-language-server/jsonrpc2.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4=
[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/jsonrpc2/main?logo=codecov&style=for-the-badge
[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/jsonrpc2?useReferer&pixel

vendor/go.lsp.dev/jsonrpc2/codes.go vendored Normal file

@@ -0,0 +1,86 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
// Code is an error code as defined in the JSON-RPC spec.
type Code int32
// list of JSON-RPC error codes.
const (
// ParseError is the invalid JSON was received by the server.
// An error occurred on the server while parsing the JSON text.
ParseError Code = -32700
// InvalidRequest is the JSON sent is not a valid Request object.
InvalidRequest Code = -32600
// MethodNotFound is the method does not exist / is not available.
MethodNotFound Code = -32601
// InvalidParams is the invalid method parameter(s).
InvalidParams Code = -32602
// InternalError is the internal JSON-RPC error.
InternalError Code = -32603
// JSONRPCReservedErrorRangeStart is the start range of JSON RPC reserved error codes.
//
// It doesn't denote a real error code. No LSP error codes should
// be defined between the start and end range. For backwards
// compatibility the "ServerNotInitialized" and the "UnknownErrorCode"
// are left in the range.
//
// @since 3.16.0.
JSONRPCReservedErrorRangeStart Code = -32099
// CodeServerErrorStart reserved for implementation-defined server-errors.
//
// Deprecated: Use JSONRPCReservedErrorRangeStart instead.
CodeServerErrorStart = JSONRPCReservedErrorRangeStart
// ServerNotInitialized is the error of server not initialized.
ServerNotInitialized Code = -32002
// UnknownError should be used for all non coded errors.
UnknownError Code = -32001
// JSONRPCReservedErrorRangeEnd is the start range of JSON RPC reserved error codes.
//
// It doesn't denote a real error code.
//
// @since 3.16.0.
JSONRPCReservedErrorRangeEnd Code = -32000
// CodeServerErrorEnd reserved for implementation-defined server-errors.
//
// Deprecated: Use JSONRPCReservedErrorRangeEnd instead.
CodeServerErrorEnd = JSONRPCReservedErrorRangeEnd
)
// This file contains the Go forms of the wire specification.
//
// See http://www.jsonrpc.org/specification for details.
//
// list of JSON-RPC errors.
var (
// ErrUnknown should be used for all non coded errors.
ErrUnknown = NewError(UnknownError, "JSON-RPC unknown error")
// ErrParse is used when invalid JSON was received by the server.
ErrParse = NewError(ParseError, "JSON-RPC parse error")
// ErrInvalidRequest is used when the JSON sent is not a valid Request object.
ErrInvalidRequest = NewError(InvalidRequest, "JSON-RPC invalid request")
// ErrMethodNotFound should be returned by the handler when the method does
// not exist / is not available.
ErrMethodNotFound = NewError(MethodNotFound, "JSON-RPC method not found")
// ErrInvalidParams should be returned by the handler when method
// parameter(s) were invalid.
ErrInvalidParams = NewError(InvalidParams, "JSON-RPC invalid params")
// ErrInternal is not currently returned but defined for completeness.
ErrInternal = NewError(InternalError, "JSON-RPC internal error")
)

vendor/go.lsp.dev/jsonrpc2/conn.go vendored Normal file

@@ -0,0 +1,245 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bytes"
"context"
"fmt"
"sync"
"sync/atomic"
"github.com/segmentio/encoding/json"
)
// Conn is the common interface to jsonrpc clients and servers.
//
// Conn is bidirectional; it does not have a designated server or client end.
// It manages the jsonrpc2 protocol, connecting responses back to their calls.
type Conn interface {
// Call invokes the target method and waits for a response.
//
// The params will be marshaled to JSON before sending over the wire, and will
// be handed to the method invoked.
//
// The response will be unmarshaled from JSON into the result.
//
// The id returned will be unique from this connection, and can be used for
// logging or tracking.
Call(ctx context.Context, method string, params, result interface{}) (ID, error)
// Notify invokes the target method but does not wait for a response.
//
// The params will be marshaled to JSON before sending over the wire, and will
// be handed to the method invoked.
Notify(ctx context.Context, method string, params interface{}) error
// Go starts a goroutine to handle the connection.
//
// It must be called exactly once for each Conn. It returns immediately.
// Must block on Done() to wait for the connection to shut down.
//
// This is a temporary measure, this should be started automatically in the
// future.
Go(ctx context.Context, handler Handler)
// Close closes the connection and its underlying stream.
//
// It does not wait for the close to complete, use the Done() channel for
// that.
Close() error
// Done returns a channel that will be closed when the processing goroutine
// has terminated, which will happen if Close() is called or an underlying
// stream is closed.
Done() <-chan struct{}
// Err returns an error if there was one from within the processing goroutine.
//
// If err returns non nil, the connection will be already closed or closing.
Err() error
}
type conn struct {
seq int32 // access atomically
writeMu sync.Mutex // protects writes to the stream
stream Stream // supplied stream
pendingMu sync.Mutex // protects the pending map
pending map[ID]chan *Response // holds the pending response channel with the ID as the key.
done chan struct{} // closed when done
err atomic.Value // holds run error
}
// NewConn creates a new connection object around the supplied stream.
func NewConn(s Stream) Conn {
conn := &conn{
stream: s,
pending: make(map[ID]chan *Response),
done: make(chan struct{}),
}
return conn
}
// Call implements Conn.
func (c *conn) Call(ctx context.Context, method string, params, result interface{}) (id ID, err error) {
// generate a new request identifier
id = NewNumberID(atomic.AddInt32(&c.seq, 1))
call, err := NewCall(id, method, params)
if err != nil {
return id, fmt.Errorf("marshaling call parameters: %w", err)
}
// We have to add ourselves to the pending map before we send, otherwise we
// are racing the response. Also add a buffer to rchan, so that if we get a
// wire response between the time this call is cancelled and id is deleted
// from c.pending, the send to rchan will not block.
rchan := make(chan *Response, 1)
c.pendingMu.Lock()
c.pending[id] = rchan
c.pendingMu.Unlock()
defer func() {
c.pendingMu.Lock()
delete(c.pending, id)
c.pendingMu.Unlock()
}()
// now we are ready to send
_, err = c.write(ctx, call)
if err != nil {
// sending failed, we will never get a response, so don't leave it pending
return id, err
}
// now wait for the response
select {
case resp := <-rchan:
// is it an error response?
if resp.err != nil {
return id, resp.err
}
if result == nil || len(resp.result) == 0 {
return id, nil
}
dec := json.NewDecoder(bytes.NewReader(resp.result))
dec.ZeroCopy()
if err := dec.Decode(result); err != nil {
return id, fmt.Errorf("unmarshaling result: %w", err)
}
return id, nil
case <-ctx.Done():
return id, ctx.Err()
}
}
// Notify implements Conn.
func (c *conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
notify, err := NewNotification(method, params)
if err != nil {
return fmt.Errorf("marshaling notify parameters: %w", err)
}
_, err = c.write(ctx, notify)
return err
}
func (c *conn) replier(req Message) Replier {
return func(ctx context.Context, result interface{}, err error) error {
call, ok := req.(*Call)
if !ok {
// request was a notify, no need to respond
return nil
}
response, err := NewResponse(call.id, result, err)
if err != nil {
return err
}
_, err = c.write(ctx, response)
if err != nil {
// TODO(iancottrell): if a stream write fails, we really need to shut down the whole stream
return err
}
return nil
}
}
func (c *conn) write(ctx context.Context, msg Message) (int64, error) {
c.writeMu.Lock()
n, err := c.stream.Write(ctx, msg)
c.writeMu.Unlock()
if err != nil {
return 0, fmt.Errorf("write to stream: %w", err)
}
return n, nil
}
// Go implements Conn.
func (c *conn) Go(ctx context.Context, handler Handler) {
go c.run(ctx, handler)
}
func (c *conn) run(ctx context.Context, handler Handler) {
defer close(c.done)
for {
// get the next message
msg, _, err := c.stream.Read(ctx)
if err != nil {
// The stream failed, we cannot continue.
c.fail(err)
return
}
switch msg := msg.(type) {
case Request:
if err := handler(ctx, c.replier(msg), msg); err != nil {
c.fail(err)
}
case *Response:
// If method is not set, this should be a response, in which case we must
// have an id to send the response back to the caller.
c.pendingMu.Lock()
rchan, ok := c.pending[msg.id]
c.pendingMu.Unlock()
if ok {
rchan <- msg
}
}
}
}
// Close implements Conn.
func (c *conn) Close() error {
return c.stream.Close()
}
// Done implements Conn.
func (c *conn) Done() <-chan struct{} {
return c.done
}
// Err implements Conn.
func (c *conn) Err() error {
if err := c.err.Load(); err != nil {
return err.(error)
}
return nil
}
// fail sets a failure condition on the stream and closes it.
func (c *conn) fail(err error) {
c.err.Store(err)
c.stream.Close()
}
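
For orientation, a minimal client-side sketch against the Conn API above; the dialled address, the "ping" method and the string result are illustrative placeholders, not anything defined by the package:

package main

import (
	"context"
	"log"
	"net"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	ctx := context.Background()

	// Dial a hypothetical server that speaks Content-Length framed JSON-RPC.
	netConn, err := net.Dial("tcp", "127.0.0.1:4389")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the socket into a headered Stream and a bidirectional Conn.
	conn := jsonrpc2.NewConn(jsonrpc2.NewStream(netConn))

	// Start the processing goroutine; incoming requests get the fallback
	// handler, responses are routed back to pending Calls.
	conn.Go(ctx, jsonrpc2.MethodNotFoundHandler)

	// Call blocks until the matching response (or ctx cancellation) arrives.
	var result string
	id, err := conn.Call(ctx, "ping", map[string]string{"msg": "hello"}, &result)
	if err != nil {
		log.Fatalf("call %v failed: %v", id, err)
	}
	log.Printf("response to %v: %s", id, result)

	if err := conn.Close(); err != nil {
		log.Print(err)
	}
	<-conn.Done()
}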

vendor/go.lsp.dev/jsonrpc2/errors.go vendored Normal file

@@ -0,0 +1,70 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"errors"
"fmt"
"github.com/segmentio/encoding/json"
)
// Error represents a JSON-RPC error.
type Error struct {
// Code a number indicating the error type that occurred.
Code Code `json:"code"`
// Message a string providing a short description of the error.
Message string `json:"message"`
// Data a Primitive or Structured value that contains additional
// information about the error. Can be omitted.
Data *json.RawMessage `json:"data,omitempty"`
}
// compile time check whether the Error implements error interface.
var _ error = (*Error)(nil)
// Error implements error.Error.
func (e *Error) Error() string {
if e == nil {
return ""
}
return e.Message
}
// Unwrap implements errors.Unwrap.
//
// Returns the error underlying the receiver, which may be nil.
func (e *Error) Unwrap() error { return errors.New(e.Message) }
// NewError builds an Error struct for the supplied code and message.
func NewError(c Code, message string) *Error {
return &Error{
Code: c,
Message: message,
}
}
// Errorf builds an Error struct for the supplied code, format and args.
func Errorf(c Code, format string, args ...interface{}) *Error {
return &Error{
Code: c,
Message: fmt.Sprintf(format, args...),
}
}
// constErr represents an error constant.
type constErr string
// compile time check whether the constErr implements error interface.
var _ error = (*constErr)(nil)
// Error implements error.Error.
func (e constErr) Error() string { return string(e) }
const (
// ErrIdleTimeout is returned when serving timed out waiting for new connections.
ErrIdleTimeout = constErr("timed out waiting for new connections")
)
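
A small sketch of building and inspecting these wire errors on the handler side; the validate helper and its message are purely illustrative:

package main

import (
	"errors"
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

// validate is an illustrative check that reports bad input as a wire error
// carrying the standard InvalidParams code.
func validate(name string) error {
	if name == "" {
		return jsonrpc2.Errorf(jsonrpc2.InvalidParams, "name must not be empty")
	}
	return nil
}

func main() {
	err := validate("")

	// errors.As recovers the *jsonrpc2.Error so the numeric code can be inspected.
	var wireErr *jsonrpc2.Error
	if errors.As(err, &wireErr) {
		fmt.Println(wireErr.Code == jsonrpc2.InvalidParams) // true
		fmt.Println(wireErr.Message)                        // name must not be empty
	}
}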

vendor/go.lsp.dev/jsonrpc2/handler.go vendored Normal file

@@ -0,0 +1,120 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"context"
"fmt"
"sync"
)
// Handler is invoked to handle incoming requests.
//
// The Replier sends a reply to the request and must be called exactly once.
type Handler func(ctx context.Context, reply Replier, req Request) error
// Replier is passed to handlers to allow them to reply to the request.
//
// If err is set then result will be ignored.
type Replier func(ctx context.Context, result interface{}, err error) error
// MethodNotFoundHandler is a Handler that replies to all call requests with the
// standard method not found response.
//
// This should normally be the final handler in a chain.
func MethodNotFoundHandler(ctx context.Context, reply Replier, req Request) error {
return reply(ctx, nil, fmt.Errorf("%q: %w", req.Method(), ErrMethodNotFound))
}
// ReplyHandler creates a Handler that panics if the wrapped handler does
// not call Reply for every request that it is passed.
func ReplyHandler(handler Handler) (h Handler) {
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
called := false
err := handler(ctx, func(ctx context.Context, result interface{}, err error) error {
if called {
panic(fmt.Errorf("request %q replied to more than once", req.Method()))
}
called = true
return reply(ctx, result, err)
}, req)
if !called {
panic(fmt.Errorf("request %q was never replied to", req.Method()))
}
return err
})
return h
}
// CancelHandler returns a handler that supports cancellation, and a function
// that can be used to trigger canceling in progress requests.
func CancelHandler(handler Handler) (h Handler, canceller func(id ID)) {
var mu sync.Mutex
handling := make(map[ID]context.CancelFunc)
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
if call, ok := req.(*Call); ok {
cancelCtx, cancel := context.WithCancel(ctx)
ctx = cancelCtx
mu.Lock()
handling[call.ID()] = cancel
mu.Unlock()
innerReply := reply
reply = func(ctx context.Context, result interface{}, err error) error {
mu.Lock()
delete(handling, call.ID())
mu.Unlock()
return innerReply(ctx, result, err)
}
}
return handler(ctx, reply, req)
})
canceller = func(id ID) {
mu.Lock()
cancel, found := handling[id]
mu.Unlock()
if found {
cancel()
}
}
return h, canceller
}
// AsyncHandler returns a handler that processes each request in its own
// goroutine.
//
// The handler returns immediately, without the request being processed.
// Each request then waits for the previous request to finish before it starts.
//
// This allows the stream to unblock at the cost of unbounded goroutines
// all stalled on the previous one.
func AsyncHandler(handler Handler) (h Handler) {
nextRequest := make(chan struct{})
close(nextRequest)
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
waitForPrevious := nextRequest
nextRequest = make(chan struct{})
unlockNext := nextRequest
innerReply := reply
reply = func(ctx context.Context, result interface{}, err error) error {
close(unlockNext)
return innerReply(ctx, result, err)
}
go func() {
<-waitForPrevious
_ = handler(ctx, reply, req)
}()
return nil
})
return h
}
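
A sketch of composing the wrappers above, assuming an illustrative "echo" method; CancelHandler adds cancellation bookkeeping and AsyncHandler moves each request onto its own goroutine:

package main

import (
	"context"
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

// echoHandler answers the illustrative "echo" method and defers everything
// else to the standard method-not-found reply.
func echoHandler(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
	if req.Method() != "echo" {
		return jsonrpc2.MethodNotFoundHandler(ctx, reply, req)
	}
	// Send the raw params back as the result.
	return reply(ctx, req.Params(), nil)
}

// newHandler composes the wrappers from this file: cancellation support
// first, then one goroutine per request.
func newHandler() (jsonrpc2.Handler, func(jsonrpc2.ID)) {
	h, cancel := jsonrpc2.CancelHandler(echoHandler)
	return jsonrpc2.AsyncHandler(h), cancel
}

func main() {
	h, cancel := newHandler()
	_ = cancel // cancel(id) would abort an in-flight request with that ID
	fmt.Printf("%T\n", h) // jsonrpc2.Handler
}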


@@ -0,0 +1,7 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
// Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
//
// https://www.jsonrpc.org/specification
package jsonrpc2 // import "go.lsp.dev/jsonrpc2"

vendor/go.lsp.dev/jsonrpc2/message.go vendored Normal file

@@ -0,0 +1,358 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bytes"
"errors"
"fmt"
"github.com/segmentio/encoding/json"
)
// Message is the interface to all JSON-RPC message types.
//
// They share no common functionality, but are a closed set of concrete types
// that are allowed to implement this interface.
//
// The message types are *Call, *Response and *Notification.
type Message interface {
// jsonrpc2Message is used to make the set of message implementations a
// closed set.
jsonrpc2Message()
}
// Request is the shared interface to jsonrpc2 messages that request
// a method be invoked.
//
// The request types are a closed set of *Call and *Notification.
type Request interface {
Message
// Method is a string containing the method name to invoke.
Method() string
// Params is either a struct or an array with the parameters of the method.
Params() json.RawMessage
// jsonrpc2Request is used to make the set of request implementations closed.
jsonrpc2Request()
}
// Call is a request that expects a response.
//
// The response will have a matching ID.
type Call struct {
// Method is a string containing the method name to invoke.
method string
// Params is either a struct or an array with the parameters of the method.
params json.RawMessage
// id of this request, used to tie the Response back to the request.
id ID
}
// make sure a Call implements the Request, json.Marshaler and json.Unmarshaler and interfaces.
var (
_ Request = (*Call)(nil)
_ json.Marshaler = (*Call)(nil)
_ json.Unmarshaler = (*Call)(nil)
)
// NewCall constructs a new Call message for the supplied ID, method and
// parameters.
func NewCall(id ID, method string, params interface{}) (*Call, error) {
p, merr := marshalInterface(params)
req := &Call{
id: id,
method: method,
params: p,
}
return req, merr
}
// ID returns the current call id.
func (c *Call) ID() ID { return c.id }
// Method implements Request.
func (c *Call) Method() string { return c.method }
// Params implements Request.
func (c *Call) Params() json.RawMessage { return c.params }
// jsonrpc2Message implements Request.
func (Call) jsonrpc2Message() {}
// jsonrpc2Request implements Request.
func (Call) jsonrpc2Request() {}
// MarshalJSON implements json.Marshaler.
func (c Call) MarshalJSON() ([]byte, error) {
req := wireRequest{
Method: c.method,
Params: &c.params,
ID: &c.id,
}
data, err := json.Marshal(req)
if err != nil {
return data, fmt.Errorf("marshaling call: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (c *Call) UnmarshalJSON(data []byte) error {
var req wireRequest
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&req); err != nil {
return fmt.Errorf("unmarshaling call: %w", err)
}
c.method = req.Method
if req.Params != nil {
c.params = *req.Params
}
if req.ID != nil {
c.id = *req.ID
}
return nil
}
// Response is a reply to a Request.
//
// It will have the same ID as the call it is a response to.
type Response struct {
// result is the content of the response.
result json.RawMessage
// err is set only if the call failed.
err error
// ID of the request this is a response to.
id ID
}
// make sure a Response implements the Message, json.Marshaler and json.Unmarshaler and interfaces.
var (
_ Message = (*Response)(nil)
_ json.Marshaler = (*Response)(nil)
_ json.Unmarshaler = (*Response)(nil)
)
// NewResponse constructs a new Response message that is a reply to the
// request with the supplied ID. If err is set, result may be ignored.
func NewResponse(id ID, result interface{}, err error) (*Response, error) {
r, merr := marshalInterface(result)
resp := &Response{
id: id,
result: r,
err: err,
}
return resp, merr
}
// ID returns the current response id.
func (r *Response) ID() ID { return r.id }
// Result returns the Response result.
func (r *Response) Result() json.RawMessage { return r.result }
// Err returns the Response error.
func (r *Response) Err() error { return r.err }
// jsonrpc2Message implements Message.
func (r *Response) jsonrpc2Message() {}
// MarshalJSON implements json.Marshaler.
func (r Response) MarshalJSON() ([]byte, error) {
resp := &wireResponse{
Error: toError(r.err),
ID: &r.id,
}
if resp.Error == nil {
resp.Result = &r.result
}
data, err := json.Marshal(resp)
if err != nil {
return data, fmt.Errorf("marshaling notification: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (r *Response) UnmarshalJSON(data []byte) error {
var resp wireResponse
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&resp); err != nil {
return fmt.Errorf("unmarshaling jsonrpc response: %w", err)
}
if resp.Result != nil {
r.result = *resp.Result
}
if resp.Error != nil {
r.err = resp.Error
}
if resp.ID != nil {
r.id = *resp.ID
}
return nil
}
func toError(err error) *Error {
if err == nil {
// no error, the response is complete
return nil
}
var wrapped *Error
if errors.As(err, &wrapped) {
// already a wire error, just use it
return wrapped
}
result := &Error{Message: err.Error()}
if errors.As(err, &wrapped) {
// if we wrapped a wire error, keep the code from the wrapped error
// but the message from the outer error
result.Code = wrapped.Code
}
return result
}
// Notification is a request for which a response cannot occur, and as such
// it has no ID.
type Notification struct {
// Method is a string containing the method name to invoke.
method string
params json.RawMessage
}
// make sure a Notification implements the Request, json.Marshaler and json.Unmarshaler and interfaces.
var (
_ Request = (*Notification)(nil)
_ json.Marshaler = (*Notification)(nil)
_ json.Unmarshaler = (*Notification)(nil)
)
// NewNotification constructs a new Notification message for the supplied
// method and parameters.
func NewNotification(method string, params interface{}) (*Notification, error) {
p, merr := marshalInterface(params)
notify := &Notification{
method: method,
params: p,
}
return notify, merr
}
// Method implements Request.
func (n *Notification) Method() string { return n.method }
// Params implements Request.
func (n *Notification) Params() json.RawMessage { return n.params }
// jsonrpc2Message implements Request.
func (Notification) jsonrpc2Message() {}
// jsonrpc2Request implements Request.
func (Notification) jsonrpc2Request() {}
// MarshalJSON implements json.Marshaler.
func (n Notification) MarshalJSON() ([]byte, error) {
req := wireRequest{
Method: n.method,
Params: &n.params,
}
data, err := json.Marshal(req)
if err != nil {
return data, fmt.Errorf("marshaling notification: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (n *Notification) UnmarshalJSON(data []byte) error {
var req wireRequest
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&req); err != nil {
return fmt.Errorf("unmarshaling notification: %w", err)
}
n.method = req.Method
if req.Params != nil {
n.params = *req.Params
}
return nil
}
// DecodeMessage decodes data to Message.
func DecodeMessage(data []byte) (Message, error) {
var msg combined
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&msg); err != nil {
return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err)
}
if msg.Method == "" {
// no method, should be a response
if msg.ID == nil {
return nil, ErrInvalidRequest
}
resp := &Response{
id: *msg.ID,
}
if msg.Error != nil {
resp.err = msg.Error
}
if msg.Result != nil {
resp.result = *msg.Result
}
return resp, nil
}
// has a method, must be a request
if msg.ID == nil {
// request with no ID is a notify
notify := &Notification{
method: msg.Method,
}
if msg.Params != nil {
notify.params = *msg.Params
}
return notify, nil
}
// request with an ID, must be a call
call := &Call{
method: msg.Method,
id: *msg.ID,
}
if msg.Params != nil {
call.params = *msg.Params
}
return call, nil
}
// marshalInterface marshal obj to json.RawMessage.
func marshalInterface(obj interface{}) (json.RawMessage, error) {
data, err := json.Marshal(obj)
if err != nil {
return json.RawMessage{}, fmt.Errorf("failed to marshal json: %w", err)
}
return json.RawMessage(data), nil
}
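
A short round-trip sketch of the constructors and DecodeMessage above; the method name and params are arbitrary examples:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	// Build a Call and marshal it into its wire form.
	call, err := jsonrpc2.NewCall(jsonrpc2.NewNumberID(1), "textDocument/hover", map[string]int{"line": 10})
	if err != nil {
		log.Fatal(err)
	}
	data, err := json.Marshal(call)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wire form: %s\n", data)

	// DecodeMessage picks the concrete message type from the wire fields.
	msg, err := jsonrpc2.DecodeMessage(data)
	if err != nil {
		log.Fatal(err)
	}
	switch m := msg.(type) {
	case *jsonrpc2.Call:
		fmt.Println("call:", m.Method(), m.ID())
	case *jsonrpc2.Notification:
		fmt.Println("notification:", m.Method())
	case *jsonrpc2.Response:
		fmt.Println("response to:", m.ID())
	}
}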

vendor/go.lsp.dev/jsonrpc2/serve.go vendored Normal file

@@ -0,0 +1,129 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"context"
"fmt"
"net"
"os"
"time"
)
// NOTE: This file provides an experimental API for serving multiple remote
// jsonrpc2 clients over the network. For now, it is intentionally similar to
// net/http, but that may change in the future as we figure out the correct
// semantics.
// StreamServer is used to serve incoming jsonrpc2 clients communicating over
// a newly created connection.
type StreamServer interface {
ServeStream(context.Context, Conn) error
}
// ServerFunc is an adapter that implements the StreamServer interface
// using an ordinary function.
type ServerFunc func(context.Context, Conn) error
// ServeStream implements StreamServer.
//
// ServeStream calls f(ctx, s).
func (f ServerFunc) ServeStream(ctx context.Context, c Conn) error {
return f(ctx, c)
}
// HandlerServer returns a StreamServer that handles incoming streams using the
// provided handler.
func HandlerServer(h Handler) StreamServer {
return ServerFunc(func(ctx context.Context, conn Conn) error {
conn.Go(ctx, h)
<-conn.Done()
return conn.Err()
})
}
// ListenAndServe starts a jsonrpc2 server on the given address.
//
// If idleTimeout is non-zero, ListenAndServe exits after there are no clients for
// this duration, otherwise it exits only on error.
func ListenAndServe(ctx context.Context, network, addr string, server StreamServer, idleTimeout time.Duration) error {
ln, err := net.Listen(network, addr)
if err != nil {
return fmt.Errorf("failed to listen %s:%s: %w", network, addr, err)
}
defer ln.Close()
if network == "unix" {
defer os.Remove(addr)
}
return Serve(ctx, ln, server, idleTimeout)
}
// Serve accepts incoming connections from the network, and handles them using
// the provided server. If idleTimeout is non-zero, ListenAndServe exits after
// there are no clients for this duration, otherwise it exits only on error.
func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeout time.Duration) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Max duration: ~290 years; surely that's long enough.
const forever = 1<<63 - 1
if idleTimeout <= 0 {
idleTimeout = forever
}
connTimer := time.NewTimer(idleTimeout)
newConns := make(chan net.Conn)
doneListening := make(chan error)
closedConns := make(chan error)
go func() {
for {
nc, err := ln.Accept()
if err != nil {
select {
case doneListening <- fmt.Errorf("accept: %w", err):
case <-ctx.Done():
}
return
}
newConns <- nc
}
}()
activeConns := 0
for {
select {
case netConn := <-newConns:
activeConns++
connTimer.Stop()
stream := NewStream(netConn)
go func() {
conn := NewConn(stream)
closedConns <- server.ServeStream(ctx, conn)
stream.Close()
}()
case err := <-doneListening:
return err
case <-closedConns:
// if !isClosingError(err) {
// }
activeConns--
if activeConns == 0 {
connTimer.Reset(idleTimeout)
}
case <-connTimer.C:
return ErrIdleTimeout
case <-ctx.Done():
return ctx.Err()
}
}
}
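
A minimal sketch of serving over TCP with HandlerServer and ListenAndServe; the listen address and the one-minute idle timeout are arbitrary choices:

package main

import (
	"context"
	"log"
	"time"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	ctx := context.Background()

	// Every accepted connection is served by the same Handler.
	server := jsonrpc2.HandlerServer(jsonrpc2.MethodNotFoundHandler)

	// Exit once no client has been connected for a minute; pass 0 to serve forever.
	if err := jsonrpc2.ListenAndServe(ctx, "tcp", "127.0.0.1:4389", server, time.Minute); err != nil {
		log.Fatal(err)
	}
}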

vendor/go.lsp.dev/jsonrpc2/stream.go vendored Normal file

@@ -0,0 +1,226 @@
// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bufio"
"context"
stdjson "encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/segmentio/encoding/json"
)
const (
// HdrContentLength is the HTTP header name of the length of the content part in bytes. This header is required.
// This entity header indicates the size of the entity-body, in bytes, sent to the recipient.
//
// RFC 7230, section 3.3.2: Content-Length:
// https://tools.ietf.org/html/rfc7230#section-3.3.2
HdrContentLength = "Content-Length"
// HdrContentType is the mime type of the content part. Defaults to "application/vscode-jsonrpc; charset=utf-8".
// This entity header is used to indicate the media type of the resource.
//
// RFC 7231, section 3.1.1.5: Content-Type:
// https://tools.ietf.org/html/rfc7231#section-3.1.1.5
HdrContentType = "Content-Type"
// HdrContentSeparator is the header and content part separator.
HdrContentSeparator = "\r\n\r\n"
)
// Framer wraps a network connection up into a Stream.
//
// It is responsible for the framing and encoding of messages into wire form.
// NewRawStream and NewStream are implementations of a Framer.
type Framer func(conn io.ReadWriteCloser) Stream
// Stream abstracts the transport mechanics from the JSON RPC protocol.
//
// A Conn reads and writes messages using the stream it was provided on
// construction, and assumes that each call to Read or Write fully transfers
// a single message, or returns an error.
//
// A stream is not safe for concurrent use, it is expected it will be used by
// a single Conn in a safe manner.
type Stream interface {
// Read gets the next message from the stream.
Read(context.Context) (Message, int64, error)
// Write sends a message to the stream.
Write(context.Context, Message) (int64, error)
// Close closes the connection.
// Any blocked Read or Write operations will be unblocked and return errors.
Close() error
}
type rawStream struct {
conn io.ReadWriteCloser
in *stdjson.Decoder
}
// NewRawStream returns a Stream built on top of a io.ReadWriteCloser.
//
// The messages are sent with no wrapping, and rely on json decode consistency
// to determine message boundaries.
func NewRawStream(conn io.ReadWriteCloser) Stream {
return &rawStream{
conn: conn,
in: stdjson.NewDecoder(conn), // TODO(zchee): why test fail using segmentio json.Decoder?
}
}
// Read implements Stream.Read.
func (s *rawStream) Read(ctx context.Context) (Message, int64, error) {
select {
case <-ctx.Done():
return nil, 0, ctx.Err()
default:
}
var raw stdjson.RawMessage
if err := s.in.Decode(&raw); err != nil {
return nil, 0, fmt.Errorf("decoding raw message: %w", err)
}
msg, err := DecodeMessage(raw)
return msg, int64(len(raw)), err
}
// Write implements Stream.Write.
func (s *rawStream) Write(ctx context.Context, msg Message) (int64, error) {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
data, err := json.Marshal(msg)
if err != nil {
return 0, fmt.Errorf("marshaling message: %w", err)
}
n, err := s.conn.Write(data)
if err != nil {
return 0, fmt.Errorf("write to stream: %w", err)
}
return int64(n), nil
}
// Close implements Stream.Close.
func (s *rawStream) Close() error {
return s.conn.Close()
}
type stream struct {
conn io.ReadWriteCloser
in *bufio.Reader
}
// NewStream returns a Stream built on top of a io.ReadWriteCloser.
//
// The messages are sent with HTTP content length and MIME type headers.
// This is the format used by LSP and others.
func NewStream(conn io.ReadWriteCloser) Stream {
return &stream{
conn: conn,
in: bufio.NewReader(conn),
}
}
// Read implements Stream.Read.
func (s *stream) Read(ctx context.Context) (Message, int64, error) {
select {
case <-ctx.Done():
return nil, 0, ctx.Err()
default:
}
var total int64
var length int64
// read the header, stop on the first empty line
for {
line, err := s.in.ReadString('\n')
total += int64(len(line))
if err != nil {
return nil, total, fmt.Errorf("failed reading header line: %w", err)
}
line = strings.TrimSpace(line)
// check we have a header line
if line == "" {
break
}
colon := strings.IndexRune(line, ':')
if colon < 0 {
return nil, total, fmt.Errorf("invalid header line %q", line)
}
name, value := line[:colon], strings.TrimSpace(line[colon+1:])
switch name {
case HdrContentLength:
if length, err = strconv.ParseInt(value, 10, 32); err != nil {
return nil, total, fmt.Errorf("failed parsing %s: %v: %w", HdrContentLength, value, err)
}
if length <= 0 {
return nil, total, fmt.Errorf("invalid %s: %v", HdrContentLength, length)
}
default:
// ignoring unknown headers
}
}
if length == 0 {
return nil, total, fmt.Errorf("missing %s header", HdrContentLength)
}
data := make([]byte, length)
if _, err := io.ReadFull(s.in, data); err != nil {
return nil, total, fmt.Errorf("read full of data: %w", err)
}
total += length
msg, err := DecodeMessage(data)
return msg, total, err
}
// Write implements Stream.Write.
func (s *stream) Write(ctx context.Context, msg Message) (int64, error) {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
data, err := json.Marshal(msg)
if err != nil {
return 0, fmt.Errorf("marshaling message: %w", err)
}
n, err := fmt.Fprintf(s.conn, "%s: %v%s", HdrContentLength, len(data), HdrContentSeparator)
total := int64(n)
if err != nil {
return 0, fmt.Errorf("write data to conn: %w", err)
}
n, err = s.conn.Write(data)
total += int64(n)
if err != nil {
return 0, fmt.Errorf("write data to conn: %w", err)
}
return total, nil
}
// Close implements Stream.Close.
func (s *stream) Close() error {
return s.conn.Close()
}
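
A sketch of the Content-Length framing in isolation, using net.Pipe as an in-memory transport; the "initialized" method name is just an example payload:

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	ctx := context.Background()

	// net.Pipe gives two connected in-memory endpoints; NewStream adds the
	// Content-Length framing on each side.
	clientConn, serverConn := net.Pipe()
	clientStream := jsonrpc2.NewStream(clientConn)
	serverStream := jsonrpc2.NewStream(serverConn)

	// Write one notification from the client side.
	go func() {
		msg, err := jsonrpc2.NewNotification("initialized", nil)
		if err != nil {
			log.Print(err)
			return
		}
		if _, err := clientStream.Write(ctx, msg); err != nil {
			log.Print(err)
		}
	}()

	// Read blocks until one complete, framed message has arrived.
	msg, n, err := serverStream.Read(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes: %T\n", n, msg)
}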

vendor/go.lsp.dev/jsonrpc2/wire.go vendored Normal file

@@ -0,0 +1,140 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"fmt"
"github.com/segmentio/encoding/json"
)
// Version represents a JSON-RPC version.
const Version = "2.0"
// version is a special 0 sized struct that encodes as the jsonrpc version tag.
//
// It will fail during decode if it is not the correct version tag in the stream.
type version struct{}
// compile time check whether the version implements a json.Marshaler and json.Unmarshaler interfaces.
var (
_ json.Marshaler = (*version)(nil)
_ json.Unmarshaler = (*version)(nil)
)
// MarshalJSON implements json.Marshaler.
func (version) MarshalJSON() ([]byte, error) {
return json.Marshal(Version)
}
// UnmarshalJSON implements json.Unmarshaler.
func (version) UnmarshalJSON(data []byte) error {
version := ""
if err := json.Unmarshal(data, &version); err != nil {
return fmt.Errorf("failed to Unmarshal: %w", err)
}
if version != Version {
return fmt.Errorf("invalid RPC version %v", version)
}
return nil
}
// ID is a Request identifier.
//
// Only one of either the Name or Number members will be set, using the
// number form if the Name is the empty string.
type ID struct {
name string
number int32
}
// compile time check whether the ID implements a fmt.Formatter, json.Marshaler and json.Unmarshaler interfaces.
var (
_ fmt.Formatter = (*ID)(nil)
_ json.Marshaler = (*ID)(nil)
_ json.Unmarshaler = (*ID)(nil)
)
// NewNumberID returns a new number request ID.
func NewNumberID(v int32) ID { return ID{number: v} }
// NewStringID returns a new string request ID.
func NewStringID(v string) ID { return ID{name: v} }
// Format writes the ID to the formatter.
//
// If the rune is q the representation is unambiguous,
// string forms are quoted, number forms are preceded by a #.
func (id ID) Format(f fmt.State, r rune) {
numF, strF := `%d`, `%s`
if r == 'q' {
numF, strF = `#%d`, `%q`
}
switch {
case id.name != "":
fmt.Fprintf(f, strF, id.name)
default:
fmt.Fprintf(f, numF, id.number)
}
}
// MarshalJSON implements json.Marshaler.
func (id *ID) MarshalJSON() ([]byte, error) {
if id.name != "" {
return json.Marshal(id.name)
}
return json.Marshal(id.number)
}
// UnmarshalJSON implements json.Unmarshaler.
func (id *ID) UnmarshalJSON(data []byte) error {
*id = ID{}
if err := json.Unmarshal(data, &id.number); err == nil {
return nil
}
return json.Unmarshal(data, &id.name)
}
// wireRequest is sent to a server to represent a Call or Notify operation.
type wireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag version `json:"jsonrpc"`
// Method is a string containing the method name to invoke.
Method string `json:"method"`
// Params is either a struct or an array with the parameters of the method.
Params *json.RawMessage `json:"params,omitempty"`
// The id of this request, used to tie the Response back to the request.
// Will be either a string or a number. If not set, the Request is a notify,
// and no response is possible.
ID *ID `json:"id,omitempty"`
}
// wireResponse is a reply to a Request.
//
// It will always have the ID field set to tie it back to a request, and will
// have either the Result or Error fields set depending on whether it is a
// success or failure wireResponse.
type wireResponse struct {
// VersionTag is always encoded as the string "2.0"
VersionTag version `json:"jsonrpc"`
// Result is the response value, and is required on success.
Result *json.RawMessage `json:"result,omitempty"`
// Error is a structured error response if the call fails.
Error *Error `json:"error,omitempty"`
// ID must be set and is the identifier of the Request this is a response to.
ID *ID `json:"id,omitempty"`
}
// combined has all the fields of both Request and Response.
//
// We can decode this and then work out which it is.
type combined struct {
VersionTag version `json:"jsonrpc"`
ID *ID `json:"id,omitempty"`
Method string `json:"method"`
Params *json.RawMessage `json:"params,omitempty"`
Result *json.RawMessage `json:"result,omitempty"`
Error *Error `json:"error,omitempty"`
}
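
A tiny sketch of the two ID forms and the %q formatting described above:

package main

import (
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	num := jsonrpc2.NewNumberID(7)
	str := jsonrpc2.NewStringID("init")

	// Plain verbs print the bare value; %q disambiguates the two forms,
	// prefixing numbers with # and quoting strings.
	fmt.Printf("%v %v\n", num, str) // 7 init
	fmt.Printf("%q %q\n", num, str) // #7 "init"
}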

vendor/go.lsp.dev/pkg/LICENSE vendored Normal file

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2020, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,22 @@
// Copyright 2020 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
// Package xcontext is a package to offer the extra functionality we need
// from contexts that is not available from the standard context package.
package xcontext
import (
"context"
"time"
)
// Detach returns a context that keeps all the values of its parent context
// but detaches from the cancellation and error handling.
func Detach(ctx context.Context) context.Context { return detachedContext{ctx} }
type detachedContext struct{ parent context.Context }
func (v detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (v detachedContext) Done() <-chan struct{} { return nil }
func (v detachedContext) Err() error { return nil }
func (v detachedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }
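
A minimal sketch of Detach, assuming the package import path go.lsp.dev/pkg/xcontext; the context key and value are placeholders:

package main

import (
	"context"
	"fmt"

	"go.lsp.dev/pkg/xcontext"
)

type ctxKey struct{}

func main() {
	parent, cancel := context.WithCancel(context.WithValue(context.Background(), ctxKey{}, "request-42"))
	cancel() // the parent is already cancelled

	// Detach keeps the values but drops cancellation and deadlines, so work
	// outliving the parent can still read request-scoped data.
	detached := xcontext.Detach(parent)

	fmt.Println(parent.Err())             // context canceled
	fmt.Println(detached.Err())           // <nil>
	fmt.Println(detached.Value(ctxKey{})) // request-42
}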

vendor/go.lsp.dev/protocol/.codecov.yml vendored Normal file

@@ -0,0 +1,45 @@
codecov:
allow_coverage_offsets: true
parsers:
go:
partials_as_hits: true
coverage:
precision: 1
round: down
range: "70...100"
status:
default_rules:
flag_coverage_not_uploaded_behavior: include
project:
default:
target: auto
threshold: 1%
if_not_found: success
if_ci_failed: error
patch:
default:
only_pulls: true
target: 50%
threshold: 10%
changes:
default:
target: auto
threshold: 10%
if_not_found: success
if_ci_failed: error
branches:
- main
comment:
behavior: default
require_changes: true
show_carryforward_flags: true
github_checks:
annotations: true


@@ -0,0 +1,11 @@
# go.lsp.dev/protocol project gitattributes file
# https://github.com/github/linguist#using-gitattributes
# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
# To prevent CRLF breakages on Windows for fragile files, like testdata.
* -text
docs/ linguist-documentation
*.pb.go linguist-generated
*_gen.go linguist-generated
*_string.go linguist-generated

vendor/go.lsp.dev/protocol/.gitignore vendored Normal file

@@ -0,0 +1,52 @@
# go.lsp.dev/protocol project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
# please do not open a pull request to add something created by your editor or tools
# github/gitignore/Go.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
vendor/
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
# cgo generated
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
# test generated
_testmain.go
# profile
*.pprof
# coverage
coverage.*
# tools
tools/bin/

vendor/go.lsp.dev/protocol/.golangci.yml vendored Normal file

@@ -0,0 +1,242 @@
# https://golangci-lint.run/usage/configuration/
# https://github.com/golangci/golangci-lint/blob/master/pkg/config/linters_settings.go
---
run:
timeout: 1m
issues-exit-code: 1
tests: true
skip-dirs: []
skip-dirs-use-default: true
skip-files: []
allow-parallel-runners: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
uniq-by-line: true
sort-results: true
linters-settings:
depguard:
list-type: blacklist
include-go-root: false
dupl:
threshold: 150
errcheck:
check-type-assertions: true
check-blank: true
# exclude: .errcheckignore
errorlint:
errorf: true
asserts: true
comparison: true
funlen:
lines: 100
statements: 60
gocognit:
min-complexity: 30
goconst:
min-len: 3
min-occurrences: 3
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- commentedOutCode
- redundantSprint
- whyNoLint
settings:
hugeParam:
sizeThreshold: 80
rangeExprCopy:
sizeThreshold: 512
rangeValCopy:
sizeThreshold: 128
gocyclo:
min-complexity: 30
godot:
scope: declarations
capital: false
gofmt:
simplify: true
gofumpt:
extra-rules: true
goheader:
values:
const:
AUTHOR: Go Language Server
regexp:
YEAR: '20\d\d'
template: |-
SPDX-FileCopyrightText: {{ YEAR }} The {{ AUTHOR }} Authors
SPDX-License-Identifier: BSD-3-Clause
goimports:
local-prefixes: go.lsp.dev/protocol
gosimple:
go: 1.16
govet:
enable-all: true
check-shadowing: true
disable:
- fieldalignment
importas:
alias: []
no-unaliased: true
lll:
line-length: 120
tab-width: 1
misspell:
locale: US
ignore-words:
- cancelled
- cancelling
nakedret:
max-func-lines: 30
nestif:
min-complexity: 4
prealloc:
simple: true
range-loops: true
for-loops: true
staticcheck:
go: 1.16
testpackage:
skip-regexp: '.*(export)_test\.go'
unparam:
check-exported: true
algo: cha
unused:
go: 1.16
whitespace:
multi-if: true
multi-func: true
linters:
fast: false
disabled:
- exhaustivestruct # Checks if all struct's fields are initialized
- forbidigo # Forbids identifiers
- forcetypeassert # finds forced type assertions
- gci # Gci control golang package import order and make it always deterministic.
- gochecknoglobals # check that no global variables exist
- gochecknoinits # Checks that no init functions are present in Go code
- goconst # Finds repeated strings that could be replaced by a constant
- godox # Tool for detection of FIXME, TODO and other comment keywords
- goerr113 # Golang linter to check the errors handling expressions
- golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- gomnd # An analyzer to detect magic numbers.
- gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
- gomodguard # Allow and block list linter for direct Go module dependencies.
- interfacer # Linter that suggests narrower interface types
- lll # Reports long lines
- maligned # Tool to detect Go structs that would take less memory if their fields were sorted
- promlinter # Check Prometheus metrics naming via promlint
- scopelint # Scopelint checks for unpinned variables in go programs
- sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
- testpackage # TODO(zchee): enable: # linter that makes you use a separate _test package
- tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
- wrapcheck # TODO(zchee): enable: # Checks that errors returned from external packages are wrapped
- wsl # Whitespace Linter
enable:
- asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- bodyclose # checks whether HTTP response body is closed successfully
- cyclop # checks function and package cyclomatic complexity
- deadcode # Finds unused code
- depguard # Go linter that checks if package imports are in a list of acceptable packages
- dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
- dupl # Tool for code clone detection
- durationcheck # check for two durations multiplied together
- errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
- errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
- exhaustive # check exhaustiveness of enum switch statements
- exportloopref # checks for pointers to enclosing loop variables
- funlen # Tool for detection of long functions
- gocognit # Computes and checks the cognitive complexity of functions
- gocritic # Provides many diagnostics that check for bugs, performance and style issues.
- gocyclo # Computes and checks the cyclomatic complexity of functions
- godot # Check if comments end in a period
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- gofumpt # Gofumpt checks whether code was gofumpt-ed.
- goheader # Checks is file header matches to pattern
- goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
- goprintffuncname # Checks that printf-like functions are named with `f` at the end
- gosec # Inspects source code for security problems
- gosimple # Linter for Go source code that specializes in simplifying a code
- govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
- ifshort # Checks that your code uses short syntax for if-statements whenever possible
- importas # Enforces consistent import aliases
- ineffassign # Detects when assignments to existing variables are not used
- makezero # Finds slice declarations with non-zero initial length
- misspell # Finds commonly misspelled English words in comments
- nakedret # Finds naked returns in functions greater than a specified function length
- nestif # Reports deeply nested if statements
- nilerr # Finds the code that returns nil even if it checks that the error is not nil.
- nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
- noctx # noctx finds sending http request without context.Context
- nolintlint # Reports ill-formed or insufficient nolint directives
- paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
- prealloc # Finds slice declarations that could potentially be preallocated
- predeclared # find code that shadows one of Go's predeclared identifiers
- revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
- rowserrcheck # checks whether Err of rows is checked successfully
- staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
- structcheck # Finds unused struct fields
- stylecheck # Stylecheck is a replacement for golint
- tagliatelle # Checks the struct tags.
- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
- typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
- unconvert # Remove unnecessary type conversions
- unparam # Reports unused function parameters
- unused # Checks Go code for unused constants, variables, functions and types
- varcheck # Finds unused global variables and constants
- wastedassign # wastedassign finds wasted assignment statements.
- whitespace # Tool for detection of leading and trailing whitespace
issues:
max-issues-per-linter: 0
max-same-issues: 0
exclude-use-default: true
exclude-rules:
- path: _test\.go
linters:
- cyclop
- dupl
- errcheck
- funlen
- gocognit
- goconst
- gocritic
- gocyclo
- gosec
- thelper
- wrapcheck
- path: "(.*)?_example_test.go"
linters:
- gocritic
# Exclude shadow checking on the variable named err
- text: "shadow: declaration of \"(err|ok)\""
linters:
- govet
# false positive
- path: language.go
text: "deprecatedComment: the proper format is `Deprecated: <text>`"
# async
- path: handler.go
text: "Error return value of `conn.Notify` is not checked"
linters:
- errcheck
- path: log.go
text: "Error return value of `s.log.Write` is not checked"
linters:
- errcheck
- path: deprecated.go
linters:
- lll
- path: "(client|server)_json.go"
linters:
- nlreturn
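(Illustrative aside, not part of the vendored config above.) The exclude-rule matching `shadow: declaration of "(err|ok)"` exists because govet's shadow check, enabled in this config via `enable-all: true` and `check-shadowing: true`, reports the common Go pattern of re-declaring `err` inside an `if` initializer. A minimal hypothetical sketch of the pattern that rule keeps lint-clean follows; the file path and function are invented for illustration only.

package main

import (
	"fmt"
	"os"
)

func main() {
	// Outer err, declared together with f.
	f, err := os.Open("go.mod") // arbitrary example path
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	buf := make([]byte, 16)
	// The inner err below shadows the outer one; govet's shadow check reports
	// this as `shadow: declaration of "err" ...`, and the exclude-rule in the
	// config above deliberately suppresses that report for err and ok.
	if n, err := f.Read(buf); err != nil {
		fmt.Println("read failed:", err)
	} else {
		fmt.Printf("read %d bytes\n", n)
	}
}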

29
vendor/go.lsp.dev/protocol/LICENSE vendored Normal file

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some files were not shown because too many files have changed in this diff.