chore_: make vendor

Igor Sirotin 2024-10-19 22:31:59 +03:00
parent e889114df6
commit a1e77e91df
No known key found for this signature in database
GPG Key ID: 425E227CAAB81F95
143 changed files with 25445 additions and 0 deletions

vendor/github.com/segmentio/asm/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Segment
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/segmentio/asm/ascii/ascii.go generated vendored Normal file

@ -0,0 +1,53 @@
package ascii
import _ "github.com/segmentio/asm/cpu"
// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord
const (
hasLessConstL64 = (^uint64(0)) / 255
hasLessConstR64 = hasLessConstL64 * 128
hasLessConstL32 = (^uint32(0)) / 255
hasLessConstR32 = hasLessConstL32 * 128
hasMoreConstL64 = (^uint64(0)) / 255
hasMoreConstR64 = hasMoreConstL64 * 128
hasMoreConstL32 = (^uint32(0)) / 255
hasMoreConstR32 = hasMoreConstL32 * 128
)
func hasLess64(x, n uint64) bool {
return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0
}
func hasLess32(x, n uint32) bool {
return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0
}
func hasMore64(x, n uint64) bool {
return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0
}
func hasMore32(x, n uint32) bool {
return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0
}
var lowerCase = [256]byte{
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
}
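
The hasLess and hasMore helpers above are the word-at-a-time byte-range checks from the referenced bit-hacks page: eight bytes are tested against a threshold with a few arithmetic operations instead of a per-byte loop. A minimal, self-contained sketch of the same trick (constants and formula copied from the file above; the sample strings are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	hasLessConstL64 = (^uint64(0)) / 255    // 0x0101010101010101
	hasLessConstR64 = hasLessConstL64 * 128 // 0x8080808080808080
)

// hasLess64 reports whether any byte of x is strictly less than n (valid for n <= 128).
func hasLess64(x, n uint64) bool {
	return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0
}

func main() {
	clean := binary.LittleEndian.Uint64([]byte("Hello, W"))  // 8 printable bytes
	dirty := binary.LittleEndian.Uint64([]byte("Hello,\tW")) // contains a tab (0x09)

	fmt.Println(hasLess64(clean, 0x20)) // false: no byte below the space character
	fmt.Println(hasLess64(dirty, 0x20)) // true: the tab is a control character
}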

vendor/github.com/segmentio/asm/ascii/equal_fold.go generated vendored Normal file

@ -0,0 +1,30 @@
package ascii
import (
"github.com/segmentio/asm/internal/unsafebytes"
)
// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
// instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFold(a, b []byte) bool {
return EqualFoldString(unsafebytes.String(a), unsafebytes.String(b))
}
func HasPrefixFold(s, prefix []byte) bool {
return len(s) >= len(prefix) && EqualFold(s[:len(prefix)], prefix)
}
func HasSuffixFold(s, suffix []byte) bool {
return len(s) >= len(suffix) && EqualFold(s[len(s)-len(suffix):], suffix)
}
func HasPrefixFoldString(s, prefix string) bool {
return len(s) >= len(prefix) && EqualFoldString(s[:len(prefix)], prefix)
}
func HasSuffixFoldString(s, suffix string) bool {
return len(s) >= len(suffix) && EqualFoldString(s[len(s)-len(suffix):], suffix)
}
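
Because the input is assumed to be ASCII, case-insensitive comparison reduces to a per-byte table lookup with no Unicode folding. A short usage sketch of the wrappers above (the sample inputs are illustrative):

package main

import (
	"fmt"

	"github.com/segmentio/asm/ascii"
)

func main() {
	fmt.Println(ascii.EqualFoldString("Content-Type", "content-type"))           // true
	fmt.Println(ascii.HasPrefixFold([]byte("HTTP/1.1 200 OK"), []byte("http/"))) // true
	fmt.Println(ascii.HasSuffixFoldString("example.COM", ".com"))                // true
}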

vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go generated vendored Normal file

@ -0,0 +1,13 @@
// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a string, b string) bool

vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s generated vendored Normal file

@ -0,0 +1,304 @@
// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func EqualFoldString(a string, b string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·EqualFoldString(SB), NOSPLIT, $0-33
MOVQ a_base+0(FP), CX
MOVQ a_len+8(FP), DX
MOVQ b_base+16(FP), BX
CMPQ DX, b_len+24(FP)
JNE done
XORQ AX, AX
CMPQ DX, $0x10
JB init_x86
BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB)
JCS init_avx
init_x86:
LEAQ github·com∕segmentio∕asm∕ascii·lowerCase+0(SB), R9
XORL SI, SI
cmp8:
CMPQ DX, $0x08
JB cmp7
MOVBLZX (CX)(AX*1), DI
MOVBLZX (BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 1(CX)(AX*1), DI
MOVBLZX 1(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 2(CX)(AX*1), DI
MOVBLZX 2(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 3(CX)(AX*1), DI
MOVBLZX 3(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 4(CX)(AX*1), DI
MOVBLZX 4(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 5(CX)(AX*1), DI
MOVBLZX 5(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 6(CX)(AX*1), DI
MOVBLZX 6(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
MOVBLZX 7(CX)(AX*1), DI
MOVBLZX 7(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
JNE done
ADDQ $0x08, AX
SUBQ $0x08, DX
JMP cmp8
cmp7:
CMPQ DX, $0x07
JB cmp6
MOVBLZX 6(CX)(AX*1), DI
MOVBLZX 6(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp6:
CMPQ DX, $0x06
JB cmp5
MOVBLZX 5(CX)(AX*1), DI
MOVBLZX 5(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp5:
CMPQ DX, $0x05
JB cmp4
MOVBLZX 4(CX)(AX*1), DI
MOVBLZX 4(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp4:
CMPQ DX, $0x04
JB cmp3
MOVBLZX 3(CX)(AX*1), DI
MOVBLZX 3(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp3:
CMPQ DX, $0x03
JB cmp2
MOVBLZX 2(CX)(AX*1), DI
MOVBLZX 2(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp2:
CMPQ DX, $0x02
JB cmp1
MOVBLZX 1(CX)(AX*1), DI
MOVBLZX 1(BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
cmp1:
CMPQ DX, $0x01
JB success
MOVBLZX (CX)(AX*1), DI
MOVBLZX (BX)(AX*1), R8
MOVB (R9)(DI*1), DI
XORB (R9)(R8*1), DI
ORB DI, SI
done:
SETEQ ret+32(FP)
RET
success:
MOVB $0x01, ret+32(FP)
RET
init_avx:
MOVB $0x20, SI
PINSRB $0x00, SI, X12
VPBROADCASTB X12, Y12
MOVB $0x1f, SI
PINSRB $0x00, SI, X13
VPBROADCASTB X13, Y13
MOVB $0x9a, SI
PINSRB $0x00, SI, X14
VPBROADCASTB X14, Y14
MOVB $0x01, SI
PINSRB $0x00, SI, X15
VPBROADCASTB X15, Y15
cmp128:
CMPQ DX, $0x80
JB cmp64
VMOVDQU (CX)(AX*1), Y0
VMOVDQU 32(CX)(AX*1), Y1
VMOVDQU 64(CX)(AX*1), Y2
VMOVDQU 96(CX)(AX*1), Y3
VMOVDQU (BX)(AX*1), Y4
VMOVDQU 32(BX)(AX*1), Y5
VMOVDQU 64(BX)(AX*1), Y6
VMOVDQU 96(BX)(AX*1), Y7
VXORPD Y0, Y4, Y4
VPCMPEQB Y12, Y4, Y8
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y8, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y4, Y0, Y0
VXORPD Y1, Y5, Y5
VPCMPEQB Y12, Y5, Y9
VORPD Y12, Y1, Y1
VPADDB Y13, Y1, Y1
VPCMPGTB Y1, Y14, Y1
VPAND Y9, Y1, Y1
VPAND Y15, Y1, Y1
VPSLLW $0x05, Y1, Y1
VPCMPEQB Y5, Y1, Y1
VXORPD Y2, Y6, Y6
VPCMPEQB Y12, Y6, Y10
VORPD Y12, Y2, Y2
VPADDB Y13, Y2, Y2
VPCMPGTB Y2, Y14, Y2
VPAND Y10, Y2, Y2
VPAND Y15, Y2, Y2
VPSLLW $0x05, Y2, Y2
VPCMPEQB Y6, Y2, Y2
VXORPD Y3, Y7, Y7
VPCMPEQB Y12, Y7, Y11
VORPD Y12, Y3, Y3
VPADDB Y13, Y3, Y3
VPCMPGTB Y3, Y14, Y3
VPAND Y11, Y3, Y3
VPAND Y15, Y3, Y3
VPSLLW $0x05, Y3, Y3
VPCMPEQB Y7, Y3, Y3
VPAND Y1, Y0, Y0
VPAND Y3, Y2, Y2
VPAND Y2, Y0, Y0
ADDQ $0x80, AX
SUBQ $0x80, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
JMP cmp128
cmp64:
CMPQ DX, $0x40
JB cmp32
VMOVDQU (CX)(AX*1), Y0
VMOVDQU 32(CX)(AX*1), Y1
VMOVDQU (BX)(AX*1), Y2
VMOVDQU 32(BX)(AX*1), Y3
VXORPD Y0, Y2, Y2
VPCMPEQB Y12, Y2, Y4
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y4, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y2, Y0, Y0
VXORPD Y1, Y3, Y3
VPCMPEQB Y12, Y3, Y5
VORPD Y12, Y1, Y1
VPADDB Y13, Y1, Y1
VPCMPGTB Y1, Y14, Y1
VPAND Y5, Y1, Y1
VPAND Y15, Y1, Y1
VPSLLW $0x05, Y1, Y1
VPCMPEQB Y3, Y1, Y1
VPAND Y1, Y0, Y0
ADDQ $0x40, AX
SUBQ $0x40, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
cmp32:
CMPQ DX, $0x20
JB cmp16
VMOVDQU (CX)(AX*1), Y0
VMOVDQU (BX)(AX*1), Y1
VXORPD Y0, Y1, Y1
VPCMPEQB Y12, Y1, Y2
VORPD Y12, Y0, Y0
VPADDB Y13, Y0, Y0
VPCMPGTB Y0, Y14, Y0
VPAND Y2, Y0, Y0
VPAND Y15, Y0, Y0
VPSLLW $0x05, Y0, Y0
VPCMPEQB Y1, Y0, Y0
ADDQ $0x20, AX
SUBQ $0x20, DX
VPMOVMSKB Y0, SI
XORL $0xffffffff, SI
JNE done
cmp16:
CMPQ DX, $0x10
JLE cmp_tail
VMOVDQU (CX)(AX*1), X0
VMOVDQU (BX)(AX*1), X1
VXORPD X0, X1, X1
VPCMPEQB X12, X1, X2
VORPD X12, X0, X0
VPADDB X13, X0, X0
VPCMPGTB X0, X14, X0
VPAND X2, X0, X0
VPAND X15, X0, X0
VPSLLW $0x05, X0, X0
VPCMPEQB X1, X0, X0
ADDQ $0x10, AX
SUBQ $0x10, DX
VPMOVMSKB X0, SI
XORL $0x0000ffff, SI
JNE done
cmp_tail:
SUBQ $0x10, DX
ADDQ DX, AX
VMOVDQU (CX)(AX*1), X0
VMOVDQU (BX)(AX*1), X1
VXORPD X0, X1, X1
VPCMPEQB X12, X1, X2
VORPD X12, X0, X0
VPADDB X13, X0, X0
VPCMPGTB X0, X14, X0
VPAND X2, X0, X0
VPAND X15, X0, X0
VPSLLW $0x05, X0, X0
VPCMPEQB X1, X0, X0
VPMOVMSKB X0, AX
XORL $0x0000ffff, AX
JMP done


@ -0,0 +1,60 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a, b string) bool {
if len(a) != len(b) {
return false
}
var cmp byte
for len(a) >= 8 {
cmp |= lowerCase[a[0]] ^ lowerCase[b[0]]
cmp |= lowerCase[a[1]] ^ lowerCase[b[1]]
cmp |= lowerCase[a[2]] ^ lowerCase[b[2]]
cmp |= lowerCase[a[3]] ^ lowerCase[b[3]]
cmp |= lowerCase[a[4]] ^ lowerCase[b[4]]
cmp |= lowerCase[a[5]] ^ lowerCase[b[5]]
cmp |= lowerCase[a[6]] ^ lowerCase[b[6]]
cmp |= lowerCase[a[7]] ^ lowerCase[b[7]]
if cmp != 0 {
return false
}
a = a[8:]
b = b[8:]
}
switch len(a) {
case 7:
cmp |= lowerCase[a[6]] ^ lowerCase[b[6]]
fallthrough
case 6:
cmp |= lowerCase[a[5]] ^ lowerCase[b[5]]
fallthrough
case 5:
cmp |= lowerCase[a[4]] ^ lowerCase[b[4]]
fallthrough
case 4:
cmp |= lowerCase[a[3]] ^ lowerCase[b[3]]
fallthrough
case 3:
cmp |= lowerCase[a[2]] ^ lowerCase[b[2]]
fallthrough
case 2:
cmp |= lowerCase[a[1]] ^ lowerCase[b[1]]
fallthrough
case 1:
cmp |= lowerCase[a[0]] ^ lowerCase[b[0]]
}
return cmp == 0
}

vendor/github.com/segmentio/asm/ascii/valid.go generated vendored Normal file

@ -0,0 +1,18 @@
package ascii
import "github.com/segmentio/asm/internal/unsafebytes"
// Valid returns true if b contains only ASCII characters.
func Valid(b []byte) bool {
return ValidString(unsafebytes.String(b))
}
// ValidByte returns true if b is an ASCII character.
func ValidByte(b byte) bool {
return b <= 0x7f
}
// ValidRune returns true if r is an ASCII character.
func ValidRune(r rune) bool {
return r <= 0x7f
}

vendor/github.com/segmentio/asm/ascii/valid_amd64.go generated vendored Normal file

@ -0,0 +1,9 @@
// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool

vendor/github.com/segmentio/asm/ascii/valid_amd64.s generated vendored Normal file

@ -0,0 +1,132 @@
// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func ValidString(s string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·ValidString(SB), NOSPLIT, $0-17
MOVQ s_base+0(FP), AX
MOVQ s_len+8(FP), CX
MOVQ $0x8080808080808080, DX
CMPQ CX, $0x10
JB cmp8
BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB)
JCS init_avx
cmp8:
CMPQ CX, $0x08
JB cmp4
TESTQ DX, (AX)
JNZ invalid
ADDQ $0x08, AX
SUBQ $0x08, CX
JMP cmp8
cmp4:
CMPQ CX, $0x04
JB cmp3
TESTL $0x80808080, (AX)
JNZ invalid
ADDQ $0x04, AX
SUBQ $0x04, CX
cmp3:
CMPQ CX, $0x03
JB cmp2
MOVWLZX (AX), CX
MOVBLZX 2(AX), AX
SHLL $0x10, AX
ORL CX, AX
TESTL $0x80808080, AX
JMP done
cmp2:
CMPQ CX, $0x02
JB cmp1
TESTW $0x8080, (AX)
JMP done
cmp1:
CMPQ CX, $0x00
JE done
TESTB $0x80, (AX)
done:
SETEQ ret+16(FP)
RET
invalid:
MOVB $0x00, ret+16(FP)
RET
init_avx:
PINSRQ $0x00, DX, X4
VPBROADCASTQ X4, Y4
cmp256:
CMPQ CX, $0x00000100
JB cmp128
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VMOVDQU 64(AX), Y1
VPOR 96(AX), Y1, Y1
VMOVDQU 128(AX), Y2
VPOR 160(AX), Y2, Y2
VMOVDQU 192(AX), Y3
VPOR 224(AX), Y3, Y3
VPOR Y1, Y0, Y0
VPOR Y3, Y2, Y2
VPOR Y2, Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x00000100, AX
SUBQ $0x00000100, CX
JMP cmp256
cmp128:
CMPQ CX, $0x80
JB cmp64
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VMOVDQU 64(AX), Y1
VPOR 96(AX), Y1, Y1
VPOR Y1, Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x80, AX
SUBQ $0x80, CX
cmp64:
CMPQ CX, $0x40
JB cmp32
VMOVDQU (AX), Y0
VPOR 32(AX), Y0, Y0
VPTEST Y0, Y4
JNZ invalid
ADDQ $0x40, AX
SUBQ $0x40, CX
cmp32:
CMPQ CX, $0x20
JB cmp16
VPTEST (AX), Y4
JNZ invalid
ADDQ $0x20, AX
SUBQ $0x20, CX
cmp16:
CMPQ CX, $0x10
JLE cmp_tail
VPTEST (AX), X4
JNZ invalid
ADDQ $0x10, AX
SUBQ $0x10, CX
cmp_tail:
SUBQ $0x10, CX
ADDQ CX, AX
VPTEST (AX), X4
JMP done

vendor/github.com/segmentio/asm/ascii/valid_default.go generated vendored Normal file

@ -0,0 +1,48 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
import (
"unsafe"
)
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool {
p := *(*unsafe.Pointer)(unsafe.Pointer(&s))
i := uintptr(0)
n := uintptr(len(s))
for i+8 <= n {
if (*(*uint64)(unsafe.Pointer(uintptr(p) + i)) & 0x8080808080808080) != 0 {
return false
}
i += 8
}
if i+4 <= n {
if (*(*uint32)(unsafe.Pointer(uintptr(p) + i)) & 0x80808080) != 0 {
return false
}
i += 4
}
if i == n {
return true
}
p = unsafe.Pointer(uintptr(p) + i)
var x uint32
switch n - i {
case 3:
x = uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16
case 2:
x = uint32(*(*uint16)(p))
case 1:
x = uint32(*(*uint8)(p))
default:
return true
}
return (x & 0x80808080) == 0
}
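
The fallback above rests on one observation: a byte is ASCII exactly when its high bit is clear, so AND-ing an 8-byte chunk against 0x8080808080808080 is non-zero only if the chunk contains a non-ASCII byte. A tiny illustration of that check in isolation (helper name and inputs are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// chunkIsASCII reports whether an 8-byte chunk contains only ASCII bytes.
func chunkIsASCII(b []byte) bool {
	return binary.LittleEndian.Uint64(b)&0x8080808080808080 == 0
}

func main() {
	fmt.Println(chunkIsASCII([]byte("Gophers!"))) // true: every byte is below 0x80
	fmt.Println(chunkIsASCII([]byte("Gophérs")))  // false: 'é' encodes as two bytes with the high bit set
}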

vendor/github.com/segmentio/asm/ascii/valid_print.go generated vendored Normal file

@ -0,0 +1,18 @@
package ascii
import "github.com/segmentio/asm/internal/unsafebytes"
// ValidPrint returns true if b contains only printable ASCII characters.
func ValidPrint(b []byte) bool {
return ValidPrintString(unsafebytes.String(b))
}
// ValidPrintByte returns true if b is a printable ASCII character.
func ValidPrintByte(b byte) bool {
return 0x20 <= b && b <= 0x7e
}
// ValidPrintRune returns true if r is a printable ASCII character.
func ValidPrintRune(r rune) bool {
return 0x20 <= r && r <= 0x7e
}

vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go generated vendored Normal file

@ -0,0 +1,9 @@
// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package ascii
// ValidPrintString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool

vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s generated vendored Normal file

@ -0,0 +1,185 @@
// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func ValidPrintString(s string) bool
// Requires: AVX, AVX2, SSE4.1
TEXT ·ValidPrintString(SB), NOSPLIT, $0-17
MOVQ s_base+0(FP), AX
MOVQ s_len+8(FP), CX
CMPQ CX, $0x10
JB init_x86
BTL $0x08, github·com∕segmentio∕asm∕cpu·X86+0(SB)
JCS init_avx
init_x86:
CMPQ CX, $0x08
JB cmp4
MOVQ $0xdfdfdfdfdfdfdfe0, DX
MOVQ $0x0101010101010101, BX
MOVQ $0x8080808080808080, SI
cmp8:
MOVQ (AX), DI
MOVQ DI, R8
LEAQ (DI)(DX*1), R9
NOTQ R8
ANDQ R8, R9
LEAQ (DI)(BX*1), R8
ORQ R8, DI
ORQ R9, DI
ADDQ $0x08, AX
SUBQ $0x08, CX
TESTQ SI, DI
JNE done
CMPQ CX, $0x08
JB cmp4
JMP cmp8
cmp4:
CMPQ CX, $0x04
JB cmp3
MOVL (AX), DX
MOVL DX, BX
LEAL 3755991008(DX), SI
NOTL BX
ANDL BX, SI
LEAL 16843009(DX), BX
ORL BX, DX
ORL SI, DX
ADDQ $0x04, AX
SUBQ $0x04, CX
TESTL $0x80808080, DX
JNE done
cmp3:
CMPQ CX, $0x03
JB cmp2
MOVWLZX (AX), DX
MOVBLZX 2(AX), AX
SHLL $0x10, AX
ORL DX, AX
ORL $0x20000000, AX
JMP final
cmp2:
CMPQ CX, $0x02
JB cmp1
MOVWLZX (AX), AX
ORL $0x20200000, AX
JMP final
cmp1:
CMPQ CX, $0x00
JE done
MOVBLZX (AX), AX
ORL $0x20202000, AX
final:
MOVL AX, CX
LEAL 3755991008(AX), DX
NOTL CX
ANDL CX, DX
LEAL 16843009(AX), CX
ORL CX, AX
ORL DX, AX
TESTL $0x80808080, AX
done:
SETEQ ret+16(FP)
RET
init_avx:
MOVB $0x1f, DL
PINSRB $0x00, DX, X8
VPBROADCASTB X8, Y8
MOVB $0x7e, DL
PINSRB $0x00, DX, X9
VPBROADCASTB X9, Y9
cmp128:
CMPQ CX, $0x80
JB cmp64
VMOVDQU (AX), Y0
VMOVDQU 32(AX), Y1
VMOVDQU 64(AX), Y2
VMOVDQU 96(AX), Y3
VPCMPGTB Y8, Y0, Y4
VPCMPGTB Y9, Y0, Y0
VPANDN Y4, Y0, Y0
VPCMPGTB Y8, Y1, Y5
VPCMPGTB Y9, Y1, Y1
VPANDN Y5, Y1, Y1
VPCMPGTB Y8, Y2, Y6
VPCMPGTB Y9, Y2, Y2
VPANDN Y6, Y2, Y2
VPCMPGTB Y8, Y3, Y7
VPCMPGTB Y9, Y3, Y3
VPANDN Y7, Y3, Y3
VPAND Y1, Y0, Y0
VPAND Y3, Y2, Y2
VPAND Y2, Y0, Y0
ADDQ $0x80, AX
SUBQ $0x80, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
JMP cmp128
cmp64:
CMPQ CX, $0x40
JB cmp32
VMOVDQU (AX), Y0
VMOVDQU 32(AX), Y1
VPCMPGTB Y8, Y0, Y2
VPCMPGTB Y9, Y0, Y0
VPANDN Y2, Y0, Y0
VPCMPGTB Y8, Y1, Y3
VPCMPGTB Y9, Y1, Y1
VPANDN Y3, Y1, Y1
VPAND Y1, Y0, Y0
ADDQ $0x40, AX
SUBQ $0x40, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
cmp32:
CMPQ CX, $0x20
JB cmp16
VMOVDQU (AX), Y0
VPCMPGTB Y8, Y0, Y1
VPCMPGTB Y9, Y0, Y0
VPANDN Y1, Y0, Y0
ADDQ $0x20, AX
SUBQ $0x20, CX
VPMOVMSKB Y0, DX
XORL $0xffffffff, DX
JNE done
cmp16:
CMPQ CX, $0x10
JLE cmp_tail
VMOVDQU (AX), X0
VPCMPGTB X8, X0, X1
VPCMPGTB X9, X0, X0
VPANDN X1, X0, X0
ADDQ $0x10, AX
SUBQ $0x10, CX
VPMOVMSKB X0, DX
XORL $0x0000ffff, DX
JNE done
cmp_tail:
SUBQ $0x10, CX
ADDQ CX, AX
VMOVDQU (AX), X0
VPCMPGTB X8, X0, X1
VPCMPGTB X9, X0, X0
VPANDN X1, X0, X0
VPMOVMSKB X0, DX
XORL $0x0000ffff, DX
JMP done


@ -0,0 +1,46 @@
//go:build purego || !amd64
// +build purego !amd64
package ascii
import "unsafe"
// ValidPrintString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool {
p := *(*unsafe.Pointer)(unsafe.Pointer(&s))
i := uintptr(0)
n := uintptr(len(s))
for i+8 <= n {
if hasLess64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x7e) {
return false
}
i += 8
}
if i+4 <= n {
if hasLess32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x7e) {
return false
}
i += 4
}
if i == n {
return true
}
p = unsafe.Pointer(uintptr(p) + i)
var x uint32
switch n - i {
case 3:
x = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16
case 2:
x = 0x20200000 | uint32(*(*uint16)(p))
case 1:
x = 0x20202000 | uint32(*(*uint8)(p))
default:
return true
}
return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e))
}

vendor/github.com/segmentio/asm/base64/base64.go generated vendored Normal file

@ -0,0 +1,67 @@
package base64
import (
"encoding/base64"
)
const (
StdPadding rune = base64.StdPadding
NoPadding rune = base64.NoPadding
encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"
encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"
letterRange = int8('Z' - 'A' + 1)
)
// StdEncoding is the standard base64 encoding, as defined in RFC 4648.
var StdEncoding = NewEncoding(encodeStd)
// URLEncoding is the alternate base64 encoding defined in RFC 4648.
// It is typically used in URLs and file names.
var URLEncoding = NewEncoding(encodeURL)
// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2.
// This is the same as StdEncoding but omits padding characters.
var RawStdEncoding = StdEncoding.WithPadding(NoPadding)
// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648.
// This is the same as URLEncoding but omits padding characters.
var RawURLEncoding = URLEncoding.WithPadding(NoPadding)
// NewEncoding returns a new padded Encoding defined by the given alphabet,
// which must be a 64-byte string that does not contain the padding character
// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet
// cannot be arbitrary, and it must follow one of the known standard encoding
// variants.
//
// Required alphabet values:
// * [0,26): characters 'A'..'Z'
// * [26,52): characters 'a'..'z'
// * [52,62): characters '0'..'9'
// Flexible alphabet value options:
// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/'
// * RFC 4648 URI: '-' and '_'
// * RFC 3501: '+' and ','
//
// The resulting Encoding uses the default padding character ('='), which may
// be changed or disabled via WithPadding. The padding character is unrestricted,
// but it must be a character outside of the encoder alphabet.
func NewEncoding(encoder string) *Encoding {
if len(encoder) != 64 {
panic("encoding alphabet is not 64-bytes long")
}
if _, ok := allowedEncoding[encoder]; !ok {
panic("non-standard encoding alphabets are not supported")
}
return newEncoding(encoder)
}
var allowedEncoding = map[string]struct{}{
encodeStd: {},
encodeURL: {},
encodeIMAP: {},
}
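
The exported encodings mirror encoding/base64, so callers use the package the same way and the vectorized paths are picked internally when the CPU supports them. A short usage sketch (assuming the vendored import path):

package main

import (
	"fmt"

	"github.com/segmentio/asm/base64"
)

func main() {
	msg := []byte("hello, accelerated base64")

	encoded := base64.StdEncoding.EncodeToString(msg)
	fmt.Println(encoded)

	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded))

	// RawURLEncoding omits padding, matching encoding/base64.RawURLEncoding.
	fmt.Println(base64.RawURLEncoding.EncodeToString(msg))
}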

vendor/github.com/segmentio/asm/base64/base64_amd64.go generated vendored Normal file

@ -0,0 +1,160 @@
//go:build amd64 && !purego
// +build amd64,!purego
package base64
import (
"encoding/base64"
"github.com/segmentio/asm/cpu"
"github.com/segmentio/asm/cpu/x86"
"github.com/segmentio/asm/internal/unsafebytes"
)
// An Encoding is a radix 64 encoding/decoding scheme, defined by a
// 64-character alphabet.
type Encoding struct {
enc func(dst []byte, src []byte, lut *int8) (int, int)
enclut [32]int8
dec func(dst []byte, src []byte, lut *int8) (int, int)
declut [48]int8
base *base64.Encoding
}
const (
minEncodeLen = 28
minDecodeLen = 45
)
func newEncoding(encoder string) *Encoding {
e := &Encoding{base: base64.NewEncoding(encoder)}
if cpu.X86.Has(x86.AVX2) {
e.enableEncodeAVX2(encoder)
e.enableDecodeAVX2(encoder)
}
return e
}
func (e *Encoding) enableEncodeAVX2(encoder string) {
// Translate values 0..63 to the Base64 alphabet. There are five sets:
//
// From To Add Index Example
// [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ
// [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz
// [52..61] [48..57] -4 [2..11] 0123456789
// [62] [43] -19 12 +
// [63] [47] -16 13 /
tab := [32]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange}
for i, ch := range encoder[2*letterRange:] {
tab[2+i] = int8(ch) - 2*letterRange - int8(i)
}
e.enc = encodeAVX2
e.enclut = tab
}
func (e *Encoding) enableDecodeAVX2(encoder string) {
c62, c63 := int8(encoder[62]), int8(encoder[63])
url := c63 == '_'
if url {
c63 = '/'
}
// Translate values from the Base64 alphabet using five sets. Values outside
// of these ranges are considered invalid:
//
// From To Add Index Example
// [47] [63] +16 1 /
// [43] [62] +19 2 +
// [48..57] [52..61] +4 3 0123456789
// [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ
// [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz
tab := [48]int8{
0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B,
}
tab[(c62&15)+16] = 0x1A
tab[(c63&15)+16] = 0x1A
if url {
e.dec = decodeAVX2URI
} else {
e.dec = decodeAVX2
}
e.declut = tab
}
// WithPadding creates a duplicate Encoding updated with a specified padding
// character, or NoPadding to disable padding. The padding character must not
// be contained in the encoding alphabet, must not be '\r' or '\n', and must
// be no greater than '\xFF'.
func (enc Encoding) WithPadding(padding rune) *Encoding {
enc.base = enc.base.WithPadding(padding)
return &enc
}
// Strict creates a duplicate encoding updated with strict decoding enabled.
// This requires that trailing padding bits are zero.
func (enc Encoding) Strict() *Encoding {
enc.base = enc.base.Strict()
return &enc
}
// Encode encodes src using the defined encoding alphabet.
// This will write EncodedLen(len(src)) bytes to dst.
func (enc *Encoding) Encode(dst, src []byte) {
if len(src) >= minEncodeLen && enc.enc != nil {
d, s := enc.enc(dst, src, &enc.enclut[0])
dst = dst[d:]
src = src[s:]
}
enc.base.Encode(dst, src)
}
// EncodeToString returns the base64 encoding of src.
func (enc *Encoding) EncodeToString(src []byte) string {
buf := make([]byte, enc.base.EncodedLen(len(src)))
enc.Encode(buf, src)
return string(buf)
}
// EncodedLen calculates the base64-encoded byte length for a message
// of length n.
func (enc *Encoding) EncodedLen(n int) int {
return enc.base.EncodedLen(n)
}
// Decode decodes src using the defined encoding alphabet.
// This will write DecodedLen(len(src)) bytes to dst and return the number of
// bytes written.
func (enc *Encoding) Decode(dst, src []byte) (n int, err error) {
var d, s int
if len(src) >= minDecodeLen && enc.dec != nil {
d, s = enc.dec(dst, src, &enc.declut[0])
dst = dst[d:]
src = src[s:]
}
n, err = enc.base.Decode(dst, src)
n += d
return
}
// DecodeString decodes the base64 encoded string s, returning the decoded
// value as bytes.
func (enc *Encoding) DecodeString(s string) ([]byte, error) {
src := unsafebytes.BytesOf(s)
dst := make([]byte, enc.base.DecodedLen(len(s)))
n, err := enc.Decode(dst, src)
return dst[:n], err
}
// DecodedLen calculates the decoded byte length for a base64-encoded message
// of length n.
func (enc *Encoding) DecodedLen(n int) int {
return enc.base.DecodedLen(n)
}
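
The offset table described in enableEncodeAVX2 can be sanity-checked with a scalar version of the index selection the assembly performs (a saturating subtract of 51, plus one for every value above 25). This is only a verification sketch of the comment's table, not code from the package:

package main

import "fmt"

const encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

func main() {
	const letterRange = int8('Z' - 'A' + 1) // 26

	// Build the offset table the same way enableEncodeAVX2 does.
	var tab [16]int8
	tab[0] = int8(encodeStd[0])                         // +65 for values 0..25
	tab[1] = int8(encodeStd[letterRange]) - letterRange // +71 for values 26..51
	for i, ch := range encodeStd[2*letterRange:] {
		tab[2+i] = int8(ch) - 2*letterRange - int8(i) // -4 for digits, -19 for '+', -16 for '/'
	}

	// Scalar equivalent of the VPSUBUSB/VPCMPGTB index selection.
	for _, v := range []int8{0, 25, 26, 51, 52, 61, 62, 63} {
		idx := v - 51
		if idx < 0 {
			idx = 0 // saturating subtraction
		}
		if v > 25 {
			idx++
		}
		fmt.Printf("%2d -> %c\n", v, byte(v+tab[idx]))
	}
}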


@ -0,0 +1,14 @@
//go:build purego || !amd64
// +build purego !amd64
package base64
import "encoding/base64"
// An Encoding is a radix 64 encoding/decoding scheme, defined by a
// 64-character alphabet.
type Encoding = base64.Encoding
func newEncoding(encoder string) *Encoding {
return base64.NewEncoding(encoder)
}

vendor/github.com/segmentio/asm/base64/decode_amd64.go generated vendored Normal file

@ -0,0 +1,10 @@
// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package base64
func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)

vendor/github.com/segmentio/asm/base64/decode_amd64.s generated vendored Normal file

@ -0,0 +1,144 @@
// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010
DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010
DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010
DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010
GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32
DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140
DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140
GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32
DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000
DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000
GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32
DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000
DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000
GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16
DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102
DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08
DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405
DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000
GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32
// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·decodeAVX2(SB), NOSPLIT, $0-72
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x2f, CL
PINSRB $0x00, CX, X8
VPBROADCASTB X8, Y8
XORQ CX, CX
XORQ BX, BX
VPXOR Y7, Y7, Y7
VPERMQ $0x44, (SI), Y6
VPERMQ $0x44, 16(SI), Y4
VMOVDQA b64_dec_lut_hi<>+0(SB), Y5
loop:
VMOVDQU (DX)(BX*1), Y0
VPSRLD $0x04, Y0, Y2
VPAND Y8, Y0, Y3
VPSHUFB Y3, Y4, Y3
VPAND Y8, Y2, Y2
VPSHUFB Y2, Y5, Y9
VPTEST Y9, Y3
JNE done
VPCMPEQB Y8, Y0, Y3
VPADDB Y3, Y2, Y2
VPSHUFB Y2, Y6, Y2
VPADDB Y0, Y2, Y0
VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0
VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0
VEXTRACTI128 $0x01, Y0, X1
VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1
VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0
VPBLENDD $0x08, Y1, Y0, Y1
VPBLENDD $0xc0, Y7, Y1, Y1
VMOVDQU Y1, (AX)(CX*1)
ADDQ $0x18, CX
ADDQ $0x20, BX
SUBQ $0x20, DI
CMPQ DI, $0x2d
JB done
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET
// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72
MOVB $0x2f, AL
PINSRB $0x00, AX, X0
VPBROADCASTB X0, Y0
MOVB $0x5f, AL
PINSRB $0x00, AX, X1
VPBROADCASTB X1, Y1
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x2f, CL
PINSRB $0x00, CX, X10
VPBROADCASTB X10, Y10
XORQ CX, CX
XORQ BX, BX
VPXOR Y9, Y9, Y9
VPERMQ $0x44, (SI), Y8
VPERMQ $0x44, 16(SI), Y6
VMOVDQA b64_dec_lut_hi<>+0(SB), Y7
loop:
VMOVDQU (DX)(BX*1), Y2
VPCMPEQB Y2, Y1, Y4
VPBLENDVB Y4, Y0, Y2, Y2
VPSRLD $0x04, Y2, Y4
VPAND Y10, Y2, Y5
VPSHUFB Y5, Y6, Y5
VPAND Y10, Y4, Y4
VPSHUFB Y4, Y7, Y11
VPTEST Y11, Y5
JNE done
VPCMPEQB Y10, Y2, Y5
VPADDB Y5, Y4, Y4
VPSHUFB Y4, Y8, Y4
VPADDB Y2, Y4, Y2
VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2
VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2
VEXTRACTI128 $0x01, Y2, X3
VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3
VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2
VPBLENDD $0x08, Y3, Y2, Y3
VPBLENDD $0xc0, Y9, Y3, Y3
VMOVDQU Y3, (AX)(CX*1)
ADDQ $0x18, CX
ADDQ $0x20, BX
SUBQ $0x20, DI
CMPQ DI, $0x2d
JB done
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET

vendor/github.com/segmentio/asm/base64/encode_amd64.go generated vendored Normal file

@ -0,0 +1,8 @@
// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package base64
func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)

vendor/github.com/segmentio/asm/base64/encode_amd64.s generated vendored Normal file

@ -0,0 +1,88 @@
// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int)
// Requires: AVX, AVX2, SSE4.1
TEXT ·encodeAVX2(SB), NOSPLIT, $0-72
MOVQ dst_base+0(FP), AX
MOVQ src_base+24(FP), DX
MOVQ lut+48(FP), SI
MOVQ src_len+32(FP), DI
MOVB $0x33, CL
PINSRB $0x00, CX, X4
VPBROADCASTB X4, Y4
MOVB $0x19, CL
PINSRB $0x00, CX, X5
VPBROADCASTB X5, Y5
XORQ CX, CX
XORQ BX, BX
// Load the 16-byte LUT into both lanes of the register
VPERMQ $0x44, (SI), Y3
// Load the first block using a mask to avoid potential fault
VMOVDQU b64_enc_load<>+0(SB), Y0
VPMASKMOVD -4(DX)(BX*1), Y0, Y0
loop:
VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0
VPAND b64_enc_mask1<>+0(SB), Y0, Y1
VPSLLW $0x08, Y1, Y2
VPSLLW $0x04, Y1, Y1
VPBLENDW $0xaa, Y2, Y1, Y2
VPAND b64_enc_mask2<>+0(SB), Y0, Y1
VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0
VPOR Y0, Y2, Y0
VPSUBUSB Y4, Y0, Y1
VPCMPGTB Y5, Y0, Y2
VPSUBB Y2, Y1, Y1
VPSHUFB Y1, Y3, Y1
VPADDB Y0, Y1, Y0
VMOVDQU Y0, (AX)(CX*1)
ADDQ $0x20, CX
ADDQ $0x18, BX
SUBQ $0x18, DI
CMPQ DI, $0x20
JB done
VMOVDQU -4(DX)(BX*1), Y0
JMP loop
done:
MOVQ CX, ret+56(FP)
MOVQ BX, ret1+64(FP)
VZEROUPPER
RET
DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000
DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000
DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000
DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000
GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32
DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405
DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b
DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001
DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607
GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0
DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0
GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00
DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00
GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32
DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040
DATA b64_enc_mult<>+24(SB)/8, $0x0400004004000040
GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32

vendor/github.com/segmentio/asm/cpu/arm/arm.go generated vendored Normal file

@ -0,0 +1,80 @@
package arm
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
SWP Feature = 1 << iota // SWP instruction support
HALF // Half-word load and store support
THUMB // ARM Thumb instruction set
BIT26 // Address space limited to 26-bits
FASTMUL // 32-bit operand, 64-bit result multiplication support
FPA // Floating point arithmetic support
VFP // Vector floating point support
EDSP // DSP Extensions support
JAVA // Java instruction set
IWMMXT // Intel Wireless MMX technology support
CRUNCH // MaverickCrunch context switching and handling
THUMBEE // Thumb EE instruction set
NEON // NEON instruction set
VFPv3 // Vector floating point version 3 support
VFPv3D16 // Vector floating point version 3 D8-D15
TLS // Thread local storage support
VFPv4 // Vector floating point version 4 support
IDIVA // Integer divide instruction support in ARM mode
IDIVT // Integer divide instruction support in Thumb mode
VFPD32 // Vector floating point version 3 D15-D31
LPAE // Large Physical Address Extensions
EVTSTRM // Event stream support
AES // AES hardware implementation
PMULL // Polynomial multiplication instruction set
SHA1 // SHA1 hardware implementation
SHA2 // SHA2 hardware implementation
CRC32 // CRC32 hardware implementation
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(SWP, ARM.HasSWP)
cpu.set(HALF, ARM.HasHALF)
cpu.set(THUMB, ARM.HasTHUMB)
cpu.set(BIT26, ARM.Has26BIT)
cpu.set(FASTMUL, ARM.HasFASTMUL)
cpu.set(FPA, ARM.HasFPA)
cpu.set(VFP, ARM.HasVFP)
cpu.set(EDSP, ARM.HasEDSP)
cpu.set(JAVA, ARM.HasJAVA)
cpu.set(IWMMXT, ARM.HasIWMMXT)
cpu.set(CRUNCH, ARM.HasCRUNCH)
cpu.set(THUMBEE, ARM.HasTHUMBEE)
cpu.set(NEON, ARM.HasNEON)
cpu.set(VFPv3, ARM.HasVFPv3)
cpu.set(VFPv3D16, ARM.HasVFPv3D16)
cpu.set(TLS, ARM.HasTLS)
cpu.set(VFPv4, ARM.HasVFPv4)
cpu.set(IDIVA, ARM.HasIDIVA)
cpu.set(IDIVT, ARM.HasIDIVT)
cpu.set(VFPD32, ARM.HasVFPD32)
cpu.set(LPAE, ARM.HasLPAE)
cpu.set(EVTSTRM, ARM.HasEVTSTRM)
cpu.set(AES, ARM.HasAES)
cpu.set(PMULL, ARM.HasPMULL)
cpu.set(SHA1, ARM.HasSHA1)
cpu.set(SHA2, ARM.HasSHA2)
cpu.set(CRC32, ARM.HasCRC32)
return cpu
}

vendor/github.com/segmentio/asm/cpu/arm64/arm64.go generated vendored Normal file

@ -0,0 +1,74 @@
package arm64
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
FP Feature = 1 << iota // Floating-point instruction set (always available)
ASIMD // Advanced SIMD (always available)
EVTSTRM // Event stream support
AES // AES hardware implementation
PMULL // Polynomial multiplication instruction set
SHA1 // SHA1 hardware implementation
SHA2 // SHA2 hardware implementation
CRC32 // CRC32 hardware implementation
ATOMICS // Atomic memory operation instruction set
FPHP // Half precision floating-point instruction set
ASIMDHP // Advanced SIMD half precision instruction set
CPUID // CPUID identification scheme registers
ASIMDRDM // Rounding double multiply add/subtract instruction set
JSCVT // Javascript conversion from floating-point to integer
FCMA // Floating-point multiplication and addition of complex numbers
LRCPC // Release Consistent processor consistent support
DCPOP // Persistent memory support
SHA3 // SHA3 hardware implementation
SM3 // SM3 hardware implementation
SM4 // SM4 hardware implementation
ASIMDDP // Advanced SIMD double precision instruction set
SHA512 // SHA512 hardware implementation
SVE // Scalable Vector Extensions
ASIMDFHM // Advanced SIMD multiplication FP16 to FP32
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(FP, ARM64.HasFP)
cpu.set(ASIMD, ARM64.HasASIMD)
cpu.set(EVTSTRM, ARM64.HasEVTSTRM)
cpu.set(AES, ARM64.HasAES)
cpu.set(PMULL, ARM64.HasPMULL)
cpu.set(SHA1, ARM64.HasSHA1)
cpu.set(SHA2, ARM64.HasSHA2)
cpu.set(CRC32, ARM64.HasCRC32)
cpu.set(ATOMICS, ARM64.HasATOMICS)
cpu.set(FPHP, ARM64.HasFPHP)
cpu.set(ASIMDHP, ARM64.HasASIMDHP)
cpu.set(CPUID, ARM64.HasCPUID)
cpu.set(ASIMDRDM, ARM64.HasASIMDRDM)
cpu.set(JSCVT, ARM64.HasJSCVT)
cpu.set(FCMA, ARM64.HasFCMA)
cpu.set(LRCPC, ARM64.HasLRCPC)
cpu.set(DCPOP, ARM64.HasDCPOP)
cpu.set(SHA3, ARM64.HasSHA3)
cpu.set(SM3, ARM64.HasSM3)
cpu.set(SM4, ARM64.HasSM4)
cpu.set(ASIMDDP, ARM64.HasASIMDDP)
cpu.set(SHA512, ARM64.HasSHA512)
cpu.set(SVE, ARM64.HasSVE)
cpu.set(ASIMDFHM, ARM64.HasASIMDFHM)
return cpu
}

vendor/github.com/segmentio/asm/cpu/cpu.go generated vendored Normal file

@ -0,0 +1,22 @@
// Package cpu provides APIs to detect CPU features available at runtime.
package cpu
import (
"github.com/segmentio/asm/cpu/arm"
"github.com/segmentio/asm/cpu/arm64"
"github.com/segmentio/asm/cpu/x86"
)
var (
// X86 is the bitset representing which parts of the x86 instruction sets are
// supported by the CPU.
X86 = x86.ABI()
// ARM is the bitset representing which parts of the arm instruction sets
// are supported by the CPU.
ARM = arm.ABI()
// ARM64 is the bitset representing which parts of the arm64 instruction
// sets are supported by the CPU.
ARM64 = arm64.ABI()
)
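
These bitsets are what the library's stubs consult when choosing between vectorized assembly and portable Go. A minimal sketch of gating a fast path on them:

package main

import (
	"fmt"

	"github.com/segmentio/asm/cpu"
	"github.com/segmentio/asm/cpu/x86"
)

func main() {
	if cpu.X86.Has(x86.AVX2) {
		fmt.Println("AVX2 available: vectorized routines will be used")
	} else {
		fmt.Println("AVX2 not available: portable fallbacks will be used")
	}
}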

vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go generated vendored Normal file

@ -0,0 +1,32 @@
// Package cpuid provides generic types used to represent CPU features supported
// by the architecture.
package cpuid
// CPU is a bitset of feature flags representing the capabilities of various CPU
// architectures that this package provides optimized assembly routines for.
//
// The intent is to provide a stable ABI between the Go code that generates the
// assembly, and the program that uses the library functions.
type CPU uint64
// Feature represents a single CPU feature.
type Feature uint64
const (
// None is a Feature value that has no CPU features enabled.
None Feature = 0
// All is a Feature value that has all CPU features enabled.
All Feature = 0xFFFFFFFFFFFFFFFF
)
func (cpu CPU) Has(feature Feature) bool {
return (Feature(cpu) & feature) == feature
}
func (cpu *CPU) Set(feature Feature, enabled bool) {
if enabled {
*cpu |= CPU(feature)
} else {
*cpu &= ^CPU(feature)
}
}

vendor/github.com/segmentio/asm/cpu/x86/x86.go generated vendored Normal file

@ -0,0 +1,76 @@
package x86
import (
"github.com/segmentio/asm/cpu/cpuid"
. "golang.org/x/sys/cpu"
)
type CPU cpuid.CPU
func (cpu CPU) Has(feature Feature) bool {
return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
}
func (cpu *CPU) set(feature Feature, enable bool) {
(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
}
type Feature cpuid.Feature
const (
SSE Feature = 1 << iota // SSE functions
SSE2 // P4 SSE functions
SSE3 // Prescott SSE3 functions
SSE41 // Penryn SSE4.1 functions
SSE42 // Nehalem SSE4.2 functions
SSE4A // AMD Barcelona microarchitecture SSE4a instructions
SSSE3 // Conroe SSSE3 functions
AVX // AVX functions
AVX2 // AVX2 functions
AVX512BF16 // AVX-512 BFLOAT16 Instructions
AVX512BITALG // AVX-512 Bit Algorithms
AVX512BW // AVX-512 Byte and Word Instructions
AVX512CD // AVX-512 Conflict Detection Instructions
AVX512DQ // AVX-512 Doubleword and Quadword Instructions
AVX512ER // AVX-512 Exponential and Reciprocal Instructions
AVX512F // AVX-512 Foundation
AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions
AVX512PF // AVX-512 Prefetch Instructions
AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions
AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2
AVX512VL // AVX-512 Vector Length Extensions
AVX512VNNI // AVX-512 Vector Neural Network Instructions
AVX512VP2INTERSECT // AVX-512 Intersect for D/Q
AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword
CMOV // Conditional move
)
func ABI() CPU {
cpu := CPU(0)
cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SSE?
cpu.set(SSE2, X86.HasSSE2)
cpu.set(SSE3, X86.HasSSE3)
cpu.set(SSE41, X86.HasSSE41)
cpu.set(SSE42, X86.HasSSE42)
cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
cpu.set(SSSE3, X86.HasSSSE3)
cpu.set(AVX, X86.HasAVX)
cpu.set(AVX2, X86.HasAVX2)
cpu.set(AVX512BF16, X86.HasAVX512BF16)
cpu.set(AVX512BITALG, X86.HasAVX512BITALG)
cpu.set(AVX512BW, X86.HasAVX512BW)
cpu.set(AVX512CD, X86.HasAVX512CD)
cpu.set(AVX512DQ, X86.HasAVX512DQ)
cpu.set(AVX512ER, X86.HasAVX512ER)
cpu.set(AVX512F, X86.HasAVX512F)
cpu.set(AVX512IFMA, X86.HasAVX512IFMA)
cpu.set(AVX512PF, X86.HasAVX512PF)
cpu.set(AVX512VBMI, X86.HasAVX512VBMI)
cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2)
cpu.set(AVX512VL, X86.HasAVX512VL)
cpu.set(AVX512VNNI, X86.HasAVX512VNNI)
cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu?
cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ)
cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV?
return cpu
}


@ -0,0 +1,20 @@
package unsafebytes
import "unsafe"
func Pointer(b []byte) *byte {
return *(**byte)(unsafe.Pointer(&b))
}
func String(b []byte) string {
return *(*string)(unsafe.Pointer(&b))
}
func BytesOf(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)}))
}
type sliceHeader struct {
str string
cap int
}

vendor/github.com/segmentio/asm/keyset/keyset.go generated vendored Normal file

@ -0,0 +1,40 @@
package keyset
import (
"bytes"
"github.com/segmentio/asm/cpu"
"github.com/segmentio/asm/cpu/arm64"
"github.com/segmentio/asm/cpu/x86"
)
// New prepares a set of keys for use with Lookup.
//
// An optimized routine is used if the processor supports AVX instructions and
// the maximum length of any of the keys is less than or equal to 16. If New
// returns nil, this indicates that an optimized routine is not available, and
// the caller should use a fallback.
func New(keys [][]byte) []byte {
maxWidth, hasNullByte := checkKeys(keys)
if hasNullByte || maxWidth > 16 || !(cpu.X86.Has(x86.AVX) || cpu.ARM64.Has(arm64.ASIMD)) {
return nil
}
set := make([]byte, len(keys)*16)
for i, k := range keys {
copy(set[i*16:], k)
}
return set
}
func checkKeys(keys [][]byte) (maxWidth int, hasNullByte bool) {
for _, k := range keys {
if len(k) > maxWidth {
maxWidth = len(k)
}
if bytes.IndexByte(k, 0) >= 0 {
hasNullByte = true
}
}
return
}
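
A nil result from New is the signal to keep a portable path around. A usage sketch pairing New and Lookup with a plain linear-scan fallback (the keys and helper are illustrative):

package main

import (
	"bytes"
	"fmt"

	"github.com/segmentio/asm/keyset"
)

func main() {
	keys := [][]byte{[]byte("id"), []byte("name"), []byte("email")}

	set := keyset.New(keys)
	find := func(key []byte) int {
		if set != nil {
			return keyset.Lookup(set, key)
		}
		// Fallback when the optimized routine is unavailable.
		for i, k := range keys {
			if bytes.Equal(k, key) {
				return i
			}
		}
		return len(keys)
	}

	fmt.Println(find([]byte("name")))    // 1
	fmt.Println(find([]byte("missing"))) // 3, i.e. len(keys), meaning not found
}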

vendor/github.com/segmentio/asm/keyset/keyset_amd64.go generated vendored Normal file

@ -0,0 +1,10 @@
// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
package keyset
// Lookup searches for a key in a set of keys, returning its index if
// found. If the key cannot be found, the number of keys is returned.
func Lookup(keyset []byte, key []byte) int

vendor/github.com/segmentio/asm/keyset/keyset_amd64.s generated vendored Normal file

@ -0,0 +1,108 @@
// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT.
//go:build !purego
// +build !purego
#include "textflag.h"
// func Lookup(keyset []byte, key []byte) int
// Requires: AVX
TEXT ·Lookup(SB), NOSPLIT, $0-56
MOVQ keyset_base+0(FP), AX
MOVQ keyset_len+8(FP), CX
SHRQ $0x04, CX
MOVQ key_base+24(FP), DX
MOVQ key_len+32(FP), BX
MOVQ key_cap+40(FP), SI
CMPQ BX, $0x10
JA not_found
CMPQ SI, $0x10
JB safe_load
load:
VMOVUPS (DX), X0
prepare:
VPXOR X2, X2, X2
VPCMPEQB X1, X1, X1
LEAQ blend_masks<>+16(SB), DX
SUBQ BX, DX
VMOVUPS (DX), X3
VPBLENDVB X3, X0, X2, X0
XORQ DX, DX
MOVQ CX, BX
SHRQ $0x02, BX
SHLQ $0x02, BX
bigloop:
CMPQ DX, BX
JE loop
VPCMPEQB (AX), X0, X8
VPTEST X1, X8
JCS done
VPCMPEQB 16(AX), X0, X9
VPTEST X1, X9
JCS found1
VPCMPEQB 32(AX), X0, X10
VPTEST X1, X10
JCS found2
VPCMPEQB 48(AX), X0, X11
VPTEST X1, X11
JCS found3
ADDQ $0x04, DX
ADDQ $0x40, AX
JMP bigloop
loop:
CMPQ DX, CX
JE done
VPCMPEQB (AX), X0, X2
VPTEST X1, X2
JCS done
INCQ DX
ADDQ $0x10, AX
JMP loop
JMP done
found3:
INCQ DX
found2:
INCQ DX
found1:
INCQ DX
done:
MOVQ DX, ret+48(FP)
RET
not_found:
MOVQ CX, ret+48(FP)
RET
safe_load:
MOVQ DX, SI
ANDQ $0x00000fff, SI
CMPQ SI, $0x00000ff0
JBE load
MOVQ $0xfffffffffffffff0, SI
ADDQ BX, SI
VMOVUPS (DX)(SI*1), X0
LEAQ shuffle_masks<>+16(SB), DX
SUBQ BX, DX
VMOVUPS (DX), X1
VPSHUFB X1, X0, X0
JMP prepare
DATA blend_masks<>+0(SB)/8, $0xffffffffffffffff
DATA blend_masks<>+8(SB)/8, $0xffffffffffffffff
DATA blend_masks<>+16(SB)/8, $0x0000000000000000
DATA blend_masks<>+24(SB)/8, $0x0000000000000000
GLOBL blend_masks<>(SB), RODATA|NOPTR, $32
DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+8(SB)/8, $0x0f0e0d0c0b0a0908
DATA shuffle_masks<>+16(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+24(SB)/8, $0x0f0e0d0c0b0a0908
GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32


@ -0,0 +1,8 @@
//go:build !purego
// +build !purego
package keyset
// Lookup searches for a key in a set of keys, returning its index if
// found. If the key cannot be found, the number of keys is returned.
func Lookup(keyset []byte, key []byte) int

vendor/github.com/segmentio/asm/keyset/keyset_arm64.s generated vendored Normal file

@ -0,0 +1,143 @@
//go:build !purego
// +build !purego
#include "textflag.h"
// func Lookup(keyset []byte, key []byte) int
TEXT ·Lookup(SB), NOSPLIT, $0-56
MOVD keyset+0(FP), R0
MOVD keyset_len+8(FP), R1
MOVD key+24(FP), R2
MOVD key_len+32(FP), R3
MOVD key_cap+40(FP), R4
// None of the keys in the set are longer than 16 bytes, so if the input
// key is longer than that we can jump straight to not found.
CMP $16, R3
BHI notfound
// We'll be moving the keyset pointer (R0) forward as we compare keys, so
// make a copy of the starting point (R6). Also add the byte length (R1) to
// obtain a pointer to the end of the keyset (R5).
MOVD R0, R6
ADD R0, R1, R5
// Prepare a 64-bit mask of all ones.
MOVD $-1, R7
// Prepare a vector of all zeroes.
VMOV ZR, V1.B16
// Check that it's safe to load 16 bytes of input. If cap(input)<16, jump
// to a check that determines whether a tail load is necessary (to avoid a
// page fault).
CMP $16, R4
BLO safeload
load:
// Load the input key (V0) and pad with zero bytes (V1). To blend the two
// vectors, we load a mask for the particular key length and then use TBL
// to select bytes from either V0 or V1.
VLD1 (R2), [V0.B16]
MOVD $blend_masks<>(SB), R10
ADD R3<<4, R10, R10
VLD1 (R10), [V2.B16]
VTBL V2.B16, [V0.B16, V1.B16], V3.B16
loop:
// Loop through each 16 byte key in the keyset.
CMP R0, R5
BEQ notfound
// Load and compare the next key.
VLD1.P 16(R0), [V4.B16]
VCMEQ V3.B16, V4.B16, V5.B16
VMOV V5.D[0], R8
VMOV V5.D[1], R9
AND R8, R9, R9
// If the masks match, we found the key.
CMP R9, R7
BEQ found
JMP loop
found:
// If the key was found, take the position in the keyset and convert it
// to an index. The keyset pointer (R0) will be 1 key past the match, so
// subtract the starting pointer (R6), divide by 16 to convert from byte
// length to an index, and then subtract one.
SUB R6, R0, R0
ADD R0>>4, ZR, R0
SUB $1, R0, R0
MOVD R0, ret+48(FP)
RET
notfound:
// Return the number of keys in the keyset, which is the byte length (R1)
// divided by 16.
ADD R1>>4, ZR, R1
MOVD R1, ret+48(FP)
RET
safeload:
// Check if the input crosses a page boundary. If not, jump back.
AND $4095, R2, R12
CMP $4080, R12
BLS load
// If it does cross a page boundary, we must assume that loading 16 bytes
// will cause a fault. Instead, we load the 16 bytes up to and including the
// key and then shuffle the key forward in the register. We can shuffle and
// pad with zeroes at the same time to avoid having to also blend (as load
// does).
MOVD $16, R12
SUB R3, R12, R12
SUB R12, R2, R2
VLD1 (R2), [V0.B16]
MOVD $shuffle_masks<>(SB), R10
ADD R12, R10, R10
VLD1 (R10), [V2.B16]
VTBL V2.B16, [V0.B16, V1.B16], V3.B16
JMP loop
DATA blend_masks<>+0(SB)/8, $0x1010101010101010
DATA blend_masks<>+8(SB)/8, $0x1010101010101010
DATA blend_masks<>+16(SB)/8, $0x1010101010101000
DATA blend_masks<>+24(SB)/8, $0x1010101010101010
DATA blend_masks<>+32(SB)/8, $0x1010101010100100
DATA blend_masks<>+40(SB)/8, $0x1010101010101010
DATA blend_masks<>+48(SB)/8, $0x1010101010020100
DATA blend_masks<>+56(SB)/8, $0x1010101010101010
DATA blend_masks<>+64(SB)/8, $0x1010101003020100
DATA blend_masks<>+72(SB)/8, $0x1010101010101010
DATA blend_masks<>+80(SB)/8, $0x1010100403020100
DATA blend_masks<>+88(SB)/8, $0x1010101010101010
DATA blend_masks<>+96(SB)/8, $0x1010050403020100
DATA blend_masks<>+104(SB)/8, $0x1010101010101010
DATA blend_masks<>+112(SB)/8, $0x1006050403020100
DATA blend_masks<>+120(SB)/8, $0x1010101010101010
DATA blend_masks<>+128(SB)/8, $0x0706050403020100
DATA blend_masks<>+136(SB)/8, $0x1010101010101010
DATA blend_masks<>+144(SB)/8, $0x0706050403020100
DATA blend_masks<>+152(SB)/8, $0x1010101010101008
DATA blend_masks<>+160(SB)/8, $0x0706050403020100
DATA blend_masks<>+168(SB)/8, $0x1010101010100908
DATA blend_masks<>+176(SB)/8, $0x0706050403020100
DATA blend_masks<>+184(SB)/8, $0x10101010100A0908
DATA blend_masks<>+192(SB)/8, $0x0706050403020100
DATA blend_masks<>+200(SB)/8, $0x101010100B0A0908
DATA blend_masks<>+208(SB)/8, $0x0706050403020100
DATA blend_masks<>+216(SB)/8, $0x1010100C0B0A0908
DATA blend_masks<>+224(SB)/8, $0x0706050403020100
DATA blend_masks<>+232(SB)/8, $0x10100D0C0B0A0908
DATA blend_masks<>+240(SB)/8, $0x0706050403020100
DATA blend_masks<>+248(SB)/8, $0x100E0D0C0B0A0908
DATA blend_masks<>+256(SB)/8, $0x0706050403020100
DATA blend_masks<>+264(SB)/8, $0x0F0E0D0C0B0A0908
GLOBL blend_masks<>(SB), RODATA|NOPTR, $272
DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100
DATA shuffle_masks<>+8(SB)/8, $0x0F0E0D0C0B0A0908
DATA shuffle_masks<>+16(SB)/8, $0x1010101010101010
DATA shuffle_masks<>+24(SB)/8, $0x1010101010101010
GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32


@ -0,0 +1,19 @@
//go:build purego || !(amd64 || arm64)
// +build purego !amd64,!arm64
package keyset
func Lookup(keyset []byte, key []byte) int {
if len(key) > 16 {
return len(keyset) / 16
}
var padded [16]byte
copy(padded[:], key)
for i := 0; i < len(keyset); i += 16 {
if string(padded[:]) == string(keyset[i:i+16]) {
return i / 16
}
}
return len(keyset) / 16
}

vendor/github.com/segmentio/encoding/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Segment.io, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -0,0 +1,40 @@
//go:generate go run equal_fold_asm.go -out equal_fold_amd64.s -stubs equal_fold_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
// instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFold(a, b []byte) bool {
return ascii.EqualFold(a, b)
}
func HasPrefixFold(s, prefix []byte) bool {
return ascii.HasPrefixFold(s, prefix)
}
func HasSuffixFold(s, suffix []byte) bool {
return ascii.HasSuffixFold(s, suffix)
}
// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
// input instead of UTF-8.
//
// When the program has guarantees that the input is composed of ASCII
// characters only, it allows for greater optimizations.
func EqualFoldString(a, b string) bool {
return ascii.EqualFoldString(a, b)
}
func HasPrefixFoldString(s, prefix string) bool {
return ascii.HasPrefixFoldString(s, prefix)
}
func HasSuffixFoldString(s, suffix string) bool {
return ascii.HasSuffixFoldString(s, suffix)
}

vendor/github.com/segmentio/encoding/ascii/valid.go generated vendored Normal file

@ -0,0 +1,26 @@
//go:generate go run valid_asm.go -out valid_amd64.s -stubs valid_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// Valid returns true if b contains only ASCII characters.
func Valid(b []byte) bool {
return ascii.Valid(b)
}
// ValidByte returns true if b is an ASCII character.
func ValidByte(b byte) bool {
return ascii.ValidByte(b)
}
// ValidRune returns true if r is an ASCII character.
func ValidRune(r rune) bool {
return ascii.ValidRune(r)
}
// ValidString returns true if s contains only ASCII characters.
func ValidString(s string) bool {
return ascii.ValidString(s)
}


@ -0,0 +1,26 @@
//go:generate go run valid_print_asm.go -out valid_print_amd64.s -stubs valid_print_amd64.go
package ascii
import (
"github.com/segmentio/asm/ascii"
)
// ValidPrint returns true if b contains only printable ASCII characters.
func ValidPrint(b []byte) bool {
return ascii.ValidPrint(b)
}
// ValidPrintByte returns true if b is a printable ASCII character.
func ValidPrintByte(b byte) bool {
return ascii.ValidPrintByte(b)
}
// ValidPrintRune returns true if r is a printable ASCII character.
func ValidPrintRune(r rune) bool {
return ascii.ValidPrintRune(r)
}
// ValidString returns true if s contains only printable ASCII characters.
func ValidPrintString(s string) bool {
return ascii.ValidPrintString(s)
}

185
vendor/github.com/segmentio/encoding/iso8601/parse.go generated vendored Normal file
View File

@ -0,0 +1,185 @@
package iso8601
import (
"encoding/binary"
"errors"
"time"
"unsafe"
)
var (
errInvalidTimestamp = errors.New("invalid ISO8601 timestamp")
errMonthOutOfRange = errors.New("month out of range")
errDayOutOfRange = errors.New("day out of range")
errHourOutOfRange = errors.New("hour out of range")
errMinuteOutOfRange = errors.New("minute out of range")
errSecondOutOfRange = errors.New("second out of range")
)
// Parse parses an ISO8601 timestamp, e.g. "2021-03-25T21:36:12Z".
func Parse(input string) (time.Time, error) {
b := unsafeStringToBytes(input)
if len(b) >= 20 && len(b) <= 30 && b[len(b)-1] == 'Z' {
if len(b) == 21 || (len(b) > 21 && b[19] != '.') {
return time.Time{}, errInvalidTimestamp
}
t1 := binary.LittleEndian.Uint64(b)
t2 := binary.LittleEndian.Uint64(b[8:16])
t3 := uint64(b[16]) | uint64(b[17])<<8 | uint64(b[18])<<16 | uint64('Z')<<24
// Check for valid separators by masking input with " - - T : : Z".
// If separators are all valid, replace them with a '0' (0x30) byte and
// check all bytes are now numeric.
if !match(t1, mask1) || !match(t2, mask2) || !match(t3, mask3) {
return time.Time{}, errInvalidTimestamp
}
t1 ^= replace1
t2 ^= replace2
t3 ^= replace3
if (nonNumeric(t1) | nonNumeric(t2) | nonNumeric(t3)) != 0 {
return time.Time{}, errInvalidTimestamp
}
t1 -= zero
t2 -= zero
t3 -= zero
year := (t1&0xF)*1000 + (t1>>8&0xF)*100 + (t1>>16&0xF)*10 + (t1 >> 24 & 0xF)
month := (t1>>40&0xF)*10 + (t1 >> 48 & 0xF)
day := (t2&0xF)*10 + (t2 >> 8 & 0xF)
hour := (t2>>24&0xF)*10 + (t2 >> 32 & 0xF)
minute := (t2>>48&0xF)*10 + (t2 >> 56)
second := (t3>>8&0xF)*10 + (t3 >> 16)
nanos := int64(0)
if len(b) > 20 {
for _, c := range b[20 : len(b)-1] {
if c < '0' || c > '9' {
return time.Time{}, errInvalidTimestamp
}
nanos = (nanos * 10) + int64(c-'0')
}
nanos *= pow10[30-len(b)]
}
if err := validate(year, month, day, hour, minute, second); err != nil {
return time.Time{}, err
}
unixSeconds := int64(daysSinceEpoch(year, month, day))*86400 + int64(hour*3600+minute*60+second)
return time.Unix(unixSeconds, nanos).UTC(), nil
}
// Fallback to using time.Parse().
t, err := time.Parse(time.RFC3339Nano, input)
if err != nil {
// Override (and don't wrap) the error here. The error returned by
// time.Parse() is dynamic, and includes a reference to the input
// string. By overriding the error, we guarantee that the input string
// doesn't escape.
return time.Time{}, errInvalidTimestamp
}
return t, nil
}
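// A usage sketch, reusing the example timestamp from the doc comment above:
//
//	t, err := Parse("2021-03-25T21:36:12Z")
//	if err != nil {
//		// handle errInvalidTimestamp or one of the range errors
//	}
//	_ = t // 2021-03-25 21:36:12 +0000 UTC
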
var pow10 = []int64{1, 10, 100, 1000, 1e4, 1e5, 1e6, 1e7, 1e8}
const (
mask1 = 0x2d00002d00000000 // YYYY-MM-
mask2 = 0x00003a0000540000 // DDTHH:MM
mask3 = 0x000000005a00003a // :SSZ____
// Generate masks that replace the separators with a numeric byte.
// The input must have valid separators. XOR with the separator bytes
// to zero them out and then XOR with 0x30 to replace them with '0'.
replace1 = mask1 ^ 0x3000003000000000
replace2 = mask2 ^ 0x0000300000300000
replace3 = mask3 ^ 0x3030303030000030
lsb = ^uint64(0) / 255
msb = lsb * 0x80
zero = lsb * '0'
nine = lsb * '9'
)
func validate(year, month, day, hour, minute, second uint64) error {
if day == 0 || day > 31 {
return errDayOutOfRange
}
if month == 0 || month > 12 {
return errMonthOutOfRange
}
if hour >= 24 {
return errHourOutOfRange
}
if minute >= 60 {
return errMinuteOutOfRange
}
if second >= 60 {
return errSecondOutOfRange
}
if month == 2 && (day > 29 || (day == 29 && !isLeapYear(year))) {
return errDayOutOfRange
}
if day == 31 {
switch month {
case 4, 6, 9, 11:
return errDayOutOfRange
}
}
return nil
}
func match(u, mask uint64) bool {
return (u & mask) == mask
}
func nonNumeric(u uint64) uint64 {
// Derived from https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord.
// Subtract '0' (0x30) from each byte so that the MSB is set in each byte
// if there's a byte less than '0' (0x30). Add 0x46 (0x7F-'9') so that the
// MSB is set if there's a byte greater than '9' (0x39). To handle overflow
// when adding 0x46, include the MSB from the input bytes in the final mask.
// Remove all but the MSBs and then you're left with a mask where each
// non-numeric byte from the input has its MSB set in the output.
return ((u - zero) | (u + (^msb - nine)) | u) & msb
}
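// Worked example of the trick above, byte by byte (illustrative): for '/'
// (0x2F) the subtraction 0x2F-0x30 borrows and sets the MSB; for ':' (0x3A)
// the addition 0x3A+0x46 = 0x80 sets the MSB; for the digit '5' (0x35) neither
// 0x05 nor 0x7B has the MSB set, so digit bytes contribute nothing to the mask.
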
func daysSinceEpoch(year, month, day uint64) uint64 {
// Derived from https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html.
monthAdjusted := month - 3
var carry uint64
if monthAdjusted > month {
carry = 1
}
var adjust uint64
if carry == 1 {
adjust = 12
}
yearAdjusted := year + 4800 - carry
monthDays := ((monthAdjusted+adjust)*62719 + 769) / 2048
leapDays := yearAdjusted/4 - yearAdjusted/100 + yearAdjusted/400
return yearAdjusted*365 + leapDays + monthDays + (day - 1) - 2472632
}
func isLeapYear(y uint64) bool {
return (y%4) == 0 && ((y%100) != 0 || (y%400) == 0)
}
func unsafeStringToBytes(s string) []byte {
return *(*[]byte)(unsafe.Pointer(&sliceHeader{
Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
Len: len(s),
Cap: len(s),
}))
}
// sliceHeader is like reflect.SliceHeader but the Data field is a
// unsafe.Pointer instead of being a uintptr to avoid invalid
// conversions from uintptr to unsafe.Pointer.
type sliceHeader struct {
Data unsafe.Pointer
Len int
Cap int
}

179
vendor/github.com/segmentio/encoding/iso8601/valid.go generated vendored Normal file
View File

@ -0,0 +1,179 @@
package iso8601
// ValidFlags is a bitset type used to configure the behavior of the Valid
// function.
type ValidFlags int
const (
// Strict is a validation flag used to represent a strict iso8601 validation
// (this is the default).
Strict ValidFlags = 0
// AllowSpaceSeparator allows the presence of a space instead of a 'T' as
// separator between the date and time.
AllowSpaceSeparator ValidFlags = 1 << iota
// AllowMissingTime allows the value to contain only a date.
AllowMissingTime
// AllowMissingSubsecond allows the value to contain only a date and time.
AllowMissingSubsecond
// AllowMissingTimezone allows the value to be missing the timezone
// information.
AllowMissingTimezone
// AllowNumericTimezone allows the value to represent timezones in their
// numeric form.
AllowNumericTimezone
// Flexible is a combination of all validation flags that allow for
// non-strict checking of the input value.
Flexible = AllowSpaceSeparator | AllowMissingTime | AllowMissingSubsecond | AllowMissingTimezone | AllowNumericTimezone
)
// Valid checks value to verify whether or not it is a valid iso8601 time
// representation.
func Valid(value string, flags ValidFlags) bool {
var ok bool
// year
if value, ok = readDigits(value, 4, 4); !ok {
return false
}
if value, ok = readByte(value, '-'); !ok {
return false
}
// month
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, '-'); !ok {
return false
}
// day
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if len(value) == 0 && (flags&AllowMissingTime) != 0 {
return true // date only
}
// separator
if value, ok = readByte(value, 'T'); !ok {
if (flags & AllowSpaceSeparator) == 0 {
return false
}
if value, ok = readByte(value, ' '); !ok {
return false
}
}
// hour
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
return false
}
// minute
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
return false
}
// second
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
// microsecond
if value, ok = readByte(value, '.'); !ok {
if (flags & AllowMissingSubsecond) == 0 {
return false
}
} else {
if value, ok = readDigits(value, 1, 9); !ok {
return false
}
}
if len(value) == 0 && (flags&AllowMissingTimezone) != 0 {
return true // date and time
}
// timezone
if value, ok = readByte(value, 'Z'); ok {
return len(value) == 0
}
if (flags & AllowSpaceSeparator) != 0 {
value, _ = readByte(value, ' ')
}
if value, ok = readByte(value, '+'); !ok {
if value, ok = readByte(value, '-'); !ok {
return false
}
}
// timezone hour
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
if value, ok = readByte(value, ':'); !ok {
if (flags & AllowNumericTimezone) == 0 {
return false
}
}
// timezone minute
if value, ok = readDigits(value, 2, 2); !ok {
return false
}
return len(value) == 0
}
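// A usage sketch (illustrative values):
//
//	Valid("2021-03-25T21:36:12.000Z", Strict)        // true
//	Valid("2021-03-25", AllowMissingTime)            // true: date only
//	Valid("2021-03-25 21:36:12.000+00:00", Flexible) // true: space separator and numeric timezone
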
func readDigits(value string, min, max int) (string, bool) {
if len(value) < min {
return value, false
}
i := 0
for i < max && i < len(value) && isDigit(value[i]) {
i++
}
if i < max && i < min {
return value, false
}
return value[i:], true
}
func readByte(value string, c byte) (string, bool) {
if len(value) == 0 {
return value, false
}
if value[0] != c {
return value, false
}
return value[1:], true
}
func isDigit(c byte) bool {
return '0' <= c && c <= '9'
}

76
vendor/github.com/segmentio/encoding/json/README.md generated vendored Normal file
View File

@ -0,0 +1,76 @@
# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json)
Go package offering a replacement implementation of the standard library's
[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much
better performance.
## Usage
The exported API of this package mirrors the standard library's
[`encoding/json`](https://golang.org/pkg/encoding/json/) package, the only
change needed to take advantage of the performance improvements is the import
path of the `json` package, from:
```go
import (
"encoding/json"
)
```
to
```go
import (
"github.com/segmentio/encoding/json"
)
```
One way to gain higher encoding throughput is to disable HTML escaping.
It allows the string encoding to use a much more efficient code path which
does not require parsing UTF-8 runes most of the time.
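For example, a minimal sketch of an encoder with HTML escaping turned off via
the `SetEscapeHTML` method (which mirrors the standard library's API):
```go
package main

import (
	"log"
	"os"

	"github.com/segmentio/encoding/json"
)

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetEscapeHTML(false) // use the faster, non-escaping string encoding path
	if err := enc.Encode(map[string]string{"html": `<a href="/">&amp;</a>`}); err != nil {
		log.Fatal(err)
	}
}
```
The `EscapeHTML` append flag gives the same control when using the lower-level
`Append` function directly.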
## Performance Improvements
The internal implementation uses a fair amount of unsafe operations (untyped
code, pointer arithmetic, etc...) to avoid using reflection as much as possible,
which is often the reason why serialization code has a large CPU and memory
footprint.
The package aims for zero unnecessary dynamic memory allocations and hot code
paths that are mostly free from calls into the reflect package.
## Compatibility with encoding/json
This package aims to be a drop-in replacement, therefore it is tested to behave
exactly like the standard library's package. However, there are still a few
missing features that have not been ported yet:
- Streaming decoder: currently the `Decoder` implementation offered by the
package does not support progressively reading values from a JSON array (unlike
the standard library). In our experience this is a very rare use case; if you
need it you're better off sticking to the standard library, or spending a bit
of time implementing it here ;)
Note that none of those features would result in performance degradations if
they were implemented in the package, and we welcome contributions!
## Trade-offs
As one would expect, we had to make a couple of trade-offs to achieve greater
performance than the standard library, but there were also features that we
did not want to give away.
Other open-source packages offering a reduced CPU and memory footprint usually
do so by designing a different API, or by requiring code generation (therefore
adding complexity to the build process). These were not acceptable conditions for us,
as we were not willing to trade off developer productivity for better runtime
performance. To achieve this, we chose to exactly replicate the standard
library interfaces and behavior, which meant the package implementation was the
only area that we were able to work with. The internals of this package make
heavy use of unsafe pointer arithmetic and other performance optimizations,
and therefore are not as approachable as typical Go programs. Basically, we put
a bigger burden on maintainers to achieve better runtime cost without
sacrificing developer productivity.
For these reasons, we also don't believe that this code should be ported upstream
to the standard `encoding/json` package. The standard library has to remain
readable and approachable to maximize stability and maintainability, and make
projects like this one possible because a high quality reference implementation
already exists.

1232
vendor/github.com/segmentio/encoding/json/codec.go generated vendored Normal file

File diff suppressed because it is too large

1462
vendor/github.com/segmentio/encoding/json/decode.go generated vendored Normal file

File diff suppressed because it is too large

990
vendor/github.com/segmentio/encoding/json/encode.go generated vendored Normal file
View File

@ -0,0 +1,990 @@
package json
import (
"encoding"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"sync"
"time"
"unicode/utf8"
"unsafe"
"github.com/segmentio/asm/base64"
)
const hex = "0123456789abcdef"
func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) {
return append(b, "null"...), nil
}
func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) {
if *(*bool)(p) {
return append(b, "true"...), nil
}
return append(b, "false"...), nil
}
func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int)(p))), nil
}
func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int8)(p))), nil
}
func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int16)(p))), nil
}
func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, int64(*(*int32)(p))), nil
}
func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendInt(b, *(*int64)(p)), nil
}
func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint)(p))), nil
}
func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uintptr)(p))), nil
}
func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint8)(p))), nil
}
func (e encoder) encodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint16)(p))), nil
}
func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, uint64(*(*uint32)(p))), nil
}
func (e encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) {
return appendUint(b, *(*uint64)(p)), nil
}
func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) {
return e.encodeFloat(b, float64(*(*float32)(p)), 32)
}
func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) {
return e.encodeFloat(b, *(*float64)(p), 64)
}
func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) {
switch {
case math.IsNaN(f):
return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"}
case math.IsInf(f, 0):
return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"}
}
// Convert as if by ES6 number to string conversion.
// This matches most other JSON generators.
// See golang.org/issue/6384 and golang.org/issue/14135.
// Like fmt %g, but the exponent cutoffs are different
// and exponents themselves are not padded to two digits.
abs := math.Abs(f)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs != 0 {
if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
fmt = 'e'
}
}
b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
if fmt == 'e' {
// clean up e-09 to e-9
n := len(b)
if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
b[n-2] = b[n-1]
b = b[:n-1]
}
}
return b, nil
}
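// Illustrative outputs of the rules above: 100000 encodes as "100000",
// 1e21 as "1e+21", and 1e-7 as "1e-7" (the trailing cleanup rewrites
// strconv's "1e-07" form).
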
func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) {
n := *(*Number)(p)
if n == "" {
n = "0"
}
d := decoder{}
_, _, _, err := d.parseNumber(stringToBytes(string(n)))
if err != nil {
return b, err
}
return append(b, n...), nil
}
func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) {
s := *(*string)(p)
if len(s) == 0 {
return append(b, `""`...), nil
}
i := 0
j := 0
escapeHTML := (e.flags & EscapeHTML) != 0
b = append(b, '"')
if len(s) >= 8 {
if j = escapeIndex(s, escapeHTML); j < 0 {
return append(append(b, s...), '"'), nil
}
}
for j < len(s) {
c := s[j]
if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) {
// fast path: most of the time, printable ascii characters are used
j++
continue
}
switch c {
case '\\', '"':
b = append(b, s[i:j]...)
b = append(b, '\\', c)
i = j + 1
j = j + 1
continue
case '\n':
b = append(b, s[i:j]...)
b = append(b, '\\', 'n')
i = j + 1
j = j + 1
continue
case '\r':
b = append(b, s[i:j]...)
b = append(b, '\\', 'r')
i = j + 1
j = j + 1
continue
case '\t':
b = append(b, s[i:j]...)
b = append(b, '\\', 't')
i = j + 1
j = j + 1
continue
case '<', '>', '&':
b = append(b, s[i:j]...)
b = append(b, `\u00`...)
b = append(b, hex[c>>4], hex[c&0xF])
i = j + 1
j = j + 1
continue
}
// This encodes bytes < 0x20 except for \t, \n and \r.
if c < 0x20 {
b = append(b, s[i:j]...)
b = append(b, `\u00`...)
b = append(b, hex[c>>4], hex[c&0xF])
i = j + 1
j = j + 1
continue
}
r, size := utf8.DecodeRuneInString(s[j:])
if r == utf8.RuneError && size == 1 {
b = append(b, s[i:j]...)
b = append(b, `\ufffd`...)
i = j + size
j = j + size
continue
}
switch r {
case '\u2028', '\u2029':
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
b = append(b, s[i:j]...)
b = append(b, `\u202`...)
b = append(b, hex[r&0xF])
i = j + size
j = j + size
continue
}
j += size
}
b = append(b, s[i:]...)
b = append(b, '"')
return b, nil
}
func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) {
i := len(b)
b, err := encode(e, b, p)
if err != nil {
return b, err
}
j := len(b)
s := b[i:]
if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil {
return b, err
}
n := copy(b[i:], b[j:])
return b[:i+n], nil
}
func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) {
v := *(*[]byte)(p)
if v == nil {
return append(b, "null"...), nil
}
n := base64.StdEncoding.EncodedLen(len(v)) + 2
if avail := cap(b) - len(b); avail < n {
newB := make([]byte, cap(b)+(n-avail))
copy(newB, b)
b = newB[:len(b)]
}
i := len(b)
j := len(b) + n
b = b[:j]
b[i] = '"'
base64.StdEncoding.Encode(b[i+1:j-1], v)
b[j-1] = '"'
return b, nil
}
func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) {
b = append(b, '"')
b = appendDuration(b, *(*time.Duration)(p))
b = append(b, '"')
return b, nil
}
func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) {
t := *(*time.Time)(p)
b = append(b, '"')
b = t.AppendFormat(b, time.RFC3339Nano)
b = append(b, '"')
return b, nil
}
func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) {
var start = len(b)
var err error
b = append(b, '[')
for i := 0; i < n; i++ {
if i != 0 {
b = append(b, ',')
}
if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil {
return b[:start], err
}
}
b = append(b, ']')
return b, nil
}
func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) {
s := (*slice)(p)
if s.data == nil && s.len == 0 && s.cap == 0 {
return append(b, "null"...), nil
}
return e.encodeArray(b, s.data, s.len, size, t, encode)
}
func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) {
m := reflect.NewAt(t, p).Elem()
if m.IsNil() {
return append(b, "null"...), nil
}
keys := m.MapKeys()
if sortKeys != nil && (e.flags&SortMapKeys) != 0 {
sortKeys(keys)
}
var start = len(b)
var err error
b = append(b, '{')
for i, k := range keys {
v := m.MapIndex(k)
if i != 0 {
b = append(b, ',')
}
if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil {
return b[:start], err
}
b = append(b, ':')
if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil {
return b[:start], err
}
}
b = append(b, '}')
return b, nil
}
type element struct {
key string
val interface{}
raw RawMessage
}
type mapslice struct {
elements []element
}
func (m *mapslice) Len() int { return len(m.elements) }
func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key }
func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] }
var mapslicePool = sync.Pool{
New: func() interface{} { return new(mapslice) },
}
func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]interface{})(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = Append(b, v, e.flags)
if err != nil {
return b, err
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
s.elements = append(s.elements, element{key: key, val: val})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = Append(b, elem.val, e.flags)
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]RawMessage)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString doesn't return errors so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = e.encodeRawMessage(b, unsafe.Pointer(&v))
if err != nil {
break
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, raw := range m {
s.elements = append(s.elements, element{key: key, raw: raw})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw))
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]string)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, _ = e.encodeString(b, unsafe.Pointer(&v))
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
v := val
s.elements = append(s.elements, element{key: key, val: &v})
}
sort.Sort(s)
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, _ = e.encodeString(b, unsafe.Pointer(elem.val.(*string)))
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string][]string)(p)
if m == nil {
return append(b, "null"...), nil
}
var stringSize = unsafe.Sizeof("")
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var err error
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&k))
b = append(b, ':')
b, err = e.encodeSlice(b, unsafe.Pointer(&v), stringSize, sliceStringType, encoder.encodeString)
if err != nil {
return b, err
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
v := val
s.elements = append(s.elements, element{key: key, val: &v})
}
sort.Sort(s)
var start = len(b)
var err error
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
b = append(b, ':')
b, err = e.encodeSlice(b, unsafe.Pointer(elem.val.(*[]string)), stringSize, sliceStringType, encoder.encodeString)
if err != nil {
break
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
if err != nil {
return b[:start], err
}
b = append(b, '}')
return b, nil
}
func (e encoder) encodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) {
m := *(*map[string]bool)(p)
if m == nil {
return append(b, "null"...), nil
}
if (e.flags & SortMapKeys) == 0 {
// Optimized code path when the program does not need the map keys to be
// sorted.
b = append(b, '{')
if len(m) != 0 {
var i = 0
for k, v := range m {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&k))
if v {
b = append(b, ":true"...)
} else {
b = append(b, ":false"...)
}
i++
}
}
b = append(b, '}')
return b, nil
}
s := mapslicePool.Get().(*mapslice)
if cap(s.elements) < len(m) {
s.elements = make([]element, 0, align(10, uintptr(len(m))))
}
for key, val := range m {
s.elements = append(s.elements, element{key: key, val: val})
}
sort.Sort(s)
b = append(b, '{')
for i, elem := range s.elements {
if i != 0 {
b = append(b, ',')
}
// encodeString never returns an error so we ignore it here
b, _ = e.encodeString(b, unsafe.Pointer(&elem.key))
if elem.val.(bool) {
b = append(b, ":true"...)
} else {
b = append(b, ":false"...)
}
}
for i := range s.elements {
s.elements[i] = element{}
}
s.elements = s.elements[:0]
mapslicePool.Put(s)
b = append(b, '}')
return b, nil
}
func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) {
var start = len(b)
var err error
var k string
var n int
b = append(b, '{')
escapeHTML := (e.flags & EscapeHTML) != 0
for i := range st.fields {
f := &st.fields[i]
v := unsafe.Pointer(uintptr(p) + f.offset)
if f.omitempty && f.empty(v) {
continue
}
if escapeHTML {
k = f.html
} else {
k = f.json
}
lengthBeforeKey := len(b)
if n != 0 {
b = append(b, k...)
} else {
b = append(b, k[1:]...)
}
if b, err = f.codec.encode(e, b, v); err != nil {
if err == (rollback{}) {
b = b[:lengthBeforeKey]
continue
}
return b[:start], err
}
n++
}
b = append(b, '}')
return b, nil
}
type rollback struct{}
func (rollback) Error() string { return "rollback" }
func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) {
p = *(*unsafe.Pointer)(p)
if p == nil {
return b, rollback{}
}
return encode(e, b, unsafe.Pointer(uintptr(p)+offset))
}
func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) {
if p = *(*unsafe.Pointer)(p); p != nil {
if e.ptrDepth++; e.ptrDepth >= startDetectingCyclesAfter {
if _, seen := e.ptrSeen[p]; seen {
// TODO: reconstruct the reflect.Value from p + t so we can set
// the error's Value field?
return b, &UnsupportedValueError{Str: fmt.Sprintf("encountered a cycle via %s", t)}
}
if e.ptrSeen == nil {
e.ptrSeen = make(map[unsafe.Pointer]struct{})
}
e.ptrSeen[p] = struct{}{}
defer delete(e.ptrSeen, p)
}
return encode(e, b, p)
}
return e.encodeNull(b, nil)
}
func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
return Append(b, *(*interface{})(p), e.flags)
}
func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags)
}
func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
return b, &UnsupportedTypeError{Type: t}
}
func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) {
v := *(*RawMessage)(p)
if v == nil {
return append(b, "null"...), nil
}
var s []byte
if (e.flags & TrustRawMessage) != 0 {
s = v
} else {
var err error
d := decoder{}
s, _, _, err = d.parseValue(v)
if err != nil {
return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()}
}
}
if (e.flags & EscapeHTML) != 0 {
return appendCompactEscapeHTML(b, s), nil
}
return append(b, s...), nil
}
func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) {
v := reflect.NewAt(t, p)
if !pointer {
v = v.Elem()
}
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return append(b, "null"...), nil
}
}
j, err := v.Interface().(Marshaler).MarshalJSON()
if err != nil {
return b, err
}
d := decoder{}
s, _, _, err := d.parseValue(j)
if err != nil {
return b, &MarshalerError{Type: t, Err: err}
}
if (e.flags & EscapeHTML) != 0 {
return appendCompactEscapeHTML(b, s), nil
}
return append(b, s...), nil
}
func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) {
v := reflect.NewAt(t, p)
if !pointer {
v = v.Elem()
}
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return append(b, `null`...), nil
}
}
s, err := v.Interface().(encoding.TextMarshaler).MarshalText()
if err != nil {
return b, err
}
return e.encodeString(b, unsafe.Pointer(&s))
}
func appendCompactEscapeHTML(dst []byte, src []byte) []byte {
start := 0
escape := false
inString := false
for i, c := range src {
if !inString {
switch c {
case '"': // enter string
inString = true
case ' ', '\n', '\r', '\t': // skip space
if start < i {
dst = append(dst, src[start:i]...)
}
start = i + 1
}
continue
}
if escape {
escape = false
continue
}
if c == '\\' {
escape = true
continue
}
if c == '"' {
inString = false
continue
}
if c == '<' || c == '>' || c == '&' {
if start < i {
dst = append(dst, src[start:i]...)
}
dst = append(dst, `\u00`...)
dst = append(dst, hex[c>>4], hex[c&0xF])
start = i + 1
continue
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst = append(dst, src[start:i]...)
}
dst = append(dst, `\u202`...)
dst = append(dst, hex[src[i+2]&0xF])
start = i + 3
continue
}
}
if start < len(src) {
dst = append(dst, src[start:]...)
}
return dst
}

98
vendor/github.com/segmentio/encoding/json/int.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package json
import (
"unsafe"
)
var endianness int
func init() {
var b [2]byte
*(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD)
switch b[0] {
case 0xCD:
endianness = 0 // LE
case 0xAB:
endianness = 1 // BE
default:
panic("could not determine endianness")
}
}
// "00010203...96979899" cast to []uint16
var intLELookup = [100]uint16{
0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930,
0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931,
0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932,
0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933,
0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934,
0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935,
0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936,
0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937,
0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938,
0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939,
}
var intBELookup = [100]uint16{
0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039,
0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139,
0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239,
0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339,
0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439,
0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539,
0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639,
0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739,
0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839,
0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939,
}
var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup}
func appendInt(b []byte, n int64) []byte {
return formatInteger(b, uint64(n), n < 0)
}
func appendUint(b []byte, n uint64) []byte {
return formatInteger(b, n, false)
}
func formatInteger(out []byte, n uint64, negative bool) []byte {
if !negative {
if n < 10 {
return append(out, byte(n+'0'))
} else if n < 100 {
u := intLELookup[n]
return append(out, byte(u), byte(u>>8))
}
} else {
n = -n
}
lookup := intLookup[endianness]
var b [22]byte
u := (*[11]uint16)(unsafe.Pointer(&b))
i := 11
for n >= 100 {
j := n % 100
n /= 100
i--
u[i] = lookup[j]
}
i--
u[i] = lookup[n]
i *= 2 // convert to byte index
if n < 10 {
i++ // remove leading zero
}
if negative {
i--
b[i] = '-'
}
return append(out, b[i:]...)
}
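// Worked example (illustrative): for n=1234 the loop stores the digit pairs
// "34" then "12" into the scratch buffer and returns "1234"; for n=234 the
// final pair is "02" and the i++ adjustment skips its leading zero; for
// negative input a '-' is written immediately before the first digit.
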

582
vendor/github.com/segmentio/encoding/json/json.go generated vendored Normal file
View File

@ -0,0 +1,582 @@
package json
import (
"bytes"
"encoding/json"
"io"
"math/bits"
"reflect"
"runtime"
"sync"
"unsafe"
)
// Delim is documented at https://golang.org/pkg/encoding/json/#Delim
type Delim = json.Delim
// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error
type InvalidUTF8Error = json.InvalidUTF8Error
// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError
type InvalidUnmarshalError = json.InvalidUnmarshalError
// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler
type Marshaler = json.Marshaler
// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError
type MarshalerError = json.MarshalerError
// Number is documented at https://golang.org/pkg/encoding/json/#Number
type Number = json.Number
// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage
type RawMessage = json.RawMessage
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError = json.SyntaxError
// Token is documented at https://golang.org/pkg/encoding/json/#Token
type Token = json.Token
// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError
type UnmarshalFieldError = json.UnmarshalFieldError
// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError
type UnmarshalTypeError = json.UnmarshalTypeError
// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler
type Unmarshaler = json.Unmarshaler
// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError
type UnsupportedTypeError = json.UnsupportedTypeError
// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError
type UnsupportedValueError = json.UnsupportedValueError
// AppendFlags is a type used to represent configuration options that can be
// applied when formatting json output.
type AppendFlags uint32
const (
// EscapeHTML is a formatting flag used to escape HTML in json strings.
EscapeHTML AppendFlags = 1 << iota
// SortMapKeys is a formatting flag used to enable sorting of map keys when
// encoding JSON (this matches the behavior of the standard encoding/json
// package).
SortMapKeys
// TrustRawMessage is a performance optimization flag to skip value
// checking of raw messages. It should only be used if the values are
// known to be valid json (e.g., they were created by json.Unmarshal).
TrustRawMessage
// appendNewline is a formatting flag to enable the addition of a newline
// in Encode (this matches the behavior of the standard encoding/json
// package).
appendNewline
)
// ParseFlags is a type used to represent configuration options that can be
// applied when parsing json input.
type ParseFlags uint32
func (flags ParseFlags) has(f ParseFlags) bool {
return (flags & f) != 0
}
func (f ParseFlags) kind() Kind {
return Kind((f >> kindOffset) & 0xFF)
}
func (f ParseFlags) withKind(kind Kind) ParseFlags {
return (f & ^(ParseFlags(0xFF) << kindOffset)) | (ParseFlags(kind) << kindOffset)
}
const (
// DisallowUnknownFields is a parsing flag used to prevent decoding of
// objects to Go struct values when a field of the input does not match
// with any of the struct fields.
DisallowUnknownFields ParseFlags = 1 << iota
// UseNumber is a parsing flag used to load numeric values as Number
// instead of float64.
UseNumber
// DontCopyString is a parsing flag used to provide zero-copy support when
// loading string values from a json payload. It is not always possible to
// avoid dynamic memory allocations, for example when a string is escaped in
// the json data a new buffer has to be allocated, but when the `wire` value
// can be used as content of a Go value the decoder will simply point into
// the input buffer.
DontCopyString
// DontCopyNumber is a parsing flag used to provide zero-copy support when
// loading Number values (see DontCopyString and DontCopyRawMessage).
DontCopyNumber
// DontCopyRawMessage is a parsing flag used to provide zero-copy support
// when loading RawMessage values from a json payload. When used, the
// RawMessage values will not be allocated into new memory buffers and
// will instead point directly to the area of the input buffer where the
// value was found.
DontCopyRawMessage
// DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent
// matching fields in a case-insensitive way. This can prevent degrading
// performance on case conversions, and can also act as a stricter decoding
// mode.
DontMatchCaseInsensitiveStructFields
// ZeroCopy is a parsing flag that combines all the copy optimizations
// available in the package.
//
// The zero-copy optimizations are better used in request-handler style
// code where none of the values are retained after the handler returns.
ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage
// validAsciiPrint is an internal flag indicating that the input contains
// only valid ASCII print chars (0x20 <= c <= 0x7E). If the flag is unset,
// it's unknown whether the input is valid ASCII print.
validAsciiPrint ParseFlags = 1 << 28
// noBackslash is an internal flag indicating that the input does not
// contain a backslash. If the flag is unset, it's unknown whether the
// input contains a backslash.
noBackslash ParseFlags = 1 << 29
// Bit offset where the kind of the json value is stored.
//
// See Kind in token.go for the enum.
kindOffset ParseFlags = 16
)
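// A usage sketch for the parse flags above (illustrative; payload is any
// caller-owned JSON buffer):
//
//	var v map[string]interface{}
//	remaining, err := Parse(payload, &v, ZeroCopy|DisallowUnknownFields)
//	// remaining holds unparsed trailing bytes; with ZeroCopy, strings in v
//	// may point into payload and must not outlive it.
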
// Kind represents the different kinds of value that exist in JSON.
type Kind uint
const (
Undefined Kind = 0
Null Kind = 1 // Null is not zero, so we keep zero for "undefined".
Bool Kind = 2 // Bit two is set to 1, means it's a boolean.
False Kind = 2 // Bool + 0
True Kind = 3 // Bool + 1
Num Kind = 4 // Bit three is set to 1, means it's a number.
Uint Kind = 5 // Num + 1
Int Kind = 6 // Num + 2
Float Kind = 7 // Num + 3
String Kind = 8 // Bit four is set to 1, means it's a string.
Unescaped Kind = 9 // String + 1
Array Kind = 16 // Equivalent to Delim == '['
Object Kind = 32 // Equivalent to Delim == '{'
)
// Class returns the class of k.
func (k Kind) Class() Kind { return Kind(1 << uint(bits.Len(uint(k))-1)) }
// Append acts like Marshal but appends the json representation to b instead of
// always reallocating a new slice.
func Append(b []byte, x interface{}, flags AppendFlags) ([]byte, error) {
if x == nil {
// Special case for nil values because it makes the rest of the code
// simpler to assume that it won't be seeing nil pointers.
return append(b, "null"...), nil
}
t := reflect.TypeOf(x)
p := (*iface)(unsafe.Pointer(&x)).ptr
cache := cacheLoad()
c, found := cache[typeid(t)]
if !found {
c = constructCachedCodec(t, cache)
}
b, err := c.encode(encoder{flags: flags}, b, p)
runtime.KeepAlive(x)
return b, err
}
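// A usage sketch (illustrative): appending into a caller-owned buffer avoids
// the copy that Marshal makes of its internal buffer.
//
//	buf := make([]byte, 0, 512)
//	buf, err := Append(buf, map[string]int{"answer": 42}, EscapeHTML|SortMapKeys)
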
// Escape is a convenience helper to construct an escaped JSON string from s.
// The function escapes HTML characters; for more control over the escape
// behavior and to write to a pre-allocated buffer, use AppendEscape.
func Escape(s string) []byte {
// +10 for extra escape characters, maybe not enough and the buffer will
// be reallocated.
b := make([]byte, 0, len(s)+10)
return AppendEscape(b, s, EscapeHTML)
}
// AppendEscape appends s to b with the string escaped as a JSON value.
// This will include the starting and ending quote characters, and the
// appropriate characters will be escaped correctly for JSON encoding.
func AppendEscape(b []byte, s string, flags AppendFlags) []byte {
e := encoder{flags: flags}
b, _ = e.encodeString(b, unsafe.Pointer(&s))
return b
}
// Unescape is a convenience helper to unescape a JSON value.
// For more control over the unescape behavior and
// to write to a pre-allocated buffer, use AppendUnescape.
func Unescape(s []byte) []byte {
b := make([]byte, 0, len(s))
return AppendUnescape(b, s, ParseFlags(0))
}
// AppendUnescape appends s to b with the string unescaped as a JSON value.
// This will remove starting and ending quote characters, and the
// appropriate characters will be unescaped correctly as if JSON decoded.
// New space will be reallocated if more space is needed.
func AppendUnescape(b []byte, s []byte, flags ParseFlags) []byte {
d := decoder{flags: flags}
buf := new(string)
d.decodeString(s, unsafe.Pointer(buf))
return append(b, *buf...)
}
// Compact is documented at https://golang.org/pkg/encoding/json/#Compact
func Compact(dst *bytes.Buffer, src []byte) error {
return json.Compact(dst, src)
}
// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape
func HTMLEscape(dst *bytes.Buffer, src []byte) {
json.HTMLEscape(dst, src)
}
// Indent is documented at https://golang.org/pkg/encoding/json/#Indent
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
return json.Indent(dst, src, prefix, indent)
}
// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal
func Marshal(x interface{}) ([]byte, error) {
var err error
var buf = encoderBufferPool.Get().(*encoderBuffer)
if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil {
return nil, err
}
b := make([]byte, len(buf.data))
copy(b, buf.data)
encoderBufferPool.Put(buf)
return b, nil
}
// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent
func MarshalIndent(x interface{}, prefix, indent string) ([]byte, error) {
b, err := Marshal(x)
if err == nil {
tmp := &bytes.Buffer{}
tmp.Grow(2 * len(b))
Indent(tmp, b, prefix, indent)
b = tmp.Bytes()
}
return b, err
}
// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal
func Unmarshal(b []byte, x interface{}) error {
r, err := Parse(b, x, 0)
if len(r) != 0 {
if _, ok := err.(*SyntaxError); !ok {
// The encoding/json package prioritizes reporting errors caused by
// unexpected trailing bytes over other issues; here we emulate this
// behavior by overriding the error.
err = syntaxError(r, "invalid character '%c' after top-level value", r[0])
}
}
return err
}
// Parse behaves like Unmarshal but the caller can pass a set of flags to
// configure the parsing behavior.
func Parse(b []byte, x interface{}, flags ParseFlags) ([]byte, error) {
t := reflect.TypeOf(x)
p := (*iface)(unsafe.Pointer(&x)).ptr
d := decoder{flags: flags | internalParseFlags(b)}
b = skipSpaces(b)
if t == nil || p == nil || t.Kind() != reflect.Ptr {
_, r, _, err := d.parseValue(b)
r = skipSpaces(r)
if err != nil {
return r, err
}
return r, &InvalidUnmarshalError{Type: t}
}
t = t.Elem()
cache := cacheLoad()
c, found := cache[typeid(t)]
if !found {
c = constructCachedCodec(t, cache)
}
r, err := c.decode(d, b, p)
return skipSpaces(r), err
}
// Valid is documented at https://golang.org/pkg/encoding/json/#Valid
func Valid(data []byte) bool {
data = skipSpaces(data)
d := decoder{flags: internalParseFlags(data)}
_, data, _, err := d.parseValue(data)
if err != nil {
return false
}
return len(skipSpaces(data)) == 0
}
// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder
type Decoder struct {
reader io.Reader
buffer []byte
remain []byte
inputOffset int64
err error
flags ParseFlags
}
// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder
func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} }
// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.remain)
}
// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode
func (dec *Decoder) Decode(v interface{}) error {
raw, err := dec.readValue()
if err != nil {
return err
}
_, err = Parse(raw, v, dec.flags)
return err
}
const (
minBufferSize = 32768
minReadSize = 4096
)
// readValue reads one JSON value from the buffer and returns its raw bytes. It
// is optimized for the "one JSON value per line" case.
func (dec *Decoder) readValue() (v []byte, err error) {
var n int
var r []byte
d := decoder{flags: dec.flags}
for {
if len(dec.remain) != 0 {
v, r, _, err = d.parseValue(dec.remain)
if err == nil {
dec.remain, n = skipSpacesN(r)
dec.inputOffset += int64(len(v) + n)
return
}
if len(r) != 0 {
// Parsing of the next JSON value stopped at a position other
// than the end of the input buffer, which indicates that a
// syntax error was encountered.
return
}
}
if err = dec.err; err != nil {
if len(dec.remain) != 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return
}
if dec.buffer == nil {
dec.buffer = make([]byte, 0, minBufferSize)
} else {
dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)]
dec.remain = nil
}
if (cap(dec.buffer) - len(dec.buffer)) < minReadSize {
buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer))
copy(buf, dec.buffer)
dec.buffer = buf
}
n, err = io.ReadFull(dec.reader, dec.buffer[len(dec.buffer):cap(dec.buffer)])
if n > 0 {
dec.buffer = dec.buffer[:len(dec.buffer)+n]
if err != nil {
err = nil
}
} else if err == io.ErrUnexpectedEOF {
err = io.EOF
}
dec.remain, n = skipSpacesN(dec.buffer)
d.flags = dec.flags | internalParseFlags(dec.remain)
dec.inputOffset += int64(n)
dec.err = err
}
}
// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields
func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields }
// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber
func (dec *Decoder) UseNumber() { dec.flags |= UseNumber }
// DontCopyString is an extension to the standard encoding/json package
// which instructs the decoder to not copy strings loaded from the json
// payloads when possible.
func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString }
// DontCopyNumber is an extension to the standard encoding/json package
// which instructs the decoder to not copy numbers loaded from the json
// payloads.
func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber }
// DontCopyRawMessage is an extension to the standard encoding/json package
// which instructs the decoder to not allocate RawMessage values in separate
// memory buffers (see the documentation of the DontCopyRawMessage flag for
// more details).
func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage }
// DontMatchCaseInsensitiveStructFields is an extension to the standard
// encoding/json package which instructs the decoder to not match object fields
// against struct fields in a case-insensitive way, the field names have to
// match exactly to be decoded into the struct field values.
func (dec *Decoder) DontMatchCaseInsensitiveStructFields() {
dec.flags |= DontMatchCaseInsensitiveStructFields
}
// ZeroCopy is an extension to the standard encoding/json package which enables
// all the copy optimizations of the decoder.
func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy }
// InputOffset returns the input stream byte offset of the current decoder position.
// The offset gives the location of the end of the most recently returned token
// and the beginning of the next token.
func (dec *Decoder) InputOffset() int64 {
return dec.inputOffset
}
// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder
type Encoder struct {
writer io.Writer
prefix string
indent string
buffer *bytes.Buffer
err error
flags AppendFlags
}
// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys | appendNewline}
}
// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
var err error
var buf = encoderBufferPool.Get().(*encoderBuffer)
buf.data, err = Append(buf.data[:0], v, enc.flags)
if err != nil {
encoderBufferPool.Put(buf)
return err
}
if (enc.flags & appendNewline) != 0 {
buf.data = append(buf.data, '\n')
}
b := buf.data
if enc.prefix != "" || enc.indent != "" {
if enc.buffer == nil {
enc.buffer = new(bytes.Buffer)
enc.buffer.Grow(2 * len(buf.data))
} else {
enc.buffer.Reset()
}
Indent(enc.buffer, buf.data, enc.prefix, enc.indent)
b = enc.buffer.Bytes()
}
if _, err := enc.writer.Write(b); err != nil {
enc.err = err
}
encoderBufferPool.Put(buf)
return err
}
// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML
func (enc *Encoder) SetEscapeHTML(on bool) {
if on {
enc.flags |= EscapeHTML
} else {
enc.flags &= ^EscapeHTML
}
}
// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent
func (enc *Encoder) SetIndent(prefix, indent string) {
enc.prefix = prefix
enc.indent = indent
}
// SetSortMapKeys is an extension to the standard encoding/json package which
// allows the program to toggle sorting of map keys on and off.
func (enc *Encoder) SetSortMapKeys(on bool) {
if on {
enc.flags |= SortMapKeys
} else {
enc.flags &= ^SortMapKeys
}
}
// SetTrustRawMessage skips value checking when encoding a raw json message. It should only
// be used if the values are known to be valid json, e.g. because they were originally created
// by json.Unmarshal.
func (enc *Encoder) SetTrustRawMessage(on bool) {
if on {
enc.flags |= TrustRawMessage
} else {
enc.flags &= ^TrustRawMessage
}
}
// SetAppendNewline is an extension to the standard encoding/json package which
// allows the program to toggle the addition of a newline in Encode on or off.
func (enc *Encoder) SetAppendNewline(on bool) {
if on {
enc.flags |= appendNewline
} else {
enc.flags &= ^appendNewline
}
}
var encoderBufferPool = sync.Pool{
New: func() interface{} { return &encoderBuffer{data: make([]byte, 0, 4096)} },
}
type encoderBuffer struct{ data []byte }

787
vendor/github.com/segmentio/encoding/json/parse.go generated vendored Normal file
View File

@ -0,0 +1,787 @@
package json
import (
"bytes"
"encoding/binary"
"math"
"math/bits"
"reflect"
"unicode"
"unicode/utf16"
"unicode/utf8"
"github.com/segmentio/encoding/ascii"
)
// All space characters defined in the json specification.
const (
sp = ' '
ht = '\t'
nl = '\n'
cr = '\r'
)
const (
escape = '\\'
quote = '"'
)
func internalParseFlags(b []byte) (flags ParseFlags) {
// Don't consider surrounding whitespace
b = skipSpaces(b)
b = trimTrailingSpaces(b)
if ascii.ValidPrint(b) {
flags |= validAsciiPrint
}
if bytes.IndexByte(b, '\\') == -1 {
flags |= noBackslash
}
return
}
func skipSpaces(b []byte) []byte {
if len(b) > 0 && b[0] <= 0x20 {
b, _ = skipSpacesN(b)
}
return b
}
func skipSpacesN(b []byte) ([]byte, int) {
for i := range b {
switch b[i] {
case sp, ht, nl, cr:
default:
return b[i:], i
}
}
return nil, 0
}
func trimTrailingSpaces(b []byte) []byte {
if len(b) > 0 && b[len(b)-1] <= 0x20 {
b = trimTrailingSpacesN(b)
}
return b
}
func trimTrailingSpacesN(b []byte) []byte {
i := len(b) - 1
loop:
for ; i >= 0; i-- {
switch b[i] {
case sp, ht, nl, cr:
default:
break loop
}
}
return b[:i+1]
}
// parseInt parses a decimal representation of an int64 from b.
//
// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but
// it prevents Go from making a memory allocation for converting a byte slice to
// a string (escape analysis fails due to the error returned by strconv.ParseInt).
//
// Because it only works with base 10 the function is also significantly faster
// than strconv.ParseInt.
func (d decoder) parseInt(b []byte, t reflect.Type) (int64, []byte, error) {
var value int64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode integer from an empty input")
}
if b[0] == '-' {
const max = math.MinInt64
const lim = max / 10
if len(b) == 1 {
return 0, b, syntaxError(b, "cannot decode integer from '-'")
}
if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for _, c := range b[1:] {
if !(c >= '0' && c <= '9') {
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
break
}
if value < lim {
return 0, b, unmarshalOverflow(b, t)
}
value *= 10
x := int64(c - '0')
if value < (max + x) {
return 0, b, unmarshalOverflow(b, t)
}
value -= x
count++
}
count++
} else {
if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
x := int64(b[count] - '0')
next := value*10 + x
if next < value {
return 0, b, unmarshalOverflow(b, t)
}
value = next
}
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
}
if count < len(b) {
switch b[count] {
case '.', 'e', 'E': // was this actually a float?
v, r, _, err := d.parseNumber(b)
if err != nil {
v, r = b[:count+1], b[count+1:]
}
return 0, r, unmarshalTypeError(v, t)
}
}
return value, b[count:], nil
}
// parseUint is like parseInt but for unsigned integers.
func (d decoder) parseUint(b []byte, t reflect.Type) (uint64, []byte, error) {
var value uint64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode integer value from an empty input")
}
if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' {
return 0, b, syntaxError(b, "invalid leading character '0' in integer")
}
for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ {
x := uint64(b[count] - '0')
next := value*10 + x
if next < value {
return 0, b, unmarshalOverflow(b, t)
}
value = next
}
if count == 0 {
b, err := d.inputError(b, t)
return 0, b, err
}
if count < len(b) {
switch b[count] {
case '.', 'e', 'E': // was this actually a float?
v, r, _, err := d.parseNumber(b)
if err != nil {
v, r = b[:count+1], b[count+1:]
}
return 0, r, unmarshalTypeError(v, t)
}
}
return value, b[count:], nil
}
// parseUintHex parses a hexadecimal representation of a uint64 from b.
//
// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but
// it prevents Go from making a memory allocation for converting a byte slice to
// a string (escape analysis fails due to the error returned by strconv.ParseUint).
//
// Because it only works with base 16 the function is also significantly faster
// than strconv.ParseUint.
func (d decoder) parseUintHex(b []byte) (uint64, []byte, error) {
const max = math.MaxUint64
const lim = max / 0x10
var value uint64
var count int
if len(b) == 0 {
return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input")
}
parseLoop:
for i, c := range b {
var x uint64
switch {
case c >= '0' && c <= '9':
x = uint64(c - '0')
case c >= 'A' && c <= 'F':
x = uint64(c-'A') + 0xA
case c >= 'a' && c <= 'f':
x = uint64(c-'a') + 0xA
default:
if i == 0 {
return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", c)
}
break parseLoop
}
if value > lim {
return 0, b, syntaxError(b, "hexadecimal value out of range")
}
if value *= 0x10; value > (max - x) {
return 0, b, syntaxError(b, "hexadecimal value out of range")
}
value += x
count++
}
return value, b[count:], nil
}
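// Worked example (sketch, not upstream code): this is the routine that backs
// the \uXXXX escape handling in parseUnicode below, so four hex digits decode
// to a single code point value:
//
//	u, rest, err := d.parseUintHex([]byte("0041"))
//	// u == 0x41 ('A'), len(rest) == 0, err == nil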
func (d decoder) parseNull(b []byte) ([]byte, []byte, Kind, error) {
if hasNullPrefix(b) {
return b[:4], b[4:], Null, nil
}
if len(b) < 4 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'null' but found invalid token")
}
func (d decoder) parseTrue(b []byte) ([]byte, []byte, Kind, error) {
if hasTruePrefix(b) {
return b[:4], b[4:], True, nil
}
if len(b) < 4 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'true' but found invalid token")
}
func (d decoder) parseFalse(b []byte) ([]byte, []byte, Kind, error) {
if hasFalsePrefix(b) {
return b[:5], b[5:], False, nil
}
if len(b) < 5 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
return nil, b, Undefined, syntaxError(b, "expected 'false' but found invalid token")
}
func (d decoder) parseNumber(b []byte) (v, r []byte, kind Kind, err error) {
if len(b) == 0 {
r, err = b, unexpectedEOF(b)
return
}
// Assume it's an unsigned integer at first.
kind = Uint
i := 0
// sign
if b[i] == '-' {
kind = Int
i++
}
if i == len(b) {
r, err = b[i:], syntaxError(b, "missing number value after sign")
return
}
if b[i] < '0' || b[i] > '9' {
r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i])
return
}
// integer part
if b[i] == '0' {
i++
if i == len(b) || (b[i] != '.' && b[i] != 'e' && b[i] != 'E') {
v, r = b[:i], b[i:]
return
}
if '0' <= b[i] && b[i] <= '9' {
r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character")
return
}
}
for i < len(b) && '0' <= b[i] && b[i] <= '9' {
i++
}
// decimal part
if i < len(b) && b[i] == '.' {
kind = Float
i++
decimalStart := i
for i < len(b) {
if c := b[i]; !('0' <= c && c <= '9') {
if i == decimalStart {
r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c)
return
}
break
}
i++
}
if i == decimalStart {
r, err = b[i:], syntaxError(b, "expected decimal part after '.'")
return
}
}
// exponent part
if i < len(b) && (b[i] == 'e' || b[i] == 'E') {
kind = Float
i++
if i < len(b) {
if c := b[i]; c == '+' || c == '-' {
i++
}
}
if i == len(b) {
r, err = b[i:], syntaxError(b, "missing exponent in number")
return
}
exponentStart := i
for i < len(b) {
if c := b[i]; !('0' <= c && c <= '9') {
if i == exponentStart {
err = syntaxError(b, "expected digit but found '%c'", c)
return
}
break
}
i++
}
}
v, r = b[:i], b[i:]
return
}
func (d decoder) parseUnicode(b []byte) (rune, int, error) {
if len(b) < 4 {
return 0, len(b), syntaxError(b, "unicode code point must have at least 4 characters")
}
u, r, err := d.parseUintHex(b[:4])
if err != nil {
return 0, 4, syntaxError(b, "parsing unicode code point: %s", err)
}
if len(r) != 0 {
return 0, 4, syntaxError(b, "invalid unicode code point")
}
return rune(u), 4, nil
}
func (d decoder) parseString(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '"' {
return nil, b, Undefined, syntaxError(b, "expected '\"' at the beginning of a string value")
}
var n int
if len(b) >= 9 {
// This is an optimization for short strings. We read 8/16 bytes,
// and XOR each with 0x22 (") so that these bytes (and only
// these bytes) are now zero. We use the hasless(u,1) trick
// from https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
// to determine whether any bytes are zero. Finally, we CTZ
// to find the index of that byte.
const mask1 = 0x2222222222222222
const mask2 = 0x0101010101010101
const mask3 = 0x8080808080808080
u := binary.LittleEndian.Uint64(b[1:]) ^ mask1
if mask := (u - mask2) & ^u & mask3; mask != 0 {
n = bits.TrailingZeros64(mask)/8 + 2
goto found
}
if len(b) >= 17 {
u = binary.LittleEndian.Uint64(b[9:]) ^ mask1
if mask := (u - mask2) & ^u & mask3; mask != 0 {
n = bits.TrailingZeros64(mask)/8 + 10
goto found
}
}
}
n = bytes.IndexByte(b[1:], '"') + 2
if n <= 1 {
return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value")
}
found:
if (d.flags.has(noBackslash) || bytes.IndexByte(b[1:n], '\\') < 0) &&
(d.flags.has(validAsciiPrint) || ascii.ValidPrint(b[1:n])) {
return b[:n], b[n:], Unescaped, nil
}
for i := 1; i < len(b); i++ {
switch b[i] {
case '\\':
if i++; i < len(b) {
switch b[i] {
case '"', '\\', '/', 'n', 'r', 't', 'f', 'b':
case 'u':
_, n, err := d.parseUnicode(b[i+1:])
if err != nil {
return nil, b[i+1+n:], Undefined, err
}
i += n
default:
return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i])
}
}
case '"':
return b[:i+1], b[i+1:], String, nil
default:
if b[i] < 0x20 {
return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i])
}
}
}
return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value")
}
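// Worked example of the fast quote scan above (sketch, not upstream code):
// for b = `"abc",true`, the eight bytes b[1:9] are abc",tru; XOR-ing them with
// 0x22 turns the closing quote, and only the closing quote, into a zero byte,
// which the haszero mask then locates:
//
//	u := binary.LittleEndian.Uint64([]byte(`abc",tru`)) ^ 0x2222222222222222
//	mask := (u - 0x0101010101010101) & ^u & 0x8080808080808080
//	n := bits.TrailingZeros64(mask)/8 + 2 // == 5, i.e. the token `"abc"`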
func (d decoder) parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) {
s, b, k, err := d.parseString(b)
if err != nil {
return s, b, false, err
}
s = s[1 : len(s)-1] // trim the quotes
if k == Unescaped {
return s, b, false, nil
}
if r == nil {
r = make([]byte, 0, len(s))
}
for len(s) != 0 {
i := bytes.IndexByte(s, '\\')
if i < 0 {
r = appendCoerceInvalidUTF8(r, s)
break
}
r = appendCoerceInvalidUTF8(r, s[:i])
s = s[i+1:]
c := s[0]
switch c {
case '"', '\\', '/':
// simple escaped character
case 'n':
c = '\n'
case 'r':
c = '\r'
case 't':
c = '\t'
case 'b':
c = '\b'
case 'f':
c = '\f'
case 'u':
s = s[1:]
r1, n1, err := d.parseUnicode(s)
if err != nil {
return r, b, true, err
}
s = s[n1:]
if utf16.IsSurrogate(r1) {
if !hasPrefix(s, `\u`) {
r1 = unicode.ReplacementChar
} else {
r2, n2, err := d.parseUnicode(s[2:])
if err != nil {
return r, b, true, err
}
if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar {
s = s[2+n2:]
}
}
}
r = appendRune(r, r1)
continue
default: // not sure what this escape sequence is
return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c)
}
r = append(r, c)
s = s[1:]
}
return r, b, true, nil
}
func appendRune(b []byte, r rune) []byte {
n := len(b)
b = append(b, 0, 0, 0, 0)
return b[:n+utf8.EncodeRune(b[n:], r)]
}
func appendCoerceInvalidUTF8(b []byte, s []byte) []byte {
c := [4]byte{}
for _, r := range string(s) {
b = append(b, c[:utf8.EncodeRune(c[:], r)]...)
}
return b
}
func (d decoder) parseObject(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '{' {
return nil, b, Undefined, syntaxError(b, "expected '{' at the beginning of an object value")
}
var err error
var a = b
var n = len(b)
var i = 0
b = b[1:]
for {
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "cannot decode object from empty input")
}
if b[0] == '}' {
j := (n - len(b)) + 1
return a[:j], a[j:], Object, nil
}
if i != 0 {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field value")
}
if b[0] != ',' {
return nil, b, Undefined, syntaxError(b, "expected ',' after object field value but found '%c'", b[0])
}
b = skipSpaces(b[1:])
if len(b) == 0 {
return nil, b, Undefined, unexpectedEOF(b)
}
if b[0] == '}' {
return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field")
}
}
_, b, _, err = d.parseString(b)
if err != nil {
return nil, b, Undefined, err
}
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field key")
}
if b[0] != ':' {
return nil, b, Undefined, syntaxError(b, "expected ':' after object field key but found '%c'", b[0])
}
b = skipSpaces(b[1:])
_, b, _, err = d.parseValue(b)
if err != nil {
return nil, b, Undefined, err
}
i++
}
}
func (d decoder) parseArray(b []byte) ([]byte, []byte, Kind, error) {
if len(b) < 2 {
return nil, b[len(b):], Undefined, unexpectedEOF(b)
}
if b[0] != '[' {
return nil, b, Undefined, syntaxError(b, "expected '[' at the beginning of array value")
}
var err error
var a = b
var n = len(b)
var i = 0
b = b[1:]
for {
b = skipSpaces(b)
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "missing closing ']' after array value")
}
if b[0] == ']' {
j := (n - len(b)) + 1
return a[:j], a[j:], Array, nil
}
if i != 0 {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected EOF after array element")
}
if b[0] != ',' {
return nil, b, Undefined, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
}
b = skipSpaces(b[1:])
if len(b) == 0 {
return nil, b, Undefined, unexpectedEOF(b)
}
if b[0] == ']' {
return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field")
}
}
_, b, _, err = d.parseValue(b)
if err != nil {
return nil, b, Undefined, err
}
i++
}
}
func (d decoder) parseValue(b []byte) ([]byte, []byte, Kind, error) {
if len(b) == 0 {
return nil, b, Undefined, syntaxError(b, "unexpected end of JSON input")
}
var v []byte
var k Kind
var err error
switch b[0] {
case '{':
v, b, k, err = d.parseObject(b)
case '[':
v, b, k, err = d.parseArray(b)
case '"':
v, b, k, err = d.parseString(b)
case 'n':
v, b, k, err = d.parseNull(b)
case 't':
v, b, k, err = d.parseTrue(b)
case 'f':
v, b, k, err = d.parseFalse(b)
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
v, b, k, err = d.parseNumber(b)
default:
err = syntaxError(b, "invalid character '%c' looking for beginning of value", b[0])
}
return v, b, k, err
}
func hasNullPrefix(b []byte) bool {
return len(b) >= 4 && string(b[:4]) == "null"
}
func hasTruePrefix(b []byte) bool {
return len(b) >= 4 && string(b[:4]) == "true"
}
func hasFalsePrefix(b []byte) bool {
return len(b) >= 5 && string(b[:5]) == "false"
}
func hasPrefix(b []byte, s string) bool {
return len(b) >= len(s) && s == string(b[:len(s)])
}
func hasLeadingSign(b []byte) bool {
return len(b) > 0 && (b[0] == '+' || b[0] == '-')
}
func hasLeadingZeroes(b []byte) bool {
if hasLeadingSign(b) {
b = b[1:]
}
return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9'
}
func appendToLower(b, s []byte) []byte {
if ascii.Valid(s) { // fast path for ascii strings
i := 0
for j := range s {
c := s[j]
if 'A' <= c && c <= 'Z' {
b = append(b, s[i:j]...)
b = append(b, c+('a'-'A'))
i = j + 1
}
}
return append(b, s[i:]...)
}
for _, r := range string(s) {
b = appendRune(b, foldRune(r))
}
return b
}
func foldRune(r rune) rune {
if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' {
r = r + ('a' - 'A')
}
return r
}

20
vendor/github.com/segmentio/encoding/json/reflect.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
//go:build go1.18
// +build go1.18
package json
import (
"reflect"
"unsafe"
)
func extendSlice(t reflect.Type, s *slice, n int) slice {
arrayType := reflect.ArrayOf(n, t.Elem())
arrayData := reflect.New(arrayType)
reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem())
return slice{
data: unsafe.Pointer(arrayData.Pointer()),
len: s.len,
cap: n,
}
}

View File

@@ -0,0 +1,30 @@
//go:build !go1.18
// +build !go1.18
package json
import (
"reflect"
"unsafe"
)
//go:linkname unsafe_NewArray reflect.unsafe_NewArray
func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int
func extendSlice(t reflect.Type, s *slice, n int) slice {
elemTypeRef := t.Elem()
elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr
d := slice{
data: unsafe_NewArray(elemTypePtr, n),
len: s.len,
cap: n,
}
typedslicecopy(elemTypePtr, d, *s)
return d
}

70
vendor/github.com/segmentio/encoding/json/string.go generated vendored Normal file
View File

@@ -0,0 +1,70 @@
package json
import (
"math/bits"
"unsafe"
)
const (
lsb = 0x0101010101010101
msb = 0x8080808080808080
)
// escapeIndex finds the index of the first char in `s` that requires escaping.
// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if
// it includes a double quote or backslash. If the escapeHTML mode is enabled,
// the chars <, > and & also require escaping. If no chars in `s` require
// escaping, the return value is -1.
func escapeIndex(s string, escapeHTML bool) int {
chunks := stringToUint64(s)
for _, n := range chunks {
// combine masks before checking for the MSB of each byte. We include
// `n` in the mask to check whether any of the *input* byte MSBs were
// set (i.e. the byte was outside the ASCII range).
mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\')
if escapeHTML {
mask |= contains(n, '<') | contains(n, '>') | contains(n, '&')
}
if (mask & msb) != 0 {
return bits.TrailingZeros64(mask&msb) / 8
}
}
for i := len(chunks) * 8; i < len(s); i++ {
c := s[i]
if c < 0x20 || c > 0x7f || c == '"' || c == '\\' || (escapeHTML && (c == '<' || c == '>' || c == '&')) {
return i
}
}
return -1
}
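// For example (sketch, not upstream code):
//
//	escapeIndex(`ab"cdefgh`, false)   // == 2, the '"' needs escaping
//	escapeIndex("plain ascii", false) // == -1, nothing to escape
//	escapeIndex("a<b", true)          // == 1 only when escapeHTML is enabled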
// below returns a mask that can be used to determine if any of the bytes
// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was
// below `b`. The result is only valid if `b`, and each byte in `n`, is below
// 0x80.
func below(n uint64, b byte) uint64 {
return n - expand(b)
}
// contains returns a mask that can be used to determine if any of the
// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then
// that byte is equal to `b`. The result is only valid if `b`, and each
// byte in `n`, is below 0x80.
func contains(n uint64, b byte) uint64 {
return (n ^ expand(b)) - lsb
}
// expand puts the specified byte into each of the 8 bytes of a uint64.
func expand(b byte) uint64 {
return lsb * uint64(b)
}
func stringToUint64(s string) []uint64 {
return *(*[]uint64)(unsafe.Pointer(&sliceHeader{
Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
Len: len(s) / 8,
Cap: len(s) / 8,
}))
}
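// Worked example tying the helpers above together (sketch, not upstream code,
// and assuming a little-endian machine as escapeIndex does): with the eight
// bytes "ab\tcdefg" packed into n, below(n, 0x20) underflows only in the byte
// holding the tab (0x09 - 0x20 wraps to 0xe9), so the combined mask has the
// MSB of byte 2 set and escapeIndex reports index 2:
//
//	n := stringToUint64("ab\tcdefg")[0]
//	idx := bits.TrailingZeros64((n|below(n, 0x20))&msb) / 8 // == 2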

416
vendor/github.com/segmentio/encoding/json/token.go generated vendored Normal file
View File

@@ -0,0 +1,416 @@
package json
import (
"strconv"
"sync"
"unsafe"
)
// Tokenizer is an iterator-style type which can be used to progressively parse
// through a json input.
//
// Tokenizing json is useful to build highly efficient parsing operations, for
// example when doing transformations on the fly, where the program reads the
// input and produces the transformed json to an output buffer.
//
// Here is a common pattern to use a tokenizer:
//
// for t := json.NewTokenizer(b); t.Next(); {
// switch k := t.Kind(); k.Class() {
// case json.Null:
// ...
// case json.Bool:
// ...
// case json.Num:
// ...
// case json.String:
// ...
// case json.Array:
// ...
// case json.Object:
// ...
// }
// }
//
type Tokenizer struct {
// When the tokenizer is positioned on a json delimiter this field is not
// zero. In this case the possible values are '{', '}', '[', ']', ':', and
// ','.
Delim Delim
// This field contains the raw json token that the tokenizer is pointing at.
// When Delim is not zero, this field is a single-element byte slice
// containing the delimiter value. Otherwise, this field holds values like
// null, true, false, numbers, or quoted strings.
Value RawValue
// When the tokenizer has encountered invalid content this field is not nil.
Err error
// When the value is in an array or an object, this field contains the depth
// at which it was found.
Depth int
// When the value is in an array or an object, this field contains the
// position at which it was found.
Index int
// This field is true when the value is the key of an object.
IsKey bool
// Tells whether the next value read from the tokenizer is a key.
isKey bool
// json input for the tokenizer, pointing at data right after the last token
// that was parsed.
json []byte
// Stack used to track entering and leaving arrays, objects, and keys.
stack *stack
// Decoder used for parsing.
decoder
}
// NewTokenizer constructs a new Tokenizer which reads its json input from b.
func NewTokenizer(b []byte) *Tokenizer {
return &Tokenizer{
json: b,
decoder: decoder{flags: internalParseFlags(b)},
}
}
// Reset erases the state of t and re-initializes it with the json input from b.
func (t *Tokenizer) Reset(b []byte) {
if t.stack != nil {
releaseStack(t.stack)
}
// This code is similar to:
//
// *t = Tokenizer{json: b}
//
// However, it does not compile down to an invocation of duff-copy.
t.Delim = 0
t.Value = nil
t.Err = nil
t.Depth = 0
t.Index = 0
t.IsKey = false
t.isKey = false
t.json = b
t.stack = nil
t.decoder = decoder{flags: internalParseFlags(b)}
}
// Next advances the tokenizer to the next token and reports whether one was
// found; it returns false once the end of the json input has been reached.
//
// If the tokenizer encounters malformed json while reading the input the method
// sets t.Err to an error describing the issue, and returns false. Once an error
// has been encountered, the tokenizer will always fail until its input is
// cleared by a call to its Reset method.
func (t *Tokenizer) Next() bool {
if t.Err != nil {
return false
}
// Inlined code of the skipSpaces function; this gives a ~15% speed boost.
i := 0
skipLoop:
for _, c := range t.json {
switch c {
case sp, ht, nl, cr:
i++
default:
break skipLoop
}
}
if i > 0 {
t.json = t.json[i:]
}
if len(t.json) == 0 {
t.Reset(nil)
return false
}
var kind Kind
switch t.json[0] {
case '"':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseString(t.json)
case 'n':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseNull(t.json)
case 't':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseTrue(t.json)
case 'f':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseFalse(t.json)
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
t.Delim = 0
t.Value, t.json, kind, t.Err = t.parseNumber(t.json)
case '{', '}', '[', ']', ':', ',':
t.Delim, t.Value, t.json = Delim(t.json[0]), t.json[:1], t.json[1:]
switch t.Delim {
case '{':
kind = Object
case '[':
kind = Array
}
default:
t.Delim = 0
t.Value, t.json, t.Err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0])
}
t.Depth = t.depth()
t.Index = t.index()
t.flags = t.flags.withKind(kind)
if t.Delim == 0 {
t.IsKey = t.isKey
} else {
t.IsKey = false
switch t.Delim {
case '{':
t.isKey = true
t.push(inObject)
case '[':
t.push(inArray)
case '}':
t.Err = t.pop(inObject)
t.Depth--
t.Index = t.index()
case ']':
t.Err = t.pop(inArray)
t.Depth--
t.Index = t.index()
case ':':
t.isKey = false
case ',':
if t.stack == nil || len(t.stack.state) == 0 {
t.Err = syntaxError(t.json, "found unexpected comma")
return false
}
if t.stack.is(inObject) {
t.isKey = true
}
t.stack.state[len(t.stack.state)-1].len++
}
}
return (t.Delim != 0 || len(t.Value) != 0) && t.Err == nil
}
func (t *Tokenizer) depth() int {
if t.stack == nil {
return 0
}
return t.stack.depth()
}
func (t *Tokenizer) index() int {
if t.stack == nil {
return 0
}
return t.stack.index()
}
func (t *Tokenizer) push(typ scope) {
if t.stack == nil {
t.stack = acquireStack()
}
t.stack.push(typ)
}
func (t *Tokenizer) pop(expect scope) error {
if t.stack == nil || !t.stack.pop(expect) {
return syntaxError(t.json, "found unexpected character while tokenizing json input")
}
return nil
}
// Kind returns the kind of the value that the tokenizer is currently positioned
// on.
func (t *Tokenizer) Kind() Kind { return t.flags.kind() }
// Bool returns a bool containing the value of the json boolean that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a boolean, the behavior is undefined.
func (t *Tokenizer) Bool() bool { return t.flags.kind() == True }
// Int returns the value of the json number that the tokenizer is currently
// pointing at, parsed as an int64.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on an integer, the behavior is undefined.
func (t *Tokenizer) Int() int64 {
i, _, _ := t.parseInt(t.Value, int64Type)
return i
}
// Uint returns the value of the json number that the tokenizer is currently
// pointing at, parsed as a uint64.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a positive integer, the behavior is
// undefined.
func (t *Tokenizer) Uint() uint64 {
u, _, _ := t.parseUint(t.Value, uint64Type)
return u
}
// Float returns the value of the json number that the tokenizer is currently
// pointing at, parsed as a float64.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// If the tokenizer is not positioned on a number, the behavior is undefined.
func (t *Tokenizer) Float() float64 {
f, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&t.Value)), 64)
return f
}
// String returns a byte slice containing the value of the json string that the
// tokenizer is currently pointing at.
//
// This method must only be called after checking the kind of the token via a
// call to Kind.
//
// When possible, the returned byte slice references the backing array of the
// tokenizer. A new slice is only allocated if the tokenizer needed to unescape
// the json string.
//
// If the tokenizer is not positioned on a string, the behavior is undefined.
func (t *Tokenizer) String() []byte {
if t.flags.kind() == Unescaped && len(t.Value) > 1 {
return t.Value[1 : len(t.Value)-1] // unquote
}
s, _, _, _ := t.parseStringUnquote(t.Value, nil)
return s
}
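// A minimal sketch of driving the tokenizer with the accessors above
// (illustrative, not upstream code; it assumes the Num and Bool kind classes
// shown in the Tokenizer example):
//
//	t := json.NewTokenizer([]byte(`{"n":42,"ok":true}`))
//	for t.Next() {
//		if t.Delim != 0 {
//			continue // '{', '}', ':' and ',' are reported as delimiters
//		}
//		if t.IsKey {
//			fmt.Printf("key %s\n", t.String())
//			continue
//		}
//		switch t.Kind().Class() {
//		case json.Num:
//			fmt.Println("num", t.Int())
//		case json.Bool:
//			fmt.Println("bool", t.Bool())
//		}
//	}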
// RawValue represents a raw json value; it is intended to carry null, true,
// false, number, and string values only.
type RawValue []byte
// String returns true if v contains a string value.
func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' }
// Null returns true if v contains a null value.
func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' }
// True returns true if v contains a true value.
func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' }
// False returns true if v contains a false value.
func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' }
// Number returns true if v contains a number value.
func (v RawValue) Number() bool {
if len(v) != 0 {
switch v[0] {
case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
}
return false
}
// AppendUnquote appends the unquoted version of the string value in v to b.
func (v RawValue) AppendUnquote(b []byte) []byte {
d := decoder{}
s, r, _, err := d.parseStringUnquote(v, b)
if err != nil {
panic(err)
}
if len(r) != 0 {
panic(syntaxError(r, "unexpected trailing tokens after json value"))
}
return append(b, s...)
}
// Unquote returns the unquoted version of the string value in v.
func (v RawValue) Unquote() []byte {
return v.AppendUnquote(nil)
}
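// For example (sketch, not upstream code):
//
//	v := RawValue(`"hello\nworld"`)
//	v.String()  // true, the raw value is a quoted string
//	v.Unquote() // []byte("hello\nworld"), with the \n escape decoded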
type scope int
const (
inArray scope = iota
inObject
)
type state struct {
typ scope
len int
}
type stack struct {
state []state
}
func (s *stack) push(typ scope) {
s.state = append(s.state, state{typ: typ, len: 1})
}
func (s *stack) pop(expect scope) bool {
i := len(s.state) - 1
if i < 0 {
return false
}
if found := s.state[i]; expect != found.typ {
return false
}
s.state = s.state[:i]
return true
}
func (s *stack) is(typ scope) bool {
return len(s.state) != 0 && s.state[len(s.state)-1].typ == typ
}
func (s *stack) depth() int {
return len(s.state)
}
func (s *stack) index() int {
if len(s.state) == 0 {
return 0
}
return s.state[len(s.state)-1].len - 1
}
func acquireStack() *stack {
s, _ := stackPool.Get().(*stack)
if s == nil {
s = &stack{state: make([]state, 0, 4)}
} else {
s.state = s.state[:0]
}
return s
}
func releaseStack(s *stack) {
stackPool.Put(s)
}
var (
stackPool sync.Pool // *stack
)

38
vendor/go.lsp.dev/jsonrpc2/.codecov.yml vendored Normal file
View File

@@ -0,0 +1,38 @@
codecov:
allow_coverage_offsets: true
notify:
wait_for_ci: false
coverage:
precision: 1
round: down
range: "70...100"
status:
project:
default:
target: auto
threshold: 1%
if_ci_failed: error
if_not_found: success
patch:
default:
only_pulls: true
target: 50%
threshold: 10%
if_ci_failed: error
if_not_found: failure
changes:
default:
if_ci_failed: error
if_not_found: success
only_pulls: false
branches:
- main
comment:
behavior: default
show_carryforward_flags: true
github_checks:
annotations: true

View File

@@ -0,0 +1 @@
(*go.lsp.dev/jsonrpc2.Request).Reply

View File

@@ -0,0 +1,11 @@
# go.lsp.dev/jsonrpc2 project gitattributes file
# https://github.com/github/linguist#using-gitattributes
# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
# To prevent CRLF breakages on Windows for fragile files, like testdata.
* -text
docs/ linguist-documentation
*.pb.go linguist-generated
*_gen.go linguist-generated
*_string.go linguist-generated

52
vendor/go.lsp.dev/jsonrpc2/.gitignore vendored Normal file
View File

@@ -0,0 +1,52 @@
# go.lsp.dev/jsonrpc2 project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
# please do not open a pull request to add something created by your editor or tools
# github/gitignore/Go.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
vendor/
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
# cgo generated
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
# test generated
_testmain.go
# profile
*.pprof
# coverage
coverage.*
# tools
bin/

200
vendor/go.lsp.dev/jsonrpc2/.golangci.yml vendored Normal file
View File

@@ -0,0 +1,200 @@
run:
timeout: 5m
issues-exit-code: 1
tests: true
skip-dirs: []
skip-dirs-use-default: true
skip-files: []
allow-parallel-runners: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
uniq-by-line: true
sort-results: true
linters-settings:
dupl:
threshold: 100
# errcheck:
# check-type-assertions: true
# check-blank: true
# exclude: .errcheckignore
funlen:
lines: 100
statements: 60
gocognit:
min-complexity: 20
goconst:
min-len: 3
min-occurrences: 3
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- commentedOutCode
- whyNoLint
settings:
hugeParam:
sizeThreshold: 80
rangeExprCopy:
sizeThreshold: 512
rangeValCopy:
sizeThreshold: 128
gocyclo:
min-complexity: 15
godot:
scope: declarations
capital: false
gofmt:
simplify: true
goimports:
local-prefixes: go.lsp.dev/jsonrpc2
golint:
min-confidence: 0.3
govet:
enable-all: true
check-shadowing: true
disable:
- fieldalignment
depguard:
list-type: blacklist
include-go-root: true
# packages-with-error-message:
# - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log"
lll:
line-length: 120
tab-width: 1
maligned:
suggest-new: true
misspell:
locale: US
ignore-words:
- cancelled
nakedret:
max-func-lines: 30
prealloc:
simple: true
range-loops: true
for-loops: true
testpackage:
skip-regexp: '.*(export)_test\.go'
unparam:
check-exported: true
algo: cha
unused:
check-exported: false
whitespace:
multi-if: true
multi-func: true
linters:
fast: false
disabled:
- deadcode # Finds unused code
- errcheck # Errcheck is a program for checking for unchecked errors in go programs
- exhaustivestruct # Checks if all struct's fields are initialized
- forbidigo # Forbids identifiers
- gci # Gci control golang package import order and make it always deterministic
- gochecknoglobals # check that no global variables exist
- gochecknoinits # Checks that no init functions are present in Go code
- godox # Tool for detection of FIXME, TODO and other comment keywords
- goerr113 # Golang linter to check the errors handling expressions
- gofumpt # Gofumpt checks whether code was gofumpt-ed
- goheader # Checks is file header matches to pattern
- golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- gomnd # An analyzer to detect magic numbers
- gomodguard # Allow and block list linter for direct Go module dependencies
- gosec # Inspects source code for security problems
- nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
- paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
- scopelint # Scopelint checks for unpinned variables in go programs
- sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed
- unparam # Reports unused function parameters
- wrapcheck # Checks that errors returned from external packages are wrapped TODO(zchee): enable
- wsl # Whitespace Linter
enable:
- asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- bodyclose # checks whether HTTP response body is closed successfully
- depguard # Go linter that checks if package imports are in a list of acceptable packages
- dogsled # Checks assignments with too many blank identifiers
- dupl # Tool for code clone detection
- errorlint # source code linter for Go software that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13
- exhaustive # check exhaustiveness of enum switch statements
- exportloopref # checks for pointers to enclosing loop variables
- funlen # Tool for detection of long functions
- gocognit # Computes and checks the cognitive complexity of functions
- goconst # Finds repeated strings that could be replaced by a constant
- gocritic # The most opinionated Go source code linter
- gocyclo # Computes and checks the cyclomatic complexity of functions
- godot # Check if comments end in a period
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
- goprintffuncname # Checks that printf-like functions are named with `f` at the end
- gosimple # Linter for Go source code that specializes in simplifying a code
- govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
- ifshort # Checks that your code uses short syntax for if-statements whenever possible
- ineffassign # Detects when assignments to existing variables are not used
- lll # Reports long lines
- makezero # Finds slice declarations with non-zero initial length
- misspell # Finds commonly misspelled English words in comments
- nakedret # Finds naked returns in functions greater than a specified function length
- nestif # Reports deeply nested if statements
- noctx # noctx finds sending http request without context.Context
- nolintlint # Reports ill-formed or insufficient nolint directives
- prealloc # Finds slice declarations that could potentially be preallocated
- predeclared # find code that shadows one of Go's predeclared identifiers
- rowserrcheck # checks whether Err of rows is checked successfully
- staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
- structcheck # Finds unused struct fields
- stylecheck # Stylecheck is a replacement for golint
- testpackage # linter that makes you use a separate _test package
- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
- tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
- typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
- unconvert # Remove unnecessary type conversions
- unused # Checks Go code for unused constants, variables, functions and types
- varcheck # Finds unused global variables and constants
- whitespace # Tool for detection of leading and trailing whitespace
issues:
max-same-issues: 0
exclude-use-default: true
exclude-rules:
- path: _test\.go
linters:
- errcheck
- funlen
- gocognit
- goconst
- gocyclo
- lll
- maligned
- wrapcheck
- path: "(.*)?_example_test.go"
linters:
- gocritic
# `TestMain` function is no longer required to call `os.Exit` since Go 1.15.
# ref: https://golang.org/doc/go1.15#testing
- text: "SA3000:"
linters:
- staticcheck
# Exclude shadow checking on the variable named err
- text: "shadow: declaration of \"(err|ok)\""
linters:
- govet
# fake implements
- path: fake/fake.go
linters:
- errcheck
# future use
- path: wire.go
text: "`(codeServerErrorStart|codeServerErrorEnd)` is unused"
# goroutine
- path: handler.go
text: "Error return value of `handler` is not checked"

29
vendor/go.lsp.dev/jsonrpc2/LICENSE vendored Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

129
vendor/go.lsp.dev/jsonrpc2/Makefile vendored Normal file
View File

@@ -0,0 +1,129 @@
# -----------------------------------------------------------------------------
# global
.DEFAULT_GOAL := test
comma := ,
empty :=
space := $(empty) $(empty)
# -----------------------------------------------------------------------------
# go
GO_PATH ?= $(shell go env GOPATH)
GO_OS ?= $(shell go env GOOS)
GO_ARCH ?= $(shell go env GOARCH)
PKG := $(subst $(GO_PATH)/src/,,$(CURDIR))
CGO_ENABLED ?= 0
GO_BUILDTAGS=osusergo netgo static
GO_LDFLAGS=-s -w "-extldflags=-static"
GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo
GO_PKGS := $(shell go list ./...)
GO_TEST ?= ${TOOLS_BIN}/gotestsum --
GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...)
GO_TEST_FLAGS ?= -race -count=1
GO_TEST_FUNC ?= .
GO_COVERAGE_OUT ?= coverage.out
GO_BENCH_FLAGS ?= -benchmem
GO_BENCH_FUNC ?= .
GO_LINT_FLAGS ?=
TOOLS := $(shell cd tools; go list -f '{{ join .Imports " " }}' -tags=tools)
TOOLS_BIN := ${CURDIR}/tools/bin
# Set build environment
JOBS := $(shell getconf _NPROCESSORS_CONF)
# -----------------------------------------------------------------------------
# defines
define target
@printf "+ $(patsubst ,$@,$(1))\\n" >&2
endef
# -----------------------------------------------------------------------------
# target
##@ test, bench, coverage
export GOTESTSUM_FORMAT=standard-verbose
.PHONY: test
test: CGO_ENABLED=1
test: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})'
test: tools/bin/gotestsum ## Runs package test including race condition.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS}
.PHONY: bench
bench: ## Take a package benchmark.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} -run='^$$' -bench=${GO_BENCH_FUNC} ${GO_BENCH_FLAGS} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS}
.PHONY: coverage
coverage: CGO_ENABLED=1
coverage: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})'
coverage: tools/bin/gotestsum ## Takes packages test coverage.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=${PKG}/... -coverprofile=${GO_COVERAGE_OUT} $(strip ${GO_FLAGS}) ${GO_PKGS}
##@ fmt, lint
.PHONY: lint
lint: fmt lint/golangci-lint ## Run all linters.
.PHONY: fmt
fmt: tools/bin/goimportz tools/bin/gofumpt ## Run goimportz and gofumpt.
$(call target)
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /jsonrpc2,,$(PKG)) -w
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w
.PHONY: lint/golangci-lint
lint/golangci-lint: tools/bin/golangci-lint .golangci.yml ## Run golangci-lint.
$(call target)
${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./...
##@ tools
.PHONY: tools
tools: tools/bin/'' ## Install tools
tools/%: tools/bin/% ## install an individual dependent tool
tools/bin/%: ${CURDIR}/tools/go.mod ${CURDIR}/tools/go.sum
@cd tools; \
for t in ${TOOLS}; do \
if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \
echo "Install $$t ..."; \
GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -v -mod=mod ${GO_FLAGS} "$${t}"; \
fi \
done
##@ clean
.PHONY: clean
clean: ## Cleanups binaries and extra files in the package.
$(call target)
@rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN}
##@ miscellaneous
.PHONY: todo
TODO: ## Print all of the (TODO|BUG|XXX|FIXME|NOTE) markers in packages.
@grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*')
.PHONY: env/%
env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc.
@echo $($*)
##@ help
.PHONY: help
help: ## Show this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m<target>\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

19
vendor/go.lsp.dev/jsonrpc2/README.md vendored Normal file
View File

@@ -0,0 +1,19 @@
# jsonrpc2
[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga]
Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
<!-- badge links -->
[circleci]: https://app.circleci.com/pipelines/github/go-language-server/jsonrpc2
[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/jsonrpc2
[module]: https://github.com/go-language-server/jsonrpc2/releases/latest
[codecov]: https://codecov.io/gh/go-language-server/jsonrpc2
[ga]: https://github.com/go-language-server/jsonrpc2
[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/jsonrpc2/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci
[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev
[module-badge]: https://img.shields.io/github/release/go-language-server/jsonrpc2.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4=
[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/jsonrpc2/main?logo=codecov&style=for-the-badge
[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/jsonrpc2?useReferer&pixel

86
vendor/go.lsp.dev/jsonrpc2/codes.go vendored Normal file
View File

@@ -0,0 +1,86 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
// Code is an error code as defined in the JSON-RPC spec.
type Code int32
// list of JSON-RPC error codes.
const (
// ParseError means invalid JSON was received by the server.
// An error occurred on the server while parsing the JSON text.
ParseError Code = -32700
// InvalidRequest means the JSON sent is not a valid Request object.
InvalidRequest Code = -32600
// MethodNotFound means the method does not exist / is not available.
MethodNotFound Code = -32601
// InvalidParams means invalid method parameter(s) were supplied.
InvalidParams Code = -32602
// InternalError means an internal JSON-RPC error occurred.
InternalError Code = -32603
// JSONRPCReservedErrorRangeStart is the start of the range of JSON-RPC reserved error codes.
//
// It doesn't denote a real error code. No LSP error codes should
// be defined between the start and end range. For backwards
// compatibility the "ServerNotInitialized" and the "UnknownErrorCode"
// are left in the range.
//
// @since 3.16.0.
JSONRPCReservedErrorRangeStart Code = -32099
// CodeServerErrorStart reserved for implementation-defined server-errors.
//
// Deprecated: Use JSONRPCReservedErrorRangeStart instead.
CodeServerErrorStart = JSONRPCReservedErrorRangeStart
// ServerNotInitialized is the error returned when a request is received before the server is initialized.
ServerNotInitialized Code = -32002
// UnknownError should be used for all non-coded errors.
UnknownError Code = -32001
// JSONRPCReservedErrorRangeEnd is the end of the range of JSON-RPC reserved error codes.
//
// It doesn't denote a real error code.
//
// @since 3.16.0.
JSONRPCReservedErrorRangeEnd Code = -32000
// CodeServerErrorEnd reserved for implementation-defined server-errors.
//
// Deprecated: Use JSONRPCReservedErrorRangeEnd instead.
CodeServerErrorEnd = JSONRPCReservedErrorRangeEnd
)
// This file contains the Go forms of the wire specification.
//
// See http://www.jsonrpc.org/specification for details.
//
// list of JSON-RPC errors.
var (
// ErrUnknown should be used for all non-coded errors.
ErrUnknown = NewError(UnknownError, "JSON-RPC unknown error")
// ErrParse is used when invalid JSON was received by the server.
ErrParse = NewError(ParseError, "JSON-RPC parse error")
// ErrInvalidRequest is used when the JSON sent is not a valid Request object.
ErrInvalidRequest = NewError(InvalidRequest, "JSON-RPC invalid request")
// ErrMethodNotFound should be returned by the handler when the method does
// not exist / is not available.
ErrMethodNotFound = NewError(MethodNotFound, "JSON-RPC method not found")
// ErrInvalidParams should be returned by the handler when method
// parameter(s) were invalid.
ErrInvalidParams = NewError(InvalidParams, "JSON-RPC invalid params")
// ErrInternal is not currently returned but defined for completeness.
ErrInternal = NewError(InternalError, "JSON-RPC internal error")
)

245
vendor/go.lsp.dev/jsonrpc2/conn.go vendored Normal file
View File

@@ -0,0 +1,245 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bytes"
"context"
"fmt"
"sync"
"sync/atomic"
"github.com/segmentio/encoding/json"
)
// Conn is the common interface to jsonrpc clients and servers.
//
// Conn is bidirectional; it does not have a designated server or client end.
// It manages the jsonrpc2 protocol, connecting responses back to their calls.
type Conn interface {
// Call invokes the target method and waits for a response.
//
// The params will be marshaled to JSON before sending over the wire, and will
// be handed to the method invoked.
//
// The response will be unmarshaled from JSON into the result.
//
// The id returned will be unique from this connection, and can be used for
// logging or tracking.
Call(ctx context.Context, method string, params, result interface{}) (ID, error)
// Notify invokes the target method but does not wait for a response.
//
// The params will be marshaled to JSON before sending over the wire, and will
// be handed to the method invoked.
Notify(ctx context.Context, method string, params interface{}) error
// Go starts a goroutine to handle the connection.
//
// It must be called exactly once for each Conn. It returns immediately;
// callers must block on Done() to wait for the connection to shut down.
//
// This is a temporary measure; it should be started automatically in the
// future.
Go(ctx context.Context, handler Handler)
// Close closes the connection and its underlying stream.
//
// It does not wait for the close to complete, use the Done() channel for
// that.
Close() error
// Done returns a channel that will be closed when the processing goroutine
// has terminated, which will happen if Close() is called or an underlying
// stream is closed.
Done() <-chan struct{}
// Err returns an error if there was one from within the processing goroutine.
//
// If Err returns non-nil, the connection is already closed or closing.
Err() error
}
type conn struct {
seq int32 // access atomically
writeMu sync.Mutex // protects writes to the stream
stream Stream // supplied stream
pendingMu sync.Mutex // protects the pending map
pending map[ID]chan *Response // holds the pending response channel with the ID as the key.
done chan struct{} // closed when done
err atomic.Value // holds run error
}
// NewConn creates a new connection object around the supplied stream.
func NewConn(s Stream) Conn {
conn := &conn{
stream: s,
pending: make(map[ID]chan *Response),
done: make(chan struct{}),
}
return conn
}
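// A minimal sketch of wiring a connection together (illustrative only; stream,
// handler, params and the method name are placeholders, and the Stream is
// assumed to have been built with the package's stream constructor over an
// io.ReadWriteCloser):
//
//	conn := NewConn(stream)
//	conn.Go(ctx, handler)
//	var result interface{}
//	if _, err := conn.Call(ctx, "initialize", params, &result); err != nil {
//		// handle the call error
//	}
//	<-conn.Done()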
// Call implements Conn.
func (c *conn) Call(ctx context.Context, method string, params, result interface{}) (id ID, err error) {
// generate a new request identifier
id = NewNumberID(atomic.AddInt32(&c.seq, 1))
call, err := NewCall(id, method, params)
if err != nil {
return id, fmt.Errorf("marshaling call parameters: %w", err)
}
// We have to add ourselves to the pending map before we send, otherwise we
// are racing the response. Also add a buffer to rchan, so that if we get a
// wire response between the time this call is cancelled and id is deleted
// from c.pending, the send to rchan will not block.
rchan := make(chan *Response, 1)
c.pendingMu.Lock()
c.pending[id] = rchan
c.pendingMu.Unlock()
defer func() {
c.pendingMu.Lock()
delete(c.pending, id)
c.pendingMu.Unlock()
}()
// now we are ready to send
_, err = c.write(ctx, call)
if err != nil {
// sending failed, we will never get a response, so don't leave it pending
return id, err
}
// now wait for the response
select {
case resp := <-rchan:
// is it an error response?
if resp.err != nil {
return id, resp.err
}
if result == nil || len(resp.result) == 0 {
return id, nil
}
dec := json.NewDecoder(bytes.NewReader(resp.result))
dec.ZeroCopy()
if err := dec.Decode(result); err != nil {
return id, fmt.Errorf("unmarshaling result: %w", err)
}
return id, nil
case <-ctx.Done():
return id, ctx.Err()
}
}
// Notify implements Conn.
func (c *conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
notify, err := NewNotification(method, params)
if err != nil {
return fmt.Errorf("marshaling notify parameters: %w", err)
}
_, err = c.write(ctx, notify)
return err
}
func (c *conn) replier(req Message) Replier {
return func(ctx context.Context, result interface{}, err error) error {
call, ok := req.(*Call)
if !ok {
// request was a notify, no need to respond
return nil
}
response, err := NewResponse(call.id, result, err)
if err != nil {
return err
}
_, err = c.write(ctx, response)
if err != nil {
// TODO(iancottrell): if a stream write fails, we really need to shut down the whole stream
return err
}
return nil
}
}
func (c *conn) write(ctx context.Context, msg Message) (int64, error) {
c.writeMu.Lock()
n, err := c.stream.Write(ctx, msg)
c.writeMu.Unlock()
if err != nil {
return 0, fmt.Errorf("write to stream: %w", err)
}
return n, nil
}
// Go implements Conn.
func (c *conn) Go(ctx context.Context, handler Handler) {
go c.run(ctx, handler)
}
func (c *conn) run(ctx context.Context, handler Handler) {
defer close(c.done)
for {
// get the next message
msg, _, err := c.stream.Read(ctx)
if err != nil {
// The stream failed, we cannot continue.
c.fail(err)
return
}
switch msg := msg.(type) {
case Request:
if err := handler(ctx, c.replier(msg), msg); err != nil {
c.fail(err)
}
case *Response:
// If method is not set, this should be a response, in which case we must
// have an id to send the response back to the caller.
c.pendingMu.Lock()
rchan, ok := c.pending[msg.id]
c.pendingMu.Unlock()
if ok {
rchan <- msg
}
}
}
}
// Close implements Conn.
func (c *conn) Close() error {
return c.stream.Close()
}
// Done implements Conn.
func (c *conn) Done() <-chan struct{} {
return c.done
}
// Err implements Conn.
func (c *conn) Err() error {
if err := c.err.Load(); err != nil {
return err.(error)
}
return nil
}
// fail sets a failure condition on the stream and closes it.
func (c *conn) fail(err error) {
c.err.Store(err)
c.stream.Close()
}

70
vendor/go.lsp.dev/jsonrpc2/errors.go vendored Normal file
View File

@@ -0,0 +1,70 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"errors"
"fmt"
"github.com/segmentio/encoding/json"
)
// Error represents a JSON-RPC error.
type Error struct {
// Code a number indicating the error type that occurred.
Code Code `json:"code"`
// Message a string providing a short description of the error.
Message string `json:"message"`
// Data a Primitive or Structured value that contains additional
// information about the error. Can be omitted.
Data *json.RawMessage `json:"data,omitempty"`
}
// compile time check whether the Error implements error interface.
var _ error = (*Error)(nil)
// Error implements error.Error.
func (e *Error) Error() string {
if e == nil {
return ""
}
return e.Message
}
// Unwrap implements errors.Unwrap.
//
// Returns the error underlying the receiver, which may be nil.
func (e *Error) Unwrap() error { return errors.New(e.Message) }
// NewError builds an Error struct for the supplied code and message.
func NewError(c Code, message string) *Error {
return &Error{
Code: c,
Message: message,
}
}
// Errorf builds an Error struct for the supplied code, format, and args.
func Errorf(c Code, format string, args ...interface{}) *Error {
return &Error{
Code: c,
Message: fmt.Sprintf(format, args...),
}
}
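// For example (sketch, not upstream code): a caller can recover the code from
// a returned error with errors.As:
//
//	err := Errorf(InvalidParams, "missing %q parameter", "textDocument")
//	var rpcErr *Error
//	if errors.As(err, &rpcErr) {
//		// rpcErr.Code == InvalidParams
//	}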
// constErr represents an error constant.
type constErr string
// compile time check whether the constErr implements error interface.
var _ error = (*constErr)(nil)
// Error implements error.Error.
func (e constErr) Error() string { return string(e) }
const (
// ErrIdleTimeout is returned when serving timed out waiting for new connections.
ErrIdleTimeout = constErr("timed out waiting for new connections")
)

120
vendor/go.lsp.dev/jsonrpc2/handler.go vendored Normal file
View File

@@ -0,0 +1,120 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"context"
"fmt"
"sync"
)
// Handler is invoked to handle incoming requests.
//
// The Replier sends a reply to the request and must be called exactly once.
type Handler func(ctx context.Context, reply Replier, req Request) error
// Replier is passed to handlers to allow them to reply to the request.
//
// If err is set then result will be ignored.
type Replier func(ctx context.Context, result interface{}, err error) error
// MethodNotFoundHandler is a Handler that replies to all call requests with the
// standard method not found response.
//
// This should normally be the final handler in a chain.
func MethodNotFoundHandler(ctx context.Context, reply Replier, req Request) error {
return reply(ctx, nil, fmt.Errorf("%q: %w", req.Method(), ErrMethodNotFound))
}
// ReplyHandler creates a Handler that panics if the wrapped handler does
// not call Reply for every request that it is passed.
func ReplyHandler(handler Handler) (h Handler) {
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
called := false
err := handler(ctx, func(ctx context.Context, result interface{}, err error) error {
if called {
panic(fmt.Errorf("request %q replied to more than once", req.Method()))
}
called = true
return reply(ctx, result, err)
}, req)
if !called {
panic(fmt.Errorf("request %q was never replied to", req.Method()))
}
return err
})
return h
}
// CancelHandler returns a handler that supports cancellation, and a function
// that can be used to cancel in-progress requests.
func CancelHandler(handler Handler) (h Handler, canceller func(id ID)) {
var mu sync.Mutex
handling := make(map[ID]context.CancelFunc)
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
if call, ok := req.(*Call); ok {
cancelCtx, cancel := context.WithCancel(ctx)
ctx = cancelCtx
mu.Lock()
handling[call.ID()] = cancel
mu.Unlock()
innerReply := reply
reply = func(ctx context.Context, result interface{}, err error) error {
mu.Lock()
delete(handling, call.ID())
mu.Unlock()
return innerReply(ctx, result, err)
}
}
return handler(ctx, reply, req)
})
canceller = func(id ID) {
mu.Lock()
cancel, found := handling[id]
mu.Unlock()
if found {
cancel()
}
}
return h, canceller
}
// AsyncHandler returns a handler that processes each request in its own
// goroutine.
//
// The handler returns immediately, without the request being processed.
// Each request then waits for the previous request to finish before it starts.
//
// This allows the stream to unblock at the cost of unbounded goroutines
// all stalled on the previous one.
func AsyncHandler(handler Handler) (h Handler) {
nextRequest := make(chan struct{})
close(nextRequest)
h = Handler(func(ctx context.Context, reply Replier, req Request) error {
waitForPrevious := nextRequest
nextRequest = make(chan struct{})
unlockNext := nextRequest
innerReply := reply
reply = func(ctx context.Context, result interface{}, err error) error {
close(unlockNext)
return innerReply(ctx, result, err)
}
go func() {
<-waitForPrevious
_ = handler(ctx, reply, req)
}()
return nil
})
return h
}
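// One possible composition of the wrappers above (sketch, not upstream code;
// myHandler and conn are placeholders):
//
//	handler, canceller := CancelHandler(myHandler)
//	conn.Go(ctx, AsyncHandler(handler))
//	// canceller(id) cancels the context of an in-flight call with that id.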

View File

@@ -0,0 +1,7 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
// Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
//
// https://www.jsonrpc.org/specification
package jsonrpc2 // import "go.lsp.dev/jsonrpc2"

358
vendor/go.lsp.dev/jsonrpc2/message.go vendored Normal file
View File

@@ -0,0 +1,358 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bytes"
"errors"
"fmt"
"github.com/segmentio/encoding/json"
)
// Message is the interface to all JSON-RPC message types.
//
// They share no common functionality, but are a closed set of concrete types
// that are allowed to implement this interface.
//
// The message types are *Call, *Response and *Notification.
type Message interface {
// jsonrpc2Message is used to make the set of message implementations a
// closed set.
jsonrpc2Message()
}
// Request is the shared interface to jsonrpc2 messages that request
// a method be invoked.
//
// The request types are a closed set of *Call and *Notification.
type Request interface {
Message
// Method is a string containing the method name to invoke.
Method() string
// Params is either a struct or an array with the parameters of the method.
Params() json.RawMessage
// jsonrpc2Request is used to make the set of request implementations closed.
jsonrpc2Request()
}
// Call is a request that expects a response.
//
// The response will have a matching ID.
type Call struct {
// Method is a string containing the method name to invoke.
method string
// Params is either a struct or an array with the parameters of the method.
params json.RawMessage
// id of this request, used to tie the Response back to the request.
id ID
}
// make sure a Call implements the Request, json.Marshaler and json.Unmarshaler interfaces.
var (
_ Request = (*Call)(nil)
_ json.Marshaler = (*Call)(nil)
_ json.Unmarshaler = (*Call)(nil)
)
// NewCall constructs a new Call message for the supplied ID, method and
// parameters.
func NewCall(id ID, method string, params interface{}) (*Call, error) {
p, merr := marshalInterface(params)
req := &Call{
id: id,
method: method,
params: p,
}
return req, merr
}
// ID returns the current call id.
func (c *Call) ID() ID { return c.id }
// Method implements Request.
func (c *Call) Method() string { return c.method }
// Params implements Request.
func (c *Call) Params() json.RawMessage { return c.params }
// jsonrpc2Message implements Request.
func (Call) jsonrpc2Message() {}
// jsonrpc2Request implements Request.
func (Call) jsonrpc2Request() {}
// MarshalJSON implements json.Marshaler.
func (c Call) MarshalJSON() ([]byte, error) {
req := wireRequest{
Method: c.method,
Params: &c.params,
ID: &c.id,
}
data, err := json.Marshal(req)
if err != nil {
return data, fmt.Errorf("marshaling call: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (c *Call) UnmarshalJSON(data []byte) error {
var req wireRequest
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&req); err != nil {
return fmt.Errorf("unmarshaling call: %w", err)
}
c.method = req.Method
if req.Params != nil {
c.params = *req.Params
}
if req.ID != nil {
c.id = *req.ID
}
return nil
}
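A hedged sketch, not part of the vendored file, of building and encoding a Call; the method name and params are illustrative, NewNumberID is defined in wire.go further below, and the printed wire form follows from the wireRequest layout there.

// Hypothetical example file.
package jsonrpc2_test

import (
	"fmt"

	"github.com/segmentio/encoding/json"

	"go.lsp.dev/jsonrpc2"
)

func ExampleNewCall() {
	call, err := jsonrpc2.NewCall(jsonrpc2.NewNumberID(1), "initialize", map[string]interface{}{"processId": nil})
	if err != nil {
		panic(err)
	}
	data, _ := json.Marshal(call)
	fmt.Println(string(data))
	// Prints roughly: {"jsonrpc":"2.0","method":"initialize","params":{"processId":null},"id":1}
}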
// Response is a reply to a Request.
//
// It will have the same ID as the call it is a response to.
type Response struct {
// result is the content of the response.
result json.RawMessage
// err is set only if the call failed.
err error
// ID of the request this is a response to.
id ID
}
// make sure a Response implements the Message, json.Marshaler and json.Unmarshaler interfaces.
var (
_ Message = (*Response)(nil)
_ json.Marshaler = (*Response)(nil)
_ json.Unmarshaler = (*Response)(nil)
)
// NewResponse constructs a new Response message that is a reply to the request
// with the supplied ID. If err is set, result may be ignored.
func NewResponse(id ID, result interface{}, err error) (*Response, error) {
r, merr := marshalInterface(result)
resp := &Response{
id: id,
result: r,
err: err,
}
return resp, merr
}
// ID returns the current response id.
func (r *Response) ID() ID { return r.id }
// Result returns the Response result.
func (r *Response) Result() json.RawMessage { return r.result }
// Err returns the Response error.
func (r *Response) Err() error { return r.err }
// jsonrpc2Message implements Message.
func (r *Response) jsonrpc2Message() {}
// MarshalJSON implements json.Marshaler.
func (r Response) MarshalJSON() ([]byte, error) {
resp := &wireResponse{
Error: toError(r.err),
ID: &r.id,
}
if resp.Error == nil {
resp.Result = &r.result
}
data, err := json.Marshal(resp)
if err != nil {
return data, fmt.Errorf("marshaling notification: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (r *Response) UnmarshalJSON(data []byte) error {
var resp wireResponse
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&resp); err != nil {
return fmt.Errorf("unmarshaling jsonrpc response: %w", err)
}
if resp.Result != nil {
r.result = *resp.Result
}
if resp.Error != nil {
r.err = resp.Error
}
if resp.ID != nil {
r.id = *resp.ID
}
return nil
}
func toError(err error) *Error {
if err == nil {
// no error, the response is complete
return nil
}
var wrapped *Error
if errors.As(err, &wrapped) {
// already a wire error, just use it
return wrapped
}
result := &Error{Message: err.Error()}
if errors.As(err, &wrapped) {
// if we wrapped a wire error, keep the code from the wrapped error
// but the message from the outer error
result.Code = wrapped.Code
}
return result
}
// Notification is a request for which a response cannot occur, and as such
// it has no ID.
type Notification struct {
// Method is a string containing the method name to invoke.
method string
params json.RawMessage
}
// make sure a Notification implements the Request, json.Marshaler and json.Unmarshaler interfaces.
var (
_ Request = (*Notification)(nil)
_ json.Marshaler = (*Notification)(nil)
_ json.Unmarshaler = (*Notification)(nil)
)
// NewNotification constructs a new Notification message for the supplied
// method and parameters.
func NewNotification(method string, params interface{}) (*Notification, error) {
p, merr := marshalInterface(params)
notify := &Notification{
method: method,
params: p,
}
return notify, merr
}
// Method implements Request.
func (n *Notification) Method() string { return n.method }
// Params implements Request.
func (n *Notification) Params() json.RawMessage { return n.params }
// jsonrpc2Message implements Request.
func (Notification) jsonrpc2Message() {}
// jsonrpc2Request implements Request.
func (Notification) jsonrpc2Request() {}
// MarshalJSON implements json.Marshaler.
func (n Notification) MarshalJSON() ([]byte, error) {
req := wireRequest{
Method: n.method,
Params: &n.params,
}
data, err := json.Marshal(req)
if err != nil {
return data, fmt.Errorf("marshaling notification: %w", err)
}
return data, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (n *Notification) UnmarshalJSON(data []byte) error {
var req wireRequest
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&req); err != nil {
return fmt.Errorf("unmarshaling notification: %w", err)
}
n.method = req.Method
if req.Params != nil {
n.params = *req.Params
}
return nil
}
// DecodeMessage decodes data to Message.
func DecodeMessage(data []byte) (Message, error) {
var msg combined
dec := json.NewDecoder(bytes.NewReader(data))
dec.ZeroCopy()
if err := dec.Decode(&msg); err != nil {
return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err)
}
if msg.Method == "" {
// no method, should be a response
if msg.ID == nil {
return nil, ErrInvalidRequest
}
resp := &Response{
id: *msg.ID,
}
if msg.Error != nil {
resp.err = msg.Error
}
if msg.Result != nil {
resp.result = *msg.Result
}
return resp, nil
}
// has a method, must be a request
if msg.ID == nil {
// request with no ID is a notify
notify := &Notification{
method: msg.Method,
}
if msg.Params != nil {
notify.params = *msg.Params
}
return notify, nil
}
// request with an ID, must be a call
call := &Call{
method: msg.Method,
id: *msg.ID,
}
if msg.Params != nil {
call.params = *msg.Params
}
return call, nil
}
// marshalInterface marshals obj to a json.RawMessage.
func marshalInterface(obj interface{}) (json.RawMessage, error) {
data, err := json.Marshal(obj)
if err != nil {
return json.RawMessage{}, fmt.Errorf("failed to marshal json: %w", err)
}
return json.RawMessage(data), nil
}
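A hedged sketch, not part of the vendored file, of consuming DecodeMessage and switching on the closed set of concrete message types documented above; the sample payload is illustrative.

// Hypothetical example file.
package jsonrpc2_test

import (
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

func ExampleDecodeMessage() {
	msg, err := jsonrpc2.DecodeMessage([]byte(`{"jsonrpc":"2.0","id":2,"method":"shutdown"}`))
	if err != nil {
		panic(err)
	}
	switch m := msg.(type) {
	case *jsonrpc2.Call:
		fmt.Println("call:", m.Method(), m.ID())
	case *jsonrpc2.Notification:
		fmt.Println("notification:", m.Method())
	case *jsonrpc2.Response:
		fmt.Println("response to:", m.ID())
	}
	// Prints: call: shutdown 2
}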

129
vendor/go.lsp.dev/jsonrpc2/serve.go vendored Normal file
View File

@ -0,0 +1,129 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"context"
"fmt"
"net"
"os"
"time"
)
// NOTE: This file provides an experimental API for serving multiple remote
// jsonrpc2 clients over the network. For now, it is intentionally similar to
// net/http, but that may change in the future as we figure out the correct
// semantics.
// StreamServer is used to serve incoming jsonrpc2 clients communicating over
// a newly created connection.
type StreamServer interface {
ServeStream(context.Context, Conn) error
}
// ServerFunc is an adapter that implements the StreamServer interface
// using an ordinary function.
type ServerFunc func(context.Context, Conn) error
// ServeStream implements StreamServer.
//
// ServeStream calls f(ctx, s).
func (f ServerFunc) ServeStream(ctx context.Context, c Conn) error {
return f(ctx, c)
}
// HandlerServer returns a StreamServer that handles incoming streams using the
// provided handler.
func HandlerServer(h Handler) StreamServer {
return ServerFunc(func(ctx context.Context, conn Conn) error {
conn.Go(ctx, h)
<-conn.Done()
return conn.Err()
})
}
// ListenAndServe starts a jsonrpc2 server on the given address.
//
// If idleTimeout is non-zero, ListenAndServe exits after there are no clients for
// this duration, otherwise it exits only on error.
func ListenAndServe(ctx context.Context, network, addr string, server StreamServer, idleTimeout time.Duration) error {
ln, err := net.Listen(network, addr)
if err != nil {
return fmt.Errorf("failed to listen %s:%s: %w", network, addr, err)
}
defer ln.Close()
if network == "unix" {
defer os.Remove(addr)
}
return Serve(ctx, ln, server, idleTimeout)
}
// Serve accepts incoming connections from the network, and handles them using
// the provided server. If idleTimeout is non-zero, Serve exits after
// there are no clients for this duration, otherwise it exits only on error.
func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeout time.Duration) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Max duration: ~290 years; surely that's long enough.
const forever = 1<<63 - 1
if idleTimeout <= 0 {
idleTimeout = forever
}
connTimer := time.NewTimer(idleTimeout)
newConns := make(chan net.Conn)
doneListening := make(chan error)
closedConns := make(chan error)
go func() {
for {
nc, err := ln.Accept()
if err != nil {
select {
case doneListening <- fmt.Errorf("accept: %w", err):
case <-ctx.Done():
}
return
}
newConns <- nc
}
}()
activeConns := 0
for {
select {
case netConn := <-newConns:
activeConns++
connTimer.Stop()
stream := NewStream(netConn)
go func() {
conn := NewConn(stream)
closedConns <- server.ServeStream(ctx, conn)
stream.Close()
}()
case err := <-doneListening:
return err
case <-closedConns:
// if !isClosingError(err) {
// }
activeConns--
if activeConns == 0 {
connTimer.Reset(idleTimeout)
}
case <-connTimer.C:
return ErrIdleTimeout
case <-ctx.Done():
return ctx.Err()
}
}
}
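A hedged sketch, not part of the vendored file, of wiring the serving pieces above together; the network address and timeout are illustrative, not prescribed.

// Hypothetical example file.
package jsonrpc2_test

import (
	"context"
	"time"

	"go.lsp.dev/jsonrpc2"
)

// serveMethodNotFound listens on a TCP address and answers every call with the
// standard "method not found" error, exiting after one minute with no clients.
func serveMethodNotFound(ctx context.Context) error {
	server := jsonrpc2.HandlerServer(jsonrpc2.MethodNotFoundHandler)
	return jsonrpc2.ListenAndServe(ctx, "tcp", "127.0.0.1:4389", server, time.Minute)
}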

226
vendor/go.lsp.dev/jsonrpc2/stream.go vendored Normal file
View File

@ -0,0 +1,226 @@
// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"bufio"
"context"
stdjson "encoding/json"
"fmt"
"io"
"strconv"
"strings"
"github.com/segmentio/encoding/json"
)
const (
// HdrContentLength is the HTTP header name of the length of the content part in bytes. This header is required.
// This entity header indicates the size of the entity-body, in bytes, sent to the recipient.
//
// RFC 7230, section 3.3.2: Content-Length:
// https://tools.ietf.org/html/rfc7230#section-3.3.2
HdrContentLength = "Content-Length"
// HdrContentType is the mime type of the content part. Defaults to "application/vscode-jsonrpc; charset=utf-8".
// This entity header is used to indicate the media type of the resource.
//
// RFC 7231, section 3.1.1.5: Content-Type:
// https://tools.ietf.org/html/rfc7231#section-3.1.1.5
HdrContentType = "Content-Type"
// HdrContentSeparator is the header and content part separator.
HdrContentSeparator = "\r\n\r\n"
)
// Framer wraps a network connection up into a Stream.
//
// It is responsible for the framing and encoding of messages into wire form.
// NewRawStream and NewStream are implementations of a Framer.
type Framer func(conn io.ReadWriteCloser) Stream
// Stream abstracts the transport mechanics from the JSON RPC protocol.
//
// A Conn reads and writes messages using the stream it was provided on
// construction, and assumes that each call to Read or Write fully transfers
// a single message, or returns an error.
//
// A stream is not safe for concurrent use; it is expected that it will be used by
// a single Conn in a safe manner.
type Stream interface {
// Read gets the next message from the stream.
Read(context.Context) (Message, int64, error)
// Write sends a message to the stream.
Write(context.Context, Message) (int64, error)
// Close closes the connection.
// Any blocked Read or Write operations will be unblocked and return errors.
Close() error
}
type rawStream struct {
conn io.ReadWriteCloser
in *stdjson.Decoder
}
// NewRawStream returns a Stream built on top of an io.ReadWriteCloser.
//
// The messages are sent with no wrapping, and rely on json decode consistency
// to determine message boundaries.
func NewRawStream(conn io.ReadWriteCloser) Stream {
return &rawStream{
conn: conn,
in: stdjson.NewDecoder(conn), // TODO(zchee): why test fail using segmentio json.Decoder?
}
}
// Read implements Stream.Read.
func (s *rawStream) Read(ctx context.Context) (Message, int64, error) {
select {
case <-ctx.Done():
return nil, 0, ctx.Err()
default:
}
var raw stdjson.RawMessage
if err := s.in.Decode(&raw); err != nil {
return nil, 0, fmt.Errorf("decoding raw message: %w", err)
}
msg, err := DecodeMessage(raw)
return msg, int64(len(raw)), err
}
// Write implements Stream.Write.
func (s *rawStream) Write(ctx context.Context, msg Message) (int64, error) {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
data, err := json.Marshal(msg)
if err != nil {
return 0, fmt.Errorf("marshaling message: %w", err)
}
n, err := s.conn.Write(data)
if err != nil {
return 0, fmt.Errorf("write to stream: %w", err)
}
return int64(n), nil
}
// Close implements Stream.Close.
func (s *rawStream) Close() error {
return s.conn.Close()
}
type stream struct {
conn io.ReadWriteCloser
in *bufio.Reader
}
// NewStream returns a Stream built on top of an io.ReadWriteCloser.
//
// The messages are sent with HTTP content length and MIME type headers.
// This is the format used by LSP and others.
func NewStream(conn io.ReadWriteCloser) Stream {
return &stream{
conn: conn,
in: bufio.NewReader(conn),
}
}
// Read implements Stream.Read.
func (s *stream) Read(ctx context.Context) (Message, int64, error) {
select {
case <-ctx.Done():
return nil, 0, ctx.Err()
default:
}
var total int64
var length int64
// read the header, stop on the first empty line
for {
line, err := s.in.ReadString('\n')
total += int64(len(line))
if err != nil {
return nil, total, fmt.Errorf("failed reading header line: %w", err)
}
line = strings.TrimSpace(line)
// check we have a header line
if line == "" {
break
}
colon := strings.IndexRune(line, ':')
if colon < 0 {
return nil, total, fmt.Errorf("invalid header line %q", line)
}
name, value := line[:colon], strings.TrimSpace(line[colon+1:])
switch name {
case HdrContentLength:
if length, err = strconv.ParseInt(value, 10, 32); err != nil {
return nil, total, fmt.Errorf("failed parsing %s: %v: %w", HdrContentLength, value, err)
}
if length <= 0 {
return nil, total, fmt.Errorf("invalid %s: %v", HdrContentLength, length)
}
default:
// ignoring unknown headers
}
}
if length == 0 {
return nil, total, fmt.Errorf("missing %s header", HdrContentLength)
}
data := make([]byte, length)
if _, err := io.ReadFull(s.in, data); err != nil {
return nil, total, fmt.Errorf("read full of data: %w", err)
}
total += length
msg, err := DecodeMessage(data)
return msg, total, err
}
// Write implements Stream.Write.
func (s *stream) Write(ctx context.Context, msg Message) (int64, error) {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
data, err := json.Marshal(msg)
if err != nil {
return 0, fmt.Errorf("marshaling message: %w", err)
}
n, err := fmt.Fprintf(s.conn, "%s: %v%s", HdrContentLength, len(data), HdrContentSeparator)
total := int64(n)
if err != nil {
return 0, fmt.Errorf("write data to conn: %w", err)
}
n, err = s.conn.Write(data)
total += int64(n)
if err != nil {
return 0, fmt.Errorf("write data to conn: %w", err)
}
return total, nil
}
// Close implements Stream.Close.
func (s *stream) Close() error {
return s.conn.Close()
}
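A hedged sketch, not part of the vendored file, of the Content-Length framing produced by NewStream, exercised over an in-memory net.Pipe; the notification method and empty params are illustrative.

// Hypothetical example file.
package jsonrpc2_test

import (
	"context"
	"net"

	"go.lsp.dev/jsonrpc2"
)

// headerFramingRoundTrip writes one notification through a header-framed
// stream ("Content-Length: <n>\r\n\r\n" followed by the JSON body) and reads it back.
func headerFramingRoundTrip() (jsonrpc2.Message, error) {
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()

	go func() {
		msg, _ := jsonrpc2.NewNotification("initialized", struct{}{})
		_, _ = jsonrpc2.NewStream(c1).Write(context.Background(), msg)
	}()

	msg, _, err := jsonrpc2.NewStream(c2).Read(context.Background())
	return msg, err
}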

140
vendor/go.lsp.dev/jsonrpc2/wire.go vendored Normal file
View File

@ -0,0 +1,140 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package jsonrpc2
import (
"fmt"
"github.com/segmentio/encoding/json"
)
// Version represents a JSON-RPC version.
const Version = "2.0"
// version is a special 0 sized struct that encodes as the jsonrpc version tag.
//
// It will fail during decode if it is not the correct version tag in the stream.
type version struct{}
// compile time check whether the version implements the json.Marshaler and json.Unmarshaler interfaces.
var (
_ json.Marshaler = (*version)(nil)
_ json.Unmarshaler = (*version)(nil)
)
// MarshalJSON implements json.Marshaler.
func (version) MarshalJSON() ([]byte, error) {
return json.Marshal(Version)
}
// UnmarshalJSON implements json.Unmarshaler.
func (version) UnmarshalJSON(data []byte) error {
version := ""
if err := json.Unmarshal(data, &version); err != nil {
return fmt.Errorf("failed to Unmarshal: %w", err)
}
if version != Version {
return fmt.Errorf("invalid RPC version %v", version)
}
return nil
}
// ID is a Request identifier.
//
// Only one of either the Name or Number members will be set, using the
// number form if the Name is the empty string.
type ID struct {
name string
number int32
}
// compile time check whether the ID implements the fmt.Formatter, json.Marshaler and json.Unmarshaler interfaces.
var (
_ fmt.Formatter = (*ID)(nil)
_ json.Marshaler = (*ID)(nil)
_ json.Unmarshaler = (*ID)(nil)
)
// NewNumberID returns a new number request ID.
func NewNumberID(v int32) ID { return ID{number: v} }
// NewStringID returns a new string request ID.
func NewStringID(v string) ID { return ID{name: v} }
// Format writes the ID to the formatter.
//
// If the rune is 'q', the representation is unambiguous:
// string forms are quoted, number forms are preceded by a #.
func (id ID) Format(f fmt.State, r rune) {
numF, strF := `%d`, `%s`
if r == 'q' {
numF, strF = `#%d`, `%q`
}
switch {
case id.name != "":
fmt.Fprintf(f, strF, id.name)
default:
fmt.Fprintf(f, numF, id.number)
}
}
// MarshalJSON implements json.Marshaler.
func (id *ID) MarshalJSON() ([]byte, error) {
if id.name != "" {
return json.Marshal(id.name)
}
return json.Marshal(id.number)
}
// UnmarshalJSON implements json.Unmarshaler.
func (id *ID) UnmarshalJSON(data []byte) error {
*id = ID{}
if err := json.Unmarshal(data, &id.number); err == nil {
return nil
}
return json.Unmarshal(data, &id.name)
}
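A hedged sketch, not part of the vendored file, of the two ID forms and the %q representation described above.

// Hypothetical example file.
package jsonrpc2_test

import (
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

func ExampleID_Format() {
	fmt.Printf("%v %q\n", jsonrpc2.NewNumberID(7), jsonrpc2.NewNumberID(7))
	fmt.Printf("%v %q\n", jsonrpc2.NewStringID("seven"), jsonrpc2.NewStringID("seven"))
	// Prints:
	// 7 #7
	// seven "seven"
}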
// wireRequest is sent to a server to represent a Call or Notify operation.
type wireRequest struct {
// VersionTag is always encoded as the string "2.0"
VersionTag version `json:"jsonrpc"`
// Method is a string containing the method name to invoke.
Method string `json:"method"`
// Params is either a struct or an array with the parameters of the method.
Params *json.RawMessage `json:"params,omitempty"`
// The id of this request, used to tie the Response back to the request.
// Will be either a string or a number. If not set, the Request is a notify,
// and no response is possible.
ID *ID `json:"id,omitempty"`
}
// wireResponse is a reply to a Request.
//
// It will always have the ID field set to tie it back to a request, and will
// have either the Result or Error fields set depending on whether it is a
// success or failure wireResponse.
type wireResponse struct {
// VersionTag is always encoded as the string "2.0"
VersionTag version `json:"jsonrpc"`
// Result is the response value, and is required on success.
Result *json.RawMessage `json:"result,omitempty"`
// Error is a structured error response if the call fails.
Error *Error `json:"error,omitempty"`
// ID must be set and is the identifier of the Request this is a response to.
ID *ID `json:"id,omitempty"`
}
// combined has all the fields of both Request and Response.
//
// We can decode this and then work out which it is.
type combined struct {
VersionTag version `json:"jsonrpc"`
ID *ID `json:"id,omitempty"`
Method string `json:"method"`
Params *json.RawMessage `json:"params,omitempty"`
Result *json.RawMessage `json:"result,omitempty"`
Error *Error `json:"error,omitempty"`
}

29
vendor/go.lsp.dev/pkg/LICENSE vendored Normal file
View File

@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2020, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,22 @@
// Copyright 2020 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
// Package xcontext is a package to offer the extra functionality we need
// from contexts that is not available from the standard context package.
package xcontext
import (
"context"
"time"
)
// Detach returns a context that keeps all the values of its parent context
// but detaches from the cancellation and error handling.
func Detach(ctx context.Context) context.Context { return detachedContext{ctx} }
type detachedContext struct{ parent context.Context }
func (v detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (v detachedContext) Done() <-chan struct{} { return nil }
func (v detachedContext) Err() error { return nil }
func (v detachedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }
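A hedged sketch, not part of the vendored file, of why Detach exists: finishing work that must outlive the caller's cancellation while still reading request-scoped values. The import path is an assumption based on the vendor layout.

// Hypothetical example file.
package xcontext_test

import (
	"context"

	"go.lsp.dev/pkg/xcontext"
)

// finishInBackground keeps running after the parent ctx is cancelled, but can
// still read values (e.g. loggers, trace IDs) stored in the parent.
func finishInBackground(ctx context.Context, work func(context.Context)) {
	detached := xcontext.Detach(ctx)
	go work(detached) // detached.Done() is nil, so cancellation never propagates
}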

45
vendor/go.lsp.dev/protocol/.codecov.yml vendored Normal file
View File

@ -0,0 +1,45 @@
codecov:
allow_coverage_offsets: true
parsers:
go:
partials_as_hits: true
coverage:
precision: 1
round: down
range: "70...100"
status:
default_rules:
flag_coverage_not_uploaded_behavior: include
project:
default:
target: auto
threshold: 1%
if_not_found: success
if_ci_failed: error
patch:
default:
only_pulls: true
target: 50%
threshold: 10%
changes:
default:
target: auto
threshold: 10%
if_not_found: success
if_ci_failed: error
branches:
- main
comment:
behavior: default
require_changes: true
show_carryforward_flags: true
github_checks:
annotations: true

View File

@ -0,0 +1,11 @@
# go.lsp.dev/protocol project gitattributes file
# https://github.com/github/linguist#using-gitattributes
# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
# To prevent CRLF breakages on Windows for fragile files, like testdata.
* -text
docs/ linguist-documentation
*.pb.go linguist-generated
*_gen.go linguist-generated
*_string.go linguist-generated

52
vendor/go.lsp.dev/protocol/.gitignore vendored Normal file
View File

@ -0,0 +1,52 @@
# go.lsp.dev/protocol project generated files to ignore
# if you want to ignore files created by your editor/tools,
# please consider a global .gitignore https://help.github.com/articles/ignoring-files
# please do not open a pull request to add something created by your editor or tools
# github/gitignore/Go.gitignore
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
vendor/
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
# cgo generated
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
# test generated
_testmain.go
# profile
*.pprof
# coverage
coverage.*
# tools
tools/bin/

242
vendor/go.lsp.dev/protocol/.golangci.yml vendored Normal file
View File

@ -0,0 +1,242 @@
# https://golangci-lint.run/usage/configuration/
# https://github.com/golangci/golangci-lint/blob/master/pkg/config/linters_settings.go
---
run:
timeout: 1m
issues-exit-code: 1
tests: true
skip-dirs: []
skip-dirs-use-default: true
skip-files: []
allow-parallel-runners: true
output:
format: colored-line-number
print-issued-lines: true
print-linter-name: true
uniq-by-line: true
sort-results: true
linters-settings:
depguard:
list-type: blacklist
include-go-root: false
dupl:
threshold: 150
errcheck:
check-type-assertions: true
check-blank: true
# exclude: .errcheckignore
errorlint:
errorf: true
asserts: true
comparison: true
funlen:
lines: 100
statements: 60
gocognit:
min-complexity: 30
goconst:
min-len: 3
min-occurrences: 3
gocritic:
enabled-tags:
- diagnostic
- experimental
- opinionated
- performance
- style
disabled-checks:
- commentedOutCode
- redundantSprint
- whyNoLint
settings:
hugeParam:
sizeThreshold: 80
rangeExprCopy:
sizeThreshold: 512
rangeValCopy:
sizeThreshold: 128
gocyclo:
min-complexity: 30
godot:
scope: declarations
capital: false
gofmt:
simplify: true
gofumpt:
extra-rules: true
goheader:
values:
const:
AUTHOR: Go Language Server
regexp:
YEAR: '20\d\d'
template: |-
SPDX-FileCopyrightText: {{ YEAR }} The {{ AUTHOR }} Authors
SPDX-License-Identifier: BSD-3-Clause
goimports:
local-prefixes: go.lsp.dev/protocol
gosimple:
go: 1.16
govet:
enable-all: true
check-shadowing: true
disable:
- fieldalignment
importas:
alias: []
no-unaliased: true
lll:
line-length: 120
tab-width: 1
misspell:
locale: US
ignore-words:
- cancelled
- cancelling
nakedret:
max-func-lines: 30
nestif:
min-complexity: 4
prealloc:
simple: true
range-loops: true
for-loops: true
staticcheck:
go: 1.16
testpackage:
skip-regexp: '.*(export)_test\.go'
unparam:
check-exported: true
algo: cha
unused:
go: 1.16
whitespace:
multi-if: true
multi-func: true
linters:
fast: false
disabled:
- exhaustivestruct # Checks if all struct's fields are initialized
- forbidigo # Forbids identifiers
- forcetypeassert # finds forced type assertions
- gci # Gci control golang package import order and make it always deterministic.
- gochecknoglobals # check that no global variables exist
- gochecknoinits # Checks that no init functions are present in Go code
- goconst # Finds repeated strings that could be replaced by a constant
- godox # Tool for detection of FIXME, TODO and other comment keywords
- goerr113 # Golang linter to check the errors handling expressions
- golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes
- gomnd # An analyzer to detect magic numbers.
- gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
- gomodguard # Allow and block list linter for direct Go module dependencies.
- interfacer # Linter that suggests narrower interface types
- lll # Reports long lines
- maligned # Tool to detect Go structs that would take less memory if their fields were sorted
- promlinter # Check Prometheus metrics naming via promlint
- scopelint # Scopelint checks for unpinned variables in go programs
- sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed.
- testpackage # TODO(zchee): enable: # linter that makes you use a separate _test package
- tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes
- wrapcheck # TODO(zchee): enable: # Checks that errors returned from external packages are wrapped
- wsl # Whitespace Linter
enable:
- asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers
- bodyclose # checks whether HTTP response body is closed successfully
- cyclop # checks function and package cyclomatic complexity
- deadcode # Finds unused code
- depguard # Go linter that checks if package imports are in a list of acceptable packages
- dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
- dupl # Tool for code clone detection
- durationcheck # check for two durations multiplied together
- errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases
- errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
- exhaustive # check exhaustiveness of enum switch statements
- exportloopref # checks for pointers to enclosing loop variables
- funlen # Tool for detection of long functions
- gocognit # Computes and checks the cognitive complexity of functions
- gocritic # Provides many diagnostics that check for bugs, performance and style issues.
- gocyclo # Computes and checks the cyclomatic complexity of functions
- godot # Check if comments end in a period
- gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification
- gofumpt # Gofumpt checks whether code was gofumpt-ed.
- goheader # Checks is file header matches to pattern
- goimports # Goimports does everything that gofmt does. Additionally it checks unused imports
- goprintffuncname # Checks that printf-like functions are named with `f` at the end
- gosec # Inspects source code for security problems
- gosimple # Linter for Go source code that specializes in simplifying a code
- govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
- ifshort # Checks that your code uses short syntax for if-statements whenever possible
- importas # Enforces consistent import aliases
- ineffassign # Detects when assignments to existing variables are not used
- makezero # Finds slice declarations with non-zero initial length
- misspell # Finds commonly misspelled English words in comments
- nakedret # Finds naked returns in functions greater than a specified function length
- nestif # Reports deeply nested if statements
- nilerr # Finds the code that returns nil even if it checks that the error is not nil.
- nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity
- noctx # noctx finds sending http request without context.Context
- nolintlint # Reports ill-formed or insufficient nolint directives
- paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test
- prealloc # Finds slice declarations that could potentially be preallocated
- predeclared # find code that shadows one of Go's predeclared identifiers
- revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint.
- rowserrcheck # checks whether Err of rows is checked successfully
- staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks
- structcheck # Finds unused struct fields
- stylecheck # Stylecheck is a replacement for golint
- tagliatelle # Checks the struct tags.
- thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers
- typecheck # Like the front-end of a Go compiler, parses and type-checks Go code
- unconvert # Remove unnecessary type conversions
- unparam # Reports unused function parameters
- unused # Checks Go code for unused constants, variables, functions and types
- varcheck # Finds unused global variables and constants
- wastedassign # wastedassign finds wasted assignment statements.
- whitespace # Tool for detection of leading and trailing whitespace
issues:
max-issues-per-linter: 0
max-same-issues: 0
exclude-use-default: true
exclude-rules:
- path: _test\.go
linters:
- cyclop
- dupl
- errcheck
- funlen
- gocognit
- goconst
- gocritic
- gocyclo
- gosec
- thelper
- wrapcheck
- path: "(.*)?_example_test.go"
linters:
- gocritic
# Exclude shadow checking on the variable named err
- text: "shadow: declaration of \"(err|ok)\""
linters:
- govet
# false positive
- path: language.go
text: "deprecatedComment: the proper format is `Deprecated: <text>`"
# async
- path: handler.go
text: "Error return value of `conn.Notify` is not checked"
linters:
- errcheck
- path: log.go
text: "Error return value of `s.log.Write` is not checked"
linters:
- errcheck
- path: deprecated.go
linters:
- lll
- path: "(client|server)_json.go"
linters:
- nlreturn

29
vendor/go.lsp.dev/protocol/LICENSE vendored Normal file
View File

@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2019, The Go Language Server Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

126
vendor/go.lsp.dev/protocol/Makefile vendored Normal file
View File

@ -0,0 +1,126 @@
# -----------------------------------------------------------------------------
# global
.DEFAULT_GOAL := test
comma := ,
empty :=
space := $(empty) $(empty)
# -----------------------------------------------------------------------------
# go
GO_PATH ?= $(shell go env GOPATH)
PKG := $(subst $(GO_PATH)/src/,,$(CURDIR))
CGO_ENABLED ?= 0
GO_BUILDTAGS=osusergo,netgo,static
GO_LDFLAGS=-s -w "-extldflags=-static"
GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo
TOOLS_DIR := ${CURDIR}/tools
TOOLS_BIN := ${TOOLS_DIR}/bin
TOOLS := $(shell cd ${TOOLS_DIR} && go list -v -x -f '{{ join .Imports " " }}' -tags=tools)
GO_PKGS := ./...
GO_TEST ?= ${TOOLS_BIN}/gotestsum --
GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...)
GO_TEST_FLAGS ?= -race -count=1
GO_TEST_FUNC ?= .
GO_BENCH_FLAGS ?= -benchmem
GO_BENCH_FUNC ?= .
GO_LINT_FLAGS ?=
# Set build environment
JOBS := $(shell getconf _NPROCESSORS_CONF)
# -----------------------------------------------------------------------------
# defines
define target
@printf "+ $(patsubst ,$@,$(1))\\n" >&2
endef
# -----------------------------------------------------------------------------
# target
##@ test, bench, coverage
export GOTESTSUM_FORMAT=standard-verbose
.PHONY: test
test: CGO_ENABLED=1
test: tools/bin/gotestsum ## Runs package test including race condition.
$(call target)
@CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' ${GO_TEST_PKGS}
.PHONY: coverage
coverage: CGO_ENABLED=1
coverage: tools/bin/gotestsum ## Takes packages test coverage.
$(call target)
CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=./... -coverprofile=coverage.out $(strip ${GO_FLAGS}) ${GO_PKGS}
##@ fmt, lint
.PHONY: lint
lint: fmt lint/golangci-lint ## Run all linters.
.PHONY: fmt
fmt: tools/goimportz tools/gofumpt ## Run goimportz and gofumpt.
$(call target)
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /protocol,,$(PKG)) -w
find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w
.PHONY: lint/golangci-lint
lint/golangci-lint: tools/golangci-lint .golangci.yml ## Run golangci-lint.
$(call target)
${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./...
##@ tools
.PHONY: tools
tools: tools/bin/'' ## Install tools
tools/%: ## install an individual dependent tool
@${MAKE} tools/bin/$* 1>/dev/null
tools/bin/%: ${TOOLS_DIR}/go.mod ${TOOLS_DIR}/go.sum
@cd tools; \
for t in ${TOOLS}; do \
if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \
echo "Install $$t ..." >&2; \
GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -mod=mod ${GO_FLAGS} "$${t}"; \
fi \
done
##@ clean
.PHONY: clean
clean: ## Cleanups binaries and extra files in the package.
$(call target)
@rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN}
##@ miscellaneous
.PHONY: todo
TODO: ## Print the all of (TODO|BUG|XXX|FIXME|NOTE) in packages.
@grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*')
.PHONY: nolint
nolint: ## Print the all of //nolint:... pragma in packages.
@grep -E -C 3 '//nolint.+' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*' -and -not -iwholename '*internal*')
.PHONY: env/%
env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc.
@echo $($*)
##@ help
.PHONY: help
help: ## Show this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m<target>\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

19
vendor/go.lsp.dev/protocol/README.md vendored Normal file
View File

@ -0,0 +1,19 @@
# protocol
[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga]
Package protocol implements Language Server Protocol specification in Go.
<!-- badge links -->
[circleci]: https://app.circleci.com/pipelines/github/go-language-server/protocol
[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/protocol
[module]: https://github.com/go-language-server/protocol/releases/latest
[codecov]: https://codecov.io/gh/go-language-server/protocol
[ga]: https://github.com/go-language-server/protocol
[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/protocol/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci
[pkg.go.dev-badge]: https://bit.ly/pkg-go-dev-badge
[module-badge]: https://img.shields.io/github/release/go-language-server/protocol.svg?color=007D9C&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4=
[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/protocol/main?logo=codecov&style=for-the-badge
[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/protocol?useReferer&pixel

96
vendor/go.lsp.dev/protocol/base.go vendored Normal file
View File

@ -0,0 +1,96 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"fmt"
"github.com/segmentio/encoding/json"
)
// CancelParams params of cancelRequest.
type CancelParams struct {
// ID is the request id to cancel.
ID interface{} `json:"id"` // int32 | string
}
// ProgressParams params of the Progress notification.
//
// @since 3.15.0.
type ProgressParams struct {
// Token is the progress token provided by the client or server.
Token ProgressToken `json:"token"`
// Value is the progress data.
Value interface{} `json:"value"`
}
// ProgressToken is the progress token provided by the client or server.
//
// @since 3.15.0.
type ProgressToken struct {
name string
number int32
}
// compile time check whether the ProgressToken implements the fmt.Formatter, fmt.Stringer, json.Marshaler and json.Unmarshaler interfaces.
var (
_ fmt.Formatter = (*ProgressToken)(nil)
_ fmt.Stringer = (*ProgressToken)(nil)
_ json.Marshaler = (*ProgressToken)(nil)
_ json.Unmarshaler = (*ProgressToken)(nil)
)
// NewProgressToken returns a new ProgressToken.
func NewProgressToken(s string) *ProgressToken {
return &ProgressToken{name: s}
}
// NewNumberProgressToken returns a new number ProgressToken.
func NewNumberProgressToken(n int32) *ProgressToken {
return &ProgressToken{number: n}
}
// Format writes the ProgressToken to the formatter.
//
// If the rune is 'q', the representation is unambiguous:
// string forms are quoted.
func (v ProgressToken) Format(f fmt.State, r rune) {
const numF = `%d`
strF := `%s`
if r == 'q' {
strF = `%q`
}
switch {
case v.name != "":
fmt.Fprintf(f, strF, v.name)
default:
fmt.Fprintf(f, numF, v.number)
}
}
// String returns a string representation of the ProgressToken.
func (v ProgressToken) String() string {
return fmt.Sprint(v)
}
// MarshalJSON implements json.Marshaler.
func (v *ProgressToken) MarshalJSON() ([]byte, error) {
if v.name != "" {
return json.Marshal(v.name)
}
return json.Marshal(v.number)
}
// UnmarshalJSON implements json.Unmarshaler.
func (v *ProgressToken) UnmarshalJSON(data []byte) error {
*v = ProgressToken{}
if err := json.Unmarshal(data, &v.number); err == nil {
return nil
}
return json.Unmarshal(data, &v.name)
}
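A hedged sketch, not part of the vendored file, of a $/progress-style payload built from the types above; the token text and value map are illustrative, and marshaling through a pointer keeps the pointer-receiver MarshalJSON in play.

// Hypothetical example file.
package protocol_test

import (
	"fmt"

	"github.com/segmentio/encoding/json"

	"go.lsp.dev/protocol"
)

func ExampleProgressParams() {
	params := protocol.ProgressParams{
		Token: *protocol.NewProgressToken("indexing-1"),
		Value: map[string]interface{}{"kind": "begin", "title": "indexing"},
	}
	data, _ := json.Marshal(&params)
	fmt.Println(string(data))
	// Prints roughly: {"token":"indexing-1","value":{"kind":"begin","title":"indexing"}}
}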

705
vendor/go.lsp.dev/protocol/basic.go vendored Normal file
View File

@ -0,0 +1,705 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"go.lsp.dev/uri"
)
// DocumentURI represents the URI of a document.
//
// Many of the interfaces contain fields that correspond to the URI of a document.
// For clarity, the type of such a field is declared as a DocumentURI.
// Over the wire, it will still be transferred as a string, but this guarantees
// that the contents of that string can be parsed as a valid URI.
type DocumentURI = uri.URI
// URI is a tagging interface for normal non-document URIs.
//
// @since 3.16.0.
type URI = uri.URI
// EOL lists the recognized end-of-line sequences.
var EOL = []string{"\n", "\r\n", "\r"}
// Position represents a position in a text document expressed as a zero-based line and zero-based character offset.
//
// The offsets are based on a UTF-16 string representation.
// So for a string of the form "a𐐀b", the character offset of the character "a" is 0,
// the character offset of "𐐀" is 1 and the character offset of "b" is 3 since 𐐀 is represented using two code
// units in UTF-16.
//
// Positions are line end character agnostic. So you can not specify a position that
// denotes "\r|\n" or "\n|" where "|" represents the character offset.
//
// Position is between two characters like an "insert" cursor in an editor.
// Special values like for example "-1" to denote the end of a line are not supported.
type Position struct {
// Line position in a document (zero-based).
//
// If a line number is greater than the number of lines in a document, it defaults back to the number of lines in
// the document.
// If a line number is negative, it defaults to 0.
Line uint32 `json:"line"`
// Character offset on a line in a document (zero-based).
//
// Assuming that the line is represented as a string, the Character value represents the gap between the
// "character" and "character + 1".
//
// If the character value is greater than the line length it defaults back to the line length.
// If the character value is negative, it defaults to 0.
Character uint32 `json:"character"`
}
// Range represents a range in a text document expressed as (zero-based) start and end positions.
//
// A range is comparable to a selection in an editor. Therefore the end position is exclusive.
// If you want to specify a range that contains a line including the line ending character(s) then use an end position
// denoting the start of the next line.
type Range struct {
// Start is the range's start position.
Start Position `json:"start"`
// End is the range's end position.
End Position `json:"end"`
}
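A hedged sketch, not part of the vendored file, tying the UTF-16 offsets described above to concrete values: selecting the "𐐀" in the line "a𐐀b".

// Hypothetical example file.
package protocol_test

import "go.lsp.dev/protocol"

// selectSurrogatePair covers the "𐐀" in the line "a𐐀b": its UTF-16 offset is 1
// and, because it takes two code units, the exclusive end offset is 3.
var selectSurrogatePair = protocol.Range{
	Start: protocol.Position{Line: 0, Character: 1},
	End:   protocol.Position{Line: 0, Character: 3},
}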
// Location represents a location inside a resource, such as a line inside a text file.
type Location struct {
URI DocumentURI `json:"uri"`
Range Range `json:"range"`
}
// LocationLink represents a link between a source and a target location.
type LocationLink struct {
// OriginSelectionRange span of the origin of this link.
//
// Used as the underlined span for mouse interaction. Defaults to the word range at the mouse position.
OriginSelectionRange *Range `json:"originSelectionRange,omitempty"`
// TargetURI is the target resource identifier of this link.
TargetURI DocumentURI `json:"targetUri"`
// TargetRange is the full target range of this link.
//
// If the target for example is a symbol then target range is the range enclosing this symbol not including
// leading/trailing whitespace but everything else like comments.
//
// This information is typically used to highlight the range in the editor.
TargetRange Range `json:"targetRange"`
// TargetSelectionRange is the range that should be selected and revealed when this link is being followed,
// e.g the name of a function.
//
// Must be contained by the TargetRange. See also DocumentSymbol#range.
TargetSelectionRange Range `json:"targetSelectionRange"`
}
// Command represents a reference to a command. Provides a title which will be used to represent a command in the UI.
//
// Commands are identified by a string identifier.
// The recommended way to handle commands is to implement their execution on the server side if the client and
// server provide the corresponding capabilities.
//
// Alternatively the tool extension code could handle the command. The protocol currently doesn't specify
// a set of well-known commands.
type Command struct {
// Title of the command, like `save`.
Title string `json:"title"`
// Command is the identifier of the actual command handler.
Command string `json:"command"`
// Arguments that the command handler should be invoked with.
Arguments []interface{} `json:"arguments,omitempty"`
}
// TextEdit is a textual edit applicable to a text document.
type TextEdit struct {
// Range is the range of the text document to be manipulated.
//
// To insert text into a document create a range where start == end.
Range Range `json:"range"`
// NewText is the string to be inserted. For delete operations use an
// empty string.
NewText string `json:"newText"`
}
// ChangeAnnotation is the additional information that describes document changes.
//
// @since 3.16.0.
type ChangeAnnotation struct {
// Label a human-readable string describing the actual change.
// The string is rendered prominent in the user interface.
Label string `json:"label"`
// NeedsConfirmation is a flag which indicates that user confirmation is needed
// before applying the change.
NeedsConfirmation bool `json:"needsConfirmation,omitempty"`
// Description is a human-readable string which is rendered less prominent in
// the user interface.
Description string `json:"description,omitempty"`
}
// ChangeAnnotationIdentifier an identifier referring to a change annotation managed by a workspace
// edit.
//
// @since 3.16.0.
type ChangeAnnotationIdentifier string
// AnnotatedTextEdit is a special text edit with an additional change annotation.
//
// @since 3.16.0.
type AnnotatedTextEdit struct {
TextEdit
// AnnotationID is the actual annotation identifier.
AnnotationID ChangeAnnotationIdentifier `json:"annotationId"`
}
// TextDocumentEdit describes textual changes on a single text document.
//
// The TextDocument is referred to as an OptionalVersionedTextDocumentIdentifier to allow clients to check the
// text document version before an edit is applied.
//
// TextDocumentEdit describes all changes on a version "Si" and, after they are applied, move the document to
// version "Si+1".
// So the creator of a TextDocumentEdit doesn't need to sort the array or do any kind of ordering. However, the
// edits must be non-overlapping.
type TextDocumentEdit struct {
// TextDocument is the text document to change.
TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"`
// Edits is the edits to be applied.
//
// @since 3.16.0 - support for AnnotatedTextEdit.
// This is guarded by the client capability Workspace.WorkspaceEdit.ChangeAnnotationSupport.
Edits []TextEdit `json:"edits"` // []TextEdit | []AnnotatedTextEdit
}
// ResourceOperationKind is the file event type.
type ResourceOperationKind string
const (
// CreateResourceOperation supports creating new files and folders.
CreateResourceOperation ResourceOperationKind = "create"
// RenameResourceOperation supports renaming existing files and folders.
RenameResourceOperation ResourceOperationKind = "rename"
// DeleteResourceOperation supports deleting existing files and folders.
DeleteResourceOperation ResourceOperationKind = "delete"
)
// CreateFileOptions represents options to create a file.
type CreateFileOptions struct {
// Overwrite existing file. Overwrite wins over `ignoreIfExists`.
Overwrite bool `json:"overwrite,omitempty"`
// IgnoreIfExists ignore if exists.
IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
}
// CreateFile represents a create file operation.
type CreateFile struct {
// Kind a create.
Kind ResourceOperationKind `json:"kind"` // should be `create`
// URI is the resource to create.
URI DocumentURI `json:"uri"`
// Options additional options.
Options *CreateFileOptions `json:"options,omitempty"`
// AnnotationID an optional annotation identifier describing the operation.
//
// @since 3.16.0.
AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
}
// RenameFileOptions represents rename file options.
type RenameFileOptions struct {
// Overwrite target if existing. Overwrite wins over `ignoreIfExists`.
Overwrite bool `json:"overwrite,omitempty"`
// IgnoreIfExists ignores if target exists.
IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
}
// RenameFile represents a rename file operation.
type RenameFile struct {
// Kind a rename.
Kind ResourceOperationKind `json:"kind"` // should be `rename`
// OldURI is the old (existing) location.
OldURI DocumentURI `json:"oldUri"`
// NewURI is the new location.
NewURI DocumentURI `json:"newUri"`
// Options rename options.
Options *RenameFileOptions `json:"options,omitempty"`
// AnnotationID an optional annotation identifier describing the operation.
//
// @since 3.16.0.
AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
}
// DeleteFileOptions represents delete file options.
type DeleteFileOptions struct {
// Recursive delete the content recursively if a folder is denoted.
Recursive bool `json:"recursive,omitempty"`
// IgnoreIfNotExists ignore the operation if the file doesn't exist.
IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"`
}
// DeleteFile represents a delete file operation.
type DeleteFile struct {
// Kind is a delete.
Kind ResourceOperationKind `json:"kind"` // should be `delete`
// URI is the file to delete.
URI DocumentURI `json:"uri"`
// Options delete options.
Options *DeleteFileOptions `json:"options,omitempty"`
// AnnotationID an optional annotation identifier describing the operation.
//
// @since 3.16.0.
AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
}
// WorkspaceEdit represents changes to many resources managed in the workspace.
//
// The edit should either provide changes or documentChanges.
// If the client can handle versioned document edits and if documentChanges are present, the latter are preferred over
// changes.
type WorkspaceEdit struct {
// Changes holds changes to existing resources.
Changes map[DocumentURI][]TextEdit `json:"changes,omitempty"`
// DocumentChanges depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes
// are either an array of `TextDocumentEdit`s to express changes to n different text documents
// where each text document edit addresses a specific version of a text document. Or it can contain
// above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations.
//
// Whether a client supports versioned document edits is expressed via
// `workspace.workspaceEdit.documentChanges` client capability.
//
// If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then
// only plain `TextEdit`s using the `changes` property are supported.
DocumentChanges []TextDocumentEdit `json:"documentChanges,omitempty"`
// ChangeAnnotations is a map of change annotations that can be referenced in
// "AnnotatedTextEdit"s or create, rename and delete file / folder
// operations.
//
// Whether clients honor this property depends on the client capability
// "workspace.changeAnnotationSupport".
//
// @since 3.16.0.
ChangeAnnotations map[ChangeAnnotationIdentifier]ChangeAnnotation `json:"changeAnnotations,omitempty"`
}
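A hedged sketch, not part of the vendored file, of the simple `changes` form of a WorkspaceEdit, inserting one line at the top of a single document; the file path is illustrative, and the uri.File helper from go.lsp.dev/uri is an assumption based on the DocumentURI alias above.

// Hypothetical example file.
package protocol_test

import (
	"go.lsp.dev/protocol"
	"go.lsp.dev/uri"
)

// insertHeader inserts text at position 0:0 of one document; start == end makes
// the TextEdit a pure insertion.
var insertHeader = protocol.WorkspaceEdit{
	Changes: map[protocol.DocumentURI][]protocol.TextEdit{
		uri.File("/tmp/main.go"): {
			{
				Range:   protocol.Range{}, // zero value: start == end == 0:0
				NewText: "// Code generated by example. DO NOT EDIT.\n",
			},
		},
	},
}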
// TextDocumentIdentifier identifies a text document using a URI. On the protocol level, URIs are passed as strings.
type TextDocumentIdentifier struct {
// URI is the text document's URI.
URI DocumentURI `json:"uri"`
}
// TextDocumentItem represents an item to transfer a text document from the client to the server.
type TextDocumentItem struct {
// URI is the text document's URI.
URI DocumentURI `json:"uri"`
// LanguageID is the text document's language identifier.
LanguageID LanguageIdentifier `json:"languageId"`
// Version is the version number of this document (it will increase after each
// change, including undo/redo).
Version int32 `json:"version"`
// Text is the content of the opened text document.
Text string `json:"text"`
}
// LanguageIdentifier represents a text document's language identifier.
type LanguageIdentifier string
const (
// ABAPLanguage ABAP Language.
ABAPLanguage LanguageIdentifier = "abap"
// BatLanguage Windows Bat Language.
BatLanguage LanguageIdentifier = "bat"
// BibtexLanguage BibTeX Language.
BibtexLanguage LanguageIdentifier = "bibtex"
// ClojureLanguage Clojure Language.
ClojureLanguage LanguageIdentifier = "clojure"
// CoffeeScriptLanguage CoffeeScript Language.
CoffeeScriptLanguage LanguageIdentifier = "coffeescript"
// CLanguage C Language.
CLanguage LanguageIdentifier = "c"
// CppLanguage C++ Language.
CppLanguage LanguageIdentifier = "cpp"
// CsharpLanguage C# Language.
CsharpLanguage LanguageIdentifier = "csharp"
// CSSLanguage CSS Language.
CSSLanguage LanguageIdentifier = "css"
// DiffLanguage Diff Language.
DiffLanguage LanguageIdentifier = "diff"
// DartLanguage Dart Language.
DartLanguage LanguageIdentifier = "dart"
// DockerfileLanguage Dockerfile Language.
DockerfileLanguage LanguageIdentifier = "dockerfile"
// ElixirLanguage Elixir Language.
ElixirLanguage LanguageIdentifier = "elixir"
// ErlangLanguage Erlang Language.
ErlangLanguage LanguageIdentifier = "erlang"
// FsharpLanguage F# Language.
FsharpLanguage LanguageIdentifier = "fsharp"
// GitCommitLanguage Git Commit Language.
GitCommitLanguage LanguageIdentifier = "git-commit"
// GitRebaseLanguage Git Rebase Language.
GitRebaseLanguage LanguageIdentifier = "git-rebase"
// GoLanguage Go Language.
GoLanguage LanguageIdentifier = "go"
// GroovyLanguage Groovy Language.
GroovyLanguage LanguageIdentifier = "groovy"
// HandlebarsLanguage Handlebars Language.
HandlebarsLanguage LanguageIdentifier = "handlebars"
// HTMLLanguage HTML Language.
HTMLLanguage LanguageIdentifier = "html"
// IniLanguage Ini Language.
IniLanguage LanguageIdentifier = "ini"
// JavaLanguage Java Language.
JavaLanguage LanguageIdentifier = "java"
// JavaScriptLanguage JavaScript Language.
JavaScriptLanguage LanguageIdentifier = "javascript"
// JavaScriptReactLanguage JavaScript React Language.
JavaScriptReactLanguage LanguageIdentifier = "javascriptreact"
// JSONLanguage JSON Language.
JSONLanguage LanguageIdentifier = "json"
// LatexLanguage LaTeX Language.
LatexLanguage LanguageIdentifier = "latex"
// LessLanguage Less Language.
LessLanguage LanguageIdentifier = "less"
// LuaLanguage Lua Language.
LuaLanguage LanguageIdentifier = "lua"
// MakefileLanguage Makefile Language.
MakefileLanguage LanguageIdentifier = "makefile"
// MarkdownLanguage Markdown Language.
MarkdownLanguage LanguageIdentifier = "markdown"
// ObjectiveCLanguage Objective-C Language.
ObjectiveCLanguage LanguageIdentifier = "objective-c"
// ObjectiveCppLanguage Objective-C++ Language.
ObjectiveCppLanguage LanguageIdentifier = "objective-cpp"
// PerlLanguage Perl Language.
PerlLanguage LanguageIdentifier = "perl"
// Perl6Language Perl 6 Language.
Perl6Language LanguageIdentifier = "perl6"
// PHPLanguage PHP Language.
PHPLanguage LanguageIdentifier = "php"
// PowershellLanguage Powershell Language.
PowershellLanguage LanguageIdentifier = "powershell"
// JadeLanguage Pug Language.
JadeLanguage LanguageIdentifier = "jade"
// PythonLanguage Python Language.
PythonLanguage LanguageIdentifier = "python"
// RLanguage R Language.
RLanguage LanguageIdentifier = "r"
// RazorLanguage Razor(cshtml) Language.
RazorLanguage LanguageIdentifier = "razor"
// RubyLanguage Ruby Language.
RubyLanguage LanguageIdentifier = "ruby"
// RustLanguage Rust Language.
RustLanguage LanguageIdentifier = "rust"
// SCSSLanguage SCSS Language (syntax using curly brackets).
SCSSLanguage LanguageIdentifier = "scss"
// SASSLanguage SASS Language (indented syntax).
SASSLanguage LanguageIdentifier = "sass"
// ScalaLanguage Scala Language.
ScalaLanguage LanguageIdentifier = "scala"
// ShaderlabLanguage ShaderLab Language.
ShaderlabLanguage LanguageIdentifier = "shaderlab"
// ShellscriptLanguage Shell Script (Bash) Language.
ShellscriptLanguage LanguageIdentifier = "shellscript"
// SQLLanguage SQL Language.
SQLLanguage LanguageIdentifier = "sql"
// SwiftLanguage Swift Language.
SwiftLanguage LanguageIdentifier = "swift"
// TypeScriptLanguage TypeScript Language.
TypeScriptLanguage LanguageIdentifier = "typescript"
// TypeScriptReactLanguage TypeScript React Language.
TypeScriptReactLanguage LanguageIdentifier = "typescriptreact"
// TeXLanguage TeX Language.
TeXLanguage LanguageIdentifier = "tex"
// VBLanguage Visual Basic Language.
VBLanguage LanguageIdentifier = "vb"
// XMLLanguage XML Language.
XMLLanguage LanguageIdentifier = "xml"
// XslLanguage XSL Language.
XslLanguage LanguageIdentifier = "xsl"
// YamlLanguage YAML Language.
YamlLanguage LanguageIdentifier = "yaml"
)
// languageIdentifierMap map of LanguageIdentifiers.
var languageIdentifierMap = map[string]LanguageIdentifier{
"abap": ABAPLanguage,
"bat": BatLanguage,
"bibtex": BibtexLanguage,
"clojure": ClojureLanguage,
"coffeescript": CoffeeScriptLanguage,
"c": CLanguage,
"cpp": CppLanguage,
"csharp": CsharpLanguage,
"css": CSSLanguage,
"diff": DiffLanguage,
"dart": DartLanguage,
"dockerfile": DockerfileLanguage,
"elixir": ElixirLanguage,
"erlang": ErlangLanguage,
"fsharp": FsharpLanguage,
"git-commit": GitCommitLanguage,
"git-rebase": GitRebaseLanguage,
"go": GoLanguage,
"groovy": GroovyLanguage,
"handlebars": HandlebarsLanguage,
"html": HTMLLanguage,
"ini": IniLanguage,
"java": JavaLanguage,
"javascript": JavaScriptLanguage,
"javascriptreact": JavaScriptReactLanguage,
"json": JSONLanguage,
"latex": LatexLanguage,
"less": LessLanguage,
"lua": LuaLanguage,
"makefile": MakefileLanguage,
"markdown": MarkdownLanguage,
"objective-c": ObjectiveCLanguage,
"objective-cpp": ObjectiveCppLanguage,
"perl": PerlLanguage,
"perl6": Perl6Language,
"php": PHPLanguage,
"powershell": PowershellLanguage,
"jade": JadeLanguage,
"python": PythonLanguage,
"r": RLanguage,
"razor": RazorLanguage,
"ruby": RubyLanguage,
"rust": RustLanguage,
"scss": SCSSLanguage,
"sass": SASSLanguage,
"scala": ScalaLanguage,
"shaderlab": ShaderlabLanguage,
"shellscript": ShellscriptLanguage,
"sql": SQLLanguage,
"swift": SwiftLanguage,
"typescript": TypeScriptLanguage,
"typescriptreact": TypeScriptReactLanguage,
"tex": TeXLanguage,
"vb": VBLanguage,
"xml": XMLLanguage,
"xsl": XslLanguage,
"yaml": YamlLanguage,
}
// ToLanguageIdentifier converts ft to LanguageIdentifier.
func ToLanguageIdentifier(ft string) LanguageIdentifier {
langID, ok := languageIdentifierMap[ft]
if ok {
return langID
}
return LanguageIdentifier(ft)
}
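// exampleToLanguageIdentifier is an illustrative sketch showing the fallback
// behavior of ToLanguageIdentifier: known filetypes resolve to the predefined
// constants, while unknown ones (here the hypothetical "mylang") are passed
// through unchanged.
func exampleToLanguageIdentifier() {
	_ = ToLanguageIdentifier("go")     // GoLanguage
	_ = ToLanguageIdentifier("mylang") // LanguageIdentifier("mylang")
}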
// VersionedTextDocumentIdentifier represents an identifier to denote a specific version of a text document.
//
// This information usually flows from the client to the server.
type VersionedTextDocumentIdentifier struct {
TextDocumentIdentifier
// Version is the version number of this document.
//
// The version number of a document will increase after each change, including
// undo/redo. The number doesn't need to be consecutive.
Version int32 `json:"version"`
}
// OptionalVersionedTextDocumentIdentifier represents an identifier which optionally denotes a specific version of
// a text document.
//
// This information usually flows from the server to the client.
//
// @since 3.16.0.
type OptionalVersionedTextDocumentIdentifier struct {
TextDocumentIdentifier
// Version is the version number of this document. If an optional versioned text document
// identifier is sent from the server to the client and the file is not
// open in the editor (the server has not received an open notification
// before) the server can send `null` to indicate that the version is
// known and the content on disk is the master (as specified with document
// content ownership).
//
// The version number of a document will increase after each change,
// including undo/redo. The number doesn't need to be consecutive.
Version *int32 `json:"version"` // int32 | null
}
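// exampleUnknownVersion is an illustrative sketch of an
// OptionalVersionedTextDocumentIdentifier whose version is sent as `null`:
// the nil pointer marshals to `"version": null`. The URI is assumed to be
// supplied by the caller.
func exampleUnknownVersion(uri DocumentURI) OptionalVersionedTextDocumentIdentifier {
	return OptionalVersionedTextDocumentIdentifier{
		TextDocumentIdentifier: TextDocumentIdentifier{URI: uri},
		Version:                nil, // int32 | null
	}
}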
// TextDocumentPositionParams is a parameter literal used in requests to pass a text document and a position
// inside that document.
//
// It is up to the client to decide how a selection is converted into a position when issuing a request for a text
// document.
//
// The client can for example honor or ignore the selection direction to make LSP request consistent with features
// implemented internally.
type TextDocumentPositionParams struct {
// TextDocument is the text document.
TextDocument TextDocumentIdentifier `json:"textDocument"`
// Position is the position inside the text document.
Position Position `json:"position"`
}
// DocumentFilter denotes a document through properties like language, scheme or pattern.
//
// An example is a filter that applies to TypeScript files on disk.
type DocumentFilter struct {
// Language a language id, like `typescript`.
Language string `json:"language,omitempty"`
// Scheme a URI scheme, like `file` or `untitled`.
Scheme string `json:"scheme,omitempty"`
// Pattern a glob pattern, like `*.{ts,js}`.
//
// Glob patterns can have the following syntax:
// "*"
// "*" to match one or more characters in a path segment
// "?"
// "?" to match on one character in a path segment
// "**"
// "**" to match any number of path segments, including none
// "{}"
// "{}" to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript and JavaScript files)
// "[]"
// "[]" to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
// "[!...]"
// "[!...]" to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
Pattern string `json:"pattern,omitempty"`
}
// DocumentSelector is the combination of one or more document filters.
type DocumentSelector []*DocumentFilter
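// exampleTypeScriptSelector is an illustrative sketch of a DocumentSelector
// for TypeScript files on disk, using the glob syntax documented on
// DocumentFilter; the pattern shown is only an example value.
func exampleTypeScriptSelector() DocumentSelector {
	return DocumentSelector{
		&DocumentFilter{
			Language: "typescript",
			Scheme:   "file",
			Pattern:  "**/*.ts",
		},
	}
}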
// MarkupKind describes the content type that a client supports in various
// result literals like `Hover`, `ParameterInfo` or `CompletionItem`.
//
// Please note that `MarkupKinds` must not start with a `$`. These kinds
// are reserved for internal usage.
type MarkupKind string
const (
// PlainText is supported as a content format.
PlainText MarkupKind = "plaintext"
// Markdown is supported as a content format.
Markdown MarkupKind = "markdown"
)
// MarkupContent represents a string value whose content is interpreted based on its
// kind flag.
//
// Currently the protocol supports `plaintext` and `markdown` as markup kinds.
//
// If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues.
// See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
//
// Here is an example of how such a string can be constructed using JavaScript / TypeScript:
//
// let markdown: MarkdownContent = {
// kind: MarkupKind.Markdown,
// value: [
// '# Header',
// 'Some text',
// '```typescript',
// 'someCode();',
// '```'
// ].join('\n')
// };
//
// NOTE: clients might sanitize the return markdown. A client could decide to
// remove HTML from the markdown to avoid script execution.
type MarkupContent struct {
// Kind is the type of the Markup.
Kind MarkupKind `json:"kind"`
// Value is the content itself.
Value string `json:"value"`
}
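// exampleMarkdownContent is an illustrative sketch mirroring the TypeScript
// snippet above in Go: it builds a Markdown MarkupContent value containing a
// fenced code block; the text is a hypothetical example.
func exampleMarkdownContent() MarkupContent {
	return MarkupContent{
		Kind: Markdown,
		Value: "# Header\n" +
			"Some text\n" +
			"```typescript\n" +
			"someCode();\n" +
			"```",
	}
}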


@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// CallHierarchy capabilities specific to the "textDocument/callHierarchy".
//
// @since 3.16.0.
type CallHierarchy struct {
// DynamicRegistration whether implementation supports dynamic registration.
//
// If this is set to "true" the client supports the new
// TextDocumentRegistrationOptions && StaticRegistrationOptions return
// value for the corresponding server capability as well.
DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
}
// CallHierarchyPrepareParams params of CallHierarchyPrepare.
//
// @since 3.16.0.
type CallHierarchyPrepareParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
}
// CallHierarchyItem is the result of a "textDocument/prepareCallHierarchy" request.
//
// @since 3.16.0.
type CallHierarchyItem struct {
// Name is the name of this item.
Name string `json:"name"`
// Kind is the kind of this item.
Kind SymbolKind `json:"kind"`
// Tags for this item.
Tags []SymbolTag `json:"tags,omitempty"`
// Detail more detail for this item, e.g. the signature of a function.
Detail string `json:"detail,omitempty"`
// URI is the resource identifier of this item.
URI DocumentURI `json:"uri"`
// Range is the range enclosing this symbol not including leading/trailing whitespace
// but everything else, e.g. comments and code.
Range Range `json:"range"`
// SelectionRange is the range that should be selected and revealed when this symbol is being
// picked, e.g. the name of a function. Must be contained by the
// Range.
SelectionRange Range `json:"selectionRange"`
// Data is a data entry field that is preserved between a call hierarchy prepare and
// incoming calls or outgoing calls requests.
Data interface{} `json:"data,omitempty"`
}
// CallHierarchyIncomingCallsParams params of CallHierarchyIncomingCalls.
//
// @since 3.16.0.
type CallHierarchyIncomingCallsParams struct {
WorkDoneProgressParams
PartialResultParams
// Item is the IncomingCalls item.
Item CallHierarchyItem `json:"item"`
}
// CallHierarchyIncomingCall is the result of a "callHierarchy/incomingCalls" request.
//
// @since 3.16.0.
type CallHierarchyIncomingCall struct {
// From is the item that makes the call.
From CallHierarchyItem `json:"from"`
// FromRanges is the ranges at which the calls appear. This is relative to the caller
// denoted by From.
FromRanges []Range `json:"fromRanges"`
}
// CallHierarchyOutgoingCallsParams params of CallHierarchyOutgoingCalls.
//
// @since 3.16.0.
type CallHierarchyOutgoingCallsParams struct {
WorkDoneProgressParams
PartialResultParams
// Item is the OutgoingCalls item.
Item CallHierarchyItem `json:"item"`
}
// CallHierarchyOutgoingCall is the result of a "callHierarchy/outgoingCalls" request.
//
// @since 3.16.0.
type CallHierarchyOutgoingCall struct {
// To is the item that is called.
To CallHierarchyItem `json:"to"`
// FromRanges is the ranges at which this item is called. This is the range relative to
// the caller, e.g. the item passed to the "callHierarchy/outgoingCalls" request.
FromRanges []Range `json:"fromRanges"`
}

File diff suppressed because it is too large


@ -0,0 +1,523 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"strconv"
)
// ServerCapabilities defines the capabilities provided by a language server.
type ServerCapabilities struct {
// TextDocumentSync defines how text documents are synced. It is either a detailed structure defining each notification
// or, for backwards compatibility, the TextDocumentSyncKind number.
//
// If omitted, it defaults to TextDocumentSyncKind.None.
TextDocumentSync interface{} `json:"textDocumentSync,omitempty"` // *TextDocumentSyncOptions | TextDocumentSyncKind
// CompletionProvider is the server provides completion support.
CompletionProvider *CompletionOptions `json:"completionProvider,omitempty"`
// HoverProvider is the server provides hover support.
HoverProvider interface{} `json:"hoverProvider,omitempty"` // TODO(zchee): bool | *HoverOptions
// SignatureHelpProvider is the server provides signature help support.
SignatureHelpProvider *SignatureHelpOptions `json:"signatureHelpProvider,omitempty"`
// DeclarationProvider is the server provides Goto Declaration support.
//
// @since 3.14.0.
DeclarationProvider interface{} `json:"declarationProvider,omitempty"` // TODO(zchee): bool | *DeclarationOptions | *DeclarationRegistrationOptions
// DefinitionProvider is the server provides Goto definition support.
DefinitionProvider interface{} `json:"definitionProvider,omitempty"` // TODO(zchee): bool | *DefinitionOptions
// TypeDefinitionProvider is the server provides Goto Type Definition support.
//
// @since 3.6.0.
TypeDefinitionProvider interface{} `json:"typeDefinitionProvider,omitempty"` // TODO(zchee): bool | *TypeDefinitionOptions | *TypeDefinitionRegistrationOptions
// ImplementationProvider is the server provides Goto Implementation support.
//
// @since 3.6.0.
ImplementationProvider interface{} `json:"implementationProvider,omitempty"` // TODO(zchee): bool | *ImplementationOptions | *ImplementationRegistrationOptions
// ReferencesProvider is the server provides find references support.
ReferencesProvider interface{} `json:"referencesProvider,omitempty"` // TODO(zchee): bool | *ReferenceOptions
// DocumentHighlightProvider is the server provides document highlight support.
DocumentHighlightProvider interface{} `json:"documentHighlightProvider,omitempty"` // TODO(zchee): bool | *DocumentHighlightOptions
// DocumentSymbolProvider is the server provides document symbol support.
DocumentSymbolProvider interface{} `json:"documentSymbolProvider,omitempty"` // TODO(zchee): bool | *DocumentSymbolOptions
// CodeActionProvider is the server provides code actions.
//
// CodeActionOptions may only be specified if the client states that it supports CodeActionLiteralSupport in its
// initial Initialize request.
CodeActionProvider interface{} `json:"codeActionProvider,omitempty"` // TODO(zchee): bool | *CodeActionOptions
// CodeLensProvider is the server provides code lens.
CodeLensProvider *CodeLensOptions `json:"codeLensProvider,omitempty"`
// DocumentLinkProvider is the server provides document link support.
DocumentLinkProvider *DocumentLinkOptions `json:"documentLinkProvider,omitempty"`
// ColorProvider is the server provides color provider support.
//
// @since 3.6.0.
ColorProvider interface{} `json:"colorProvider,omitempty"` // TODO(zchee): bool | *DocumentColorOptions | *DocumentColorRegistrationOptions
// WorkspaceSymbolProvider is the server provides workspace symbol support.
WorkspaceSymbolProvider interface{} `json:"workspaceSymbolProvider,omitempty"` // TODO(zchee): bool | *WorkspaceSymbolOptions
// DocumentFormattingProvider is the server provides document formatting.
DocumentFormattingProvider interface{} `json:"documentFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentFormattingOptions
// DocumentRangeFormattingProvider is the server provides document range formatting.
DocumentRangeFormattingProvider interface{} `json:"documentRangeFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentRangeFormattingOptions
// DocumentOnTypeFormattingProvider is the server provides document formatting on typing.
DocumentOnTypeFormattingProvider *DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"`
// RenameProvider is the server provides rename support.
//
// RenameOptions may only be specified if the client states that it supports PrepareSupport in its
// initial Initialize request.
RenameProvider interface{} `json:"renameProvider,omitempty"` // TODO(zchee): bool | *RenameOptions
// FoldingRangeProvider is the server provides folding provider support.
//
// @since 3.10.0.
FoldingRangeProvider interface{} `json:"foldingRangeProvider,omitempty"` // TODO(zchee): bool | *FoldingRangeOptions | *FoldingRangeRegistrationOptions
// SelectionRangeProvider is the server provides selection range support.
//
// @since 3.15.0.
SelectionRangeProvider interface{} `json:"selectionRangeProvider,omitempty"` // TODO(zchee): bool | *SelectionRangeOptions | *SelectionRangeRegistrationOptions
// ExecuteCommandProvider is the server provides execute command support.
ExecuteCommandProvider *ExecuteCommandOptions `json:"executeCommandProvider,omitempty"`
// CallHierarchyProvider is the server provides call hierarchy support.
//
// @since 3.16.0.
CallHierarchyProvider interface{} `json:"callHierarchyProvider,omitempty"` // TODO(zchee): bool | *CallHierarchyOptions | *CallHierarchyRegistrationOptions
// LinkedEditingRangeProvider is the server provides linked editing range support.
//
// @since 3.16.0.
LinkedEditingRangeProvider interface{} `json:"linkedEditingRangeProvider,omitempty"` // TODO(zchee): bool | *LinkedEditingRangeOptions | *LinkedEditingRangeRegistrationOptions
// SemanticTokensProvider is the server provides semantic tokens support.
//
// @since 3.16.0.
SemanticTokensProvider interface{} `json:"semanticTokensProvider,omitempty"` // TODO(zchee): *SemanticTokensOptions | *SemanticTokensRegistrationOptions
// Workspace is the workspace specific server capabilities.
Workspace *ServerCapabilitiesWorkspace `json:"workspace,omitempty"`
// MonikerProvider is the server provides moniker support.
//
// @since 3.16.0.
MonikerProvider interface{} `json:"monikerProvider,omitempty"` // TODO(zchee): bool | *MonikerOptions | *MonikerRegistrationOptions
// Experimental server capabilities.
Experimental interface{} `json:"experimental,omitempty"`
}
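// exampleServerCapabilities is an illustrative sketch of a minimal set of
// capabilities a server might advertise: full text synchronization, completion
// triggered on ".", and plain boolean hover support. The values chosen are
// hypothetical.
func exampleServerCapabilities() ServerCapabilities {
	return ServerCapabilities{
		TextDocumentSync: &TextDocumentSyncOptions{
			OpenClose: true,
			Change:    TextDocumentSyncKindFull,
		},
		CompletionProvider: &CompletionOptions{
			TriggerCharacters: []string{"."},
		},
		HoverProvider: true, // bool | *HoverOptions
	}
}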
// TextDocumentSyncOptions TextDocumentSync options.
type TextDocumentSyncOptions struct {
// OpenClose open and close notifications are sent to the server.
OpenClose bool `json:"openClose,omitempty"`
// Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
// and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
Change TextDocumentSyncKind `json:"change,omitempty"`
// WillSave notifications are sent to the server.
WillSave bool `json:"willSave,omitempty"`
// WillSaveWaitUntil will save wait until requests are sent to the server.
WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
// Save notifications are sent to the server.
Save *SaveOptions `json:"save,omitempty"`
}
// SaveOptions save options.
type SaveOptions struct {
// IncludeText is the client is supposed to include the content on save.
IncludeText bool `json:"includeText,omitempty"`
}
// TextDocumentSyncKind defines how the host (editor) should sync document changes to the language server.
type TextDocumentSyncKind float64
const (
// TextDocumentSyncKindNone documents should not be synced at all.
TextDocumentSyncKindNone TextDocumentSyncKind = 0
// TextDocumentSyncKindFull documents are synced by always sending the full content
// of the document.
TextDocumentSyncKindFull TextDocumentSyncKind = 1
// TextDocumentSyncKindIncremental documents are synced by sending the full content on open.
// After that only incremental updates to the document are
// sent.
TextDocumentSyncKindIncremental TextDocumentSyncKind = 2
)
// String implements fmt.Stringer.
func (k TextDocumentSyncKind) String() string {
switch k {
case TextDocumentSyncKindNone:
return "None"
case TextDocumentSyncKindFull:
return "Full"
case TextDocumentSyncKindIncremental:
return "Incremental"
default:
return strconv.FormatFloat(float64(k), 'f', -10, 64)
}
}
// CompletionOptions Completion options.
type CompletionOptions struct {
// The server provides support to resolve additional
// information for a completion item.
ResolveProvider bool `json:"resolveProvider,omitempty"`
// The characters that trigger completion automatically.
TriggerCharacters []string `json:"triggerCharacters,omitempty"`
}
// HoverOptions option of hover provider server capabilities.
type HoverOptions struct {
WorkDoneProgressOptions
}
// SignatureHelpOptions SignatureHelp options.
type SignatureHelpOptions struct {
// The characters that trigger signature help
// automatically.
TriggerCharacters []string `json:"triggerCharacters,omitempty"`
// RetriggerCharacters is the list of characters that re-trigger signature help.
//
// These trigger characters are only active when signature help is already
// showing.
// All trigger characters are also counted as re-trigger characters.
//
// @since 3.15.0.
RetriggerCharacters []string `json:"retriggerCharacters,omitempty"`
}
// DeclarationOptions registration option of Declaration server capability.
//
// @since 3.15.0.
type DeclarationOptions struct {
WorkDoneProgressOptions
}
// DeclarationRegistrationOptions registration option of Declaration server capability.
//
// @since 3.15.0.
type DeclarationRegistrationOptions struct {
DeclarationOptions
TextDocumentRegistrationOptions
StaticRegistrationOptions
}
// DefinitionOptions registration option of Definition server capability.
//
// @since 3.15.0.
type DefinitionOptions struct {
WorkDoneProgressOptions
}
// TypeDefinitionOptions registration option of TypeDefinition server capability.
//
// @since 3.15.0.
type TypeDefinitionOptions struct {
WorkDoneProgressOptions
}
// TypeDefinitionRegistrationOptions registration option of TypeDefinition server capability.
//
// @since 3.15.0.
type TypeDefinitionRegistrationOptions struct {
TextDocumentRegistrationOptions
TypeDefinitionOptions
StaticRegistrationOptions
}
// ImplementationOptions registration option of Implementation server capability.
//
// @since 3.15.0.
type ImplementationOptions struct {
WorkDoneProgressOptions
}
// ImplementationRegistrationOptions registration option of Implementation server capability.
//
// @since 3.15.0.
type ImplementationRegistrationOptions struct {
TextDocumentRegistrationOptions
ImplementationOptions
StaticRegistrationOptions
}
// ReferenceOptions registration option of Reference server capability.
type ReferenceOptions struct {
WorkDoneProgressOptions
}
// DocumentHighlightOptions registration option of DocumentHighlight server capability.
//
// @since 3.15.0.
type DocumentHighlightOptions struct {
WorkDoneProgressOptions
}
// DocumentSymbolOptions registration option of DocumentSymbol server capability.
//
// @since 3.15.0.
type DocumentSymbolOptions struct {
WorkDoneProgressOptions
// Label a human-readable string that is shown when multiple outline trees
// are shown for the same document.
//
// @since 3.16.0.
Label string `json:"label,omitempty"`
}
// CodeActionOptions CodeAction options.
type CodeActionOptions struct {
// CodeActionKinds that this server may return.
//
// The list of kinds may be generic, such as "CodeActionKind.Refactor", or the server
// may list out every specific kind they provide.
CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"`
// ResolveProvider is the server provides support to resolve additional
// information for a code action.
//
// @since 3.16.0.
ResolveProvider bool `json:"resolveProvider,omitempty"`
}
// CodeLensOptions CodeLens options.
type CodeLensOptions struct {
// Code lens has a resolve provider as well.
ResolveProvider bool `json:"resolveProvider,omitempty"`
}
// DocumentLinkOptions document link options.
type DocumentLinkOptions struct {
// ResolveProvider document links have a resolve provider as well.
ResolveProvider bool `json:"resolveProvider,omitempty"`
}
// DocumentColorOptions registration option of DocumentColor server capability.
//
// @since 3.15.0.
type DocumentColorOptions struct {
WorkDoneProgressOptions
}
// DocumentColorRegistrationOptions registration option of DocumentColor server capability.
//
// @since 3.15.0.
type DocumentColorRegistrationOptions struct {
TextDocumentRegistrationOptions
StaticRegistrationOptions
DocumentColorOptions
}
// WorkspaceSymbolOptions registration option of WorkspaceSymbol server capability.
//
// @since 3.15.0.
type WorkspaceSymbolOptions struct {
WorkDoneProgressOptions
}
// DocumentFormattingOptions registration option of DocumentFormatting server capability.
//
// @since 3.15.0.
type DocumentFormattingOptions struct {
WorkDoneProgressOptions
}
// DocumentRangeFormattingOptions registration option of DocumentRangeFormatting server capability.
//
// @since 3.15.0.
type DocumentRangeFormattingOptions struct {
WorkDoneProgressOptions
}
// DocumentOnTypeFormattingOptions format document on type options.
type DocumentOnTypeFormattingOptions struct {
// FirstTriggerCharacter a character on which formatting should be triggered, like "}".
FirstTriggerCharacter string `json:"firstTriggerCharacter"`
// MoreTriggerCharacter more trigger characters.
MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"`
}
// RenameOptions rename options.
type RenameOptions struct {
// PrepareProvider renames should be checked and tested before being executed.
PrepareProvider bool `json:"prepareProvider,omitempty"`
}
// FoldingRangeOptions registration option of FoldingRange server capability.
//
// @since 3.15.0.
type FoldingRangeOptions struct {
WorkDoneProgressOptions
}
// FoldingRangeRegistrationOptions registration option of FoldingRange server capability.
//
// @since 3.15.0.
type FoldingRangeRegistrationOptions struct {
TextDocumentRegistrationOptions
FoldingRangeOptions
StaticRegistrationOptions
}
// ExecuteCommandOptions execute command options.
type ExecuteCommandOptions struct {
// Commands is the commands to be executed on the server
Commands []string `json:"commands"`
}
// CallHierarchyOptions option of CallHierarchy.
//
// @since 3.16.0.
type CallHierarchyOptions struct {
WorkDoneProgressOptions
}
// CallHierarchyRegistrationOptions registration options of CallHierarchy.
//
// @since 3.16.0.
type CallHierarchyRegistrationOptions struct {
TextDocumentRegistrationOptions
CallHierarchyOptions
StaticRegistrationOptions
}
// LinkedEditingRangeOptions option of linked editing range provider server capabilities.
//
// @since 3.16.0.
type LinkedEditingRangeOptions struct {
WorkDoneProgressOptions
}
// LinkedEditingRangeRegistrationOptions registration option of linked editing range provider server capabilities.
//
// @since 3.16.0.
type LinkedEditingRangeRegistrationOptions struct {
TextDocumentRegistrationOptions
LinkedEditingRangeOptions
StaticRegistrationOptions
}
// SemanticTokensOptions option of semantic tokens provider server capabilities.
//
// @since 3.16.0.
type SemanticTokensOptions struct {
WorkDoneProgressOptions
}
// SemanticTokensRegistrationOptions registration option of semantic tokens provider server capabilities.
//
// @since 3.16.0.
type SemanticTokensRegistrationOptions struct {
TextDocumentRegistrationOptions
SemanticTokensOptions
StaticRegistrationOptions
}
// ServerCapabilitiesWorkspace specific server capabilities.
type ServerCapabilitiesWorkspace struct {
// WorkspaceFolders is the server supports workspace folder.
//
// @since 3.6.0.
WorkspaceFolders *ServerCapabilitiesWorkspaceFolders `json:"workspaceFolders,omitempty"`
// FileOperations is the server is interested in file notifications/requests.
//
// @since 3.16.0.
FileOperations *ServerCapabilitiesWorkspaceFileOperations `json:"fileOperations,omitempty"`
}
// ServerCapabilitiesWorkspaceFolders is the server supports workspace folder.
//
// @since 3.6.0.
type ServerCapabilitiesWorkspaceFolders struct {
// Supported is the server has support for workspace folders
Supported bool `json:"supported,omitempty"`
// ChangeNotifications whether the server wants to receive workspace folder
// change notifications.
//
// If a string is provided, the string is treated as an ID
// under which the notification is registered on the client
// side. The ID can be used to unregister for these events
// using the `client/unregisterCapability` request.
ChangeNotifications interface{} `json:"changeNotifications,omitempty"` // string | boolean
}
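// exampleWorkspaceFolderCapabilities is an illustrative sketch of advertising
// workspace folder support with change notifications registered under a
// hypothetical string ID (the field also accepts a plain boolean).
func exampleWorkspaceFolderCapabilities() *ServerCapabilitiesWorkspace {
	return &ServerCapabilitiesWorkspace{
		WorkspaceFolders: &ServerCapabilitiesWorkspaceFolders{
			Supported:           true,
			ChangeNotifications: "workspace-folder-registration", // string | boolean
		},
	}
}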
// ServerCapabilitiesWorkspaceFileOperations is the server is interested in file notifications/requests.
//
// @since 3.16.0.
type ServerCapabilitiesWorkspaceFileOperations struct {
// DidCreate is the server is interested in receiving didCreateFiles
// notifications.
DidCreate *FileOperationRegistrationOptions `json:"didCreate,omitempty"`
// WillCreate is the server is interested in receiving willCreateFiles requests.
WillCreate *FileOperationRegistrationOptions `json:"willCreate,omitempty"`
// DidRename is the server is interested in receiving didRenameFiles
// notifications.
DidRename *FileOperationRegistrationOptions `json:"didRename,omitempty"`
// WillRename is the server is interested in receiving willRenameFiles requests.
WillRename *FileOperationRegistrationOptions `json:"willRename,omitempty"`
// DidDelete is the server is interested in receiving didDeleteFiles file
// notifications.
DidDelete *FileOperationRegistrationOptions `json:"didDelete,omitempty"`
// WillDelete is the server is interested in receiving willDeleteFiles file
// requests.
WillDelete *FileOperationRegistrationOptions `json:"willDelete,omitempty"`
}
// FileOperationRegistrationOptions is the options to register for file operations.
//
// @since 3.16.0.
type FileOperationRegistrationOptions struct {
// Filters is the actual filters.
Filters []FileOperationFilter `json:"filters"`
}
// MonikerOptions option of moniker provider server capabilities.
//
// @since 3.16.0.
type MonikerOptions struct {
WorkDoneProgressOptions
}
// MonikerRegistrationOptions registration option of moniker provider server capabilities.
//
// @since 3.16.0.
type MonikerRegistrationOptions struct {
TextDocumentRegistrationOptions
MonikerOptions
}

412
vendor/go.lsp.dev/protocol/client.go vendored Normal file

@ -0,0 +1,412 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"bytes"
"context"
"fmt"
"github.com/segmentio/encoding/json"
"go.uber.org/zap"
"go.lsp.dev/jsonrpc2"
"go.lsp.dev/pkg/xcontext"
)
// ClientDispatcher returns a Client that dispatches LSP requests across the
// given jsonrpc2 connection.
func ClientDispatcher(conn jsonrpc2.Conn, logger *zap.Logger) Client {
return &client{
Conn: conn,
logger: logger,
}
}
// ClientHandler handler of LSP client.
func ClientHandler(client Client, handler jsonrpc2.Handler) jsonrpc2.Handler {
h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
if ctx.Err() != nil {
xctx := xcontext.Detach(ctx)
return reply(xctx, nil, ErrRequestCancelled)
}
handled, err := clientDispatch(ctx, client, reply, req)
if handled || err != nil {
return err
}
return handler(ctx, reply, req)
}
return h
}
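// exampleWireClient is an illustrative sketch of wiring the two helpers above
// together; the conn, logger, and fallback handler next are assumed to be
// supplied by the caller.
func exampleWireClient(conn jsonrpc2.Conn, logger *zap.Logger, next jsonrpc2.Handler) (Client, jsonrpc2.Handler) {
	client := ClientDispatcher(conn, logger) // server-to-client calls go out over conn
	handler := ClientHandler(client, next)   // client-bound requests are routed to client, everything else to next
	return client, handler
}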
// clientDispatch routes client-bound requests and notifications to the corresponding Client method, reporting whether they were handled.
//nolint:funlen,cyclop
func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, req jsonrpc2.Request) (handled bool, err error) {
if ctx.Err() != nil {
return true, reply(ctx, nil, ErrRequestCancelled)
}
dec := json.NewDecoder(bytes.NewReader(req.Params()))
logger := LoggerFromContext(ctx)
switch req.Method() {
case MethodProgress: // notification
defer logger.Debug(MethodProgress, zap.Error(err))
var params ProgressParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.Progress(ctx, &params)
return true, reply(ctx, nil, err)
case MethodWorkDoneProgressCreate: // request
defer logger.Debug(MethodWorkDoneProgressCreate, zap.Error(err))
var params WorkDoneProgressCreateParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.WorkDoneProgressCreate(ctx, &params)
return true, reply(ctx, nil, err)
case MethodWindowLogMessage: // notification
defer logger.Debug(MethodWindowLogMessage, zap.Error(err))
var params LogMessageParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.LogMessage(ctx, &params)
return true, reply(ctx, nil, err)
case MethodTextDocumentPublishDiagnostics: // notification
defer logger.Debug(MethodTextDocumentPublishDiagnostics, zap.Error(err))
var params PublishDiagnosticsParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.PublishDiagnostics(ctx, &params)
return true, reply(ctx, nil, err)
case MethodWindowShowMessage: // notification
defer logger.Debug(MethodWindowShowMessage, zap.Error(err))
var params ShowMessageParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.ShowMessage(ctx, &params)
return true, reply(ctx, nil, err)
case MethodWindowShowMessageRequest: // request
defer logger.Debug(MethodWindowShowMessageRequest, zap.Error(err))
var params ShowMessageRequestParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
resp, err := client.ShowMessageRequest(ctx, &params)
return true, reply(ctx, resp, err)
case MethodTelemetryEvent: // notification
defer logger.Debug(MethodTelemetryEvent, zap.Error(err))
var params interface{}
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.Telemetry(ctx, &params)
return true, reply(ctx, nil, err)
case MethodClientRegisterCapability: // request
defer logger.Debug(MethodClientRegisterCapability, zap.Error(err))
var params RegistrationParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.RegisterCapability(ctx, &params)
return true, reply(ctx, nil, err)
case MethodClientUnregisterCapability: // request
defer logger.Debug(MethodClientUnregisterCapability, zap.Error(err))
var params UnregistrationParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
err := client.UnregisterCapability(ctx, &params)
return true, reply(ctx, nil, err)
case MethodWorkspaceApplyEdit: // request
defer logger.Debug(MethodWorkspaceApplyEdit, zap.Error(err))
var params ApplyWorkspaceEditParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
resp, err := client.ApplyEdit(ctx, &params)
return true, reply(ctx, resp, err)
case MethodWorkspaceConfiguration: // request
defer logger.Debug(MethodWorkspaceConfiguration, zap.Error(err))
var params ConfigurationParams
if err := dec.Decode(&params); err != nil {
return true, replyParseError(ctx, reply, err)
}
resp, err := client.Configuration(ctx, &params)
return true, reply(ctx, resp, err)
case MethodWorkspaceWorkspaceFolders: // request
defer logger.Debug(MethodWorkspaceWorkspaceFolders, zap.Error(err))
if len(req.Params()) > 0 {
return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
}
resp, err := client.WorkspaceFolders(ctx)
return true, reply(ctx, resp, err)
default:
return false, nil
}
}
// Client represents a Language Server Protocol client.
type Client interface {
Progress(ctx context.Context, params *ProgressParams) (err error)
WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error)
LogMessage(ctx context.Context, params *LogMessageParams) (err error)
PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error)
ShowMessage(ctx context.Context, params *ShowMessageParams) (err error)
ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (result *MessageActionItem, err error)
Telemetry(ctx context.Context, params interface{}) (err error)
RegisterCapability(ctx context.Context, params *RegistrationParams) (err error)
UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error)
ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error)
Configuration(ctx context.Context, params *ConfigurationParams) (result []interface{}, err error)
WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error)
}
// list of client methods.
const (
// MethodProgress method name of "$/progress".
MethodProgress = "$/progress"
// MethodWorkDoneProgressCreate method name of "window/workDoneProgress/create".
MethodWorkDoneProgressCreate = "window/workDoneProgress/create"
// MethodWindowShowMessage method name of "window/showMessage".
MethodWindowShowMessage = "window/showMessage"
// MethodWindowShowMessageRequest method name of "window/showMessageRequest".
MethodWindowShowMessageRequest = "window/showMessageRequest"
// MethodWindowLogMessage method name of "window/logMessage".
MethodWindowLogMessage = "window/logMessage"
// MethodTelemetryEvent method name of "telemetry/event".
MethodTelemetryEvent = "telemetry/event"
// MethodClientRegisterCapability method name of "client/registerCapability".
MethodClientRegisterCapability = "client/registerCapability"
// MethodClientUnregisterCapability method name of "client/unregisterCapability".
MethodClientUnregisterCapability = "client/unregisterCapability"
// MethodTextDocumentPublishDiagnostics method name of "textDocument/publishDiagnostics".
MethodTextDocumentPublishDiagnostics = "textDocument/publishDiagnostics"
// MethodWorkspaceApplyEdit method name of "workspace/applyEdit".
MethodWorkspaceApplyEdit = "workspace/applyEdit"
// MethodWorkspaceConfiguration method name of "workspace/configuration".
MethodWorkspaceConfiguration = "workspace/configuration"
// MethodWorkspaceWorkspaceFolders method name of "workspace/workspaceFolders".
MethodWorkspaceWorkspaceFolders = "workspace/workspaceFolders"
)
// client implements a Language Server Protocol client.
type client struct {
jsonrpc2.Conn
logger *zap.Logger
}
// compile-time check whether the client type implements the Client interface.
var _ Client = (*client)(nil)
// Progress reports progress in a generic fashion, using the base protocol's progress support.
//
// This mechanism can be used to report any kind of progress including work done progress (usually used to report progress in the user interface using a progress bar) and
// partial result progress to support streaming of results.
//
// @since 3.16.0.
func (c *client) Progress(ctx context.Context, params *ProgressParams) (err error) {
c.logger.Debug("call " + MethodProgress)
defer c.logger.Debug("end "+MethodProgress, zap.Error(err))
return c.Conn.Notify(ctx, MethodProgress, params)
}
// WorkDoneProgressCreate sends the request from the server to the client to ask the client to create a work done progress.
//
// @since 3.16.0.
func (c *client) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error) {
c.logger.Debug("call " + MethodWorkDoneProgressCreate)
defer c.logger.Debug("end "+MethodWorkDoneProgressCreate, zap.Error(err))
return Call(ctx, c.Conn, MethodWorkDoneProgressCreate, params, nil)
}
// LogMessage sends the notification from the server to the client to ask the client to log a particular message.
func (c *client) LogMessage(ctx context.Context, params *LogMessageParams) (err error) {
c.logger.Debug("call " + MethodWindowLogMessage)
defer c.logger.Debug("end "+MethodWindowLogMessage, zap.Error(err))
return c.Conn.Notify(ctx, MethodWindowLogMessage, params)
}
// PublishDiagnostics sends the notification from the server to the client to signal results of validation runs.
//
// Diagnostics are “owned” by the server so it is the server's responsibility to clear them if necessary. The following rule is used for VS Code servers that generate diagnostics:
//
// - if a language is single file only (for example HTML) then diagnostics are cleared by the server when the file is closed.
// - if a language has a project system (for example C#) diagnostics are not cleared when a file closes. When a project is opened all diagnostics for all files are recomputed (or read from a cache).
//
// When a file changes it is the server's responsibility to re-compute diagnostics and push them to the client.
// If the computed set is empty it has to push the empty array to clear former diagnostics.
// Newly pushed diagnostics always replace previously pushed diagnostics. There is no merging that happens on the client side.
func (c *client) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error) {
c.logger.Debug("call " + MethodTextDocumentPublishDiagnostics)
defer c.logger.Debug("end "+MethodTextDocumentPublishDiagnostics, zap.Error(err))
return c.Conn.Notify(ctx, MethodTextDocumentPublishDiagnostics, params)
}
// ShowMessage sends the notification from a server to a client to ask the
// client to display a particular message in the user interface.
func (c *client) ShowMessage(ctx context.Context, params *ShowMessageParams) (err error) {
return c.Conn.Notify(ctx, MethodWindowShowMessage, params)
}
// ShowMessageRequest sends the request from a server to a client to ask the client to display a particular message in the user interface.
//
// In addition to the show message notification the request allows passing actions and waiting for an answer from the client.
func (c *client) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (_ *MessageActionItem, err error) {
c.logger.Debug("call " + MethodWindowShowMessageRequest)
defer c.logger.Debug("end "+MethodWindowShowMessageRequest, zap.Error(err))
var result *MessageActionItem
if err := Call(ctx, c.Conn, MethodWindowShowMessageRequest, params, &result); err != nil {
return nil, err
}
return result, nil
}
// Telemetry sends the notification from the server to the client to ask the client to log a telemetry event.
func (c *client) Telemetry(ctx context.Context, params interface{}) (err error) {
c.logger.Debug("call " + MethodTelemetryEvent)
defer c.logger.Debug("end "+MethodTelemetryEvent, zap.Error(err))
return c.Conn.Notify(ctx, MethodTelemetryEvent, params)
}
// RegisterCapability sends the request from the server to the client to register for a new capability on the client side.
//
// Not all clients need to support dynamic capability registration.
//
// A client opts in via the dynamicRegistration property on the specific client capabilities.
// A client can even provide dynamic registration for capability A but not for capability B (see TextDocumentClientCapabilities as an example).
func (c *client) RegisterCapability(ctx context.Context, params *RegistrationParams) (err error) {
c.logger.Debug("call " + MethodClientRegisterCapability)
defer c.logger.Debug("end "+MethodClientRegisterCapability, zap.Error(err))
return Call(ctx, c.Conn, MethodClientRegisterCapability, params, nil)
}
// UnregisterCapability sends the request from the server to the client to unregister a previously registered capability.
func (c *client) UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error) {
c.logger.Debug("call " + MethodClientUnregisterCapability)
defer c.logger.Debug("end "+MethodClientUnregisterCapability, zap.Error(err))
return Call(ctx, c.Conn, MethodClientUnregisterCapability, params, nil)
}
// ApplyEdit sends the request from the server to the client to modify resources on the client side.
func (c *client) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error) {
c.logger.Debug("call " + MethodWorkspaceApplyEdit)
defer c.logger.Debug("end "+MethodWorkspaceApplyEdit, zap.Error(err))
if err := Call(ctx, c.Conn, MethodWorkspaceApplyEdit, params, &result); err != nil {
return false, err
}
return result, nil
}
// Configuration sends the request from the server to the client to fetch configuration settings from the client.
//
// The request can fetch several configuration settings in one roundtrip.
// The order of the returned configuration settings corresponds to the order of the
// passed ConfigurationItems (e.g. the first item in the response is the result for the first configuration item in the params).
func (c *client) Configuration(ctx context.Context, params *ConfigurationParams) (_ []interface{}, err error) {
c.logger.Debug("call " + MethodWorkspaceConfiguration)
defer c.logger.Debug("end "+MethodWorkspaceConfiguration, zap.Error(err))
var result []interface{}
if err := Call(ctx, c.Conn, MethodWorkspaceConfiguration, params, &result); err != nil {
return nil, err
}
return result, nil
}
// WorkspaceFolders sends the request from the server to the client to fetch the current open list of workspace folders.
//
// Returns null in the response if only a single file is open in the tool. Returns an empty array if a workspace is open but no folders are configured.
//
// @since 3.6.0.
func (c *client) WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error) {
c.logger.Debug("call " + MethodWorkspaceWorkspaceFolders)
defer c.logger.Debug("end "+MethodWorkspaceWorkspaceFolders, zap.Error(err))
if err := Call(ctx, c.Conn, MethodWorkspaceWorkspaceFolders, nil, &result); err != nil {
return nil, err
}
return result, nil
}

35
vendor/go.lsp.dev/protocol/context.go vendored Normal file

@ -0,0 +1,35 @@
// SPDX-FileCopyrightText: 2020 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"context"
"go.uber.org/zap"
)
var (
ctxLogger struct{}
ctxClient struct{}
)
// WithLogger returns the context with zap.Logger value.
func WithLogger(ctx context.Context, logger *zap.Logger) context.Context {
return context.WithValue(ctx, ctxLogger, logger)
}
// LoggerFromContext extracts zap.Logger from context.
func LoggerFromContext(ctx context.Context) *zap.Logger {
logger, ok := ctx.Value(ctxLogger).(*zap.Logger)
if !ok {
return zap.NewNop()
}
return logger
}
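// exampleLoggerContext is an illustrative sketch: a logger attached with
// WithLogger is retrieved by LoggerFromContext, while a context without one
// falls back to the no-op logger.
func exampleLoggerContext(logger *zap.Logger) {
	ctx := WithLogger(context.Background(), logger)
	LoggerFromContext(ctx).Debug("logger attached")
	LoggerFromContext(context.Background()).Debug("dropped by the no-op logger")
}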
// WithClient returns the context with Client value.
func WithClient(ctx context.Context, client Client) context.Context {
return context.WithValue(ctx, ctxClient, client)
}

264
vendor/go.lsp.dev/protocol/deprecated.go vendored Normal file

@ -0,0 +1,264 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// ClientCapabilitiesShowDocument alias of ShowDocumentClientCapabilities.
//
// Deprecated: Use ShowDocumentClientCapabilities instead.
type ClientCapabilitiesShowDocument = ShowDocumentClientCapabilities
// ClientCapabilitiesShowMessageRequest alias of ShowMessageRequestClientCapabilities.
//
// Deprecated: Use ShowMessageRequestClientCapabilities instead.
type ClientCapabilitiesShowMessageRequest = ShowMessageRequestClientCapabilities
// ClientCapabilitiesShowMessageRequestMessageActionItem alias of ShowMessageRequestClientCapabilitiesMessageActionItem.
//
// Deprecated: Use ShowMessageRequestClientCapabilitiesMessageActionItem instead.
type ClientCapabilitiesShowMessageRequestMessageActionItem = ShowMessageRequestClientCapabilitiesMessageActionItem
// ReferencesParams alias of ReferenceParams.
//
// Deprecated: Use ReferenceParams instead.
type ReferencesParams = ReferenceParams
// TextDocumentClientCapabilitiesCallHierarchy alias of CallHierarchyClientCapabilities.
//
// Deprecated: Use CallHierarchyClientCapabilities instead.
type TextDocumentClientCapabilitiesCallHierarchy = CallHierarchyClientCapabilities
// TextDocumentClientCapabilitiesCodeAction alias of CodeActionClientCapabilities.
//
// Deprecated: Use CodeActionClientCapabilities instead.
type TextDocumentClientCapabilitiesCodeAction = CodeActionClientCapabilities
// TextDocumentClientCapabilitiesCodeActionKind alias of CodeActionClientCapabilitiesKind.
//
// Deprecated: Use CodeActionClientCapabilitiesKind instead.
type TextDocumentClientCapabilitiesCodeActionKind = CodeActionClientCapabilitiesKind
// TextDocumentClientCapabilitiesCodeActionLiteralSupport alias of CodeActionClientCapabilitiesLiteralSupport.
//
// Deprecated: Use CodeActionClientCapabilitiesLiteralSupport instead.
type TextDocumentClientCapabilitiesCodeActionLiteralSupport = CodeActionClientCapabilitiesLiteralSupport
// TextDocumentClientCapabilitiesCodeActionResolveSupport alias of CodeActionClientCapabilitiesResolveSupport.
//
// Deprecated: Use CodeActionClientCapabilitiesResolveSupport instead.
type TextDocumentClientCapabilitiesCodeActionResolveSupport = CodeActionClientCapabilitiesResolveSupport
// TextDocumentClientCapabilitiesCodeLens alias of CodeLensClientCapabilities.
//
// Deprecated: Use CodeLensClientCapabilities instead.
type TextDocumentClientCapabilitiesCodeLens = CodeLensClientCapabilities
// TextDocumentClientCapabilitiesColorProvider alias of DocumentColorClientCapabilities.
//
// Deprecated: Use DocumentColorClientCapabilities instead.
type TextDocumentClientCapabilitiesColorProvider = DocumentColorClientCapabilities
// TextDocumentClientCapabilitiesCompletion alias of CompletionTextDocumentClientCapabilities.
//
// Deprecated: Use CompletionTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesCompletion = CompletionTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesCompletionItem alias of CompletionTextDocumentClientCapabilitiesItem.
//
// Deprecated: Use CompletionTextDocumentClientCapabilitiesItem instead.
type TextDocumentClientCapabilitiesCompletionItem = CompletionTextDocumentClientCapabilitiesItem
// TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport alias of CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport.
//
// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport instead.
type TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport = CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport
// TextDocumentClientCapabilitiesCompletionItemKind alias of CompletionTextDocumentClientCapabilitiesItemKind.
//
// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemKind instead.
type TextDocumentClientCapabilitiesCompletionItemKind = CompletionTextDocumentClientCapabilitiesItemKind
// TextDocumentClientCapabilitiesCompletionItemResolveSupport alias of CompletionTextDocumentClientCapabilitiesItemResolveSupport.
//
// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemResolveSupport instead.
type TextDocumentClientCapabilitiesCompletionItemResolveSupport = CompletionTextDocumentClientCapabilitiesItemResolveSupport
// TextDocumentClientCapabilitiesCompletionItemTagSupport alias of CompletionTextDocumentClientCapabilitiesItemTagSupport.
//
// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemTagSupport instead.
type TextDocumentClientCapabilitiesCompletionItemTagSupport = CompletionTextDocumentClientCapabilitiesItemTagSupport
// TextDocumentClientCapabilitiesDeclaration alias of DeclarationTextDocumentClientCapabilities.
//
// Deprecated: Use DeclarationTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesDeclaration = DeclarationTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesDefinition alias of DefinitionTextDocumentClientCapabilities.
//
// Deprecated: Use DefinitionTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesDefinition = DefinitionTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesDocumentHighlight alias of DocumentHighlightClientCapabilities.
//
// Deprecated: Use DocumentHighlightClientCapabilities instead.
type TextDocumentClientCapabilitiesDocumentHighlight = DocumentHighlightClientCapabilities
// TextDocumentClientCapabilitiesDocumentLink alias of DocumentLinkClientCapabilities.
//
// Deprecated: Use DocumentLinkClientCapabilities instead.
type TextDocumentClientCapabilitiesDocumentLink = DocumentLinkClientCapabilities
// TextDocumentClientCapabilitiesDocumentSymbol alias of DocumentSymbolClientCapabilities.
//
// Deprecated: Use DocumentSymbolClientCapabilities instead.
type TextDocumentClientCapabilitiesDocumentSymbol = DocumentSymbolClientCapabilities
// TextDocumentClientCapabilitiesDocumentSymbolTagSupport alias of DocumentSymbolClientCapabilitiesTagSupport.
//
// Deprecated: Use DocumentSymbolClientCapabilitiesTagSupport instead.
type TextDocumentClientCapabilitiesDocumentSymbolTagSupport = DocumentSymbolClientCapabilitiesTagSupport
// TextDocumentClientCapabilitiesFoldingRange alias of FoldingRangeClientCapabilities.
//
// Deprecated: Use FoldingRangeClientCapabilities instead.
type TextDocumentClientCapabilitiesFoldingRange = FoldingRangeClientCapabilities
// TextDocumentClientCapabilitiesFormatting alias of DocumentFormattingClientCapabilities.
//
// Deprecated: Use DocumentFormattingClientCapabilities instead.
type TextDocumentClientCapabilitiesFormatting = DocumentFormattingClientCapabilities
// TextDocumentClientCapabilitiesHover alias of HoverTextDocumentClientCapabilities.
//
// Deprecated: Use HoverTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesHover = HoverTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesImplementation alias of ImplementationTextDocumentClientCapabilities.
//
// Deprecated: Use ImplementationTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesImplementation = ImplementationTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesLinkedEditingRange alias of LinkedEditingRangeClientCapabilities.
//
// Deprecated: Use LinkedEditingRangeClientCapabilities instead.
type TextDocumentClientCapabilitiesLinkedEditingRange = LinkedEditingRangeClientCapabilities
// TextDocumentClientCapabilitiesMoniker alias of MonikerClientCapabilities.
//
// Deprecated: Use MonikerClientCapabilities instead.
type TextDocumentClientCapabilitiesMoniker = MonikerClientCapabilities
// TextDocumentClientCapabilitiesOnTypeFormatting alias of DocumentOnTypeFormattingClientCapabilities.
//
// Deprecated: Use DocumentOnTypeFormattingClientCapabilities instead.
type TextDocumentClientCapabilitiesOnTypeFormatting = DocumentOnTypeFormattingClientCapabilities
// TextDocumentClientCapabilitiesPublishDiagnostics alias of PublishDiagnosticsClientCapabilities.
//
// Deprecated: Use PublishDiagnosticsClientCapabilities instead.
type TextDocumentClientCapabilitiesPublishDiagnostics = PublishDiagnosticsClientCapabilities
// TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport alias of PublishDiagnosticsClientCapabilitiesTagSupport.
//
// Deprecated: Use PublishDiagnosticsClientCapabilitiesTagSupport instead.
type TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport = PublishDiagnosticsClientCapabilitiesTagSupport
// TextDocumentClientCapabilitiesRangeFormatting alias of DocumentRangeFormattingClientCapabilities.
//
// Deprecated: Use DocumentRangeFormattingClientCapabilities instead.
type TextDocumentClientCapabilitiesRangeFormatting = DocumentRangeFormattingClientCapabilities
// TextDocumentClientCapabilitiesReferences alias of ReferencesTextDocumentClientCapabilities.
//
// Deprecated: Use ReferencesTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesReferences = ReferencesTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesRename alias of RenameClientCapabilities.
//
// Deprecated: Use RenameClientCapabilities instead.
type TextDocumentClientCapabilitiesRename = RenameClientCapabilities
// TextDocumentClientCapabilitiesSelectionRange alias of SelectionRangeClientCapabilities.
//
// Deprecated: Use SelectionRangeClientCapabilities instead.
type TextDocumentClientCapabilitiesSelectionRange = SelectionRangeClientCapabilities
// TextDocumentClientCapabilitiesSemanticTokens alias of SemanticTokensClientCapabilities.
//
// Deprecated: Use SemanticTokensClientCapabilities instead.
type TextDocumentClientCapabilitiesSemanticTokens = SemanticTokensClientCapabilities
// TextDocumentClientCapabilitiesSignatureHelp alias of SignatureHelpTextDocumentClientCapabilities.
//
// Deprecated: Use SignatureHelpTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesSignatureHelp = SignatureHelpTextDocumentClientCapabilities
// TextDocumentClientCapabilitiesSynchronization alias of TextDocumentSyncClientCapabilities.
//
// Deprecated: Use TextDocumentSyncClientCapabilities instead.
type TextDocumentClientCapabilitiesSynchronization = TextDocumentSyncClientCapabilities
// TextDocumentClientCapabilitiesTypeDefinition alias of TypeDefinitionTextDocumentClientCapabilities.
//
// Deprecated: Use TypeDefinitionTextDocumentClientCapabilities instead.
type TextDocumentClientCapabilitiesTypeDefinition = TypeDefinitionTextDocumentClientCapabilities
// Abort alias of FailureHandlingKindAbort.
//
// Deprecated: Use FailureHandlingKindAbort instead.
const Abort = FailureHandlingKindAbort
// TextOnlyTransactional alias of FailureHandlingKindTextOnlyTransactional.
//
// Deprecated: Use FailureHandlingKindTextOnlyTransactional instead.
const TextOnlyTransactional = FailureHandlingKindTextOnlyTransactional
// Transactional alias of FailureHandlingKindTransactional.
//
// Deprecated: Use FailureHandlingKindTransactional instead.
const Transactional = FailureHandlingKindTransactional
// Undo alias of FailureHandlingKindUndo.
//
// Deprecated: Use FailureHandlingKindUndo instead.
const Undo = FailureHandlingKindUndo
// WorkspaceClientCapabilitiesSymbol alias of WorkspaceSymbolClientCapabilities.
//
// Deprecated: Use WorkspaceSymbolClientCapabilities instead.
type WorkspaceClientCapabilitiesSymbol = WorkspaceSymbolClientCapabilities
// WorkspaceClientCapabilitiesSymbolKind alias of SymbolKindCapabilities.
//
// Deprecated: Use SymbolKindCapabilities instead.
type WorkspaceClientCapabilitiesSymbolKind = SymbolKindCapabilities
// WorkspaceClientCapabilitiesCodeLens alias of CodeLensWorkspaceClientCapabilities.
//
// Deprecated: Use CodeLensWorkspaceClientCapabilities instead.
type WorkspaceClientCapabilitiesCodeLens = CodeLensWorkspaceClientCapabilities
// WorkspaceClientCapabilitiesDidChangeConfiguration alias of DidChangeConfigurationWorkspaceClientCapabilities.
//
// Deprecated: Use DidChangeConfigurationWorkspaceClientCapabilities instead.
type WorkspaceClientCapabilitiesDidChangeConfiguration = DidChangeConfigurationWorkspaceClientCapabilities
// WorkspaceClientCapabilitiesDidChangeWatchedFiles alias of DidChangeWatchedFilesWorkspaceClientCapabilities.
//
// Deprecated: Use DidChangeWatchedFilesWorkspaceClientCapabilities instead.
type WorkspaceClientCapabilitiesDidChangeWatchedFiles = DidChangeWatchedFilesWorkspaceClientCapabilities
// WorkspaceClientCapabilitiesExecuteCommand alias of ExecuteCommandClientCapabilities.
//
// Deprecated: Use ExecuteCommandClientCapabilities instead.
type WorkspaceClientCapabilitiesExecuteCommand = ExecuteCommandClientCapabilities
// WorkspaceClientCapabilitiesSemanticTokens alias of SemanticTokensWorkspaceClientCapabilities.
//
// Deprecated: Use SemanticTokensWorkspaceClientCapabilities instead.
type WorkspaceClientCapabilitiesSemanticTokens = SemanticTokensWorkspaceClientCapabilities
// WorkspaceClientCapabilitiesSemanticTokensRequests alias of SemanticTokensWorkspaceClientCapabilitiesRequests.
//
// Deprecated: Use SemanticTokensWorkspaceClientCapabilitiesRequests instead.
type WorkspaceClientCapabilitiesSemanticTokensRequests = SemanticTokensWorkspaceClientCapabilitiesRequests


@ -0,0 +1,149 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"strconv"
)
// Diagnostic represents a diagnostic, such as a compiler error or warning.
//
// Diagnostic objects are only valid in the scope of a resource.
type Diagnostic struct {
// Range is the range at which the message applies.
Range Range `json:"range"`
// Severity is the diagnostic's severity. Can be omitted. If omitted it is up to the
// client to interpret diagnostics as error, warning, info or hint.
Severity DiagnosticSeverity `json:"severity,omitempty"`
// Code is the diagnostic's code, which might appear in the user interface.
Code interface{} `json:"code,omitempty"` // int32 | string;
// CodeDescription an optional property to describe the error code.
//
// @since 3.16.0.
CodeDescription *CodeDescription `json:"codeDescription,omitempty"`
// Source a human-readable string describing the source of this
// diagnostic, e.g. 'typescript' or 'super lint'.
Source string `json:"source,omitempty"`
// Message is the diagnostic's message.
Message string `json:"message"`
// Tags is the additional metadata about the diagnostic.
//
// @since 3.15.0.
Tags []DiagnosticTag `json:"tags,omitempty"`
// RelatedInformation an array of related diagnostic information, e.g. when symbol-names within
// a scope collide all definitions can be marked via this property.
RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
// Data is a data entry field that is preserved between a
// "textDocument/publishDiagnostics" notification and
// "textDocument/codeAction" request.
//
// @since 3.16.0.
Data interface{} `json:"data,omitempty"`
}
// DiagnosticSeverity indicates the severity of a Diagnostic message.
type DiagnosticSeverity float64
const (
// DiagnosticSeverityError reports an error.
DiagnosticSeverityError DiagnosticSeverity = 1
// DiagnosticSeverityWarning reports a warning.
DiagnosticSeverityWarning DiagnosticSeverity = 2
// DiagnosticSeverityInformation reports information.
DiagnosticSeverityInformation DiagnosticSeverity = 3
// DiagnosticSeverityHint reports a hint.
DiagnosticSeverityHint DiagnosticSeverity = 4
)
// String implements fmt.Stringer.
func (d DiagnosticSeverity) String() string {
switch d {
case DiagnosticSeverityError:
return "Error"
case DiagnosticSeverityWarning:
return "Warning"
case DiagnosticSeverityInformation:
return "Information"
case DiagnosticSeverityHint:
return "Hint"
default:
return strconv.FormatFloat(float64(d), 'f', -10, 64)
}
}
// CodeDescription is the structure to capture a description for an error code.
//
// @since 3.16.0.
type CodeDescription struct {
// Href a URI to open with more information about the diagnostic error.
Href URI `json:"href"`
}
// DiagnosticTag represents a diagnostic tag.
//
// @since 3.15.0.
type DiagnosticTag float64
// list of DiagnosticTag.
const (
// DiagnosticTagUnnecessary unused or unnecessary code.
//
// Clients are allowed to render diagnostics with this tag faded out instead of having
// an error squiggle.
DiagnosticTagUnnecessary DiagnosticTag = 1
// DiagnosticTagDeprecated deprecated or obsolete code.
//
// Clients are allowed to render diagnostics with this tag struck through.
DiagnosticTagDeprecated DiagnosticTag = 2
)
// String implements fmt.Stringer.
func (d DiagnosticTag) String() string {
switch d {
case DiagnosticTagUnnecessary:
return "Unnecessary"
case DiagnosticTagDeprecated:
return "Deprecated"
default:
return strconv.FormatFloat(float64(d), 'f', -10, 64)
}
}
// DiagnosticRelatedInformation represents a related message and source code location for a diagnostic.
//
// This should be used to point to code locations that cause or are related to a diagnostic, e.g. when duplicating
// a symbol in a scope.
type DiagnosticRelatedInformation struct {
// Location is the location of this related diagnostic information.
Location Location `json:"location"`
// Message is the message of this related diagnostic information.
Message string `json:"message"`
}
// PublishDiagnosticsParams represents a params of PublishDiagnostics notification.
type PublishDiagnosticsParams struct {
// URI is the URI for which diagnostic information is reported.
URI DocumentURI `json:"uri"`
// Version is the optional version number of the document the diagnostics are published for.
//
// @since 3.15
Version uint32 `json:"version,omitempty"`
// Diagnostics an array of diagnostic information items.
Diagnostics []Diagnostic `json:"diagnostics"`
}
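// exampleUnusedVariableDiagnostics is an editor's illustrative sketch, not part of the
// upstream vendored file: it shows how a server might assemble the params for a
// textDocument/publishDiagnostics notification from the types above. Position is assumed
// to be this package's position type (with Line and Character fields) defined in another file.
func exampleUnusedVariableDiagnostics(uri DocumentURI, version uint32) PublishDiagnosticsParams {
	return PublishDiagnosticsParams{
		URI:     uri,
		Version: version,
		Diagnostics: []Diagnostic{
			{
				Range:    Range{Start: Position{Line: 1, Character: 0}, End: Position{Line: 1, Character: 5}},
				Severity: DiagnosticSeverityWarning,
				Source:   "example-lint",
				Message:  "unused variable",
				Tags:     []DiagnosticTag{DiagnosticTagUnnecessary},
			},
		},
	}
}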

23
vendor/go.lsp.dev/protocol/doc.go vendored Normal file

@ -0,0 +1,23 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
// Package protocol implements Language Server Protocol specification in Go.
//
// This package contains the structs that map directly to the wire format
// of the Language Server Protocol.
//
// It is a literal transcription, with unmodified comments, and only the changes
// required to make it Go code.
//
// - Names are uppercased to export them.
//
// - All fields have JSON tags added to correct the names.
//
// - Fields marked with a ? are also marked as "omitempty".
//
// - Fields that are "|| null" are made pointers.
//
// - Fields that are string or number are left as string.
//
// - Fields that are type "number" are made float64.
package protocol // import "go.lsp.dev/protocol"

40
vendor/go.lsp.dev/protocol/errors.go vendored Normal file

@ -0,0 +1,40 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import "go.lsp.dev/jsonrpc2"
const (
// LSPReservedErrorRangeStart is the start range of LSP reserved error codes.
//
// It doesn't denote a real error code.
//
// @since 3.16.0.
LSPReservedErrorRangeStart jsonrpc2.Code = -32899
// CodeContentModified is the state change that invalidates the result of a request in execution.
//
// Defined by the protocol.
CodeContentModified jsonrpc2.Code = -32801
// CodeRequestCancelled is the cancellation error.
//
// Defined by the protocol.
CodeRequestCancelled jsonrpc2.Code = -32800
// LSPReservedErrorRangeEnd is the end range of LSP reserved error codes.
//
// It doesn't denote a real error code.
//
// @since 3.16.0.
LSPReservedErrorRangeEnd jsonrpc2.Code = -32800
)
var (
// ErrContentModified should be used when a state change invalidates the result of a request in execution.
ErrContentModified = jsonrpc2.NewError(CodeContentModified, "cancelled JSON-RPC")
// ErrRequestCancelled should be used when a request is canceled early.
ErrRequestCancelled = jsonrpc2.NewError(CodeRequestCancelled, "cancelled JSON-RPC")
)

461
vendor/go.lsp.dev/protocol/general.go vendored Normal file

@ -0,0 +1,461 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// TraceValue represents an InitializeParams Trace mode.
type TraceValue string
// list of TraceValue.
const (
// TraceOff disable tracing.
TraceOff TraceValue = "off"
// TraceMessage normal tracing mode.
TraceMessage TraceValue = "message"
// TraceVerbose verbose tracing mode.
TraceVerbose TraceValue = "verbose"
)
// ClientInfo information about the client.
//
// @since 3.15.0.
type ClientInfo struct {
// Name is the name of the client as defined by the client.
Name string `json:"name"`
// Version is the client's version as defined by the client.
Version string `json:"version,omitempty"`
}
// InitializeParams params of Initialize request.
type InitializeParams struct {
WorkDoneProgressParams
// ProcessID is the process ID of the parent process that started
// the server. Is null if the process has not been started by another process.
// If the parent process is not alive then the server should exit its process (see the exit notification).
ProcessID int32 `json:"processId"`
// ClientInfo is the information about the client.
//
// @since 3.15.0
ClientInfo *ClientInfo `json:"clientInfo,omitempty"`
// Locale is the locale the client is currently showing the user interface
// in. This must not necessarily be the locale of the operating
// system.
//
// Uses IETF language tags as the value's syntax
// (See https://en.wikipedia.org/wiki/IETF_language_tag)
//
// @since 3.16.0.
Locale string `json:"locale,omitempty"`
// RootPath is the rootPath of the workspace. Is null
// if no folder is open.
//
// Deprecated: Use RootURI instead.
RootPath string `json:"rootPath,omitempty"`
// RootURI is the rootUri of the workspace. Is null if no
// folder is open. If both `rootPath` and "rootUri" are set
// "rootUri" wins.
//
// Deprecated: Use WorkspaceFolders instead.
RootURI DocumentURI `json:"rootUri,omitempty"`
// InitializationOptions user provided initialization options.
InitializationOptions interface{} `json:"initializationOptions,omitempty"`
// Capabilities is the capabilities provided by the client (editor or tool)
Capabilities ClientCapabilities `json:"capabilities"`
// Trace is the initial trace setting. If omitted trace is disabled ('off').
Trace TraceValue `json:"trace,omitempty"`
// WorkspaceFolders is the workspace folders configured in the client when the server starts.
// This property is only available if the client supports workspace folders.
// It can be `null` if the client supports workspace folders but none are
// configured.
//
// @since 3.6.0.
WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"`
}
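// exampleInitializeParams is an editor's illustrative sketch, not part of the upstream
// vendored file: a minimal Initialize request body that relies on the non-deprecated
// WorkspaceFolders field. ClientCapabilities and WorkspaceFolder are assumed to be
// defined in other files of this package.
func exampleInitializeParams(pid int32, folder WorkspaceFolder) InitializeParams {
	return InitializeParams{
		ProcessID:        pid,
		Capabilities:     ClientCapabilities{},
		Trace:            TraceOff,
		WorkspaceFolders: []WorkspaceFolder{folder},
	}
}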
// InitializeResult result of the Initialize request.
type InitializeResult struct {
// Capabilities is the capabilities the language server provides.
Capabilities ServerCapabilities `json:"capabilities"`
// ServerInfo Information about the server.
//
// @since 3.15.0.
ServerInfo *ServerInfo `json:"serverInfo,omitempty"`
}
// LogTraceParams params of LogTrace notification.
//
// @since 3.16.0.
type LogTraceParams struct {
// Message is the message to be logged.
Message string `json:"message"`
// Verbose is the additional information that can be computed if the "trace" configuration
// is set to "verbose".
Verbose TraceValue `json:"verbose,omitempty"`
}
// SetTraceParams params of SetTrace notification.
//
// @since 3.16.0.
type SetTraceParams struct {
// Value is the new value that should be assigned to the trace setting.
Value TraceValue `json:"value"`
}
// FileOperationPatternKind is a pattern kind describing if a glob pattern matches a file, a folder, or
// both.
//
// @since 3.16.0.
type FileOperationPatternKind string
// list of FileOperationPatternKind.
const (
// FileOperationPatternKindFile is the pattern matches a file only.
FileOperationPatternKindFile FileOperationPatternKind = "file"
// FileOperationPatternKindFolder is the pattern matches a folder only.
FileOperationPatternKindFolder FileOperationPatternKind = "folder"
)
// FileOperationPatternOptions matching options for the file operation pattern.
//
// @since 3.16.0.
type FileOperationPatternOptions struct {
// IgnoreCase indicates that the pattern should be matched ignoring casing.
IgnoreCase bool `json:"ignoreCase,omitempty"`
}
// FileOperationPattern a pattern to describe in which file operation requests or notifications
// the server is interested in.
//
// @since 3.16.0.
type FileOperationPattern struct {
// The glob pattern to match. Glob patterns can have the following syntax:
// - `*` to match one or more characters in a path segment
// - `?` to match on one character in a path segment
// - `**` to match any number of path segments, including none
// - `{}` to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript
// and JavaScript files)
// - `[]` to declare a range of characters to match in a path segment
// (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
// - `[!...]` to negate a range of characters to match in a path segment
// (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but
// not `example.0`)
Glob string `json:"glob"`
// Matches whether to match files or folders with this pattern.
//
// Matches both if undefined.
Matches FileOperationPatternKind `json:"matches,omitempty"`
// Options additional options used during matching.
Options FileOperationPatternOptions `json:"options,omitempty"`
}
// FileOperationFilter is a filter to describe in which file operation requests or notifications
// the server is interested in.
//
// @since 3.16.0.
type FileOperationFilter struct {
// Scheme is a URI like "file" or "untitled".
Scheme string `json:"scheme,omitempty"`
// Pattern is the actual file operation pattern.
Pattern FileOperationPattern `json:"pattern"`
}
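// exampleGoFileFilter is an editor's illustrative sketch, not part of the upstream
// vendored file: a filter a server could register to be notified about file operations
// on Go source files anywhere in the workspace, using the glob syntax documented above.
func exampleGoFileFilter() FileOperationFilter {
	return FileOperationFilter{
		Scheme: "file",
		Pattern: FileOperationPattern{
			Glob:    "**/*.go",
			Matches: FileOperationPatternKindFile,
		},
	}
}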
// CreateFilesParams is the parameters sent in notifications/requests for user-initiated creation
// of files.
//
// @since 3.16.0.
type CreateFilesParams struct {
// Files an array of all files/folders created in this operation.
Files []FileCreate `json:"files"`
}
// FileCreate represents information on a file/folder create.
//
// @since 3.16.0.
type FileCreate struct {
// URI is a file:// URI for the location of the file/folder being created.
URI string `json:"uri"`
}
// RenameFilesParams is the parameters sent in notifications/requests for user-initiated renames
// of files.
//
// @since 3.16.0.
type RenameFilesParams struct {
// Files an array of all files/folders renamed in this operation. When a folder
// is renamed, only the folder will be included, and not its children.
Files []FileRename `json:"files"`
}
// FileRename represents information on a file/folder rename.
//
// @since 3.16.0.
type FileRename struct {
// OldURI is a file:// URI for the original location of the file/folder being renamed.
OldURI string `json:"oldUri"`
// NewURI is a file:// URI for the new location of the file/folder being renamed.
NewURI string `json:"newUri"`
}
// DeleteFilesParams is the parameters sent in notifications/requests for user-initiated deletes
// of files.
//
// @since 3.16.0.
type DeleteFilesParams struct {
// Files an array of all files/folders deleted in this operation.
Files []FileDelete `json:"files"`
}
// FileDelete represents information on a file/folder delete.
//
// @since 3.16.0.
type FileDelete struct {
// URI is a file:// URI for the location of the file/folder being deleted.
URI string `json:"uri"`
}
// DocumentHighlightParams params of DocumentHighlight request.
//
// @since 3.15.0.
type DocumentHighlightParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// DeclarationParams params of Declaration request.
//
// @since 3.15.0.
type DeclarationParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// DefinitionParams params of Definition request.
//
// @since 3.15.0.
type DefinitionParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// TypeDefinitionParams params of TypeDefinition request.
//
// @since 3.15.0.
type TypeDefinitionParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// ImplementationParams params of Implementation request.
//
// @since 3.15.0.
type ImplementationParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// ShowDocumentParams params to show a document.
//
// @since 3.16.0.
type ShowDocumentParams struct {
// URI is the document uri to show.
URI URI `json:"uri"`
// External indicates to show the resource in an external program.
// To show, for example, `https://code.visualstudio.com/`
// in the default web browser, set `external` to `true`.
External bool `json:"external,omitempty"`
// TakeFocus an optional property to indicate whether the editor
// showing the document should take focus or not.
// Clients might ignore this property if an external
// program is started.
TakeFocus bool `json:"takeFocus,omitempty"`
// Selection an optional selection range if the document is a text
// document. Clients might ignore the property if an
// external program is started or the file is not a text
// file.
Selection *Range `json:"selection,omitempty"`
}
// ShowDocumentResult is the result of a show document request.
//
// @since 3.16.0.
type ShowDocumentResult struct {
// Success a boolean indicating if the show was successful.
Success bool `json:"success"`
}
// ServerInfo Information about the server.
//
// @since 3.15.0.
type ServerInfo struct {
// Name is the name of the server as defined by the server.
Name string `json:"name"`
// Version is the server's version as defined by the server.
Version string `json:"version,omitempty"`
}
// InitializeError known error codes for an "InitializeError".
type InitializeError struct {
// Retry indicates whether the client should execute the following retry logic:
// (1) show the message provided by the ResponseError to the user
// (2) user selects retry or cancel
// (3) if user selected retry the initialize method is sent again.
Retry bool `json:"retry,omitempty"`
}
// ReferencesOptions ReferencesProvider options.
//
// @since 3.15.0.
type ReferencesOptions struct {
WorkDoneProgressOptions
}
// WorkDoneProgressOptions WorkDoneProgress options.
//
// @since 3.15.0.
type WorkDoneProgressOptions struct {
WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
}
// LinkedEditingRangeParams params for the LinkedEditingRange request.
//
// @since 3.16.0.
type LinkedEditingRangeParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
}
// LinkedEditingRanges result of LinkedEditingRange request.
//
// @since 3.16.0.
type LinkedEditingRanges struct {
// Ranges a list of ranges that can be renamed together.
//
// The ranges must have identical length and contain identical text content.
//
// The ranges cannot overlap.
Ranges []Range `json:"ranges"`
// WordPattern an optional word pattern (regular expression) that describes valid contents for
// the given ranges.
//
// If no pattern is provided, the client configuration's word pattern will be used.
WordPattern string `json:"wordPattern,omitempty"`
}
// MonikerParams params for the Moniker request.
//
// @since 3.16.0.
type MonikerParams struct {
TextDocumentPositionParams
WorkDoneProgressParams
PartialResultParams
}
// UniquenessLevel is the Moniker uniqueness level to define scope of the moniker.
//
// @since 3.16.0.
type UniquenessLevel string
// list of UniquenessLevel.
const (
// UniquenessLevelDocument is the moniker is only unique inside a document.
UniquenessLevelDocument UniquenessLevel = "document"
// UniquenessLevelProject is the moniker is unique inside a project for which a dump got created.
UniquenessLevelProject UniquenessLevel = "project"
// UniquenessLevelGroup is the moniker is unique inside the group to which a project belongs.
UniquenessLevelGroup UniquenessLevel = "group"
// UniquenessLevelScheme is the moniker is unique inside the moniker scheme.
UniquenessLevelScheme UniquenessLevel = "scheme"
// UniquenessLevelGlobal is the moniker is globally unique.
UniquenessLevelGlobal UniquenessLevel = "global"
)
// MonikerKind is the moniker kind.
//
// @since 3.16.0.
type MonikerKind string
// list of MonikerKind.
const (
// MonikerKindImport is the moniker represent a symbol that is imported into a project.
MonikerKindImport MonikerKind = "import"
// MonikerKindExport is the moniker represents a symbol that is exported from a project.
MonikerKindExport MonikerKind = "export"
// MonikerKindLocal is the moniker represents a symbol that is local to a project (e.g. a local
// variable of a function, a class not visible outside the project, ...).
MonikerKindLocal MonikerKind = "local"
)
// Moniker definition to match LSIF 0.5 moniker definition.
//
// @since 3.16.0.
type Moniker struct {
// Scheme is the scheme of the moniker. For example tsc or .Net.
Scheme string `json:"scheme"`
// Identifier is the identifier of the moniker.
//
// The value is opaque in LSIF however schema owners are allowed to define the structure if they want.
Identifier string `json:"identifier"`
// Unique is the scope in which the moniker is unique.
Unique UniquenessLevel `json:"unique"`
// Kind is the moniker kind if known.
Kind MonikerKind `json:"kind,omitempty"`
}
// StaticRegistrationOptions staticRegistration options to be returned in the initialize request.
type StaticRegistrationOptions struct {
// ID is the id used to register the request. The id can be used to deregister
// the request again. See also Registration#id.
ID string `json:"id,omitempty"`
}
// DocumentLinkRegistrationOptions DocumentLinkRegistration options.
type DocumentLinkRegistrationOptions struct {
TextDocumentRegistrationOptions
// ResolveProvider document links have a resolve provider as well.
ResolveProvider bool `json:"resolveProvider,omitempty"`
}
// InitializedParams params of Initialized notification.
type InitializedParams struct{}
// WorkspaceFolders represents a slice of WorkspaceFolder.
type WorkspaceFolders []WorkspaceFolder

88
vendor/go.lsp.dev/protocol/handler.go vendored Normal file

@ -0,0 +1,88 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"context"
"fmt"
"github.com/segmentio/encoding/json"
"go.lsp.dev/jsonrpc2"
"go.lsp.dev/pkg/xcontext"
)
// CancelHandler wraps handler with support for cancellation requests.
func CancelHandler(handler jsonrpc2.Handler) jsonrpc2.Handler {
handler, canceller := jsonrpc2.CancelHandler(handler)
h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
if req.Method() != MethodCancelRequest {
// TODO(iancottrell): See if we can generate a reply for the request to be cancelled
// at the point of cancellation rather than waiting for gopls to naturally reply.
// To do that, we need to keep track of whether a reply has been sent already and
// be careful about racing between the two paths.
// TODO(iancottrell): Add a test that watches the stream and verifies the response
// for the cancelled request flows.
reply := func(ctx context.Context, resp interface{}, err error) error {
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#cancelRequest
if ctx.Err() != nil && err == nil {
err = ErrRequestCancelled
}
ctx = xcontext.Detach(ctx)
return reply(ctx, resp, err)
}
return handler(ctx, reply, req)
}
var params CancelParams
if err := json.Unmarshal(req.Params(), &params); err != nil {
return replyParseError(ctx, reply, err)
}
switch id := params.ID.(type) {
case int32:
canceller(jsonrpc2.NewNumberID(id))
case string:
canceller(jsonrpc2.NewStringID(id))
default:
return replyParseError(ctx, reply, fmt.Errorf("request ID %v malformed", id))
}
return reply(ctx, nil, nil)
}
return h
}
// Handlers default jsonrpc2.Handler.
func Handlers(handler jsonrpc2.Handler) jsonrpc2.Handler {
return CancelHandler(
jsonrpc2.AsyncHandler(
jsonrpc2.ReplyHandler(handler),
),
)
}
// Call invokes method on conn with params and decodes the response into result.
func Call(ctx context.Context, conn jsonrpc2.Conn, method string, params, result interface{}) error {
id, err := conn.Call(ctx, method, params, result)
if ctx.Err() != nil {
notifyCancel(ctx, conn, id)
}
return err
}
func notifyCancel(ctx context.Context, conn jsonrpc2.Conn, id jsonrpc2.ID) {
ctx = xcontext.Detach(ctx)
// Note that only *jsonrpc2.ID implements json.Marshaler.
conn.Notify(ctx, MethodCancelRequest, &CancelParams{ID: &id})
}
func replyParseError(ctx context.Context, reply jsonrpc2.Replier, err error) error {
return reply(ctx, nil, fmt.Errorf("%s: %w", jsonrpc2.ErrParse, err))
}
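// exampleInitialize is an editor's illustrative sketch, not part of the upstream
// vendored file: it shows Call in use for the initialize request. MethodInitialize,
// InitializeParams and InitializeResult are assumed to be defined in other files of
// this package.
func exampleInitialize(ctx context.Context, conn jsonrpc2.Conn) (InitializeResult, error) {
	var result InitializeResult
	err := Call(ctx, conn, MethodInitialize, &InitializeParams{}, &result)
	return result, err
}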

1316
vendor/go.lsp.dev/protocol/language.go vendored Normal file

File diff suppressed because it is too large

156
vendor/go.lsp.dev/protocol/log.go vendored Normal file

@ -0,0 +1,156 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"bytes"
"context"
"fmt"
"io"
"sync"
"time"
"go.lsp.dev/jsonrpc2"
)
// loggingStream wraps a jsonrpc2.Stream and logs its LSP traffic.
type loggingStream struct {
stream jsonrpc2.Stream
log io.Writer
logMu sync.Mutex
}
// LoggingStream returns a stream that does LSP protocol logging.
func LoggingStream(stream jsonrpc2.Stream, w io.Writer) jsonrpc2.Stream {
return &loggingStream{
stream: stream,
log: w,
}
}
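// exampleLoggedConn is an editor's illustrative sketch, not part of the upstream
// vendored file: it wraps an existing stream so every LSP message is traced to w,
// then builds a jsonrpc2 connection on top of the wrapper.
func exampleLoggedConn(stream jsonrpc2.Stream, w io.Writer) jsonrpc2.Conn {
	return jsonrpc2.NewConn(LoggingStream(stream, w))
}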
// Read implements jsonrpc2.Stream.Read.
func (s *loggingStream) Read(ctx context.Context) (jsonrpc2.Message, int64, error) {
msg, count, err := s.stream.Read(ctx)
if err == nil {
s.logCommon(msg, true)
}
return msg, count, err
}
// Write implements jsonrpc2.Stream.Write.
func (s *loggingStream) Write(ctx context.Context, msg jsonrpc2.Message) (int64, error) {
s.logCommon(msg, false)
count, err := s.stream.Write(ctx, msg)
return count, err
}
// Close implements jsonrpc2.Stream.Close.
func (s *loggingStream) Close() error {
return s.stream.Close()
}
type req struct {
method string
start time.Time
}
type mapped struct {
mu sync.Mutex
clientCalls map[string]req
serverCalls map[string]req
}
var maps = &mapped{
mu: sync.Mutex{},
clientCalls: make(map[string]req),
serverCalls: make(map[string]req),
}
// these 4 methods are each used exactly once, but it seemed
// better to have the encapsulation rather than ad hoc mutex
// code in 4 places.
func (m *mapped) client(id string) req {
m.mu.Lock()
v := m.clientCalls[id]
delete(m.clientCalls, id)
m.mu.Unlock()
return v
}
func (m *mapped) server(id string) req {
m.mu.Lock()
v := m.serverCalls[id]
delete(m.serverCalls, id)
m.mu.Unlock()
return v
}
func (m *mapped) setClient(id string, r req) {
m.mu.Lock()
m.clientCalls[id] = r
m.mu.Unlock()
}
func (m *mapped) setServer(id string, r req) {
m.mu.Lock()
m.serverCalls[id] = r
m.mu.Unlock()
}
const eor = "\r\n\r\n\r\n"
func (s *loggingStream) logCommon(msg jsonrpc2.Message, isRead bool) {
if msg == nil || s.log == nil {
return
}
s.logMu.Lock()
direction, pastTense := "Received", "Received"
get, set := maps.client, maps.setServer
if isRead {
direction, pastTense = "Sending", "Sent"
get, set = maps.server, maps.setClient
}
tm := time.Now()
tmfmt := tm.Format("15:04:05.000 PM")
var buf bytes.Buffer
fmt.Fprintf(&buf, "[Trace - %s] ", tmfmt) // common beginning
switch msg := msg.(type) {
case *jsonrpc2.Call:
id := fmt.Sprint(msg.ID())
fmt.Fprintf(&buf, "%s request '%s - (%s)'.\n", direction, msg.Method(), id)
fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor)
set(id, req{method: msg.Method(), start: tm})
case *jsonrpc2.Notification:
fmt.Fprintf(&buf, "%s notification '%s'.\n", direction, msg.Method())
fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor)
case *jsonrpc2.Response:
id := fmt.Sprint(msg.ID())
if err := msg.Err(); err != nil {
fmt.Fprintf(s.log, "[Error - %s] %s #%s %s%s", pastTense, tmfmt, id, err, eor)
return
}
cc := get(id)
elapsed := tm.Sub(cc.start)
fmt.Fprintf(&buf, "%s response '%s - (%s)' in %dms.\n",
direction, cc.method, id, elapsed/time.Millisecond)
fmt.Fprintf(&buf, "Result: %s%s", msg.Result(), eor)
}
s.log.Write(buf.Bytes())
s.logMu.Unlock()
}

119
vendor/go.lsp.dev/protocol/progress.go vendored Normal file

@ -0,0 +1,119 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// WorkDoneProgressKind kind of WorkDoneProgress.
//
// @since 3.15.0.
type WorkDoneProgressKind string
// list of WorkDoneProgressKind.
const (
// WorkDoneProgressKindBegin kind of WorkDoneProgressBegin.
WorkDoneProgressKindBegin WorkDoneProgressKind = "begin"
// WorkDoneProgressKindReport kind of WorkDoneProgressReport.
WorkDoneProgressKindReport WorkDoneProgressKind = "report"
// WorkDoneProgressKindEnd kind of WorkDoneProgressEnd.
WorkDoneProgressKindEnd WorkDoneProgressKind = "end"
)
// WorkDoneProgressBegin is the payload used to start progress reporting in a "$/progress" notification.
//
// @since 3.15.0.
type WorkDoneProgressBegin struct {
// Kind is the kind of WorkDoneProgressBegin.
//
// It must be WorkDoneProgressKindBegin.
Kind WorkDoneProgressKind `json:"kind"`
// Title mandatory title of the progress operation. Used to briefly inform about
// the kind of operation being performed.
//
// Examples: "Indexing" or "Linking dependencies".
Title string `json:"title"`
// Cancellable controls if a cancel button should show to allow the user to cancel the
// long running operation. Clients that don't support cancellation are allowed
// to ignore the setting.
Cancellable bool `json:"cancellable,omitempty"`
// Message is optional, more detailed associated progress message. Contains
// complementary information to the `title`.
//
// Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
// If unset, the previous progress message (if any) is still valid.
Message string `json:"message,omitempty"`
// Percentage is optional progress percentage to display (value 100 is considered 100%).
// If not provided infinite progress is assumed and clients are allowed
// to ignore the `percentage` value in subsequent report notifications.
//
// The value should be steadily rising. Clients are free to ignore values
// that are not following this rule.
Percentage uint32 `json:"percentage,omitempty"`
}
// WorkDoneProgressReport is the payload used to report progress in a "$/progress" notification.
//
// @since 3.15.0.
type WorkDoneProgressReport struct {
// Kind is the kind of WorkDoneProgressReport.
//
// It must be WorkDoneProgressKindReport.
Kind WorkDoneProgressKind `json:"kind"`
// Cancellable controls enablement state of a cancel button.
//
// Clients that don't support cancellation or don't support controlling the button's
// enablement state are allowed to ignore the property.
Cancellable bool `json:"cancellable,omitempty"`
// Message is optional, more detailed associated progress message. Contains
// complementary information to the `title`.
//
// Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
// If unset, the previous progress message (if any) is still valid.
Message string `json:"message,omitempty"`
// Percentage is optional progress percentage to display (value 100 is considered 100%).
// If not provided infinite progress is assumed and clients are allowed
// to ignore the `percentage` value in subsequent report notifications.
//
// The value should be steadily rising. Clients are free to ignore values
// that are not following this rule.
Percentage uint32 `json:"percentage,omitempty"`
}
// WorkDoneProgressEnd is the payload used to signal the end of progress reporting in a "$/progress" notification.
//
// @since 3.15.0.
type WorkDoneProgressEnd struct {
// Kind is the kind of WorkDoneProgressEnd.
//
// It must be WorkDoneProgressKindEnd.
Kind WorkDoneProgressKind `json:"kind"`
// Message is an optional final message, for example indicating the outcome
// of the operation.
Message string `json:"message,omitempty"`
}
// WorkDoneProgressParams is a parameter property used to report work done progress.
//
// @since 3.15.0.
type WorkDoneProgressParams struct {
// WorkDoneToken an optional token that a server can use to report work done progress.
WorkDoneToken *ProgressToken `json:"workDoneToken,omitempty"`
}
// PartialResultParams is the parameter literal used to pass a partial result token.
//
// @since 3.15.0.
type PartialResultParams struct {
// PartialResultToken an optional token that a server can use to report partial results
// (for example, streaming) to the client.
PartialResultToken *ProgressToken `json:"partialResultToken,omitempty"`
}
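// exampleProgressSequence is an editor's illustrative sketch, not part of the upstream
// vendored file: the three payloads a server would send, in order, under a single
// work-done progress token via "$/progress" notifications.
func exampleProgressSequence() (WorkDoneProgressBegin, WorkDoneProgressReport, WorkDoneProgressEnd) {
	begin := WorkDoneProgressBegin{Kind: WorkDoneProgressKindBegin, Title: "Indexing", Cancellable: true}
	report := WorkDoneProgressReport{Kind: WorkDoneProgressKindReport, Message: "3/25 files", Percentage: 12}
	end := WorkDoneProgressEnd{Kind: WorkDoneProgressKindEnd, Message: "indexing finished"}
	return begin, report, end
}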

42
vendor/go.lsp.dev/protocol/protocol.go vendored Normal file

@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"context"
"go.uber.org/zap"
"go.lsp.dev/jsonrpc2"
)
// NewServer returns the context in which client is embedded, jsonrpc2.Conn, and the Client.
func NewServer(ctx context.Context, server Server, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Client) {
conn := jsonrpc2.NewConn(stream)
client := ClientDispatcher(conn, logger.Named("client"))
ctx = WithClient(ctx, client)
conn.Go(ctx,
Handlers(
ServerHandler(server, jsonrpc2.MethodNotFoundHandler),
),
)
return ctx, conn, client
}
// NewClient returns the context in which Client is embedded, jsonrpc2.Conn, and the Server.
func NewClient(ctx context.Context, client Client, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Server) {
ctx = WithClient(ctx, client)
conn := jsonrpc2.NewConn(stream)
conn.Go(ctx,
Handlers(
ClientHandler(client, jsonrpc2.MethodNotFoundHandler),
),
)
server := ServerDispatcher(conn, logger.Named("server"))
return ctx, conn, server
}
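// exampleServe is an editor's illustrative sketch, not part of the upstream vendored
// file: it shows how NewServer is typically wired up with a Server implementation and
// a jsonrpc2.Stream (for example one built over stdio). zap.NewNop is assumed to be the
// usual go.uber.org/zap no-op logger constructor.
func exampleServe(ctx context.Context, srv Server, stream jsonrpc2.Stream) (context.Context, jsonrpc2.Conn, Client) {
	return NewServer(ctx, srv, stream, zap.NewNop())
}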


@ -0,0 +1,44 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// Registration general parameters to register for a capability.
type Registration struct {
// ID is the id used to register the request. The id can be used to deregister
// the request again.
ID string `json:"id"`
// Method is the method / capability to register for.
Method string `json:"method"`
// RegisterOptions options necessary for the registration.
RegisterOptions interface{} `json:"registerOptions,omitempty"`
}
// RegistrationParams params of Register Capability.
type RegistrationParams struct {
Registrations []Registration `json:"registrations"`
}
// TextDocumentRegistrationOptions TextDocumentRegistration options.
type TextDocumentRegistrationOptions struct {
// DocumentSelector a document selector to identify the scope of the registration. If set to null
// the document selector provided on the client side will be used.
DocumentSelector DocumentSelector `json:"documentSelector"`
}
// Unregistration general parameters to unregister a capability.
type Unregistration struct {
// ID is the id used to unregister the request or notification. Usually an id
// provided during the register request.
ID string `json:"id"`
// Method is the method / capability to unregister for.
Method string `json:"method"`
}
// UnregistrationParams params of Unregistration.
type UnregistrationParams struct {
Unregisterations []Unregistration `json:"unregisterations"`
}


@ -0,0 +1,110 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// SelectionRangeProviderOptions selection range provider options interface.
type SelectionRangeProviderOptions interface{}
// SelectionRange represents a part of a selection hierarchy.
//
// A selection range may have a parent selection range that contains it.
//
// @since 3.15.0.
type SelectionRange struct {
// Range is the Range of this selection range.
Range Range `json:"range"`
// Parent is the parent selection range containing this range. Therefore `parent.range` must contain this Range.
Parent *SelectionRange `json:"parent,omitempty"`
}
// EnableSelectionRange reports whether the selection range capability is enabled.
type EnableSelectionRange bool
// compile time check whether the EnableSelectionRange implements a SelectionRangeProviderOptions interface.
var _ SelectionRangeProviderOptions = (*EnableSelectionRange)(nil)
// Value implements SelectionRangeProviderOptions interface.
func (v EnableSelectionRange) Value() interface{} {
return bool(v)
}
// NewEnableSelectionRange returns a new EnableSelectionRange as a SelectionRangeProviderOptions.
func NewEnableSelectionRange(enable bool) SelectionRangeProviderOptions {
v := EnableSelectionRange(enable)
return &v
}
// SelectionRangeOptions is the server capability of selection range.
type SelectionRangeOptions struct {
WorkDoneProgressOptions
}
// compile time check whether the SelectionRangeOptions implements a SelectionRangeProviderOptions interface.
var _ SelectionRangeProviderOptions = (*SelectionRangeOptions)(nil)
// Value implements SelectionRangeProviderOptions interface.
func (v *SelectionRangeOptions) Value() interface{} {
return v
}
// NewSelectionRangeOptions returns a new SelectionRangeOptions as a SelectionRangeProviderOptions.
func NewSelectionRangeOptions(enableWorkDoneProgress bool) SelectionRangeProviderOptions {
v := SelectionRangeOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: enableWorkDoneProgress,
},
}
return &v
}
// SelectionRangeRegistrationOptions is the server capability of selection range registration.
type SelectionRangeRegistrationOptions struct {
SelectionRangeOptions
TextDocumentRegistrationOptions
StaticRegistrationOptions
}
// compile time check whether the SelectionRangeRegistrationOptions implements a SelectionRangeProviderOptions interface.
var _ SelectionRangeProviderOptions = (*SelectionRangeRegistrationOptions)(nil)
// Value implements SelectionRangeProviderOptions interface.
func (v *SelectionRangeRegistrationOptions) Value() interface{} {
return v
}
// NewSelectionRangeRegistrationOptions returns a new SelectionRangeRegistrationOptions as a SelectionRangeProviderOptions.
func NewSelectionRangeRegistrationOptions(enableWorkDoneProgress bool, selector DocumentSelector, id string) SelectionRangeProviderOptions {
v := SelectionRangeRegistrationOptions{
SelectionRangeOptions: SelectionRangeOptions{
WorkDoneProgressOptions: WorkDoneProgressOptions{
WorkDoneProgress: enableWorkDoneProgress,
},
},
TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
DocumentSelector: selector,
},
StaticRegistrationOptions: StaticRegistrationOptions{
ID: id,
},
}
return &v
}
// SelectionRangeParams represents a parameter literal used in selection range requests.
//
// @since 3.15.0.
type SelectionRangeParams struct {
WorkDoneProgressParams
PartialResultParams
// TextDocument is the text document.
TextDocument TextDocumentIdentifier `json:"textDocument"`
// Positions is the positions inside the text document.
Positions []Position `json:"positions"`
}
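// exampleSelectionRangeParams is an editor's illustrative sketch, not part of the
// upstream vendored file: a request for the selection hierarchy around one cursor
// position. TextDocumentIdentifier and Position are assumed to be defined in other
// files of this package.
func exampleSelectionRangeParams(doc TextDocumentIdentifier) SelectionRangeParams {
	return SelectionRangeParams{
		TextDocument: doc,
		Positions:    []Position{{Line: 10, Character: 4}},
	}
}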


@ -0,0 +1,179 @@
// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// SemanticTokenTypes represents a type of semantic token.
//
// @since 3.16.0.
type SemanticTokenTypes string
// list of SemanticTokenTypes.
const (
SemanticTokenNamespace SemanticTokenTypes = "namespace"
// Represents a generic type. Acts as a fallback for types which
// can't be mapped to a specific type like class or enum.
SemanticTokenType SemanticTokenTypes = "type"
SemanticTokenClass SemanticTokenTypes = "class"
SemanticTokenEnum SemanticTokenTypes = "enum"
SemanticTokenInterface SemanticTokenTypes = "interface"
SemanticTokenStruct SemanticTokenTypes = "struct"
SemanticTokenTypeParameter SemanticTokenTypes = "typeParameter"
SemanticTokenParameter SemanticTokenTypes = "parameter"
SemanticTokenVariable SemanticTokenTypes = "variable"
SemanticTokenProperty SemanticTokenTypes = "property"
SemanticTokenEnumMember SemanticTokenTypes = "enumMember"
SemanticTokenEvent SemanticTokenTypes = "event"
SemanticTokenFunction SemanticTokenTypes = "function"
SemanticTokenMethod SemanticTokenTypes = "method"
SemanticTokenMacro SemanticTokenTypes = "macro"
SemanticTokenKeyword SemanticTokenTypes = "keyword"
SemanticTokenModifier SemanticTokenTypes = "modifier"
SemanticTokenComment SemanticTokenTypes = "comment"
SemanticTokenString SemanticTokenTypes = "string"
SemanticTokenNumber SemanticTokenTypes = "number"
SemanticTokenRegexp SemanticTokenTypes = "regexp"
SemanticTokenOperator SemanticTokenTypes = "operator"
)
// SemanticTokenModifiers represents a modifier of a semantic token.
//
// @since 3.16.0.
type SemanticTokenModifiers string
// list of SemanticTokenModifiers.
const (
SemanticTokenModifierDeclaration SemanticTokenModifiers = "declaration"
SemanticTokenModifierDefinition SemanticTokenModifiers = "definition"
SemanticTokenModifierReadonly SemanticTokenModifiers = "readonly"
SemanticTokenModifierStatic SemanticTokenModifiers = "static"
SemanticTokenModifierDeprecated SemanticTokenModifiers = "deprecated"
SemanticTokenModifierAbstract SemanticTokenModifiers = "abstract"
SemanticTokenModifierAsync SemanticTokenModifiers = "async"
SemanticTokenModifierModification SemanticTokenModifiers = "modification"
SemanticTokenModifierDocumentation SemanticTokenModifiers = "documentation"
SemanticTokenModifierDefaultLibrary SemanticTokenModifiers = "defaultLibrary"
)
// TokenFormat is an additional token format capability to allow future extensions of the format.
//
// @since 3.16.0.
type TokenFormat string
// TokenFormatRelative describes tokens using relative positions.
const TokenFormatRelative TokenFormat = "relative"
// SemanticTokensLegend is the legend for semantic tokens. On the capability level, types and modifiers are defined using strings.
//
// However the real encoding happens using numbers.
//
// The server therefore needs to let the client know which numbers it is using for which types and modifiers.
//
// @since 3.16.0.
type SemanticTokensLegend struct {
// TokenTypes is the token types a server uses.
TokenTypes []SemanticTokenTypes `json:"tokenTypes"`
// TokenModifiers is the token modifiers a server uses.
TokenModifiers []SemanticTokenModifiers `json:"tokenModifiers"`
}
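// exampleLegend is an editor's illustrative sketch, not part of the upstream vendored
// file: a minimal legend a server could advertise; the numeric values in
// SemanticTokens.Data then index into these two slices.
func exampleLegend() SemanticTokensLegend {
	return SemanticTokensLegend{
		TokenTypes:     []SemanticTokenTypes{SemanticTokenFunction, SemanticTokenVariable, SemanticTokenKeyword},
		TokenModifiers: []SemanticTokenModifiers{SemanticTokenModifierDeclaration, SemanticTokenModifierReadonly},
	}
}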
// SemanticTokensParams params for the SemanticTokensFull request.
//
// @since 3.16.0.
type SemanticTokensParams struct {
WorkDoneProgressParams
PartialResultParams
// TextDocument is the text document.
TextDocument TextDocumentIdentifier `json:"textDocument"`
}
// SemanticTokens is the result of SemanticTokensFull request.
//
// @since 3.16.0.
type SemanticTokens struct {
// ResultID an optional result id. If provided and clients support delta updating
// the client will include the result id in the next semantic token request.
//
// A server can then instead of computing all semantic tokens again simply
// send a delta.
ResultID string `json:"resultId,omitempty"`
// Data is the actual tokens.
Data []uint32 `json:"data"`
}
// SemanticTokensPartialResult is the partial result of SemanticTokensFull request.
//
// @since 3.16.0.
type SemanticTokensPartialResult struct {
// Data is the actual tokens.
Data []uint32 `json:"data"`
}
// SemanticTokensDeltaParams params for the SemanticTokensFullDelta request.
//
// @since 3.16.0.
type SemanticTokensDeltaParams struct {
WorkDoneProgressParams
PartialResultParams
// TextDocument is the text document.
TextDocument TextDocumentIdentifier `json:"textDocument"`
// PreviousResultID is the result id of a previous response.
//
// The result Id can either point to a full response or a delta response depending on what was received last.
PreviousResultID string `json:"previousResultId"`
}
// SemanticTokensDelta result of SemanticTokensFullDelta request.
//
// @since 3.16.0.
type SemanticTokensDelta struct {
// ResultID is the result id.
//
// This field is readonly.
ResultID string `json:"resultId,omitempty"`
// Edits is the semantic token edits to transform a previous result into a new
// result.
Edits []SemanticTokensEdit `json:"edits"`
}
// SemanticTokensDeltaPartialResult is the partial result of SemanticTokensFullDelta request.
//
// @since 3.16.0.
type SemanticTokensDeltaPartialResult struct {
Edits []SemanticTokensEdit `json:"edits"`
}
// SemanticTokensEdit is the semantic token edit.
//
// @since 3.16.0.
type SemanticTokensEdit struct {
// Start is the start offset of the edit.
Start uint32 `json:"start"`
// DeleteCount is the count of elements to remove.
DeleteCount uint32 `json:"deleteCount"`
// Data is the elements to insert.
Data []uint32 `json:"data,omitempty"`
}
// SemanticTokensRangeParams params for the SemanticTokensRange request.
//
// @since 3.16.0.
type SemanticTokensRangeParams struct {
WorkDoneProgressParams
PartialResultParams
// TextDocument is the text document.
TextDocument TextDocumentIdentifier `json:"textDocument"`
// Range is the range the semantic tokens are requested for.
Range Range `json:"range"`
}

1892
vendor/go.lsp.dev/protocol/server.go vendored Normal file

File diff suppressed because it is too large

111
vendor/go.lsp.dev/protocol/text.go vendored Normal file

@ -0,0 +1,111 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
import (
"strconv"
)
// DidOpenTextDocumentParams params of DidOpenTextDocument notification.
type DidOpenTextDocumentParams struct {
// TextDocument is the document that was opened.
TextDocument TextDocumentItem `json:"textDocument"`
}
// DidChangeTextDocumentParams params of DidChangeTextDocument notification.
type DidChangeTextDocumentParams struct {
// TextDocument is the document that did change. The version number points
// to the version after all provided content changes have
// been applied.
TextDocument VersionedTextDocumentIdentifier `json:"textDocument"`
// ContentChanges is the actual content changes. The content changes describe single state changes
// to the document. So if there are two content changes c1 and c2 for a document
// in state S then c1 move the document to S' and c2 to S''.
ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"` // []TextDocumentContentChangeEvent | text
}
// TextDocumentSaveReason represents reasons why a text document is saved.
type TextDocumentSaveReason float64
const (
// TextDocumentSaveReasonManual means the save was manually triggered, e.g. by the user pressing save, by starting debugging,
// or by an API call.
TextDocumentSaveReasonManual TextDocumentSaveReason = 1
// TextDocumentSaveReasonAfterDelay means the save was automatic after a delay.
TextDocumentSaveReasonAfterDelay TextDocumentSaveReason = 2
// TextDocumentSaveReasonFocusOut means the save happened when the editor lost focus.
TextDocumentSaveReasonFocusOut TextDocumentSaveReason = 3
)
// String implements fmt.Stringer.
func (t TextDocumentSaveReason) String() string {
switch t {
case TextDocumentSaveReasonManual:
return "Manual"
case TextDocumentSaveReasonAfterDelay:
return "AfterDelay"
case TextDocumentSaveReasonFocusOut:
return "FocusOut"
default:
return strconv.FormatFloat(float64(t), 'f', -10, 64)
}
}
// TextDocumentChangeRegistrationOptions describe options to be used when registering for text document change events.
type TextDocumentChangeRegistrationOptions struct {
TextDocumentRegistrationOptions
// SyncKind how documents are synced to the server. See TextDocumentSyncKind.Full
// and TextDocumentSyncKind.Incremental.
SyncKind TextDocumentSyncKind `json:"syncKind"`
}
// WillSaveTextDocumentParams is the parameters sent in a will save text document notification.
type WillSaveTextDocumentParams struct {
// TextDocument is the document that will be saved.
TextDocument TextDocumentIdentifier `json:"textDocument"`
// Reason is the 'TextDocumentSaveReason'.
Reason TextDocumentSaveReason `json:"reason,omitempty"`
}
// DidSaveTextDocumentParams params of DidSaveTextDocument notification.
type DidSaveTextDocumentParams struct {
// Text is the optional content when saved. Depends on the includeText value
// when the save notification was requested.
Text string `json:"text,omitempty"`
// TextDocument is the document that was saved.
TextDocument TextDocumentIdentifier `json:"textDocument"`
}
// TextDocumentContentChangeEvent an event describing a change to a text document. If range and rangeLength are omitted
// the new text is considered to be the full content of the document.
type TextDocumentContentChangeEvent struct {
// Range is the range of the document that changed.
Range Range `json:"range"`
// RangeLength is the length of the range that got replaced.
RangeLength uint32 `json:"rangeLength,omitempty"`
// Text is the new text of the document.
Text string `json:"text"`
}
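// exampleContentChange is an editor's illustrative sketch, not part of the upstream
// vendored file: an incremental change event that replaces five characters at the
// start of line 3 with new text. Position is assumed to be defined in another file of
// this package.
func exampleContentChange() TextDocumentContentChangeEvent {
	return TextDocumentContentChangeEvent{
		Range:       Range{Start: Position{Line: 3, Character: 0}, End: Position{Line: 3, Character: 5}},
		RangeLength: 5,
		Text:        "hello",
	}
}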
// TextDocumentSaveRegistrationOptions TextDocumentSave Registration options.
type TextDocumentSaveRegistrationOptions struct {
TextDocumentRegistrationOptions
// IncludeText indicates whether the client is supposed to include the content on save.
IncludeText bool `json:"includeText,omitempty"`
}
// DidCloseTextDocumentParams params of DidCloseTextDocument notification.
type DidCloseTextDocumentParams struct {
// TextDocument the document that was closed.
TextDocument TextDocumentIdentifier `json:"textDocument"`
}

9
vendor/go.lsp.dev/protocol/util.go vendored Normal file

@ -0,0 +1,9 @@
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// NewVersion returns a pointer to i, for use as an optional int32 version value.
func NewVersion(i int32) *int32 {
return &i
}

7
vendor/go.lsp.dev/protocol/version.go vendored Normal file

@ -0,0 +1,7 @@
// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
package protocol
// Version is the version of the language-server-protocol specification being implemented.
const Version = "3.15.3"

Some files were not shown because too many files have changed in this diff