feat(wallet): make remaining filter APIs async

Implement activity.Scheduler to serialize and limit the number of
calls to the activity service. This way we protect against inefficient
parallel queries and easily support async execution and rate limiting
based on the API requirements.

Refactor the activity APIs to be async and use the Scheduler to manage
the activity service calls, configured with one of the two replacement
policies: cancel the old task or ignore the new one.

Updates status-desktop #11170
Stefan 2023-06-22 13:28:35 +02:00 committed by Stefan Dunca
parent 28229faec0
commit c61a4000d8
36 changed files with 4811 additions and 76 deletions
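For orientation before the diff, here is a minimal sketch of how a caller is expected to drive the new Scheduler; the task ID, the returned value, and the printing are illustrative assumptions, not part of the commit:

```go
package main

import (
	"context"
	"fmt"

	"github.com/status-im/status-go/services/wallet/activity"
)

func main() {
	s := activity.NewScheduler()
	defer s.Stop()

	// Illustrative task type; the ID is arbitrary and not part of the commit.
	task := activity.TaskType{ID: 1, Policy: activity.ReplacementPolicyCancelOld}

	done := make(chan struct{})
	ignored := s.Enqueue(task, func(ctx context.Context) (interface{}, error) {
		// Long-running work goes here; honor ctx so a newer Enqueue of the
		// same type (with ReplacementPolicyCancelOld) can cancel this task.
		return 42, ctx.Err()
	}, func(res interface{}, taskType activity.TaskType, err error) {
		// Called once per non-ignored task; err is context.Canceled or
		// ErrTaskOverwritten when the task was superseded by a newer one.
		fmt.Println("result:", res, "err:", err)
		close(done)
	})
	fmt.Println("ignored:", ignored)
	<-done
}
```

Under ReplacementPolicyCancelOld, a second Enqueue of the same task type would cancel the running task; under ReplacementPolicyIgnoreNew it would return ignored == true and never call either callback.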

go.mod

@@ -83,6 +83,7 @@ require (
github.com/schollz/peerdiscovery v1.7.0
github.com/siphiuel/lc-proxy-wrapper v0.0.0-20230516150924-246507cee8c7
github.com/waku-org/go-waku v0.7.1-0.20230630125546-47cdb86aaf07
github.com/wk8/go-ordered-map/v2 v2.1.7
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1
go.uber.org/multierr v1.11.0
@@ -113,6 +114,7 @@ require (
github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect
github.com/anacrolix/utp v0.1.0 // indirect
github.com/andybalholm/cascadia v1.2.0 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/benbjohnson/immutable v0.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -121,6 +123,7 @@ require (
github.com/btcsuite/btcd v0.22.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
@@ -179,6 +182,7 @@ require (
github.com/libp2p/go-netroute v0.2.1 // indirect
github.com/libp2p/go-reuseport v0.3.0 // indirect
github.com/libp2p/go-yamux/v4 v4.0.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect

go.sum

@@ -384,6 +384,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21
github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw=
github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw=
github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
@@ -439,6 +441,7 @@ github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtE
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
@@ -1366,6 +1369,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
@@ -2108,6 +2113,8 @@ github.com/willf/bloom v0.0.0-20170505221640-54e3b963ee16/go.mod h1:MmAltL9pDMNT
github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8=
github.com/wk8/go-ordered-map v1.0.0 h1:BV7z+2PaK8LTSd/mWgY12HyMAo5CEgkHqbkVq2thqr8=
github.com/wk8/go-ordered-map v1.0.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ3t3VTS6gRk=
github.com/wk8/go-ordered-map/v2 v2.1.7 h1:aUZ1xBMdbvY8wnNt77qqo4nyT3y0pX4Usat48Vm+hik=
github.com/wk8/go-ordered-map/v2 v2.1.7/go.mod h1:9Xvgm2mV2kSq2SAm0Y608tBmu8akTzI7c2bz7/G7ZN4=
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=


@@ -18,7 +18,7 @@ Legend:
Improve on the identified limitations
- [ ] Missing filtering data
- [x] Missing filtering data
- [x] Missing cached (not extracted as a column)
- Extracting the data from the raw data is expensive, but it might be negligible given that we should not expect more than ~20 entries per second even in the worst-case scenario.
- [x] Table extensions
@@ -47,9 +47,9 @@ UX requirements
- [x] `status`: for status icon and label
- [ ] `chainIDs`: for chain icons
- Missing for `Bridge`, `Buy`, `Swap`
- [ ] `amount`s: add to the activity.Entry
- [x] `amount`s: add to the activity.Entry
- already in DB
- [ ] `tokenCode`s: add to the activity.Entry
- [x] `tokenCode`s: add to the activity.Entry
- already in DB
- [ ] `to`/`from`/`owner`: add to the activity.Entry
- already in DB, coming soon
@@ -63,7 +63,7 @@ UX requirements
Extend `entry.nim:ActivityEntry` and `activity.go:Entry` with presentation layer data
- [ ] `activityType`: instead of the current `MultiTransactionType`
- [x] `activityType`: instead of the current `MultiTransactionType`
- [x] `status`: for status icon and label
## Current state
@@ -158,5 +158,4 @@ Will leave the performance concerns for the next milestone
- Fewer changes to migrate existing data. Still have to maintain activity-filtering-specific data
- Cons:
- Slower to query (don't know how much yet)
- Complex query increases maintenance
- Complex query increases maintenance


@@ -219,7 +219,6 @@ const (
toTrType = byte(2)
// TODO: Multi-transaction network information is missing in filtering
// TODO: extract token code for non transfer type eth
// TODO optimization: consider implementing nullable []byte instead of using strings for addresses
// or insert binary (X'...' syntax) directly into the query
//


@@ -0,0 +1,200 @@
package activity
import (
"context"
"errors"
"fmt"
"sync"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
var ErrTaskOverwritten = errors.New("task overwritten")
type Scheduler struct {
queue *orderedmap.OrderedMap[TaskType, *taskContext]
queueMutex sync.Mutex
context context.Context
cancelFn context.CancelFunc
doNotDeleteCurrentTask bool
}
type ReplacementPolicy = int
const (
ReplacementPolicyCancelOld ReplacementPolicy = iota
ReplacementPolicyIgnoreNew
)
type TaskType struct {
ID int
Policy ReplacementPolicy
}
type taskFunction func(context.Context) (interface{}, error)
type resultFunction func(interface{}, TaskType, error)
type taskContext struct {
taskType TaskType
policy ReplacementPolicy
taskFn taskFunction
resFn resultFunction
}
func NewScheduler() *Scheduler {
return &Scheduler{
queue: orderedmap.New[TaskType, *taskContext](),
}
}
// Enqueue provides a queue of task types allowing only one task at a time of the corresponding type. The running task is the first one in the queue (s.queue.Oldest())
//
// Schedule policy for new tasks:
// - appended at the back of the queue (s.queue.Set()) if no task of the same type is already scheduled
// - otherwise it replaces or is dropped in favor of the queued task of the same type, depending on the policy:
// - in case of ReplacementPolicyIgnoreNew, the new task will be ignored
// - in case of ReplacementPolicyCancelOld, the old task will be canceled if already running, or overwritten if not yet run, and the new one will be executed when its turn comes.
//
// The task function (taskFn) might not be executed if
// - the task is ignored
// - the task is overwritten. The result function (resFn) will be called with ErrTaskOverwritten
//
// The result function (resFn) will always be called if the task is not ignored
func (s *Scheduler) Enqueue(taskType TaskType, taskFn taskFunction, resFn resultFunction) (ignored bool) {
s.queueMutex.Lock()
defer s.queueMutex.Unlock()
taskRunning := s.queue.Len() > 0
existingTask, typeInQueue := s.queue.Get(taskType)
newTask := &taskContext{
taskType: taskType,
policy: taskType.Policy,
taskFn: taskFn,
resFn: resFn,
}
if taskRunning {
if typeInQueue {
if s.queue.Oldest().Value.taskType == taskType {
// If same task type is running
if existingTask.policy == ReplacementPolicyCancelOld {
// If a previous task is running, cancel it
if s.cancelFn != nil {
s.cancelFn()
s.cancelFn = nil
} else {
// In case of multiple tasks of the same type, the previous one is overwritten
go func() {
existingTask.resFn(nil, existingTask.taskType, ErrTaskOverwritten)
}()
}
s.doNotDeleteCurrentTask = true
// Add it again to refresh the order of the task
s.queue.Delete(taskType)
s.queue.Set(taskType, newTask)
} else {
ignored = true
}
} else {
// if other task type is running
// notify the queued one that it is overwritten or ignored
if existingTask.policy == ReplacementPolicyCancelOld {
go func() {
existingTask.resFn(nil, existingTask.taskType, ErrTaskOverwritten)
}()
// Overwrite the queued one of the same type
existingTask.taskFn = taskFn
existingTask.resFn = resFn
} else {
ignored = true
}
}
} else {
// Policy does not matter for the first enqueued task of a type
s.queue.Set(taskType, newTask)
}
} else {
// If no task is running add and run it. The worker will take care of scheduling new tasks added while running
s.queue.Set(taskType, newTask)
existingTask = newTask
s.runTask(existingTask, taskFn, func(res interface{}, runningTask *taskContext, err error) {
s.finishedTask(res, runningTask, resFn, err)
})
}
return ignored
}
func (s *Scheduler) runTask(tc *taskContext, taskFn taskFunction, resFn func(interface{}, *taskContext, error)) {
thisContext, thisCancelFn := context.WithCancel(context.Background())
s.cancelFn = thisCancelFn
s.context = thisContext
go func() {
res, err := taskFn(thisContext)
// Release context resources
thisCancelFn()
if errors.Is(err, context.Canceled) {
resFn(res, tc, fmt.Errorf("task canceled: %w", err))
} else {
resFn(res, tc, err)
}
}()
}
// finishedTask is the only place a task is removed from the queue. It keeps
// the current task in place if it was overwritten (doNotDeleteCurrentTask)
// and starts the next queued task, if any
func (s *Scheduler) finishedTask(finishedRes interface{}, finishedTask *taskContext, finishedResFn resultFunction, finishedErr error) {
s.queueMutex.Lock()
// We always have a running task
current := s.queue.Oldest()
// Delete current task if not overwritten
if s.doNotDeleteCurrentTask {
s.doNotDeleteCurrentTask = false
} else {
s.queue.Delete(current.Value.taskType)
}
// Run next task
if pair := s.queue.Oldest(); pair != nil {
nextTask := pair.Value
s.runTask(nextTask, nextTask.taskFn, func(res interface{}, runningTask *taskContext, err error) {
s.finishedTask(res, runningTask, runningTask.resFn, err)
})
} else {
s.cancelFn = nil
}
s.queueMutex.Unlock()
// Report result
finishedResFn(finishedRes, finishedTask.taskType, finishedErr)
}
func (s *Scheduler) Stop() {
s.queueMutex.Lock()
defer s.queueMutex.Unlock()
if s.cancelFn != nil {
s.cancelFn()
s.cancelFn = nil
}
// Empty the queue so the running task will not be restarted
// Iterate by repeatedly taking the oldest entry; Delete invalidates pair.Next()
for pair := s.queue.Oldest(); pair != nil; pair = s.queue.Oldest() {
// Capture the task locally so the canceling goroutine does not share the loop variable
task := pair.Value
// Notify the queued tasks that they are canceled
if task.policy == ReplacementPolicyCancelOld {
go func() {
task.resFn(nil, task.taskType, context.Canceled)
}()
}
s.queue.Delete(task.taskType)
}
}


@@ -0,0 +1,364 @@
package activity
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
const (
noActionPerformed = "no action performed"
taskCalled = "task called"
taskResultCalled = "task result called"
)
func TestScheduler_Enqueue_Simple(t *testing.T) {
s := NewScheduler()
callChan := make(chan string, 10)
testFunction := func(policy ReplacementPolicy, failTest bool) {
testTask := TaskType{1, policy}
ignored := s.Enqueue(testTask, func(ctx context.Context) (interface{}, error) {
callChan <- taskCalled
if failTest {
return nil, errors.New("test error")
}
return 123, nil
}, func(res interface{}, taskType TaskType, err error) {
if failTest {
require.Error(t, err)
require.Nil(t, res)
} else {
require.NoError(t, err)
require.Equal(t, 123, res)
}
require.Equal(t, testTask, taskType)
callChan <- taskResultCalled
})
require.False(t, ignored)
lastRes := noActionPerformed
done := false
for !done {
select {
case callRes := <-callChan:
if callRes == taskCalled {
require.Equal(t, noActionPerformed, lastRes)
} else if callRes == taskResultCalled {
require.Equal(t, taskCalled, lastRes)
done = true
} else {
require.Fail(t, "unexpected result", `"%s" for policy %d`, callRes, policy)
}
lastRes = callRes
case <-time.After(1 * time.Second):
require.Fail(t, "test not completed in time", `last result: "%s" for policy %d`, lastRes, policy)
}
}
require.Equal(t, taskResultCalled, lastRes)
}
testFailed := false
for i := 0; i < 2; i++ {
testFailed = (i == 0)
for _, policy := range []ReplacementPolicy{ReplacementPolicyCancelOld, ReplacementPolicyIgnoreNew} {
testFunction(policy, testFailed)
}
}
}
// Validate the task is cancelled when a new one is scheduled and that the third one will overwrite the second one
func TestScheduler_Enqueue_VerifyReplacementPolicyCancelOld(t *testing.T) {
s := NewScheduler()
type testStage string
const (
stage1FirstTaskStarted testStage = "First task started"
stage2ThirdEnqueueOverwroteSecondTask testStage = "Third Enqueue overwrote second task"
stage3ExitingFirstCancelledTask testStage = "Exiting first cancelled task"
stage4FirstTaskCanceledResponse testStage = "First task canceled response"
stage5ThirdTaskRunning testStage = "Third task running"
stage6ThirdTaskResponse testStage = "Third task response"
)
testStages := []testStage{
stage1FirstTaskStarted,
stage2ThirdEnqueueOverwroteSecondTask,
stage3ExitingFirstCancelledTask,
stage4FirstTaskCanceledResponse,
stage5ThirdTaskRunning,
stage6ThirdTaskResponse,
}
callChan := make(chan testStage, len(testStages))
resultCallCount := 0
var firstRunWG, secondRunWG, thirdRunWG sync.WaitGroup
firstRunWG.Add(1)
secondRunWG.Add(1)
thirdRunWG.Add(1)
testTask := TaskType{1, ReplacementPolicyCancelOld}
for i := 0; i < 2; i++ {
currentIndex := i
ignored := s.Enqueue(testTask, func(workCtx context.Context) (interface{}, error) {
callChan <- stage1FirstTaskStarted
// Mark first task running so that the second Enqueue will cancel this one and overwrite it
firstRunWG.Done()
// Wait for the first task to be cancelled by the second one
select {
case <-workCtx.Done():
require.ErrorAs(t, workCtx.Err(), &context.Canceled)
// Unblock the third Enqueue call
secondRunWG.Done()
// Block until the third Enqueue has overwritten the second task, which never got to run
thirdRunWG.Wait()
callChan <- stage3ExitingFirstCancelledTask
case <-time.After(1 * time.Second):
require.Fail(t, "task not cancelled in time")
}
return nil, workCtx.Err()
}, func(res interface{}, taskType TaskType, err error) {
switch currentIndex {
case 0:
// First task was cancelled by the second Enqueue call
callChan <- stage4FirstTaskCanceledResponse
require.ErrorAs(t, err, &context.Canceled)
case 1:
callChan <- stage2ThirdEnqueueOverwroteSecondTask
// Unblock the first task so the third one can run; also validate
// that the third Enqueue call overwrote the second task, which never ran
thirdRunWG.Done()
require.True(t, errors.Is(err, ErrTaskOverwritten))
case 3:
// Third task was successfully executed
require.Equal(t, testTask, taskType)
require.Nil(t, res)
resultCallCount++
}
})
require.False(t, ignored)
// Wait for the first task to run
firstRunWG.Wait()
}
// Wait for the first task to observe cancellation before enqueueing the third one
secondRunWG.Wait()
ignored := s.Enqueue(testTask, func(ctx context.Context) (interface{}, error) {
callChan <- stage5ThirdTaskRunning
return 123, errors.New("test error")
}, func(res interface{}, taskType TaskType, err error) {
require.Error(t, err)
require.Equal(t, testTask, taskType)
require.Equal(t, 123, res)
callChan <- stage6ThirdTaskResponse
})
require.False(t, ignored)
lastRes := noActionPerformed
expectedTestStageIndex := 0
for i := 0; i < len(testStages); i++ {
select {
case callRes := <-callChan:
require.Equal(t, testStages[expectedTestStageIndex], callRes, "task stage out of order; expected %s, got %s", testStages[expectedTestStageIndex], callRes)
expectedTestStageIndex++
case <-time.After(1 * time.Second):
require.Fail(t, "test not completed in time", `last result: "%s" for cancel task policy`, lastRes)
}
}
}
func TestScheduler_Enqueue_VerifyReplacementPolicyIgnoreNew(t *testing.T) {
s := NewScheduler()
callChan := make(chan string, 10)
workloadWG := sync.WaitGroup{}
taskCallCount := 0
resultCallCount := 0
workloadWG.Add(1)
testTask := TaskType{1, ReplacementPolicyIgnoreNew}
ignored := s.Enqueue(testTask, func(workCtx context.Context) (interface{}, error) {
workloadWG.Wait()
require.NoError(t, workCtx.Err())
taskCallCount++
callChan <- taskCalled
return 123, nil
}, func(res interface{}, taskType TaskType, err error) {
require.NoError(t, err)
require.Equal(t, testTask, taskType)
require.Equal(t, 123, res)
resultCallCount++
callChan <- taskResultCalled
})
require.False(t, ignored)
ignored = s.Enqueue(testTask, func(ctx context.Context) (interface{}, error) {
require.Fail(t, "unexpected call")
return nil, errors.New("unexpected call")
}, func(res interface{}, taskType TaskType, err error) {
require.Fail(t, "unexpected result call")
})
require.True(t, ignored)
workloadWG.Done()
lastRes := noActionPerformed
done := false
for !done {
select {
case callRes := <-callChan:
if callRes == taskCalled {
require.Equal(t, noActionPerformed, lastRes)
} else if callRes == taskResultCalled {
require.Equal(t, taskCalled, lastRes)
done = true
} else {
require.Fail(t, "unexpected result", `"%s" for ignore task policy`, callRes)
}
lastRes = callRes
case <-time.After(1 * time.Second):
require.Fail(t, "test not completed in time", `last result: "%s" for ignore task policy`, lastRes)
}
}
require.Equal(t, 1, resultCallCount)
require.Equal(t, 1, taskCallCount)
require.Equal(t, taskResultCalled, lastRes)
}
func TestScheduler_Enqueue_ValidateOrder(t *testing.T) {
s := NewScheduler()
waitEnqueueAll := sync.WaitGroup{}
type failType bool
const (
fail failType = true
pass failType = false
)
type enqueueParams struct {
taskType TaskType
taskAction failType
callIndex int
}
testTask1 := TaskType{1, ReplacementPolicyCancelOld}
testTask2 := TaskType{2, ReplacementPolicyCancelOld}
testTask3 := TaskType{3, ReplacementPolicyIgnoreNew}
// Each entry: task type (which carries its ReplacementPolicy), whether the task fails or succeeds, and the call index (filled in below)
enqueueSequence := []enqueueParams{
{testTask1, pass, 0}, // 1 task event
{testTask2, pass, 0}, // 0 task event
{testTask3, fail, 0}, // 1 task event
{testTask3, pass, 0}, // 0 task event
{testTask2, pass, 0}, // 1 task event
{testTask1, pass, 0}, // 1 task event
{testTask3, fail, 0}, // 0 task event
}
const taskEventCount = 4
taskSuccessChan := make(chan enqueueParams, len(enqueueSequence))
taskCanceledChan := make(chan enqueueParams, len(enqueueSequence))
taskFailedChan := make(chan enqueueParams, len(enqueueSequence))
resChan := make(chan enqueueParams, len(enqueueSequence))
firstIgnoreNewProcessed := make(map[TaskType]bool)
ignoredCount := 0
waitEnqueueAll.Add(1)
for i := 0; i < len(enqueueSequence); i++ {
enqueueSequence[i].callIndex = i
p := enqueueSequence[i]
currentIndex := i
ignored := s.Enqueue(p.taskType, func(ctx context.Context) (interface{}, error) {
waitEnqueueAll.Wait()
if p.taskType.Policy == ReplacementPolicyCancelOld && ctx.Err() != nil && errors.Is(ctx.Err(), context.Canceled) {
taskCanceledChan <- p
return nil, ctx.Err()
}
if p.taskAction == fail {
taskFailedChan <- p
return nil, errors.New("test error")
}
taskSuccessChan <- p
return 10 * (currentIndex + 1), nil
}, func(res interface{}, taskType TaskType, err error) {
require.Equal(t, p.taskType, taskType)
resChan <- p
})
if ignored {
ignoredCount++
}
if _, ok := firstIgnoreNewProcessed[p.taskType]; !ok {
require.False(t, ignored)
firstIgnoreNewProcessed[p.taskType] = p.taskType.Policy == ReplacementPolicyCancelOld
} else {
if p.taskType.Policy == ReplacementPolicyIgnoreNew {
require.True(t, ignored)
} else {
require.False(t, ignored)
}
}
}
waitEnqueueAll.Done()
taskSuccessCount := make(map[TaskType]int)
taskCanceledCount := make(map[TaskType]int)
taskFailedCount := make(map[TaskType]int)
resChanCount := make(map[TaskType]int)
// Only ignored don't generate result events
expectedEventsCount := len(enqueueSequence) - ignoredCount + taskEventCount
for i := 0; i < expectedEventsCount; i++ {
// Loop for run and result calls
select {
case p := <-taskSuccessChan:
taskSuccessCount[p.taskType]++
case p := <-taskCanceledChan:
taskCanceledCount[p.taskType]++
case p := <-taskFailedChan:
taskFailedCount[p.taskType]++
case p := <-resChan:
resChanCount[p.taskType]++
case <-time.After(1 * time.Second):
require.Fail(t, "test not completed in time")
}
}
require.Equal(t, 1, taskSuccessCount[testTask1], "expected one successful task call for type %d, had %d", 1, taskSuccessCount[testTask1])
require.Equal(t, 1, taskSuccessCount[testTask2], "expected one successful task call for type %d, had %d", 2, taskSuccessCount[testTask2])
require.Equal(t, 0, taskSuccessCount[testTask3], "expected no successful task call for type %d, had %d", 3, taskSuccessCount[testTask3])
require.Equal(t, 1, taskCanceledCount[testTask1], "expected one canceled task call for type %d, had %d", 1, taskCanceledCount[testTask1])
require.Equal(t, 0, taskCanceledCount[testTask2], "expected no canceled task call for type %d, had %d", 2, taskCanceledCount[testTask2])
require.Equal(t, 0, taskCanceledCount[testTask3], "expected no canceled task call for type %d, had %d", 3, taskCanceledCount[testTask3])
require.Equal(t, 0, taskFailedCount[testTask1], "expected no failed task call for type %d, had %d", 1, taskFailedCount[testTask1])
require.Equal(t, 0, taskFailedCount[testTask2], "expected no failed task call for type %d, had %d", 2, taskFailedCount[testTask2])
require.Equal(t, 1, taskFailedCount[testTask3], "expected one failed task call for type %d, had %d", 3, taskFailedCount[testTask3])
require.Equal(t, 2, resChanCount[testTask1], "expected two result calls for type %d, had %d", 1, resChanCount[testTask1])
require.Equal(t, 2, resChanCount[testTask2], "expected two result calls for type %d, had %d", 2, resChanCount[testTask2])
require.Equal(t, 1, resChanCount[testTask3], "expected one result call for type %d, had %d", 3, resChanCount[testTask3])
}


@@ -5,8 +5,6 @@ import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
@@ -19,7 +17,24 @@ import (
const (
// FilterResponse json is sent as a message in the EventActivityFilteringDone event
EventActivityFilteringDone walletevent.EventType = "wallet-activity-filtering-done"
EventActivityFilteringDone walletevent.EventType = "wallet-activity-filtering-done"
EventActivityGetRecipientsDone walletevent.EventType = "wallet-activity-get-recipients-result"
EventActivityGetOldestTimestampDone walletevent.EventType = "wallet-activity-get-oldest-timestamp-result"
)
var (
filterTask = TaskType{
ID: 1,
Policy: ReplacementPolicyCancelOld,
}
getRecipientsTask = TaskType{
ID: 2,
Policy: ReplacementPolicyIgnoreNew,
}
getOldestTimestampTask = TaskType{
ID: 3,
Policy: ReplacementPolicyCancelOld,
}
)
type Service struct {
@@ -27,10 +42,7 @@ type Service struct {
tokenManager *token.Manager
eventFeed *event.Feed
context context.Context
cancelFn context.CancelFunc
wg sync.WaitGroup
cancelMutex sync.Mutex
scheduler *Scheduler
}
func NewService(db *sql.DB, tokenManager *token.Manager, eventFeed *event.Feed) *Service {
@@ -38,6 +50,7 @@ func NewService(db *sql.DB, tokenManager *token.Manager, eventFeed *event.Feed)
db: db,
tokenManager: tokenManager,
eventFeed: eventFeed,
scheduler: NewScheduler(),
}
}
@@ -45,8 +58,8 @@ type ErrorCode = int
const (
ErrorCodeSuccess ErrorCode = iota + 1
ErrorCodeFilterCanceled
ErrorCodeFilterFailed
ErrorCodeTaskCanceled
ErrorCodeFailed
)
type FilterResponse struct {
@@ -61,63 +74,89 @@ type FilterResponse struct {
// FilterActivityAsync allows only one filter task to run at a time,
// and it cancels the current one if a new one is started.
// All calls will trigger an EventActivityFilteringDone event with the result of the filtering
func (s *Service) FilterActivityAsync(ctx context.Context, addresses []common.Address, chainIDs []w_common.ChainID, filter Filter, offset int, limit int) error {
s.cancelMutex.Lock()
defer s.cancelMutex.Unlock()
// If a previous task is running, cancel it and wait to finish
if s.cancelFn != nil {
s.cancelFn()
s.cancelFn = nil
s.wg.Wait()
}
if ctx.Err() != nil {
return fmt.Errorf("context error: %w", ctx.Err())
}
s.context, s.cancelFn = context.WithCancel(context.Background())
thisCancelFn := s.cancelFn
s.wg.Add(1)
go func() {
activities, err := getActivityEntries(s.context, s.getDeps(), addresses, chainIDs, filter, offset, limit)
// Don't lock s.cancelMutex, it might have been locked already
s.wg.Done()
// Release context resources
thisCancelFn()
func (s *Service) FilterActivityAsync(ctx context.Context, addresses []common.Address, chainIDs []w_common.ChainID, filter Filter, offset int, limit int) {
s.scheduler.Enqueue(filterTask, func(ctx context.Context) (interface{}, error) {
activities, err := getActivityEntries(ctx, s.getDeps(), addresses, chainIDs, filter, offset, limit)
return activities, err
}, func(result interface{}, taskType TaskType, err error) {
res := FilterResponse{
ErrorCode: ErrorCodeFilterFailed,
ErrorCode: ErrorCodeFailed,
}
if errors.Is(err, context.Canceled) {
res.ErrorCode = ErrorCodeFilterCanceled
if errors.Is(err, context.Canceled) || errors.Is(err, ErrTaskOverwritten) {
res.ErrorCode = ErrorCodeTaskCanceled
} else if err == nil {
activities := result.([]Entry)
res.Activities = activities
res.Offset = offset
res.HasMore = len(activities) == limit
res.ErrorCode = ErrorCodeSuccess
}
s.sendResponseEvent(res)
}()
s.sendResponseEvent(EventActivityFilteringDone, res)
})
}
return nil
type GetRecipientsResponse struct {
Addresses []common.Address `json:"addresses"`
Offset int `json:"offset"`
// Used to indicate that there might be more entries that were not returned
// based on a simple heuristic
HasMore bool `json:"hasMore"`
ErrorCode ErrorCode `json:"errorCode"`
}
// GetRecipientsAsync returns true if a task is already running or scheduled due to a previous call, meaning that
// this call won't receive an answer and the client should rely on the answer from the previous call.
// If no task is already running or scheduled, false is returned
func (s *Service) GetRecipientsAsync(ctx context.Context, offset int, limit int) bool {
return s.scheduler.Enqueue(getRecipientsTask, func(ctx context.Context) (interface{}, error) {
var err error
result := &GetRecipientsResponse{
Offset: offset,
ErrorCode: ErrorCodeSuccess,
}
result.Addresses, result.HasMore, err = GetRecipients(ctx, s.db, offset, limit)
return result, err
}, func(result interface{}, taskType TaskType, err error) {
res := result.(*GetRecipientsResponse)
if errors.Is(err, context.Canceled) || errors.Is(err, ErrTaskOverwritten) {
res.ErrorCode = ErrorCodeTaskCanceled
} else if err != nil {
res.ErrorCode = ErrorCodeFailed
}
s.sendResponseEvent(EventActivityGetRecipientsDone, result)
})
}
type GetOldestTimestampResponse struct {
Timestamp int64 `json:"timestamp"`
ErrorCode ErrorCode `json:"errorCode"`
}
func (s *Service) GetOldestTimestampAsync(ctx context.Context, addresses []common.Address) {
s.scheduler.Enqueue(getOldestTimestampTask, func(ctx context.Context) (interface{}, error) {
timestamp, err := GetOldestTimestamp(ctx, s.db, addresses)
return timestamp, err
}, func(result interface{}, taskType TaskType, err error) {
res := GetOldestTimestampResponse{
ErrorCode: ErrorCodeFailed,
}
if errors.Is(err, context.Canceled) || errors.Is(err, ErrTaskOverwritten) {
res.ErrorCode = ErrorCodeTaskCanceled
} else if err == nil {
res.Timestamp = result.(int64)
res.ErrorCode = ErrorCodeSuccess
}
s.sendResponseEvent(EventActivityGetOldestTimestampDone, res)
})
}
func (s *Service) Stop() {
s.cancelMutex.Lock()
defer s.cancelMutex.Unlock()
// If a previous task is running, cancel it and wait to finish
if s.cancelFn != nil {
s.cancelFn()
s.wg.Wait()
s.cancelFn = nil
}
s.scheduler.Stop()
}
func (s *Service) getDeps() FilterDependencies {
@@ -153,16 +192,16 @@ func (s *Service) getDeps() FilterDependencies {
}
}
func (s *Service) sendResponseEvent(response FilterResponse) {
payload, err := json.Marshal(response)
func (s *Service) sendResponseEvent(eventType walletevent.EventType, payloadObj interface{}) {
payload, err := json.Marshal(payloadObj)
if err != nil {
log.Error("Error marshaling response", "err", err)
}
log.Debug("wallet.api.FilterActivityAsync RESPONSE", "activities.len", len(response.Activities), "offset", response.Offset, "hasMore", response.HasMore, "error", response.ErrorCode)
log.Debug("wallet.api.activity.Service RESPONSE", "eventType", eventType, "error", err, "payload.len", len(payload))
s.eventFeed.Send(walletevent.Event{
Type: EventActivityFilteringDone,
Type: eventType,
Message: string(payload),
})
}
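To illustrate the consumer side of the refactored service, a sketch that triggers one of the async calls and picks the result off the wallet event feed. The subscription plumbing and the nil address list are assumptions; the service API, event type, and response struct come from this diff:

```go
package walletclient

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/event"

	"github.com/status-im/status-go/services/wallet/activity"
	"github.com/status-im/status-go/services/wallet/walletevent"
)

// printOldestTimestamp kicks off GetOldestTimestampAsync and waits for the
// corresponding event; feed must be the same event.Feed the Service publishes to.
func printOldestTimestamp(service *activity.Service, feed *event.Feed) {
	events := make(chan walletevent.Event, 10)
	sub := feed.Subscribe(events)
	defer sub.Unsubscribe()

	// Assumption: a nil slice stands in for "no address filter" here.
	service.GetOldestTimestampAsync(context.Background(), nil)

	for ev := range events {
		if ev.Type != activity.EventActivityGetOldestTimestampDone {
			continue // unrelated wallet event
		}
		var res activity.GetOldestTimestampResponse
		if err := json.Unmarshal([]byte(ev.Message), &res); err != nil {
			fmt.Println("bad payload:", err)
			return
		}
		fmt.Println("oldest timestamp:", res.Timestamp, "errorCode:", res.ErrorCode)
		return
	}
}
```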


@@ -537,22 +537,20 @@ func (api *API) FetchAllCurrencyFormats() (currency.FormatPerSymbol, error) {
func (api *API) FilterActivityAsync(ctx context.Context, addresses []common.Address, chainIDs []wcommon.ChainID, filter activity.Filter, offset int, limit int) error {
log.Debug("wallet.api.FilterActivityAsync", "addr.count", len(addresses), "chainIDs.count", len(chainIDs), "offset", offset, "limit", limit)
return api.s.activity.FilterActivityAsync(ctx, addresses, chainIDs, filter, offset, limit)
api.s.activity.FilterActivityAsync(ctx, addresses, chainIDs, filter, offset, limit)
return nil
}
type GetAllRecipientsResponse struct {
Addresses []common.Address `json:"addresses"`
HasMore bool `json:"hasMore"`
func (api *API) GetRecipientsAsync(ctx context.Context, offset int, limit int) (ignored bool, err error) {
log.Debug("wallet.api.GetRecipientsAsync", "offset", offset, "limit", limit)
ignored = api.s.activity.GetRecipientsAsync(ctx, offset, limit)
return ignored, err
}
func (api *API) GetAllRecipients(ctx context.Context, offset int, limit int) (result *GetAllRecipientsResponse, err error) {
log.Debug("wallet.api.GetAllRecipients", "offset", offset, "limit", limit)
result = &GetAllRecipientsResponse{}
result.Addresses, result.HasMore, err = activity.GetRecipients(ctx, api.s.db, offset, limit)
return result, err
}
func (api *API) GetOldestActivityTimestamp(ctx context.Context, addresses []common.Address) (timestamp int64, err error) {
func (api *API) GetOldestActivityTimestampAsync(ctx context.Context, addresses []common.Address) error {
log.Debug("wallet.api.GetOldestActivityTimestampAsync", "addresses.len", len(addresses))
return activity.GetOldestTimestamp(ctx, api.s.db, addresses)
api.s.activity.GetOldestTimestampAsync(ctx, addresses)
return nil
}

vendor/github.com/bahlo/generic-list-go/LICENSE

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/bahlo/generic-list-go/README.md

@@ -0,0 +1,5 @@
# generic-list-go [![CI](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml/badge.svg)](https://github.com/bahlo/generic-list-go/actions/workflows/ci.yml)
Go [container/list](https://pkg.go.dev/container/list) but with generics.
The code is based on `container/list` in `go1.18beta2`.

vendor/github.com/bahlo/generic-list-go/list.go

@@ -0,0 +1,235 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package list implements a doubly linked list.
//
// To iterate over a list (where l is a *List):
// for e := l.Front(); e != nil; e = e.Next() {
// // do something with e.Value
// }
//
package list
// Element is an element of a linked list.
type Element[T any] struct {
// Next and previous pointers in the doubly-linked list of elements.
// To simplify the implementation, internally a list l is implemented
// as a ring, such that &l.root is both the next element of the last
// list element (l.Back()) and the previous element of the first list
// element (l.Front()).
next, prev *Element[T]
// The list to which this element belongs.
list *List[T]
// The value stored with this element.
Value T
}
// Next returns the next list element or nil.
func (e *Element[T]) Next() *Element[T] {
if p := e.next; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// Prev returns the previous list element or nil.
func (e *Element[T]) Prev() *Element[T] {
if p := e.prev; e.list != nil && p != &e.list.root {
return p
}
return nil
}
// List represents a doubly linked list.
// The zero value for List is an empty list ready to use.
type List[T any] struct {
root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
len int // current list length excluding (this) sentinel element
}
// Init initializes or clears list l.
func (l *List[T]) Init() *List[T] {
l.root.next = &l.root
l.root.prev = &l.root
l.len = 0
return l
}
// New returns an initialized list.
func New[T any]() *List[T] { return new(List[T]).Init() }
// Len returns the number of elements of list l.
// The complexity is O(1).
func (l *List[T]) Len() int { return l.len }
// Front returns the first element of list l or nil if the list is empty.
func (l *List[T]) Front() *Element[T] {
if l.len == 0 {
return nil
}
return l.root.next
}
// Back returns the last element of list l or nil if the list is empty.
func (l *List[T]) Back() *Element[T] {
if l.len == 0 {
return nil
}
return l.root.prev
}
// lazyInit lazily initializes a zero List value.
func (l *List[T]) lazyInit() {
if l.root.next == nil {
l.Init()
}
}
// insert inserts e after at, increments l.len, and returns e.
func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
e.list = l
l.len++
return e
}
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
return l.insert(&Element[T]{Value: v}, at)
}
// remove removes e from its list, decrements l.len
func (l *List[T]) remove(e *Element[T]) {
e.prev.next = e.next
e.next.prev = e.prev
e.next = nil // avoid memory leaks
e.prev = nil // avoid memory leaks
e.list = nil
l.len--
}
// move moves e to next to at.
func (l *List[T]) move(e, at *Element[T]) {
if e == at {
return
}
e.prev.next = e.next
e.next.prev = e.prev
e.prev = at
e.next = at.next
e.prev.next = e
e.next.prev = e
}
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
func (l *List[T]) Remove(e *Element[T]) T {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash
l.remove(e)
}
return e.Value
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
func (l *List[T]) PushFront(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
func (l *List[T]) PushBack(v T) *Element[T] {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertBefore(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark.prev)
}
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
func (l *List[T]) InsertAfter(v T, mark *Element[T]) *Element[T] {
if mark.list != l {
return nil
}
// see comment in List.Remove about initialization of l
return l.insertValue(v, mark)
}
// MoveToFront moves element e to the front of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToFront(e *Element[T]) {
if e.list != l || l.root.next == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, &l.root)
}
// MoveToBack moves element e to the back of list l.
// If e is not an element of l, the list is not modified.
// The element must not be nil.
func (l *List[T]) MoveToBack(e *Element[T]) {
if e.list != l || l.root.prev == e {
return
}
// see comment in List.Remove about initialization of l
l.move(e, l.root.prev)
}
// MoveBefore moves element e to its new position before mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveBefore(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
l.move(e, mark.prev)
}
// MoveAfter moves element e to its new position after mark.
// If e or mark is not an element of l, or e == mark, the list is not modified.
// The element and mark must not be nil.
func (l *List[T]) MoveAfter(e, mark *Element[T]) {
if e.list != l || e == mark || mark.list != l {
return
}
l.move(e, mark)
}
// PushBackList inserts a copy of another list at the back of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushBackList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
l.insertValue(e.Value, l.root.prev)
}
}
// PushFrontList inserts a copy of another list at the front of list l.
// The lists l and other may be the same. They must not be nil.
func (l *List[T]) PushFrontList(other *List[T]) {
l.lazyInit()
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
l.insertValue(e.Value, &l.root)
}
}

vendor/github.com/buger/jsonparser/.gitignore

@@ -0,0 +1,12 @@
*.test
*.out
*.mprof
.idea
vendor/github.com/buger/goterm/
prof.cpu
prof.mem

vendor/github.com/buger/jsonparser/.travis.yml

@@ -0,0 +1,11 @@
language: go
arch:
- amd64
- ppc64le
go:
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
script: go test -v ./.

vendor/github.com/buger/jsonparser/Dockerfile

@@ -0,0 +1,12 @@
FROM golang:1.6
RUN go get github.com/Jeffail/gabs
RUN go get github.com/bitly/go-simplejson
RUN go get github.com/pquerna/ffjson
RUN go get github.com/antonholmquist/jason
RUN go get github.com/mreiferson/go-ujson
RUN go get -tags=unsafe -u github.com/ugorji/go/codec
RUN go get github.com/mailru/easyjson
WORKDIR /go/src/github.com/buger/jsonparser
ADD . /go/src/github.com/buger/jsonparser

vendor/github.com/buger/jsonparser/LICENSE

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 Leonid Bugaev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/buger/jsonparser/Makefile

@@ -0,0 +1,36 @@
SOURCE = parser.go
CONTAINER = jsonparser
SOURCE_PATH = /go/src/github.com/buger/jsonparser
BENCHMARK = JsonParser
BENCHTIME = 5s
TEST = .
DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER)
build:
docker build -t $(CONTAINER) .
race:
$(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s
bench:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v
bench_local:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v
profile:
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v
$(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c
test:
$(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v
fmt:
$(DRUN) go fmt ./...
vet:
$(DRUN) go vet ./.
bash:
$(DRUN) /bin/bash

vendor/github.com/buger/jsonparser/README.md

@@ -0,0 +1,365 @@
[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg)
# Alternative JSON parser for Go (10x times faster standard library)
It does not require you to know the structure of the payload (eg. create structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than standard `encoding/json` package (depending on payload size and usage), **allocates no memory**. See benchmarks below.
## Rationale
Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex.
I love simplicity and prefer to avoid external dependecies. `encoding/json` requires you to know exactly your data structures, or if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage.
I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`, there is few options with own parsers (`ffjson`, `easyjson`), but they still requires you to create data structures.
Goal of this project is to push JSON parser to the performance limits and not sacrifice with compliance and developer user experience.
## Example
For the given JSON our goal is to extract the user's full name, number of github followers and avatar.
```go
import "github.com/buger/jsonparser"
...
data := []byte(`{
"person": {
"name": {
"first": "Leonid",
"last": "Bugaev",
"fullName": "Leonid Bugaev"
},
"github": {
"handle": "buger",
"followers": 109
},
"avatars": [
{ "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" }
]
},
"company": {
"name": "Acme"
}
}`)
// You can specify key path by providing arguments to Get function
jsonparser.Get(data, "person", "name", "fullName")
// There is `GetInt` and `GetBoolean` helpers if you exactly know key data type
jsonparser.GetInt(data, "person", "github", "followers")
// When you try to get object, it will return you []byte slice pointer to data containing it
// In `company` it will be `{"name": "Acme"}`
jsonparser.Get(data, "company")
// If the key doesn't exist it will throw an error
var size int64
if value, err := jsonparser.GetInt(data, "company", "size"); err == nil {
size = value
}
// You can use `ArrayEach` helper to iterate items [item1, item2 .... itemN]
jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
fmt.Println(jsonparser.Get(value, "url"))
}, "person", "avatars")
// Or use can access fields by index!
jsonparser.GetString(data, "person", "avatars", "[0]", "url")
// You can use `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN }
jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType)
return nil
}, "person", "name")
// The most efficient way to extract multiple keys is `EachKey`
paths := [][]string{
[]string{"person", "name", "fullName"},
[]string{"person", "avatars", "[0]", "url"},
[]string{"company", "url"},
}
jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){
switch idx {
case 0: // []string{"person", "name", "fullName"}
...
case 1: // []string{"person", "avatars", "[0]", "url"}
...
case 2: // []string{"company", "url"},
...
}
}, paths...)
// For more information see docs below
```
## Need to speedup your app?
I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com.
## Reference
Library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it.
You also can view API at [godoc.org](https://godoc.org/github.com/buger/jsonparser)
### **`Get`**
```go
func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error)
```
Receives data structure, and key path to extract value from.
Returns:
* `value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error
* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null`
* `offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper.
* `err` - If the key is not found or any other parsing issue, it should return error. If key not found it also sets `dataType` to `NotExist`
Accepts multiple keys to specify path to JSON value (in case of quering nested structures).
If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation.
Note that keys can be an array indexes: `jsonparser.GetInt("person", "avatars", "[0]", "url")`, pretty cool, yeah?
### **`GetString`**
```go
func GetString(data []byte, keys ...string) (val string, err error)
```
Returns strings properly handing escaped and unicode characters. Note that this will cause additional memory allocations.
### **`GetUnsafeString`**
If you need string in your app, and ready to sacrifice with support of escaped symbols in favor of speed. It returns string mapped to existing byte slice memory, without any allocations:
```go
s, _, := jsonparser.GetUnsafeString(data, "person", "name", "title")
switch s {
case 'CEO':
...
case 'Engineer'
...
...
}
```
Note that `unsafe` here means that your string will exist until GC will free underlying byte slice, for most of cases it means that you can use this string only in current context, and should not pass it anywhere externally: through channels or any other way.
### **`GetBoolean`**, **`GetInt`** and **`GetFloat`**
```go
func GetBoolean(data []byte, keys ...string) (val bool, err error)
func GetFloat(data []byte, keys ...string) (val float64, err error)
func GetInt(data []byte, keys ...string) (val int64, err error)
```
If you know the key type, you can use the helpers above.
If key data type do not match, it will return error.
### **`ArrayEach`**
```go
func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string)
```
Needed for iterating arrays, accepts a callback function with the same return arguments as `Get`.
### **`ObjectEach`**
```go
func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error)
```
Needed for iterating object, accepts a callback function. Example:
```go
var handler func([]byte, []byte, jsonparser.ValueType, int) error
handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
//do stuff here
}
jsonparser.ObjectEach(myJson, handler)
```
### **`EachKey`**
```go
func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string)
```
When you need to read multiple keys, and you do not afraid of low-level API `EachKey` is your friend. It read payload only single time, and calls callback function once path is found. For example when you call multiple times `Get`, it has to process payload multiple times, each time you call it. Depending on payload `EachKey` can be multiple times faster than `Get`. Path can use nested keys as well!
```go
paths := [][]string{
[]string{"uuid"},
[]string{"tz"},
[]string{"ua"},
[]string{"st"},
}
var data SmallPayload
jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error){
switch idx {
case 0:
data.Uuid, _ = value
case 1:
v, _ := jsonparser.ParseInt(value)
data.Tz = int(v)
case 2:
data.Ua, _ = value
case 3:
v, _ := jsonparser.ParseInt(value)
data.St = int(v)
}
}, paths...)
```
### **`Set`**
```go
func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error)
```
Receives existing data structure, key path to set, and value to set at that key. *This functionality is experimental.*
Returns:
* `value` - Pointer to original data structure with updated or added key value.
* `err` - If any parsing issue, it should return error.
Accepts multiple keys to specify path to JSON value (in case of updating or creating nested structures).
Note that keys can be an array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
### **`Delete`**
```go
func Delete(data []byte, keys ...string) value []byte
```
Receives existing data structure, and key path to delete. *This functionality is experimental.*
Returns:
* `value` - Pointer to original data structure with key path deleted if it can be found. If there is no key path, then the whole data structure is deleted.
Accepts multiple keys to specify path to JSON value (in case of updating or creating nested structures).
Note that keys can be an array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
## What makes it so fast?
* It does not rely on `encoding/json`, `reflection` or `interface{}`, the only real package dependency is `bytes`.
* Operates with JSON payload on byte level, providing you pointers to the original data structure: no memory allocation.
* No automatic type conversions, by default everything is a []byte, but it provides you value type, so you can convert by yourself (there is few helpers included).
* Does not parse full record, only keys you specified
## Benchmarks
There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json marked as bold text.
Benchmarks run on standard Linode 1024 box.
Compared libraries:
* https://golang.org/pkg/encoding/json
* https://github.com/Jeffail/gabs
* https://github.com/a8m/djson
* https://github.com/bitly/go-simplejson
* https://github.com/antonholmquist/jason
* https://github.com/mreiferson/go-ujson
* https://github.com/ugorji/go/codec
* https://github.com/pquerna/ffjson
* https://github.com/mailru/easyjson
* https://github.com/buger/jsonparser
#### TLDR
If you want to skip next sections we have 2 winner: `jsonparser` and `easyjson`.
`jsonparser` is up to 10 times faster than standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates with data on byte level, and provide direct slice pointers.
`easyjson` wins in CPU in medium tests and frankly i'm impressed with this package: it is remarkable results considering that it is almost drop-in replacement for `encoding/json` (require some code generation).
It's hard to fully compare `jsonparser` and `easyjson` (or `ffson`), they a true parsers and fully process record, unlike `jsonparser` which parse only keys you specified.
If you searching for replacement of `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constrains, or more control over your data you should try `jsonparser`.
`jsonparser` performance heavily depends on usage, and it works best when you do not need to process full record, only some keys. The more calls you need to make, the slower it will be, in contrast `easyjson` (or `ffjson`, `encoding/json`) parser record only 1 time, and then you can make as many calls as you want.
With great power comes great responsibility! :)
#### Small payload
Each test processes 190 bytes of http log as a JSON record.
It should read multiple fields.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go
Library | time/op | bytes/op | allocs/op
------ | ------- | -------- | -------
encoding/json struct | 7879 | 880 | 18
encoding/json interface{} | 8946 | 1521 | 38
Jeffail/gabs | 10053 | 1649 | 46
bitly/go-simplejson | 10128 | 2241 | 36
antonholmquist/jason | 27152 | 7237 | 101
github.com/ugorji/go/codec | 8806 | 2176 | 31
mreiferson/go-ujson | **7008** | **1409** | 37
a8m/djson | 3862 | 1249 | 30
pquerna/ffjson | **3769** | **624** | **15**
mailru/easyjson | **2002** | **192** | **9**
buger/jsonparser | **1367** | **0** | **0**
buger/jsonparser (EachKey API) | **809** | **0** | **0**
Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json and 4.6x faster than ffjson, and slightly faster than easyjson.
If you look at memory allocation, jsonparser has no rivals, as it makes no data copy and operates with raw []byte structures and pointers to it.
#### Medium payload
Each test processes a 2.4kb JSON record (based on Clearbit API).
It should read multiple nested fields and 1 array.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go
| Library | time/op | bytes/op | allocs/op |
| ------- | ------- | -------- | --------- |
| encoding/json struct | 57749 | 1336 | 29 |
| encoding/json interface{} | 79297 | 10627 | 215 |
| Jeffail/gabs | 83807 | 11202 | 235 |
| bitly/go-simplejson | 88187 | 17187 | 220 |
| antonholmquist/jason | 94099 | 19013 | 247 |
| github.com/ugorji/go/codec | 114719 | 6712 | 152 |
| mreiferson/go-ujson | **56972** | 11547 | 270 |
| a8m/djson | 28525 | 10196 | 198 |
| pquerna/ffjson | **20298** | **856** | **20** |
| mailru/easyjson | **10512** | **336** | **12** |
| buger/jsonparser | **15955** | **0** | **0** |
| buger/jsonparser (EachKey API) | **8916** | **0** | **0** |
The difference between ffjson and jsonparser in CPU usage is smaller, while the gap in memory consumption keeps growing. On the other hand, `easyjson` shows remarkable performance for the medium payload.
`gabs`, `go-simplejson` and `jason` are based on `encoding/json` and `map[string]interface{}`, and are really just helpers for unstructured JSON; their performance correlates with that of `encoding/json interface{}`, so they skip the next round.
`go-ujson`, while it has its own parser, shows the same performance as `encoding/json` and also skips the next round. The same goes for `ugorji/go/codec`, which showed unexpectedly poor performance on complex payloads.
#### Large payload
Each test processes a 24 KB JSON record (based on the Discourse API).
It should read 2 arrays and, for each item in the array, get a few fields.
Basically, it means processing the full JSON document.
https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go
| Library | time/op | bytes/op | allocs/op |
| --- | --- | --- | --- |
| encoding/json struct | 748336 | 8272 | 307 |
| encoding/json interface{} | 1224271 | 215425 | 3395 |
| a8m/djson | 510082 | 213682 | 2845 |
| pquerna/ffjson | **312271** | **7792** | **298** |
| mailru/easyjson | **154186** | **6992** | **288** |
| buger/jsonparser | **85308** | **0** | **0** |
`jsonparser` is now the winner, but do not forget that it is a much more lightweight parser than `ffjson` or `easyjson`: they have to parse all the data, while `jsonparser` parses only what you need. `ffjson`, `easyjson` and `jsonparser` all have their own parsing code and do not depend on `encoding/json` or `interface{}`; that's one of the reasons why they are so fast. `easyjson` also uses a bit of the `unsafe` package to reduce memory consumption (in theory this could lead to unexpected GC issues, but I have not tested it enough).
Also, the last benchmark did not include an `EachKey` test because, in this particular case, we need to read a lot of array values, and using `ArrayEach` is more efficient.
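For completeness, here is a minimal `ArrayEach` sketch (the payload shape is invented) showing how to pull a few fields out of every array element in a single pass:
```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	data := []byte(`{"posts": [{"id": 1, "title": "a"}, {"id": 2, "title": "b"}]}`)

	// ArrayEach walks the "posts" array once and hands each raw element
	// to the callback; no intermediate structs are allocated.
	jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
		id, _ := jsonparser.GetInt(value, "id")
		title, _ := jsonparser.GetString(value, "title")
		fmt.Println(id, title)
	}, "posts")
}
```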
## Questions and support
All bug reports and suggestions should go through GitHub Issues.
## Contributing
1. Fork it
2. Create your feature branch (git checkout -b my-new-feature)
3. Commit your changes (git commit -am 'Added some feature')
4. Push to the branch (git push origin my-new-feature)
5. Create new Pull Request
## Development
All my development happens in Docker, and the repo includes some Make tasks to simplify development.
* `make build` - builds docker image, usually can be called only once
* `make test` - run tests
* `make fmt` - run go fmt
* `make bench` - run benchmarks (if you need to run only a single benchmark, modify the `BENCHMARK` variable in the Makefile)
* `make profile` - runs the benchmark and generates 3 files - `cpu.out`, `mem.mprof` and the `benchmark.test` binary - which can be used with `go tool pprof`
* `make bash` - enter the container (I use it for running `go tool pprof` as above)

47
vendor/github.com/buger/jsonparser/bytes.go generated vendored Normal file

@ -0,0 +1,47 @@
package jsonparser
import (
bio "bytes"
)
// minInt64 holds the digits of -9223372036854775808, the smallest representable int64, without the
// leading sign, since the sign is stripped before the slice is compared below.
const minInt64 = `9223372036854775808`
// About 2x faster than strconv.ParseInt because it only supports base 10, which is enough for JSON
func parseInt(bytes []byte) (v int64, ok bool, overflow bool) {
if len(bytes) == 0 {
return 0, false, false
}
var neg bool = false
if bytes[0] == '-' {
neg = true
bytes = bytes[1:]
}
var b int64 = 0
for _, c := range bytes {
if c >= '0' && c <= '9' {
b = (10 * v) + int64(c-'0')
} else {
return 0, false, false
}
if overflow = (b < v); overflow {
break
}
v = b
}
if overflow {
if neg && bio.Equal(bytes, []byte(minInt64)) {
return b, true, false
}
return 0, false, true
}
if neg {
return -v, true, false
} else {
return v, true, false
}
}
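As a quick sanity check of the overflow handling above, here is a minimal sketch using the library's exported `ParseInt` (which wraps this helper); expected outputs are noted in the comments:
```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	// The smallest int64 takes the dedicated minInt64 branch above.
	v, err := jsonparser.ParseInt([]byte("-9223372036854775808"))
	fmt.Println(v, err) // -9223372036854775808 <nil>

	// One past the largest int64 overflows and is rejected.
	_, err = jsonparser.ParseInt([]byte("9223372036854775808"))
	fmt.Println(err != nil) // true
}
```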

25
vendor/github.com/buger/jsonparser/bytes_safe.go generated vendored Normal file

@ -0,0 +1,25 @@
// +build appengine appenginevm
package jsonparser
import (
"strconv"
)
// See bytes_unsafe.go for an explanation of why *[]byte is used (signatures must be consistent with those in that file)
func equalStr(b *[]byte, s string) bool {
return string(*b) == s
}
func parseFloat(b *[]byte) (float64, error) {
return strconv.ParseFloat(string(*b), 64)
}
func bytesToString(b *[]byte) string {
return string(*b)
}
func StringToBytes(s string) []byte {
return []byte(s)
}

44
vendor/github.com/buger/jsonparser/bytes_unsafe.go generated vendored Normal file

@ -0,0 +1,44 @@
// +build !appengine,!appenginevm
package jsonparser
import (
"reflect"
"strconv"
"unsafe"
"runtime"
)
//
// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6,
// the compiler cannot perfectly inline the function when using a non-pointer slice. That is,
// the non-pointer []byte parameter version is slower than if its function body is manually
// inlined, whereas the pointer []byte version is equally fast to the manually inlined
// version. Instruction count in assembly taken from "go tool compile" confirms this difference.
//
// TODO: Remove hack after Go 1.7 release
//
func equalStr(b *[]byte, s string) bool {
return *(*string)(unsafe.Pointer(b)) == s
}
func parseFloat(b *[]byte) (float64, error) {
return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64)
}
// A hack until issue golang/go#2632 is fixed.
// See: https://github.com/golang/go/issues/2632
func bytesToString(b *[]byte) string {
return *(*string)(unsafe.Pointer(b))
}
func StringToBytes(s string) []byte {
b := make([]byte, 0, 0)
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
bh.Data = sh.Data
bh.Cap = sh.Len
bh.Len = sh.Len
runtime.KeepAlive(s)
return b
}

173
vendor/github.com/buger/jsonparser/escape.go generated vendored Normal file

@ -0,0 +1,173 @@
package jsonparser
import (
"bytes"
"unicode/utf8"
)
// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7
const supplementalPlanesOffset = 0x10000
const highSurrogateOffset = 0xD800
const lowSurrogateOffset = 0xDC00
const basicMultilingualPlaneReservedOffset = 0xDFFF
const basicMultilingualPlaneOffset = 0xFFFF
func combineUTF16Surrogates(high, low rune) rune {
return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset)
}
const badHex = -1
func h2I(c byte) int {
switch {
case c >= '0' && c <= '9':
return int(c - '0')
case c >= 'A' && c <= 'F':
return int(c - 'A' + 10)
case c >= 'a' && c <= 'f':
return int(c - 'a' + 10)
}
return badHex
}
// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and
// is not checked.
// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together.
// This function only handles one; decodeUnicodeEscape handles this more complex case.
func decodeSingleUnicodeEscape(in []byte) (rune, bool) {
// We need at least 6 characters total
if len(in) < 6 {
return utf8.RuneError, false
}
// Convert hex to decimal
h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5])
if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex {
return utf8.RuneError, false
}
// Compose the hex digits
return rune(h1<<12 + h2<<8 + h3<<4 + h4), true
}
// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters,
// which is used to describe UTF16 chars.
// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
func isUTF16EncodedRune(r rune) bool {
return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset
}
func decodeUnicodeEscape(in []byte) (rune, int) {
if r, ok := decodeSingleUnicodeEscape(in); !ok {
// Invalid Unicode escape
return utf8.RuneError, -1
} else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) {
// Valid Unicode escape in Basic Multilingual Plane
return r, 6
} else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain
// UTF16 "high surrogate" without manditory valid following Unicode escape for the "low surrogate"
return utf8.RuneError, -1
} else if r2 < lowSurrogateOffset {
// Invalid UTF16 "low surrogate"
return utf8.RuneError, -1
} else {
// Valid UTF16 surrogate pair
return combineUTF16Surrogates(r, r2), 12
}
}
// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X]
var backslashCharEscapeTable = [...]byte{
'"': '"',
'\\': '\\',
'/': '/',
'b': '\b',
'f': '\f',
'n': '\n',
'r': '\r',
't': '\t',
}
// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns
// how many characters were consumed from 'in' and emitted into 'out'.
// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) is returned to signal the error.
func unescapeToUTF8(in, out []byte) (inLen int, outLen int) {
if len(in) < 2 || in[0] != '\\' {
// Invalid escape due to insufficient characters for any escape or no initial backslash
return -1, -1
}
// https://tools.ietf.org/html/rfc7159#section-7
switch e := in[1]; e {
case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
// Valid basic 2-character escapes (use lookup table)
out[0] = backslashCharEscapeTable[e]
return 2, 1
case 'u':
// Unicode escape
if r, inLen := decodeUnicodeEscape(in); inLen == -1 {
// Invalid Unicode escape
return -1, -1
} else {
// Valid Unicode escape; re-encode as UTF8
outLen := utf8.EncodeRune(out, r)
return inLen, outLen
}
}
return -1, -1
}
// unescape unescapes the string contained in 'in' and returns it as a slice.
// If 'in' contains no escaped characters:
// Returns 'in'.
// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)):
// 'out' is used to build the unescaped string and is returned with no extra allocation
// Else:
// A new slice is allocated and returned.
func Unescape(in, out []byte) ([]byte, error) {
firstBackslash := bytes.IndexByte(in, '\\')
if firstBackslash == -1 {
return in, nil
}
// Get a buffer of sufficient size (allocate if needed)
if cap(out) < len(in) {
out = make([]byte, len(in))
} else {
out = out[0:len(in)]
}
// Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice)
copy(out, in[:firstBackslash])
in = in[firstBackslash:]
buf := out[firstBackslash:]
for len(in) > 0 {
// Unescape the next escaped character
inLen, bufLen := unescapeToUTF8(in, buf)
if inLen == -1 {
return nil, MalformedStringEscapeError
}
in = in[inLen:]
buf = buf[bufLen:]
// Copy everything up until the next backslash
nextBackslash := bytes.IndexByte(in, '\\')
if nextBackslash == -1 {
copy(buf, in)
buf = buf[len(in):]
break
} else {
copy(buf, in[:nextBackslash])
buf = buf[nextBackslash:]
in = in[nextBackslash:]
}
}
// Trim the out buffer to the amount that was actually emitted
return out[:len(out)-len(buf)], nil
}
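A minimal usage sketch for `Unescape` (the input is invented): passing a buffer with `cap(out) >= len(in)` guarantees no extra allocation, and `nil` is also accepted:
```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	in := []byte(`caf\u00e9 \"quoted\"`)

	// Reusing a preallocated buffer avoids the internal allocation.
	buf := make([]byte, 0, len(in))
	out, err := jsonparser.Unescape(in, buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // café "quoted"
}
```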

117
vendor/github.com/buger/jsonparser/fuzz.go generated vendored Normal file

@ -0,0 +1,117 @@
package jsonparser
func FuzzParseString(data []byte) int {
r, err := ParseString(data)
if err != nil || r == "" {
return 0
}
return 1
}
func FuzzEachKey(data []byte) int {
paths := [][]string{
{"name"},
{"order"},
{"nested", "a"},
{"nested", "b"},
{"nested2", "a"},
{"nested", "nested3", "b"},
{"arr", "[1]", "b"},
{"arrInt", "[3]"},
{"arrInt", "[5]"},
{"nested"},
{"arr", "["},
{"a\n", "b\n"},
}
EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...)
return 1
}
func FuzzDelete(data []byte) int {
Delete(data, "test")
return 1
}
func FuzzSet(data []byte) int {
_, err := Set(data, []byte(`"new value"`), "test")
if err != nil {
return 0
}
return 1
}
func FuzzObjectEach(data []byte) int {
_ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error {
return nil
})
return 1
}
func FuzzParseFloat(data []byte) int {
_, err := ParseFloat(data)
if err != nil {
return 0
}
return 1
}
func FuzzParseInt(data []byte) int {
_, err := ParseInt(data)
if err != nil {
return 0
}
return 1
}
func FuzzParseBool(data []byte) int {
_, err := ParseBoolean(data)
if err != nil {
return 0
}
return 1
}
func FuzzTokenStart(data []byte) int {
_ = tokenStart(data)
return 1
}
func FuzzGetString(data []byte) int {
_, err := GetString(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetFloat(data []byte) int {
_, err := GetFloat(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetInt(data []byte) int {
_, err := GetInt(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetBoolean(data []byte) int {
_, err := GetBoolean(data, "test")
if err != nil {
return 0
}
return 1
}
func FuzzGetUnsafeString(data []byte) int {
_, err := GetUnsafeString(data, "test")
if err != nil {
return 0
}
return 1
}

47
vendor/github.com/buger/jsonparser/oss-fuzz-build.sh generated vendored Normal file

@ -0,0 +1,47 @@
#!/bin/bash -eu
git clone https://github.com/dvyukov/go-fuzz-corpus
zip corpus.zip go-fuzz-corpus/json/corpus/*
cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring
cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey
cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete
cp corpus.zip $OUT/fuzzset_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset
cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach
cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat
cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint
cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool
cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart
cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring
cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat
cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint
cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean
cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip
compile_go_fuzzer github.com/buger/jsonparser FuzzGetUnsafeString fuzzgetunsafestring

1283
vendor/github.com/buger/jsonparser/parser.go generated vendored Normal file

File diff suppressed because it is too large

7
vendor/github.com/mailru/easyjson/LICENSE generated vendored Normal file

@ -0,0 +1,7 @@
Copyright (c) 2016 Mail.Ru Group
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

278
vendor/github.com/mailru/easyjson/buffer/pool.go generated vendored Normal file

@ -0,0 +1,278 @@
// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
// reduce copying and to allow reuse of individual chunks.
package buffer
import (
"io"
"net"
"sync"
)
// PoolConfig contains configuration for the allocation and reuse strategy.
type PoolConfig struct {
StartSize int // Minimum chunk size that is allocated.
PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
MaxSize int // Maximum chunk size that will be allocated.
}
var config = PoolConfig{
StartSize: 128,
PooledSize: 512,
MaxSize: 32768,
}
// Reuse pool: chunk size -> pool.
var buffers = map[int]*sync.Pool{}
func initBuffers() {
for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
buffers[l] = new(sync.Pool)
}
}
func init() {
initBuffers()
}
// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
func Init(cfg PoolConfig) {
config = cfg
initBuffers()
}
// putBuf puts a chunk to reuse pool if it can be reused.
func putBuf(buf []byte) {
size := cap(buf)
if size < config.PooledSize {
return
}
if c := buffers[size]; c != nil {
c.Put(buf[:0])
}
}
// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte {
if size >= config.PooledSize {
if c := buffers[size]; c != nil {
v := c.Get()
if v != nil {
return v.([]byte)
}
}
}
return make([]byte, 0, size)
}
// Buffer is a buffer optimized for serialization without extra copying.
type Buffer struct {
// Buf is the current chunk that can be used for serialization.
Buf []byte
toPool []byte
bufs [][]byte
}
// EnsureSpace makes sure that the current chunk contains at least s free bytes,
// possibly creating a new chunk.
func (b *Buffer) EnsureSpace(s int) {
if cap(b.Buf)-len(b.Buf) < s {
b.ensureSpaceSlow(s)
}
}
func (b *Buffer) ensureSpaceSlow(s int) {
l := len(b.Buf)
if l > 0 {
if cap(b.toPool) != cap(b.Buf) {
// Chunk was reallocated, toPool can be pooled.
putBuf(b.toPool)
}
if cap(b.bufs) == 0 {
b.bufs = make([][]byte, 0, 8)
}
b.bufs = append(b.bufs, b.Buf)
l = cap(b.toPool) * 2
} else {
l = config.StartSize
}
if l > config.MaxSize {
l = config.MaxSize
}
b.Buf = getBuf(l)
b.toPool = b.Buf
}
// AppendByte appends a single byte to buffer.
func (b *Buffer) AppendByte(data byte) {
b.EnsureSpace(1)
b.Buf = append(b.Buf, data)
}
// AppendBytes appends a byte slice to buffer.
func (b *Buffer) AppendBytes(data []byte) {
if len(data) <= cap(b.Buf)-len(b.Buf) {
b.Buf = append(b.Buf, data...) // fast path
} else {
b.appendBytesSlow(data)
}
}
func (b *Buffer) appendBytesSlow(data []byte) {
for len(data) > 0 {
b.EnsureSpace(1)
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// AppendString appends a string to buffer.
func (b *Buffer) AppendString(data string) {
if len(data) <= cap(b.Buf)-len(b.Buf) {
b.Buf = append(b.Buf, data...) // fast path
} else {
b.appendStringSlow(data)
}
}
func (b *Buffer) appendStringSlow(data string) {
for len(data) > 0 {
b.EnsureSpace(1)
sz := cap(b.Buf) - len(b.Buf)
if sz > len(data) {
sz = len(data)
}
b.Buf = append(b.Buf, data[:sz]...)
data = data[sz:]
}
}
// Size computes the size of a buffer by adding sizes of every chunk.
func (b *Buffer) Size() int {
size := len(b.Buf)
for _, buf := range b.bufs {
size += len(buf)
}
return size
}
// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
bufs := net.Buffers(b.bufs)
if len(b.Buf) > 0 {
bufs = append(bufs, b.Buf)
}
n, err := bufs.WriteTo(w)
for _, buf := range b.bufs {
putBuf(buf)
}
putBuf(b.toPool)
b.bufs = nil
b.Buf = nil
b.toPool = nil
return int(n), err
}
// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
// copied if it does not fit in a single chunk. You can optionally provide one byte
// slice as argument that it will try to reuse.
func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
if len(b.bufs) == 0 {
ret := b.Buf
b.toPool = nil
b.Buf = nil
return ret
}
var ret []byte
size := b.Size()
// If we got a buffer as argument and it is big enough, reuse it.
if len(reuse) == 1 && cap(reuse[0]) >= size {
ret = reuse[0][:0]
} else {
ret = make([]byte, 0, size)
}
for _, buf := range b.bufs {
ret = append(ret, buf...)
putBuf(buf)
}
ret = append(ret, b.Buf...)
putBuf(b.toPool)
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}
type readCloser struct {
offset int
bufs [][]byte
}
func (r *readCloser) Read(p []byte) (n int, err error) {
for _, buf := range r.bufs {
// Copy as much as we can.
x := copy(p[n:], buf[r.offset:])
n += x // Increment how much we filled.
// Did we empty the whole buffer?
if r.offset+x == len(buf) {
// On to the next buffer.
r.offset = 0
r.bufs = r.bufs[1:]
// We can release this buffer.
putBuf(buf)
} else {
r.offset += x
}
if n == len(p) {
break
}
}
// No buffers left or nothing read?
if len(r.bufs) == 0 {
err = io.EOF
}
return
}
func (r *readCloser) Close() error {
// Release all remaining buffers.
for _, buf := range r.bufs {
putBuf(buf)
}
// In case Close gets called multiple times.
r.bufs = nil
return nil
}
// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
func (b *Buffer) ReadCloser() io.ReadCloser {
ret := &readCloser{0, append(b.bufs, b.Buf)}
b.bufs = nil
b.toPool = nil
b.Buf = nil
return ret
}
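A minimal sketch of driving the `Buffer` by hand (in practice it is normally fed by the generated easyjson marshalers rather than called directly):
```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/buffer"
)

func main() {
	var b buffer.Buffer

	// Appends land in pooled chunks; nothing is copied yet.
	b.AppendString("hello, ")
	b.AppendBytes([]byte("world"))
	fmt.Println(b.Size()) // 12

	// BuildBytes stitches the chunks together (copying only when more
	// than one chunk was used) and returns the chunks to the pool.
	out := b.BuildBytes()
	fmt.Println(string(out)) // hello, world
}
```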

405
vendor/github.com/mailru/easyjson/jwriter/writer.go generated vendored Normal file

@ -0,0 +1,405 @@
// Package jwriter contains a JSON writer.
package jwriter
import (
"io"
"strconv"
"unicode/utf8"
"github.com/mailru/easyjson/buffer"
)
// Flags describe various encoding options. The behavior may actually be implemented in the encoder, but the
// Flags field in Writer is used to set and pass them around.
type Flags int
const (
NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'.
)
// Writer is a JSON writer.
type Writer struct {
Flags Flags
Error error
Buffer buffer.Buffer
NoEscapeHTML bool
}
// Size returns the size of the data that was written out.
func (w *Writer) Size() int {
return w.Buffer.Size()
}
// DumpTo outputs the data to given io.Writer, resetting the buffer.
func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
return w.Buffer.DumpTo(out)
}
// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
// as argument that it will try to reuse.
func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.BuildBytes(reuse...), nil
}
// ReadCloser returns an io.ReadCloser that can be used to read the data.
// ReadCloser also resets the buffer.
func (w *Writer) ReadCloser() (io.ReadCloser, error) {
if w.Error != nil {
return nil, w.Error
}
return w.Buffer.ReadCloser(), nil
}
// RawByte appends raw binary data to the buffer.
func (w *Writer) RawByte(c byte) {
w.Buffer.AppendByte(c)
}
// RawString appends a string to the buffer.
func (w *Writer) RawString(s string) {
w.Buffer.AppendString(s)
}
// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
// calling with results of MarshalJSON-like functions.
func (w *Writer) Raw(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.Buffer.AppendBytes(data)
default:
w.RawString("null")
}
}
// RawText encloses raw binary data in quotes and appends it to the buffer.
// Useful for calling with results of MarshalText-like functions.
func (w *Writer) RawText(data []byte, err error) {
switch {
case w.Error != nil:
return
case err != nil:
w.Error = err
case len(data) > 0:
w.String(string(data))
default:
w.RawString("null")
}
}
// Base64Bytes appends data to the buffer after base64 encoding it
func (w *Writer) Base64Bytes(data []byte) {
if data == nil {
w.Buffer.AppendString("null")
return
}
w.Buffer.AppendByte('"')
w.base64(data)
w.Buffer.AppendByte('"')
}
func (w *Writer) Uint8(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint16(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint32(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
}
func (w *Writer) Uint64(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
}
func (w *Writer) Int8(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int16(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int32(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
}
func (w *Writer) Int64(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
}
func (w *Writer) Uint8Str(n uint8) {
w.Buffer.EnsureSpace(3)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint16Str(n uint16) {
w.Buffer.EnsureSpace(5)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint32Str(n uint32) {
w.Buffer.EnsureSpace(10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintStr(n uint) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Uint64Str(n uint64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) UintptrStr(n uintptr) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int8Str(n int8) {
w.Buffer.EnsureSpace(4)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int16Str(n int16) {
w.Buffer.EnsureSpace(6)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int32Str(n int32) {
w.Buffer.EnsureSpace(11)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) IntStr(n int) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Int64Str(n int64) {
w.Buffer.EnsureSpace(21)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float32(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
}
func (w *Writer) Float32Str(n float32) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Float64(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
}
func (w *Writer) Float64Str(n float64) {
w.Buffer.EnsureSpace(20)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
w.Buffer.Buf = append(w.Buffer.Buf, '"')
}
func (w *Writer) Bool(v bool) {
w.Buffer.EnsureSpace(5)
if v {
w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
} else {
w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
}
}
const chars = "0123456789abcdef"
func getTable(falseValues ...int) [128]bool {
table := [128]bool{}
for i := 0; i < 128; i++ {
table[i] = true
}
for _, v := range falseValues {
table[v] = false
}
return table
}
var (
htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\')
htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\')
)
func (w *Writer) String(s string) {
w.Buffer.AppendByte('"')
// Portions of the string that contain no escapes are appended as
// byte slices.
p := 0 // last non-escape symbol
escapeTable := &htmlEscapeTable
if w.NoEscapeHTML {
escapeTable = &htmlNoEscapeTable
}
for i := 0; i < len(s); {
c := s[i]
if c < utf8.RuneSelf {
if escapeTable[c] {
// single-width character, no escaping is required
i++
continue
}
w.Buffer.AppendString(s[p:i])
switch c {
case '\t':
w.Buffer.AppendString(`\t`)
case '\r':
w.Buffer.AppendString(`\r`)
case '\n':
w.Buffer.AppendString(`\n`)
case '\\':
w.Buffer.AppendString(`\\`)
case '"':
w.Buffer.AppendString(`\"`)
default:
w.Buffer.AppendString(`\u00`)
w.Buffer.AppendByte(chars[c>>4])
w.Buffer.AppendByte(chars[c&0xf])
}
i++
p = i
continue
}
// broken utf
runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
if runeValue == utf8.RuneError && runeWidth == 1 {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\ufffd`)
i++
p = i
continue
}
// jsonp stuff - tab separator and line separator
if runeValue == '\u2028' || runeValue == '\u2029' {
w.Buffer.AppendString(s[p:i])
w.Buffer.AppendString(`\u202`)
w.Buffer.AppendByte(chars[runeValue&0xf])
i += runeWidth
p = i
continue
}
i += runeWidth
}
w.Buffer.AppendString(s[p:])
w.Buffer.AppendByte('"')
}
const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
const padChar = '='
func (w *Writer) base64(in []byte) {
if len(in) == 0 {
return
}
w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
si := 0
n := (len(in) / 3) * 3
for si < n {
// Convert 3x 8bit source bytes into 4 bytes
val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
si += 3
}
remain := len(in) - si
if remain == 0 {
return
}
// Add the remaining small block
val := uint(in[si+0]) << 16
if remain == 2 {
val |= uint(in[si+1]) << 8
}
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
switch remain {
case 2:
w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
case 1:
w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
}
}
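And a minimal sketch of hand-composing a JSON object with the `Writer` (generated marshalers are the usual callers); note that errors accumulate in `w.Error` instead of being returned by each append:
```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	var w jwriter.Writer

	// Compose {"name":"Alice","age":30} field by field.
	w.RawByte('{')
	w.String("name")
	w.RawByte(':')
	w.String("Alice")
	w.RawByte(',')
	w.String("age")
	w.RawByte(':')
	w.Int(30)
	w.RawByte('}')

	data, err := w.BuildBytes()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"name":"Alice","age":30}
}
```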

1
vendor/github.com/wk8/go-ordered-map/v2/.gitignore generated vendored Normal file

@ -0,0 +1 @@
/vendor/

80
vendor/github.com/wk8/go-ordered-map/v2/.golangci.yml generated vendored Normal file

@ -0,0 +1,80 @@
run:
tests: false
linters:
disable-all: true
enable:
- asciicheck
- bidichk
- bodyclose
- containedctx
- contextcheck
- decorder
- depguard
- dogsled
- dupl
- durationcheck
- errcheck
- errchkjson
# FIXME: commented out as it crashes with 1.18 for now
# - errname
- errorlint
- exportloopref
- forbidigo
- funlen
- gci
- gochecknoglobals
- gochecknoinits
- gocognit
- goconst
- gocritic
- gocyclo
- godox
- gofmt
- gofumpt
- goheader
- goimports
- gomnd
- gomoddirectives
- gomodguard
- goprintffuncname
- gosec
- gosimple
- govet
- grouper
- ifshort
- importas
- ineffassign
- lll
- maintidx
- makezero
- misspell
- nakedret
- nilerr
- nilnil
- noctx
- nolintlint
- paralleltest
- prealloc
- predeclared
- promlinter
# FIXME: doesn't support 1.18 yet
# - revive
- rowserrcheck
- sqlclosecheck
- staticcheck
- structcheck
- stylecheck
- tagliatelle
- tenv
- testpackage
- thelper
- tparallel
- typecheck
- unconvert
- unparam
- unused
- varcheck
- varnamelen
- wastedassign
- whitespace

34
vendor/github.com/wk8/go-ordered-map/v2/CHANGELOG.md generated vendored Normal file

@ -0,0 +1,34 @@
# Changelog
[comment]: # (Changes since last release go here)
## 2.1.7 - Apr 13th 2023
* Renamed test_utils.go to utils_test.go
## 2.1.6 - Feb 15th 2023
* Added `GetAndMoveToBack()` and `GetAndMoveToFront()` methods
## 2.1.5 - Dec 13th 2022
* Added `Value()` method
## 2.1.4 - Dec 12th 2022
* Fixed a bug with UTF-8 special characters in JSON keys
## 2.1.3 - Dec 11th 2022
* Added support for JSON marshalling/unmarshalling of wrapper of primitive types
## 2.1.2 - Dec 10th 2022
* Allowing to pass options to `New`, to give a capacity hint, or initial data
* Allowing to deserialize nested ordered maps from JSON without having to explicitly instantiate them
* Added the `AddPairs` method
## 2.1.1 - Dec 9th 2022
* Fixing a bug with JSON marshalling
## 2.1.0 - Dec 7th 2022
* Added support for JSON serialization/deserialization

201
vendor/github.com/wk8/go-ordered-map/v2/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

28
vendor/github.com/wk8/go-ordered-map/v2/Makefile generated vendored Normal file

@ -0,0 +1,28 @@
.DEFAULT_GOAL := all
.PHONY: all
all: test_with_fuzz lint
# the TEST_FLAGS env var can be set to eg run only specific tests
TEST_COMMAND = go test -v -count=1 -race -cover $(TEST_FLAGS)
.PHONY: test
test:
$(TEST_COMMAND)
.PHONY: bench
bench:
go test -bench=.
FUZZ_TIME ?= 10s
.PHONY: test_with_fuzz
test_with_fuzz:
$(TEST_COMMAND) -fuzz=. -fuzztime=$(FUZZ_TIME)
.PHONY: fuzz
fuzz: test_with_fuzz
.PHONY: lint
lint:
golangci-lint run

140
vendor/github.com/wk8/go-ordered-map/v2/README.md generated vendored Normal file

@ -0,0 +1,140 @@
[![Go Reference](https://pkg.go.dev/badge/github.com/wk8/go-ordered-map/v2.svg)](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2)
[![Build Status](https://circleci.com/gh/wk8/go-ordered-map.svg?style=svg)](https://app.circleci.com/pipelines/github/wk8/go-ordered-map)
# Golang Ordered Maps
Same as regular maps, but also remembers the order in which keys were inserted, akin to [Python's `collections.OrderedDict`s](https://docs.python.org/3.7/library/collections.html#ordereddict-objects).
It offers the following features:
* optimal runtime performance (all operations are constant time)
* optimal memory usage (only one copy of values, no unnecessary memory allocation)
* allows iterating from the newest or oldest key, without any memory copy, with the ability to `break` out of the iteration, and in time linear in the number of keys iterated over rather than the total length of the ordered map
* supports any generic types for both keys and values. If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) that takes and returns generic `interface{}`s instead of using generics
* idiomatic API, akin to that of [`container/list`](https://golang.org/pkg/container/list)
## Documentation
[The full documentation is available on pkg.go.dev](https://pkg.go.dev/github.com/wk8/go-ordered-map/v2).
## Installation
```bash
go get -u github.com/wk8/go-ordered-map/v2
```
Or use your favorite golang vendoring tool!
## Supported go versions
Go >= 1.18 is required to use version >= 2 of this library, as it uses generics.
If you're running go < 1.18, you can use [version 1](https://github.com/wk8/go-ordered-map/tree/v1) instead.
## Example / usage
```go
package main
import (
"fmt"
"github.com/wk8/go-ordered-map/v2"
)
func main() {
om := orderedmap.New[string, string]()
om.Set("foo", "bar")
om.Set("bar", "baz")
om.Set("coucou", "toi")
fmt.Println(om.Get("foo")) // => "bar", true
fmt.Println(om.Get("i dont exist")) // => "", false
// iterating pairs from oldest to newest:
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
fmt.Printf("%s => %s\n", pair.Key, pair.Value)
} // prints:
// foo => bar
// bar => baz
// coucou => toi
// iterating over the 2 newest pairs:
i := 0
for pair := om.Newest(); pair != nil; pair = pair.Prev() {
fmt.Printf("%s => %s\n", pair.Key, pair.Value)
i++
if i >= 2 {
break
}
} // prints:
// coucou => toi
// bar => baz
}
```
An `OrderedMap`'s keys must implement `comparable`, and its values can be anything, for example:
```go
type myStruct struct {
payload string
}
func main() {
om := orderedmap.New[int, *myStruct]()
om.Set(12, &myStruct{"foo"})
om.Set(1, &myStruct{"bar"})
value, present := om.Get(12)
if !present {
panic("should be there!")
}
fmt.Println(value.payload) // => foo
for pair := om.Oldest(); pair != nil; pair = pair.Next() {
fmt.Printf("%d => %s\n", pair.Key, pair.Value.payload)
} // prints:
// 12 => foo
// 1 => bar
}
```
It's also worth noting that you can provision ordered maps with a capacity hint, as you would by passing an optional hint to `make(map[K]V, capacity)`:
```go
om := orderedmap.New[int, *myStruct](28)
```
You can also pass in some initial data to store in the map:
```go
om := orderedmap.New[int, string](orderedmap.WithInitialData[int, string](
orderedmap.Pair[int, string]{
Key: 12,
Value: "foo",
},
orderedmap.Pair[int, string]{
Key: 28,
Value: "bar",
},
))
```
`OrderedMap`s also support JSON serialization/deserialization, and preserve order:
```go
// serialization
data, err := json.Marshal(om)
...
// deserialization
om := orderedmap.New[string, string]() // or orderedmap.New[int, any](), or any type you expect
err := json.Unmarshal(data, &om)
...
```
## Alternatives
There are several other ordered map golang implementations out there, but I believe that at the time of writing none of them offer the same functionality as this library; more specifically:
* [iancoleman/orderedmap](https://github.com/iancoleman/orderedmap) only accepts `string` keys, its `Delete` operations are linear
* [cevaris/ordered_map](https://github.com/cevaris/ordered_map) uses a channel for iterations, and leaks goroutines if the iteration is interrupted before fully traversing the map
* [mantyr/iterator](https://github.com/mantyr/iterator) also uses a channel for iterations, and its `Delete` operations are linear
* [samdolan/go-ordered-map](https://github.com/samdolan/go-ordered-map) adds unnecessary locking (users should add their own locking instead if they need it), its `Delete` and `Get` operations are linear, iterations trigger a linear memory allocation

182
vendor/github.com/wk8/go-ordered-map/v2/json.go generated vendored Normal file

@ -0,0 +1,182 @@
package orderedmap
import (
"bytes"
"encoding"
"encoding/json"
"fmt"
"reflect"
"unicode/utf8"
"github.com/buger/jsonparser"
"github.com/mailru/easyjson/jwriter"
)
var (
_ json.Marshaler = &OrderedMap[int, any]{}
_ json.Unmarshaler = &OrderedMap[int, any]{}
)
// MarshalJSON implements the json.Marshaler interface.
func (om *OrderedMap[K, V]) MarshalJSON() ([]byte, error) { //nolint:funlen
if om == nil || om.list == nil {
return []byte("null"), nil
}
writer := jwriter.Writer{}
writer.RawByte('{')
for pair, firstIteration := om.Oldest(), true; pair != nil; pair = pair.Next() {
if firstIteration {
firstIteration = false
} else {
writer.RawByte(',')
}
switch key := any(pair.Key).(type) {
case string:
writer.String(key)
case encoding.TextMarshaler:
writer.RawByte('"')
writer.Raw(key.MarshalText())
writer.RawByte('"')
case int:
writer.IntStr(key)
case int8:
writer.Int8Str(key)
case int16:
writer.Int16Str(key)
case int32:
writer.Int32Str(key)
case int64:
writer.Int64Str(key)
case uint:
writer.UintStr(key)
case uint8:
writer.Uint8Str(key)
case uint16:
writer.Uint16Str(key)
case uint32:
writer.Uint32Str(key)
case uint64:
writer.Uint64Str(key)
default:
// this switch takes care of wrapper types around primitive types, such as
// type myType string
switch keyValue := reflect.ValueOf(key); keyValue.Type().Kind() {
case reflect.String:
writer.String(keyValue.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
writer.Int64Str(keyValue.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
writer.Uint64Str(keyValue.Uint())
default:
return nil, fmt.Errorf("unsupported key type: %T", key)
}
}
writer.RawByte(':')
// the error is checked at the end of the function
writer.Raw(json.Marshal(pair.Value)) //nolint:errchkjson
}
writer.RawByte('}')
return dumpWriter(&writer)
}
func dumpWriter(writer *jwriter.Writer) ([]byte, error) {
if writer.Error != nil {
return nil, writer.Error
}
var buf bytes.Buffer
buf.Grow(writer.Size())
if _, err := writer.DumpTo(&buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (om *OrderedMap[K, V]) UnmarshalJSON(data []byte) error {
if om.list == nil {
om.initialize(0)
}
return jsonparser.ObjectEach(
data,
func(keyData []byte, valueData []byte, dataType jsonparser.ValueType, offset int) error {
if dataType == jsonparser.String {
// jsonparser removes the enclosing quotes; we need to restore them to make valid JSON
valueData = data[offset-len(valueData)-2 : offset]
}
var key K
var value V
switch typedKey := any(&key).(type) {
case *string:
s, err := decodeUTF8(keyData)
if err != nil {
return err
}
*typedKey = s
case encoding.TextUnmarshaler:
if err := typedKey.UnmarshalText(keyData); err != nil {
return err
}
case *int, *int8, *int16, *int32, *int64, *uint, *uint8, *uint16, *uint32, *uint64:
if err := json.Unmarshal(keyData, typedKey); err != nil {
return err
}
default:
// this switch takes care of wrapper types around primitive types, such as
// type myType string
switch reflect.TypeOf(key).Kind() {
case reflect.String:
s, err := decodeUTF8(keyData)
if err != nil {
return err
}
convertedKeyData := reflect.ValueOf(s).Convert(reflect.TypeOf(key))
reflect.ValueOf(&key).Elem().Set(convertedKeyData)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if err := json.Unmarshal(keyData, &key); err != nil {
return err
}
default:
return fmt.Errorf("unsupported key type: %T", key)
}
}
if err := json.Unmarshal(valueData, &value); err != nil {
return err
}
om.Set(key, value)
return nil
})
}
func decodeUTF8(input []byte) (string, error) {
remaining, offset := input, 0
runes := make([]rune, 0, len(remaining))
for len(remaining) > 0 {
r, size := utf8.DecodeRune(remaining)
if r == utf8.RuneError && size <= 1 {
return "", fmt.Errorf("not a valid UTF-8 string (at position %d): %s", offset, string(input))
}
runes = append(runes, r)
remaining = remaining[size:]
offset += size
}
return string(runes), nil
}

296
vendor/github.com/wk8/go-ordered-map/v2/orderedmap.go generated vendored Normal file

@ -0,0 +1,296 @@
// Package orderedmap implements an ordered map, i.e. a map that also keeps track of
// the order in which keys were inserted.
//
// All operations are constant-time.
//
// Github repo: https://github.com/wk8/go-ordered-map
//
package orderedmap
import (
"fmt"
list "github.com/bahlo/generic-list-go"
)
type Pair[K comparable, V any] struct {
Key K
Value V
element *list.Element[*Pair[K, V]]
}
type OrderedMap[K comparable, V any] struct {
pairs map[K]*Pair[K, V]
list *list.List[*Pair[K, V]]
}
type initConfig[K comparable, V any] struct {
capacity int
initialData []Pair[K, V]
}
type InitOption[K comparable, V any] func(config *initConfig[K, V])
// WithCapacity allows giving a capacity hint for the map, akin to the standard make(map[K]V, capacity).
func WithCapacity[K comparable, V any](capacity int) InitOption[K, V] {
return func(c *initConfig[K, V]) {
c.capacity = capacity
}
}
// WithInitialData allows passing in initial data for the map.
func WithInitialData[K comparable, V any](initialData ...Pair[K, V]) InitOption[K, V] {
return func(c *initConfig[K, V]) {
c.initialData = initialData
if c.capacity < len(initialData) {
c.capacity = len(initialData)
}
}
}
// New creates a new OrderedMap.
// options can either be one or several InitOption[K, V], or a single integer,
// which is then interpreted as a capacity hint, à la make(map[K]V, capacity).
func New[K comparable, V any](options ...any) *OrderedMap[K, V] { //nolint:varnamelen
orderedMap := &OrderedMap[K, V]{}
var config initConfig[K, V]
for _, untypedOption := range options {
switch option := untypedOption.(type) {
case int:
if len(options) != 1 {
invalidOption()
}
config.capacity = option
case InitOption[K, V]:
option(&config)
default:
invalidOption()
}
}
orderedMap.initialize(config.capacity)
orderedMap.AddPairs(config.initialData...)
return orderedMap
}
const invalidOptionMessage = `when using orderedmap.New[K,V]() with options, either provide one or several InitOption[K, V]; or a single integer which is then interpreted as a capacity hint, à la make(map[K]V, capacity).` //nolint:lll
func invalidOption() { panic(invalidOptionMessage) }
func (om *OrderedMap[K, V]) initialize(capacity int) {
om.pairs = make(map[K]*Pair[K, V], capacity)
om.list = list.New[*Pair[K, V]]()
}
// Get looks for the given key, and returns the value associated with it,
// or V's nil value if not found. The boolean it returns says whether the key is present in the map.
func (om *OrderedMap[K, V]) Get(key K) (val V, present bool) {
if pair, present := om.pairs[key]; present {
return pair.Value, true
}
return
}
// Load is an alias for Get, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Load(key K) (V, bool) {
return om.Get(key)
}
// Value returns the value associated with the given key or the zero value.
func (om *OrderedMap[K, V]) Value(key K) (val V) {
if pair, present := om.pairs[key]; present {
val = pair.Value
}
return
}
// GetPair looks for the given key, and returns the pair associated with it,
// or nil if not found. The Pair struct can then be used to iterate over the ordered map
// from that point, either forward or backward.
func (om *OrderedMap[K, V]) GetPair(key K) *Pair[K, V] {
return om.pairs[key]
}
// Set sets the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Set`.
func (om *OrderedMap[K, V]) Set(key K, value V) (val V, present bool) {
if pair, present := om.pairs[key]; present {
oldValue := pair.Value
pair.Value = value
return oldValue, true
}
pair := &Pair[K, V]{
Key: key,
Value: value,
}
pair.element = om.list.PushBack(pair)
om.pairs[key] = pair
return
}
// AddPairs allows setting multiple pairs at a time. It's equivalent to calling
// Set on each pair sequentially.
func (om *OrderedMap[K, V]) AddPairs(pairs ...Pair[K, V]) {
for _, pair := range pairs {
om.Set(pair.Key, pair.Value)
}
}
// Store is an alias for Set, mostly to present an API similar to `sync.Map`'s.
func (om *OrderedMap[K, V]) Store(key K, value V) (V, bool) {
return om.Set(key, value)
}
// Delete removes the key-value pair, and returns what `Get` would have returned
// on that key prior to the call to `Delete`.
func (om *OrderedMap[K, V]) Delete(key K) (val V, present bool) {
if pair, present := om.pairs[key]; present {
om.list.Remove(pair.element)
delete(om.pairs, key)
return pair.Value, true
}
return
}
// Len returns the length of the ordered map.
func (om *OrderedMap[K, V]) Len() int {
if om == nil || om.pairs == nil {
return 0
}
return len(om.pairs)
}
// Oldest returns a pointer to the oldest pair. It's meant to be used to iterate on the ordered map's
// pairs from the oldest to the newest, e.g.:
// for pair := orderedMap.Oldest(); pair != nil; pair = pair.Next() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Oldest() *Pair[K, V] {
if om == nil || om.list == nil {
return nil
}
return listElementToPair(om.list.Front())
}
// Newest returns a pointer to the newest pair. It's meant to be used to iterate on the ordered map's
// pairs from the newest to the oldest, e.g.:
// for pair := orderedMap.Newest(); pair != nil; pair = pair.Prev() { fmt.Printf("%v => %v\n", pair.Key, pair.Value) }
func (om *OrderedMap[K, V]) Newest() *Pair[K, V] {
if om == nil || om.list == nil {
return nil
}
return listElementToPair(om.list.Back())
}
// Next returns a pointer to the next pair.
func (p *Pair[K, V]) Next() *Pair[K, V] {
return listElementToPair(p.element.Next())
}
// Prev returns a pointer to the previous pair.
func (p *Pair[K, V]) Prev() *Pair[K, V] {
return listElementToPair(p.element.Prev())
}
func listElementToPair[K comparable, V any](element *list.Element[*Pair[K, V]]) *Pair[K, V] {
if element == nil {
return nil
}
return element.Value
}
// KeyNotFoundError may be returned by functions in this package when they're called with keys that are not present
// in the map.
type KeyNotFoundError[K comparable] struct {
MissingKey K
}
func (e *KeyNotFoundError[K]) Error() string {
return fmt.Sprintf("missing key: %v", e.MissingKey)
}
// MoveAfter moves the value associated with key to its new position after the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveAfter(key, markKey K) error {
elements, err := om.getElements(key, markKey)
if err != nil {
return err
}
om.list.MoveAfter(elements[0], elements[1])
return nil
}
// MoveBefore moves the value associated with key to its new position before the one associated with markKey.
// Returns an error iff key or markKey are not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveBefore(key, markKey K) error {
elements, err := om.getElements(key, markKey)
if err != nil {
return err
}
om.list.MoveBefore(elements[0], elements[1])
return nil
}
func (om *OrderedMap[K, V]) getElements(keys ...K) ([]*list.Element[*Pair[K, V]], error) {
elements := make([]*list.Element[*Pair[K, V]], len(keys))
for i, k := range keys {
pair, present := om.pairs[k]
if !present {
return nil, &KeyNotFoundError[K]{k}
}
elements[i] = pair.element
}
return elements, nil
}
// MoveToBack moves the value associated with key to the back of the ordered map,
// i.e. makes it the newest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToBack(key K) error {
_, err := om.GetAndMoveToBack(key)
return err
}
// MoveToFront moves the value associated with key to the front of the ordered map,
// i.e. makes it the oldest pair in the map.
// Returns an error iff key is not present in the map. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) MoveToFront(key K) error {
_, err := om.GetAndMoveToFront(key)
return err
}
// GetAndMoveToBack combines Get and MoveToBack in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToBack(key K) (val V, err error) {
if pair, present := om.pairs[key]; present {
val = pair.Value
om.list.MoveToBack(pair.element)
} else {
err = &KeyNotFoundError[K]{key}
}
return
}
// GetAndMoveToFront combines Get and MoveToFront in the same call. If an error is returned,
// it will be a KeyNotFoundError.
func (om *OrderedMap[K, V]) GetAndMoveToFront(key K) (val V, err error) {
if pair, present := om.pairs[key]; present {
val = pair.Value
om.list.MoveToFront(pair.element)
} else {
err = &KeyNotFoundError[K]{key}
}
return
}
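To make the vendored API above concrete, here is a brief, hedged usage sketch (not part of the vendored file), exercising New's two option forms, Set/Get, insertion-order iteration, and the move helpers:

package main

import (
	"fmt"

	orderedmap "github.com/wk8/go-ordered-map/v2"
)

func main() {
	// A single integer argument is a capacity hint, per New's documentation.
	om := orderedmap.New[string, int](4)
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	if v, ok := om.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}

	// Iteration follows insertion order: a, b, c.
	for pair := om.Oldest(); pair != nil; pair = pair.Next() {
		fmt.Printf("%s => %d\n", pair.Key, pair.Value)
	}

	// Move helpers reorder in place; absent keys yield a KeyNotFoundError.
	if err := om.MoveToFront("c"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(om.Oldest().Key) // c

	// Options form: a capacity hint plus initial data.
	om2 := orderedmap.New[string, int](
		orderedmap.WithCapacity[string, int](8),
		orderedmap.WithInitialData(orderedmap.Pair[string, int]{Key: "x", Value: 9}),
	)
	fmt.Println(om2.Len()) // 1
}

Note that Load and Store are aliases for Get and Set, mirroring sync.Map's method names.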

13
vendor/modules.txt vendored

@ -116,6 +116,9 @@ github.com/anacrolix/utp
# github.com/andybalholm/cascadia v1.2.0
## explicit; go 1.13
github.com/andybalholm/cascadia
# github.com/bahlo/generic-list-go v0.2.0
## explicit; go 1.18
github.com/bahlo/generic-list-go
# github.com/beevik/ntp v0.3.0
## explicit
github.com/beevik/ntp
@ -151,6 +154,9 @@ github.com/btcsuite/btcd/chaincfg/chainhash
github.com/btcsuite/btcutil
github.com/btcsuite/btcutil/base58
github.com/btcsuite/btcutil/bech32
# github.com/buger/jsonparser v1.1.1
## explicit; go 1.13
github.com/buger/jsonparser
# github.com/cenkalti/backoff/v3 v3.2.2
## explicit; go 1.12
github.com/cenkalti/backoff/v3
@ -569,6 +575,10 @@ github.com/libp2p/go-yamux/v4
# github.com/lucasb-eyer/go-colorful v1.0.3
## explicit; go 1.12
github.com/lucasb-eyer/go-colorful
# github.com/mailru/easyjson v0.7.7
## explicit; go 1.12
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jwriter
# github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd
## explicit; go 1.15
github.com/marten-seemann/tcp
@ -1057,6 +1067,9 @@ github.com/wealdtech/go-multicodec
# github.com/wk8/go-ordered-map v1.0.0
## explicit; go 1.14
github.com/wk8/go-ordered-map
# github.com/wk8/go-ordered-map/v2 v2.1.7
## explicit; go 1.18
github.com/wk8/go-ordered-map/v2
# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
## explicit
github.com/xeipuuv/gojsonpointer