🎭 📊 Anonymous Metrics V0 (#2170)

* Migrations in place, how to run them?

* Remove down migrations and touch database.go

* Database and Database Test package in place, added functions to get and store app metrics

* make generate output

* Minor bug fix on app metrics insert and select

* Add a validation layer to restrict what can be saved in the database

* Make validation more terse, throw error if schema doesn't exist, expose appmetrics service

* service updates

* Compute all errors before sending them out

* Trying to bring a closure to the appmetrics Go code

* Expose appmetrics via an api, skip fancy

* Treat value as json.RawMessage to ease parsing

* Introduce a buffered chan with magic cap of 8 to minimize writes to DB. Tests for service and API. Also expose GetAppMetrics function.

* Lint issues

* Remove autoincrement, undo waku.json changes, fix error being shadowed, return nil where nil ought to be returned, get rid of buffered channel

* Bump migration number

* Fix API factory usage

* Add comment re: json.RawMessage instead of strings

* Get rid of test vars, throw save error inside the loop

* Update version

Co-authored-by: Samuel Hawksby-Robinson <samuel@samyoul.com>
Shivek Khurana 2021-03-17 18:09:28 +05:30 committed by GitHub
parent 66fbfc1daf
commit 1097b14a7f
54 changed files with 7068 additions and 126 deletions
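
The diffs that follow add a new appmetrics package (schema-validated metrics persisted in the app database) and register it as a backend service. For orientation, a minimal sketch of the flow the new code enables; only the appmetrics calls come from this change, while the surrounding helper and the *sql.DB argument are illustrative.

package main

import (
	"database/sql"
	"encoding/json"
	"fmt"

	"github.com/status-im/status-go/appmetrics"
)

// saveAndList is a hypothetical helper; in status-go the appDB would
// normally come from appdatabase.InitializeDB, as in the tests below.
func saveAndList(appDB *sql.DB) error {
	db := appmetrics.NewDB(appDB)

	metrics := []appmetrics.AppMetric{{
		Event:      appmetrics.NavigationNavigateToCofx,
		Value:      json.RawMessage(`{"view_id": "some-view-id", "params": {"screen": "allowed-screen-name"}}`),
		AppVersion: "1.11",
		OS:         "android",
	}}

	// SaveAppMetrics validates every metric against its JSON schema first;
	// an unknown event or a value that violates the schema fails the whole batch.
	if err := db.SaveAppMetrics(metrics); err != nil {
		return err
	}

	stored, err := db.GetAppMetrics(10, 0)
	if err != nil {
		return err
	}
	fmt.Printf("stored %d app metrics\n", len(stored))
	return nil
}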

View File

@@ -1 +1 @@
-0.73.6
+0.73.7

View File

@@ -23,6 +23,7 @@ import (
 	"github.com/status-im/status-go/account"
 	"github.com/status-im/status-go/appdatabase"
+	"github.com/status-im/status-go/appmetrics"
 	"github.com/status-im/status-go/eth-node/crypto"
 	"github.com/status-im/status-go/eth-node/types"
 	"github.com/status-im/status-go/logutils"
@@ -33,6 +34,7 @@ import (
 	"github.com/status-im/status-go/params"
 	"github.com/status-im/status-go/rpc"
 	accountssvc "github.com/status-im/status-go/services/accounts"
+	appmetricsservice "github.com/status-im/status-go/services/appmetrics"
 	"github.com/status-im/status-go/services/browsers"
 	localnotifications "github.com/status-im/status-go/services/local-notifications"
 	"github.com/status-im/status-go/services/mailservers"
@@ -578,6 +580,12 @@ func (b *GethStatusBackend) mailserversService() gethnode.ServiceConstructor {
 	}
 }
 
+func (b *GethStatusBackend) appmetricsService() gethnode.ServiceConstructor {
+	return func(*gethnode.ServiceContext) (gethnode.Service, error) {
+		return appmetricsservice.NewService(appmetrics.NewDB(b.appDB)), nil
+	}
+}
+
 func (b *GethStatusBackend) walletService(network uint64, accountsFeed *event.Feed) gethnode.ServiceConstructor {
 	return func(*gethnode.ServiceContext) (gethnode.Service, error) {
 		return wallet.NewService(wallet.NewDB(b.appDB, network), accountsFeed), nil
@@ -614,6 +622,7 @@ func (b *GethStatusBackend) startNode(config *params.NodeConfig) (err error) {
 	services = appendIf(config.UpstreamConfig.Enabled, services, b.rpcFiltersService())
 	services = append(services, b.subscriptionService())
 	services = append(services, b.rpcStatsService())
+	services = append(services, b.appmetricsService())
 	services = appendIf(b.appDB != nil && b.multiaccountsDB != nil, services, b.accountsService(accountsFeed))
 	services = appendIf(config.BrowsersConfig.Enabled, services, b.browsersService())
 	services = appendIf(config.PermissionsConfig.Enabled, services, b.permissionsService())

View File

@ -33,6 +33,7 @@
// 0017_bookmarks.up.sql (147B) // 0017_bookmarks.up.sql (147B)
// 0018_profile_pictures_visibility.up.sql (84B) // 0018_profile_pictures_visibility.up.sql (84B)
// 0019_blocks_ranges_extra_data.up.sql (89B) // 0019_blocks_ranges_extra_data.up.sql (89B)
// 0020_metrics.up.sql (235B)
// doc.go (74B) // doc.go (74B)
package migrations package migrations
@ -117,7 +118,7 @@ func _0001_appDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_app.down.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0001_app.down.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0x25, 0xa0, 0xf8, 0x7d, 0x2d, 0xd, 0xcf, 0x18, 0xe4, 0x73, 0xc3, 0x95, 0xf5, 0x24, 0x20, 0xa9, 0xe6, 0x9e, 0x1d, 0x93, 0xe5, 0xc5, 0xad, 0x93, 0x8f, 0x5e, 0x40, 0xb5, 0x30, 0xaa, 0x25}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0x25, 0xa0, 0xf8, 0x7d, 0x2d, 0xd, 0xcf, 0x18, 0xe4, 0x73, 0xc3, 0x95, 0xf5, 0x24, 0x20, 0xa9, 0xe6, 0x9e, 0x1d, 0x93, 0xe5, 0xc5, 0xad, 0x93, 0x8f, 0x5e, 0x40, 0xb5, 0x30, 0xaa, 0x25}}
return a, nil return a, nil
} }
@ -137,7 +138,7 @@ func _0001_appUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_app.up.sql", size: 2967, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0001_app.up.sql", size: 2967, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0x3a, 0xa7, 0xf2, 0x8f, 0xfa, 0x82, 0x7c, 0xc5, 0x49, 0xac, 0xac, 0xf, 0xc, 0x77, 0xe2, 0xba, 0xe8, 0x4d, 0xe, 0x6f, 0x5d, 0x2c, 0x2c, 0x18, 0x80, 0xc2, 0x1d, 0xe, 0x25, 0xe, 0x18}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0x3a, 0xa7, 0xf2, 0x8f, 0xfa, 0x82, 0x7c, 0xc5, 0x49, 0xac, 0xac, 0xf, 0xc, 0x77, 0xe2, 0xba, 0xe8, 0x4d, 0xe, 0x6f, 0x5d, 0x2c, 0x2c, 0x18, 0x80, 0xc2, 0x1d, 0xe, 0x25, 0xe, 0x18}}
return a, nil return a, nil
} }
@ -157,7 +158,7 @@ func _0002_tokensDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0002_tokens.down.sql", size: 19, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0002_tokens.down.sql", size: 19, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0x31, 0x2, 0xcc, 0x2f, 0x38, 0x90, 0xf7, 0x58, 0x37, 0x47, 0xf4, 0x18, 0xf7, 0x72, 0x74, 0x67, 0x14, 0x7e, 0xf3, 0xb1, 0xd6, 0x5f, 0xb0, 0xd5, 0xe7, 0x91, 0xf4, 0x26, 0x77, 0x8e, 0x68}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0x31, 0x2, 0xcc, 0x2f, 0x38, 0x90, 0xf7, 0x58, 0x37, 0x47, 0xf4, 0x18, 0xf7, 0x72, 0x74, 0x67, 0x14, 0x7e, 0xf3, 0xb1, 0xd6, 0x5f, 0xb0, 0xd5, 0xe7, 0x91, 0xf4, 0x26, 0x77, 0x8e, 0x68}}
return a, nil return a, nil
} }
@ -177,7 +178,7 @@ func _0002_tokensUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0002_tokens.up.sql", size: 248, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0002_tokens.up.sql", size: 248, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcc, 0xd6, 0xde, 0xd3, 0x7b, 0xee, 0x92, 0x11, 0x38, 0xa4, 0xeb, 0x84, 0xca, 0xcb, 0x37, 0x75, 0x5, 0x77, 0x7f, 0x14, 0x39, 0xee, 0xa1, 0x8b, 0xd4, 0x5c, 0x6e, 0x55, 0x6, 0x50, 0x16, 0xd4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcc, 0xd6, 0xde, 0xd3, 0x7b, 0xee, 0x92, 0x11, 0x38, 0xa4, 0xeb, 0x84, 0xca, 0xcb, 0x37, 0x75, 0x5, 0x77, 0x7f, 0x14, 0x39, 0xee, 0xa1, 0x8b, 0xd4, 0x5c, 0x6e, 0x55, 0x6, 0x50, 0x16, 0xd4}}
return a, nil return a, nil
} }
@ -197,7 +198,7 @@ func _0003_settingsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0003_settings.down.sql", size: 118, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0003_settings.down.sql", size: 118, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0xa6, 0xf5, 0xc0, 0x60, 0x64, 0x77, 0xe2, 0xe7, 0x3c, 0x9b, 0xb1, 0x52, 0xa9, 0x95, 0x16, 0xf8, 0x60, 0x2f, 0xa5, 0xeb, 0x46, 0xb9, 0xb9, 0x8f, 0x4c, 0xf4, 0xfd, 0xbb, 0xe7, 0xe5, 0xe5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0xa6, 0xf5, 0xc0, 0x60, 0x64, 0x77, 0xe2, 0xe7, 0x3c, 0x9b, 0xb1, 0x52, 0xa9, 0x95, 0x16, 0xf8, 0x60, 0x2f, 0xa5, 0xeb, 0x46, 0xb9, 0xb9, 0x8f, 0x4c, 0xf4, 0xfd, 0xbb, 0xe7, 0xe5, 0xe5}}
return a, nil return a, nil
} }
@ -217,7 +218,7 @@ func _0003_settingsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0003_settings.up.sql", size: 1311, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0003_settings.up.sql", size: 1311, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x35, 0x0, 0xeb, 0xe2, 0x33, 0x68, 0xb9, 0xf4, 0xf6, 0x8e, 0x9e, 0x10, 0xe9, 0x58, 0x68, 0x28, 0xb, 0xcd, 0xec, 0x74, 0x71, 0xa7, 0x9a, 0x5a, 0x77, 0x59, 0xb1, 0x13, 0x1c, 0xa1, 0x5b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x35, 0x0, 0xeb, 0xe2, 0x33, 0x68, 0xb9, 0xf4, 0xf6, 0x8e, 0x9e, 0x10, 0xe9, 0x58, 0x68, 0x28, 0xb, 0xcd, 0xec, 0x74, 0x71, 0xa7, 0x9a, 0x5a, 0x77, 0x59, 0xb1, 0x13, 0x1c, 0xa1, 0x5b}}
return a, nil return a, nil
} }
@ -237,7 +238,7 @@ func _0004_pending_stickersDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0004_pending_stickers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0004_pending_stickers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -257,7 +258,7 @@ func _0004_pending_stickersUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0004_pending_stickers.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0004_pending_stickers.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3c, 0xed, 0x25, 0xdf, 0x75, 0x2, 0x6c, 0xf0, 0xa2, 0xa8, 0x37, 0x62, 0x65, 0xad, 0xfd, 0x98, 0xa0, 0x9d, 0x63, 0x94, 0xdf, 0x6b, 0x46, 0xe0, 0x68, 0xec, 0x9c, 0x7f, 0x77, 0xdd, 0xb3, 0x6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3c, 0xed, 0x25, 0xdf, 0x75, 0x2, 0x6c, 0xf0, 0xa2, 0xa8, 0x37, 0x62, 0x65, 0xad, 0xfd, 0x98, 0xa0, 0x9d, 0x63, 0x94, 0xdf, 0x6b, 0x46, 0xe0, 0x68, 0xec, 0x9c, 0x7f, 0x77, 0xdd, 0xb3, 0x6}}
return a, nil return a, nil
} }
@ -277,7 +278,7 @@ func _0005_waku_modeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0005_waku_mode.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0005_waku_mode.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -297,7 +298,7 @@ func _0005_waku_modeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0005_waku_mode.up.sql", size: 146, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "0005_waku_mode.up.sql", size: 146, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa6, 0x91, 0xc, 0xd7, 0x89, 0x61, 0x2e, 0x4c, 0x5a, 0xb6, 0x67, 0xd1, 0xc1, 0x42, 0x24, 0x38, 0xd6, 0x1b, 0x75, 0x41, 0x9c, 0x23, 0xb0, 0xca, 0x5c, 0xf1, 0x5c, 0xd0, 0x13, 0x92, 0x3e, 0xe1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa6, 0x91, 0xc, 0xd7, 0x89, 0x61, 0x2e, 0x4c, 0x5a, 0xb6, 0x67, 0xd1, 0xc1, 0x42, 0x24, 0x38, 0xd6, 0x1b, 0x75, 0x41, 0x9c, 0x23, 0xb0, 0xca, 0x5c, 0xf1, 0x5c, 0xd0, 0x13, 0x92, 0x3e, 0xe1}}
return a, nil return a, nil
} }
@ -317,7 +318,7 @@ func _0006_appearanceUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0006_appearance.up.sql", size: 67, mode: os.FileMode(0644), modTime: time.Unix(1609934130, 0)} info := bindataFileInfo{name: "0006_appearance.up.sql", size: 67, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x6, 0x25, 0x6c, 0xe4, 0x9d, 0xa7, 0x72, 0xe8, 0xbc, 0xe4, 0x1f, 0x1e, 0x2d, 0x7c, 0xb7, 0xf6, 0xa3, 0xec, 0x3b, 0x4e, 0x93, 0x2e, 0xa4, 0xec, 0x6f, 0xe5, 0x95, 0x94, 0xe8, 0x4, 0xfb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x6, 0x25, 0x6c, 0xe4, 0x9d, 0xa7, 0x72, 0xe8, 0xbc, 0xe4, 0x1f, 0x1e, 0x2d, 0x7c, 0xb7, 0xf6, 0xa3, 0xec, 0x3b, 0x4e, 0x93, 0x2e, 0xa4, 0xec, 0x6f, 0xe5, 0x95, 0x94, 0xe8, 0x4, 0xfb}}
return a, nil return a, nil
} }
@ -337,7 +338,7 @@ func _0007_enable_waku_defaultUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0007_enable_waku_default.up.sql", size: 38, mode: os.FileMode(0644), modTime: time.Unix(1609934130, 0)} info := bindataFileInfo{name: "0007_enable_waku_default.up.sql", size: 38, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x42, 0xb6, 0xe5, 0x48, 0x41, 0xeb, 0xc0, 0x7e, 0x3b, 0xe6, 0x8e, 0x96, 0x33, 0x20, 0x92, 0x24, 0x5a, 0x60, 0xfa, 0xa0, 0x3, 0x5e, 0x76, 0x4b, 0x89, 0xaa, 0x37, 0x66, 0xbc, 0x26, 0x11}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x42, 0xb6, 0xe5, 0x48, 0x41, 0xeb, 0xc0, 0x7e, 0x3b, 0xe6, 0x8e, 0x96, 0x33, 0x20, 0x92, 0x24, 0x5a, 0x60, 0xfa, 0xa0, 0x3, 0x5e, 0x76, 0x4b, 0x89, 0xaa, 0x37, 0x66, 0xbc, 0x26, 0x11}}
return a, nil return a, nil
} }
@ -357,7 +358,7 @@ func _0008_add_push_notificationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0008_add_push_notifications.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0008_add_push_notifications.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5a, 0x0, 0xbf, 0xd0, 0xdd, 0xcd, 0x73, 0xe0, 0x7c, 0x56, 0xef, 0xdc, 0x57, 0x61, 0x94, 0x64, 0x70, 0xb9, 0xfa, 0xa1, 0x2a, 0x36, 0xc, 0x2f, 0xf8, 0x95, 0xa, 0x57, 0x3e, 0x7a, 0xd7, 0x12}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5a, 0x0, 0xbf, 0xd0, 0xdd, 0xcd, 0x73, 0xe0, 0x7c, 0x56, 0xef, 0xdc, 0x57, 0x61, 0x94, 0x64, 0x70, 0xb9, 0xfa, 0xa1, 0x2a, 0x36, 0xc, 0x2f, 0xf8, 0x95, 0xa, 0x57, 0x3e, 0x7a, 0xd7, 0x12}}
return a, nil return a, nil
} }
@ -377,7 +378,7 @@ func _0009_enable_sending_push_notificationsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0009_enable_sending_push_notifications.down.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0009_enable_sending_push_notifications.down.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xae, 0x1b, 0x41, 0xcb, 0x9c, 0x2c, 0x93, 0xc6, 0x2a, 0x77, 0x3, 0xb9, 0x51, 0xe0, 0x68, 0x68, 0x0, 0xf7, 0x5b, 0xb3, 0x1e, 0x94, 0x44, 0xba, 0x9c, 0xd0, 0x3b, 0x80, 0x21, 0x6f, 0xb5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xae, 0x1b, 0x41, 0xcb, 0x9c, 0x2c, 0x93, 0xc6, 0x2a, 0x77, 0x3, 0xb9, 0x51, 0xe0, 0x68, 0x68, 0x0, 0xf7, 0x5b, 0xb3, 0x1e, 0x94, 0x44, 0xba, 0x9c, 0xd0, 0x3b, 0x80, 0x21, 0x6f, 0xb5}}
return a, nil return a, nil
} }
@ -397,7 +398,7 @@ func _0009_enable_sending_push_notificationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0009_enable_sending_push_notifications.up.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0009_enable_sending_push_notifications.up.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1b, 0x80, 0xe4, 0x9c, 0xc8, 0xb8, 0xd5, 0xef, 0xce, 0x74, 0x9b, 0x7b, 0xdd, 0xa, 0x99, 0x1e, 0xef, 0x7f, 0xb8, 0x99, 0x84, 0x4, 0x0, 0x6b, 0x1d, 0x2c, 0xa, 0xf8, 0x2c, 0x4f, 0xb5, 0x44}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1b, 0x80, 0xe4, 0x9c, 0xc8, 0xb8, 0xd5, 0xef, 0xce, 0x74, 0x9b, 0x7b, 0xdd, 0xa, 0x99, 0x1e, 0xef, 0x7f, 0xb8, 0x99, 0x84, 0x4, 0x0, 0x6b, 0x1d, 0x2c, 0xa, 0xf8, 0x2c, 0x4f, 0xb5, 0x44}}
return a, nil return a, nil
} }
@ -417,7 +418,7 @@ func _0010_add_block_mentionsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0010_add_block_mentions.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0010_add_block_mentions.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x9e, 0x27, 0x1e, 0xba, 0x9f, 0xca, 0xae, 0x98, 0x2e, 0x6e, 0xe3, 0xdd, 0xac, 0x73, 0x34, 0x4e, 0x69, 0x92, 0xb5, 0xf6, 0x9, 0xab, 0x50, 0x35, 0xd, 0xee, 0xeb, 0x3e, 0xcc, 0x7e, 0xce}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x9e, 0x27, 0x1e, 0xba, 0x9f, 0xca, 0xae, 0x98, 0x2e, 0x6e, 0xe3, 0xdd, 0xac, 0x73, 0x34, 0x4e, 0x69, 0x92, 0xb5, 0xf6, 0x9, 0xab, 0x50, 0x35, 0xd, 0xee, 0xeb, 0x3e, 0xcc, 0x7e, 0xce}}
return a, nil return a, nil
} }
@ -437,7 +438,7 @@ func _0010_add_block_mentionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0010_add_block_mentions.up.sql", size: 89, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0010_add_block_mentions.up.sql", size: 89, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd7, 0x23, 0x85, 0xa2, 0xb5, 0xb6, 0xb4, 0x3f, 0xdc, 0x4e, 0xff, 0xe2, 0x6b, 0x66, 0x68, 0x5e, 0xb2, 0xb4, 0x14, 0xb2, 0x1b, 0x4d, 0xb1, 0xce, 0xf7, 0x6, 0x58, 0xa7, 0xaf, 0x93, 0x3f, 0x25}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd7, 0x23, 0x85, 0xa2, 0xb5, 0xb6, 0xb4, 0x3f, 0xdc, 0x4e, 0xff, 0xe2, 0x6b, 0x66, 0x68, 0x5e, 0xb2, 0xb4, 0x14, 0xb2, 0x1b, 0x4d, 0xb1, 0xce, 0xf7, 0x6, 0x58, 0xa7, 0xaf, 0x93, 0x3f, 0x25}}
return a, nil return a, nil
} }
@ -457,7 +458,7 @@ func _0011_allow_webview_permission_requestsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0011_allow_webview_permission_requests.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0011_allow_webview_permission_requests.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -477,7 +478,7 @@ func _0011_allow_webview_permission_requestsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0011_allow_webview_permission_requests.up.sql", size: 88, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0011_allow_webview_permission_requests.up.sql", size: 88, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x43, 0x5f, 0x22, 0x4c, 0x98, 0x1d, 0xc6, 0xf4, 0x89, 0xaf, 0xf4, 0x44, 0xba, 0xf8, 0x28, 0xa7, 0xb5, 0xb9, 0xf0, 0xf2, 0xcb, 0x5, 0x59, 0x7a, 0xc, 0xdf, 0xd3, 0x38, 0xa4, 0xb8, 0x98, 0xc2}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x43, 0x5f, 0x22, 0x4c, 0x98, 0x1d, 0xc6, 0xf4, 0x89, 0xaf, 0xf4, 0x44, 0xba, 0xf8, 0x28, 0xa7, 0xb5, 0xb9, 0xf0, 0xf2, 0xcb, 0x5, 0x59, 0x7a, 0xc, 0xdf, 0xd3, 0x38, 0xa4, 0xb8, 0x98, 0xc2}}
return a, nil return a, nil
} }
@ -497,7 +498,7 @@ func _0012_pending_transactionsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0012_pending_transactions.down.sql", size: 33, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0012_pending_transactions.down.sql", size: 33, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0x41, 0xfe, 0x5c, 0xd8, 0xc3, 0x29, 0xfd, 0x31, 0x78, 0x99, 0x7a, 0xeb, 0x17, 0x62, 0x88, 0x41, 0xb3, 0xe7, 0xb5, 0x5, 0x0, 0x90, 0xa1, 0x7, 0x1a, 0x23, 0x88, 0x81, 0xba, 0x56, 0x9d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0x41, 0xfe, 0x5c, 0xd8, 0xc3, 0x29, 0xfd, 0x31, 0x78, 0x99, 0x7a, 0xeb, 0x17, 0x62, 0x88, 0x41, 0xb3, 0xe7, 0xb5, 0x5, 0x0, 0x90, 0xa1, 0x7, 0x1a, 0x23, 0x88, 0x81, 0xba, 0x56, 0x9d}}
return a, nil return a, nil
} }
@ -517,7 +518,7 @@ func _0012_pending_transactionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0012_pending_transactions.up.sql", size: 321, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0012_pending_transactions.up.sql", size: 321, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0x17, 0xff, 0xd7, 0xa7, 0x49, 0x1e, 0x7b, 0x34, 0x63, 0x7c, 0x53, 0xaa, 0x6b, 0x2d, 0xc8, 0xe0, 0x82, 0x21, 0x90, 0x3a, 0x94, 0xf1, 0xa6, 0xe4, 0x70, 0xe5, 0x85, 0x1a, 0x48, 0x25, 0xb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0x17, 0xff, 0xd7, 0xa7, 0x49, 0x1e, 0x7b, 0x34, 0x63, 0x7c, 0x53, 0xaa, 0x6b, 0x2d, 0xc8, 0xe0, 0x82, 0x21, 0x90, 0x3a, 0x94, 0xf1, 0xa6, 0xe4, 0x70, 0xe5, 0x85, 0x1a, 0x48, 0x25, 0xb}}
return a, nil return a, nil
} }
@ -537,7 +538,7 @@ func _0013_favouritesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0013_favourites.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0013_favourites.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x32, 0xf8, 0x55, 0x13, 0x4f, 0x4a, 0x19, 0x83, 0x9c, 0xda, 0x34, 0xb8, 0x3, 0x54, 0x82, 0x1e, 0x99, 0x36, 0x6b, 0x42, 0x3, 0xf6, 0x43, 0xde, 0xe6, 0x32, 0xb6, 0xdf, 0xe2, 0x59, 0x8c, 0x84}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x32, 0xf8, 0x55, 0x13, 0x4f, 0x4a, 0x19, 0x83, 0x9c, 0xda, 0x34, 0xb8, 0x3, 0x54, 0x82, 0x1e, 0x99, 0x36, 0x6b, 0x42, 0x3, 0xf6, 0x43, 0xde, 0xe6, 0x32, 0xb6, 0xdf, 0xe2, 0x59, 0x8c, 0x84}}
return a, nil return a, nil
} }
@ -557,7 +558,7 @@ func _0013_favouritesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0013_favourites.up.sql", size: 132, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0013_favourites.up.sql", size: 132, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbe, 0x1, 0x27, 0x38, 0x76, 0xf5, 0xcb, 0x61, 0xda, 0x5b, 0xce, 0xd9, 0x8b, 0x18, 0x77, 0x61, 0x84, 0xe7, 0x22, 0xe2, 0x13, 0x99, 0xab, 0x32, 0xbc, 0xbe, 0xed, 0x1f, 0x2f, 0xb0, 0xe4, 0x8d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbe, 0x1, 0x27, 0x38, 0x76, 0xf5, 0xcb, 0x61, 0xda, 0x5b, 0xce, 0xd9, 0x8b, 0x18, 0x77, 0x61, 0x84, 0xe7, 0x22, 0xe2, 0x13, 0x99, 0xab, 0x32, 0xbc, 0xbe, 0xed, 0x1f, 0x2f, 0xb0, 0xe4, 0x8d}}
return a, nil return a, nil
} }
@ -577,7 +578,7 @@ func _0014_add_use_mailserversDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0014_add_use_mailservers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0014_add_use_mailservers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -597,7 +598,7 @@ func _0014_add_use_mailserversUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0014_add_use_mailservers.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0014_add_use_mailservers.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xba, 0x65, 0xbf, 0x1b, 0xc9, 0x6d, 0x45, 0xf2, 0xf5, 0x30, 0x7c, 0xc1, 0xde, 0xb8, 0xe3, 0x3f, 0xa9, 0x2f, 0x9f, 0xea, 0x1, 0x29, 0x29, 0x65, 0xe7, 0x38, 0xab, 0xa4, 0x62, 0xf, 0xd0}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xba, 0x65, 0xbf, 0x1b, 0xc9, 0x6d, 0x45, 0xf2, 0xf5, 0x30, 0x7c, 0xc1, 0xde, 0xb8, 0xe3, 0x3f, 0xa9, 0x2f, 0x9f, 0xea, 0x1, 0x29, 0x29, 0x65, 0xe7, 0x38, 0xab, 0xa4, 0x62, 0xf, 0xd0}}
return a, nil return a, nil
} }
@ -617,7 +618,7 @@ func _0015_link_previewsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0015_link_previews.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0015_link_previews.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -637,7 +638,7 @@ func _0015_link_previewsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0015_link_previews.up.sql", size: 203, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0015_link_previews.up.sql", size: 203, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0xf7, 0x38, 0x25, 0xa6, 0xfc, 0x6b, 0x9, 0xe4, 0xd9, 0xbf, 0x58, 0x7b, 0x80, 0xd8, 0x48, 0x63, 0xde, 0xa5, 0x5e, 0x30, 0xa3, 0xeb, 0x68, 0x8e, 0x6a, 0x9f, 0xfd, 0xf4, 0x46, 0x41, 0x34}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0xf7, 0x38, 0x25, 0xa6, 0xfc, 0x6b, 0x9, 0xe4, 0xd9, 0xbf, 0x58, 0x7b, 0x80, 0xd8, 0x48, 0x63, 0xde, 0xa5, 0x5e, 0x30, 0xa3, 0xeb, 0x68, 0x8e, 0x6a, 0x9f, 0xfd, 0xf4, 0x46, 0x41, 0x34}}
return a, nil return a, nil
} }
@ -657,7 +658,7 @@ func _0016_local_notifications_preferencesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0016_local_notifications_preferences.down.sql", size: 43, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0016_local_notifications_preferences.down.sql", size: 43, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x50, 0xc7, 0xdd, 0x53, 0x9c, 0x5d, 0x1e, 0xb5, 0x71, 0x25, 0x50, 0x58, 0xcf, 0x6d, 0xbe, 0x5a, 0x8, 0x12, 0xc9, 0x13, 0xd, 0x9a, 0x3d, 0x4b, 0x7a, 0x2f, 0x1b, 0xe5, 0x23, 0x52, 0x78}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x50, 0xc7, 0xdd, 0x53, 0x9c, 0x5d, 0x1e, 0xb5, 0x71, 0x25, 0x50, 0x58, 0xcf, 0x6d, 0xbe, 0x5a, 0x8, 0x12, 0xc9, 0x13, 0xd, 0x9a, 0x3d, 0x4b, 0x7a, 0x2f, 0x1b, 0xe5, 0x23, 0x52, 0x78}}
return a, nil return a, nil
} }
@ -677,7 +678,7 @@ func _0016_local_notifications_preferencesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0016_local_notifications_preferences.up.sql", size: 204, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0016_local_notifications_preferences.up.sql", size: 204, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3f, 0x3a, 0x16, 0x25, 0xdf, 0xba, 0x62, 0xd3, 0x81, 0x73, 0xc, 0x10, 0x85, 0xbc, 0x8d, 0xe, 0x1d, 0x62, 0xcb, 0xb, 0x6d, 0x8c, 0x4f, 0x63, 0x5f, 0xe2, 0xd, 0xc5, 0x46, 0xa8, 0x35, 0x5b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3f, 0x3a, 0x16, 0x25, 0xdf, 0xba, 0x62, 0xd3, 0x81, 0x73, 0xc, 0x10, 0x85, 0xbc, 0x8d, 0xe, 0x1d, 0x62, 0xcb, 0xb, 0x6d, 0x8c, 0x4f, 0x63, 0x5f, 0xe2, 0xd, 0xc5, 0x46, 0xa8, 0x35, 0x5b}}
return a, nil return a, nil
} }
@ -697,7 +698,7 @@ func _0017_bookmarksDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0017_bookmarks.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0017_bookmarks.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9a, 0x13, 0x2a, 0x44, 0xb0, 0x3, 0x18, 0x63, 0xb8, 0x33, 0xda, 0x3a, 0xeb, 0xb8, 0xcb, 0xd1, 0x98, 0x29, 0xa7, 0xf0, 0x6, 0x9d, 0xc9, 0x62, 0xe7, 0x89, 0x7f, 0x77, 0xaf, 0xec, 0x6b, 0x8f}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9a, 0x13, 0x2a, 0x44, 0xb0, 0x3, 0x18, 0x63, 0xb8, 0x33, 0xda, 0x3a, 0xeb, 0xb8, 0xcb, 0xd1, 0x98, 0x29, 0xa7, 0xf0, 0x6, 0x9d, 0xc9, 0x62, 0xe7, 0x89, 0x7f, 0x77, 0xaf, 0xec, 0x6b, 0x8f}}
return a, nil return a, nil
} }
@ -717,7 +718,7 @@ func _0017_bookmarksUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0017_bookmarks.up.sql", size: 147, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "0017_bookmarks.up.sql", size: 147, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbc, 0x47, 0xe1, 0xe3, 0xd8, 0xc6, 0x4, 0x6d, 0x5f, 0x2f, 0xa, 0x51, 0xa6, 0x8c, 0x6a, 0xe0, 0x3d, 0x8c, 0x91, 0x47, 0xbc, 0x1, 0x75, 0x46, 0x92, 0x2, 0x18, 0x6e, 0xe3, 0x4f, 0x18, 0x57}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbc, 0x47, 0xe1, 0xe3, 0xd8, 0xc6, 0x4, 0x6d, 0x5f, 0x2f, 0xa, 0x51, 0xa6, 0x8c, 0x6a, 0xe0, 0x3d, 0x8c, 0x91, 0x47, 0xbc, 0x1, 0x75, 0x46, 0x92, 0x2, 0x18, 0x6e, 0xe3, 0x4f, 0x18, 0x57}}
return a, nil return a, nil
} }
@ -737,7 +738,7 @@ func _0018_profile_pictures_visibilityUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0018_profile_pictures_visibility.up.sql", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1612251705, 0)} info := bindataFileInfo{name: "0018_profile_pictures_visibility.up.sql", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xe3, 0xc5, 0xec, 0x83, 0x55, 0x45, 0x57, 0x7a, 0xaa, 0xd2, 0xa7, 0x59, 0xa7, 0x87, 0xef, 0x63, 0x19, 0x9c, 0x46, 0x9c, 0xc5, 0x32, 0x89, 0xa4, 0x68, 0x70, 0xd8, 0x83, 0x43, 0xa4, 0x72}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xe3, 0xc5, 0xec, 0x83, 0x55, 0x45, 0x57, 0x7a, 0xaa, 0xd2, 0xa7, 0x59, 0xa7, 0x87, 0xef, 0x63, 0x19, 0x9c, 0x46, 0x9c, 0xc5, 0x32, 0x89, 0xa4, 0x68, 0x70, 0xd8, 0x83, 0x43, 0xa4, 0x72}}
return a, nil return a, nil
} }
@ -757,11 +758,31 @@ func _0019_blocks_ranges_extra_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0019_blocks_ranges_extra_data.up.sql", size: 89, mode: os.FileMode(0644), modTime: time.Unix(1615889815, 0)} info := bindataFileInfo{name: "0019_blocks_ranges_extra_data.up.sql", size: 89, mode: os.FileMode(0644), modTime: time.Unix(1615901672, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa3, 0x96, 0x32, 0x58, 0xf0, 0xb9, 0xe1, 0x70, 0x81, 0xca, 0x8d, 0x45, 0x57, 0x8a, 0x7, 0x5d, 0x9e, 0x2a, 0x30, 0xb, 0xad, 0x5f, 0xf8, 0xd4, 0x30, 0x94, 0x73, 0x37, 0x8d, 0xc1, 0x9a, 0xed}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa3, 0x96, 0x32, 0x58, 0xf0, 0xb9, 0xe1, 0x70, 0x81, 0xca, 0x8d, 0x45, 0x57, 0x8a, 0x7, 0x5d, 0x9e, 0x2a, 0x30, 0xb, 0xad, 0x5f, 0xf8, 0xd4, 0x30, 0x94, 0x73, 0x37, 0x8d, 0xc1, 0x9a, 0xed}}
return a, nil return a, nil
} }
var __0020_metricsUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xcc\x4d\xaa\xc2\x30\x14\xc5\xf1\x79\x57\x71\x86\xef\x81\x9b\x88\xf5\x16\x0b\x69\x95\xf4\x56\x3a\x0b\xa1\x5e\xa4\x60\x3f\x48\x62\xc0\xdd\x8b\x8a\x4e\xa4\x67\xfa\x3b\xfc\x73\x43\x8a\x09\xac\xb6\x9a\x50\x16\xa8\x0f\x0c\xea\xca\x86\x1b\xb8\x65\xb1\xa3\x44\x3f\xf4\x01\x7f\x19\xde\x93\x24\x53\xc4\x49\x99\x7c\xaf\xcc\xeb\x5d\xb7\x5a\x6f\x3e\x9c\xdc\xf5\x26\x60\xea\xf8\xd7\x9e\xbd\x24\x3e\x0c\xf3\xb4\x1e\x98\x17\xf1\x2e\x0e\xd3\xc5\x86\x7b\x88\x32\xae\x3f\x7b\x2f\x2e\xca\xd9\xba\x08\x2e\x2b\x6a\x58\x55\x47\xec\xa8\x50\xad\x66\xe4\xad\x31\x54\xb3\xfd\x4a\xf6\x9f\x3d\x02\x00\x00\xff\xff\x95\xc5\x25\x15\xeb\x00\x00\x00")
func _0020_metricsUpSqlBytes() ([]byte, error) {
return bindataRead(
__0020_metricsUpSql,
"0020_metrics.up.sql",
)
}
func _0020_metricsUpSql() (*asset, error) {
bytes, err := _0020_metricsUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "0020_metrics.up.sql", size: 235, mode: os.FileMode(0644), modTime: time.Unix(1615973409, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe8, 0x32, 0xbc, 0xb6, 0x9b, 0x5a, 0x8f, 0x9f, 0x4c, 0x90, 0x81, 0x3e, 0x2e, 0xd1, 0x23, 0xcd, 0xf1, 0x83, 0x35, 0xca, 0x66, 0x87, 0x52, 0x4e, 0x30, 0x3e, 0x4f, 0xa8, 0xfd, 0x30, 0x16, 0xbd}}
return a, nil
}
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00") var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
func docGoBytes() ([]byte, error) { func docGoBytes() ([]byte, error) {
@ -777,7 +798,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil return a, nil
} }
@ -939,6 +960,8 @@ var _bindata = map[string]func() (*asset, error){
"0019_blocks_ranges_extra_data.up.sql": _0019_blocks_ranges_extra_dataUpSql, "0019_blocks_ranges_extra_data.up.sql": _0019_blocks_ranges_extra_dataUpSql,
"0020_metrics.up.sql": _0020_metricsUpSql,
"doc.go": docGo, "doc.go": docGo,
} }
@ -1016,6 +1039,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
"0017_bookmarks.up.sql": &bintree{_0017_bookmarksUpSql, map[string]*bintree{}}, "0017_bookmarks.up.sql": &bintree{_0017_bookmarksUpSql, map[string]*bintree{}},
"0018_profile_pictures_visibility.up.sql": &bintree{_0018_profile_pictures_visibilityUpSql, map[string]*bintree{}}, "0018_profile_pictures_visibility.up.sql": &bintree{_0018_profile_pictures_visibilityUpSql, map[string]*bintree{}},
"0019_blocks_ranges_extra_data.up.sql": &bintree{_0019_blocks_ranges_extra_dataUpSql, map[string]*bintree{}}, "0019_blocks_ranges_extra_data.up.sql": &bintree{_0019_blocks_ranges_extra_dataUpSql, map[string]*bintree{}},
"0020_metrics.up.sql": &bintree{_0020_metricsUpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}}, "doc.go": &bintree{docGo, map[string]*bintree{}},
}} }}

View File

@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS app_metrics (
event VARCHAR NOT NULL,
value TEXT NOT NULL,
app_version VARCHAR NOT NULL,
operating_system VARCHAR NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
)
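
The created_at column is filled in by the database through the DEFAULT, which is why it does not appear in the AppMetric struct or in the INSERT inside SaveAppMetrics below. A minimal sketch of reading it back, assuming an *sql.DB opened on the app database; the helper name is illustrative and not part of this change.

package main

import "database/sql"

// latestEvents reads the most recently recorded events, ordered by the
// server-side created_at timestamp that the migration's DEFAULT fills in.
func latestEvents(appDB *sql.DB) ([]string, error) {
	rows, err := appDB.Query("SELECT event FROM app_metrics ORDER BY created_at DESC LIMIT 5")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var events []string
	for rows.Next() {
		var event string
		if err := rows.Scan(&event); err != nil {
			return nil, err
		}
		events = append(events, event)
	}
	return events, rows.Err()
}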

appmetrics/database.go (new file, 156 lines)
View File

@ -0,0 +1,156 @@
package appmetrics
import (
"database/sql"
"encoding/json"
"errors"
"strings"
"github.com/xeipuuv/gojsonschema"
)
type AppMetricEventType string
// Value is `json.RawMessage` so we can send any json shape, including strings
// Validation is handled using JSON schemas defined in validators.go, instead of Golang structs
type AppMetric struct {
Event AppMetricEventType `json:"event"`
Value json.RawMessage `json:"value"`
AppVersion string `json:"app_version"`
OS string `json:"os"`
}
type AppMetricValidationError struct {
Metric AppMetric
Errors []gojsonschema.ResultError
}
const (
// status-react navigation events
NavigationNavigateToCofx AppMetricEventType = "navigation/navigate-to"
)
// EventSchemaMap Every event should have a schema attached
var EventSchemaMap = map[AppMetricEventType]interface{}{
NavigationNavigateToCofx: NavigationNavigateToCofxSchema,
}
func NewDB(db *sql.DB) *Database {
return &Database{db: db}
}
// Database sql wrapper for operations with app metrics objects.
type Database struct {
db *sql.DB
}
// Close closes database.
func (db Database) Close() error {
return db.db.Close()
}
func jsonschemaErrorsToError(validationErrors []AppMetricValidationError) error {
var fieldErrors []string
for _, appMetricValidationError := range validationErrors {
metric := appMetricValidationError.Metric
errors := appMetricValidationError.Errors
var errorDesc string = "Error in event: " + string(metric.Event) + " - "
for _, e := range errors {
errorDesc = errorDesc + "value." + e.Context().String() + ":" + e.Description()
}
fieldErrors = append(fieldErrors, errorDesc)
}
return errors.New(strings.Join(fieldErrors[:], "/ "))
}
func (db *Database) ValidateAppMetrics(appMetrics []AppMetric) (err error) {
var calculatedErrors []AppMetricValidationError
for _, metric := range appMetrics {
schema := EventSchemaMap[metric.Event]
if schema == nil {
return errors.New("No schema defined for: " + string(metric.Event))
}
schemaLoader := gojsonschema.NewGoLoader(schema)
valLoader := gojsonschema.NewStringLoader(string(metric.Value))
res, err := gojsonschema.Validate(schemaLoader, valLoader)
if err != nil {
return err
}
// validate all metrics and save errors
if !res.Valid() {
calculatedErrors = append(calculatedErrors, AppMetricValidationError{metric, res.Errors()})
}
}
if len(calculatedErrors) > 0 {
return jsonschemaErrorsToError(calculatedErrors)
}
return
}
func (db *Database) SaveAppMetrics(appMetrics []AppMetric) (err error) {
var (
tx *sql.Tx
insert *sql.Stmt
)
// make sure that the shape of the metric is the same as expected
err = db.ValidateAppMetrics(appMetrics)
if err != nil {
return err
}
// start txn
tx, err = db.db.Begin()
if err != nil {
return err
}
defer func() {
if err == nil {
err = tx.Commit()
return
}
_ = tx.Rollback()
}()
insert, err = tx.Prepare("INSERT INTO app_metrics (event, value, app_version, operating_system) VALUES (?, ?, ?, ?)")
if err != nil {
return err
}
for _, metric := range appMetrics {
_, err = insert.Exec(metric.Event, metric.Value, metric.AppVersion, metric.OS)
if err != nil {
return
}
}
return
}
func (db *Database) GetAppMetrics(limit int, offset int) (appMetrics []AppMetric, err error) {
rows, err := db.db.Query("SELECT event, value, app_version, operating_system FROM app_metrics LIMIT ? OFFSET ?", limit, offset)
if err != nil {
return nil, err
}
defer rows.Close()
for rows.Next() {
metric := AppMetric{}
err := rows.Scan(&metric.Event, &metric.Value, &metric.AppVersion, &metric.OS)
if err != nil {
return nil, err
}
appMetrics = append(appMetrics, metric)
}
return appMetrics, nil
}
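
GetAppMetrics pages with LIMIT/OFFSET. A brief sketch, written as if inside package appmetrics, of walking the table page by page; the helper name and page size are illustrative, not part of this change.

// drainMetrics shows how the limit/offset parameters of GetAppMetrics
// can be used to read the whole table in fixed-size pages.
func drainMetrics(db *Database) ([]AppMetric, error) {
	const pageSize = 100
	var all []AppMetric
	for offset := 0; ; offset += pageSize {
		page, err := db.GetAppMetrics(pageSize, offset)
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
		// A short page means the table is exhausted.
		if len(page) < pageSize {
			return all, nil
		}
	}
}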

View File

@ -0,0 +1,42 @@
package appmetrics
import (
"encoding/json"
"io/ioutil"
"os"
"testing"
"github.com/status-im/status-go/appdatabase"
"github.com/stretchr/testify/require"
)
func setupTestDB(t *testing.T) (*Database, func()) {
tmpfile, err := ioutil.TempFile("", "appmetrics-tests-")
require.NoError(t, err)
db, err := appdatabase.InitializeDB(tmpfile.Name(), "appmetrics-tests")
require.NoError(t, err)
return NewDB(db), func() {
require.NoError(t, db.Close())
require.NoError(t, os.Remove(tmpfile.Name()))
}
}
func TestSaveAppMetrics(t *testing.T) {
db, stop := setupTestDB(t)
defer stop()
// value uses a Go raw string literal (backticks) so the JSON can be written verbatim;
// gojsonschema then treats the string as a stringified JSON document
appMetrics := []AppMetric{
{Event: NavigationNavigateToCofx, Value: json.RawMessage(`{"view_id": "some-view-id", "params": {"screen": "allowed-screen-name"}}`), OS: "android", AppVersion: "1.11"},
}
err := db.SaveAppMetrics(appMetrics)
require.NoError(t, err)
res, err := db.GetAppMetrics(10, 0)
require.NoError(t, err)
require.Equal(t, appMetrics, res)
}
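
For contrast with the passing case above, a hedged sketch of the failure path; this test is not part of the change, it simply feeds ValidateAppMetrics a screen name outside the enum allowed by NavigationNavigateToCofxSchema.

func TestValidateAppMetricsRejectsUnknownScreen(t *testing.T) {
	db, stop := setupTestDB(t)
	defer stop()

	appMetrics := []AppMetric{
		{Event: NavigationNavigateToCofx, Value: json.RawMessage(`{"view_id": "some-view-id", "params": {"screen": "not-allowed"}}`), OS: "android", AppVersion: "1.11"},
	}

	// The error is built by jsonschemaErrorsToError and names the offending event.
	err := db.ValidateAppMetrics(appMetrics)
	require.Error(t, err)
	require.Contains(t, err.Error(), "Error in event: navigation/navigate-to")
}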

appmetrics/validators.go (new file, 23 lines)
View File

@ -0,0 +1,23 @@
package appmetrics
var NavigationNavigateToCofxSchema = map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"view_id": map[string]interface{}{
"type": "string",
"maxLength": 16,
},
"params": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"screen": map[string]interface{}{
"enum": []string{"allowed-screen-name"},
},
},
"additionalProperties": false,
"required": []string{"screen"},
},
},
"additionalProperties": false,
"required": []string{"view_id", "params"},
}
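
ValidateAppMetrics refuses any event without an entry in EventSchemaMap, so adding a new metric means adding both the event constant and its schema. A hypothetical sketch, written as if inside package appmetrics; the event name and schema below are invented for illustration and do not exist in this change.

// Hypothetical example only.
const ScreenOpenedExample AppMetricEventType = "navigation/screen-opened-example"

var ScreenOpenedExampleSchema = map[string]interface{}{
	"type": "object",
	"properties": map[string]interface{}{
		"screen": map[string]interface{}{"type": "string", "maxLength": 32},
	},
	"additionalProperties": false,
	"required":             []string{"screen"},
}

func init() {
	// Without this registration, SaveAppMetrics would fail with
	// "No schema defined for: navigation/screen-opened-example".
	EventSchemaMap[ScreenOpenedExample] = ScreenOpenedExampleSchema
}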

View File

@ -0,0 +1,41 @@
package appmetrics
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/xeipuuv/gojsonschema"
)
func TestNavigationNavigateToCofxSchema(t *testing.T) {
NavigationNavigateToCofxLoader := gojsonschema.NewGoLoader(NavigationNavigateToCofxSchema)
schema, _ := gojsonschema.NewSchema(NavigationNavigateToCofxLoader)
// test correct json
validSampleVal := `{"view_id": "less-than-16", "params": {"screen": "allowed-screen-name"}}`
doc := gojsonschema.NewStringLoader(validSampleVal)
result, err := schema.Validate(doc)
require.NoError(t, err)
require.True(t, result.Valid())
// test incorrect json
invalidSampleVal := `{"view_id": "more-than-16-chars", "params": {"screen": "not-allowed-screen-name"}}`
doc = gojsonschema.NewStringLoader(invalidSampleVal)
result, err = schema.Validate(doc)
require.NoError(t, err)
require.False(t, result.Valid())
// test extra params
extraParamsVal := `{"view_id": "valid-view", "params": {"screen": "allowed-screen-name"}, "fishy-key": "fishy-val"}`
doc = gojsonschema.NewStringLoader(extraParamsVal)
result, err = schema.Validate(doc)
require.NoError(t, err)
require.False(t, result.Valid())
// test less params
lessParamsVal := `{"view_id": "valid-view"}`
doc = gojsonschema.NewStringLoader(lessParamsVal)
result, err = schema.Validate(doc)
require.NoError(t, err)
require.False(t, result.Valid())
}

go.mod (3 changed lines)
View File

@@ -69,10 +69,13 @@ require (
 	github.com/tsenart/tb v0.0.0-20181025101425-0d2499c8b6e9
 	github.com/vacp2p/mvds v0.0.24-0.20201124060106-26d8e94130d8
 	github.com/wealdtech/go-ens/v3 v3.3.0
+	github.com/xeipuuv/gojsonschema v1.2.0
 	go.uber.org/multierr v1.4.0 // indirect
 	go.uber.org/zap v1.13.0
 	golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c
 	golang.org/x/image v0.0.0-20200927104501-e162460cd6b5
+	golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08 // indirect
+	golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd // indirect
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
 	golang.org/x/text v0.3.3 // indirect
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect

go.sum (20 changed lines)
View File

@ -10,6 +10,7 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7O
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74= github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y=
@ -203,7 +204,6 @@ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
@ -720,6 +720,12 @@ github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7V
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@ -759,6 +765,9 @@ golang.org/x/crypto v0.0.0-20191119213627-4f8c1d86b1ba/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4=
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM= golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -767,9 +776,15 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08 h1:h+GZ3ubjuWaQjGe8owMGcmMVCqs0xYJtRG5y2bpHaqU=
+golang.org/x/mobile v0.0.0-20210220033013-bdb1ca9a1e08/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd h1:ePuNC7PZ6O5BzgPn9bZayERXBdfZjUYoXEf5BTfDfh8=
+golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -848,6 +863,7 @@ golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGm
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -857,6 +873,8 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191109212701-97ad0ed33101 h1:LCmXVkvpQCDj724eX6irUTPCJP5GelFHxqGSWL2D1R0=
 golang.org/x/tools v0.0.0-20191109212701-97ad0ed33101/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200211045251-2de505fc5306 h1:5gd/+xxg4X7hx+44aG6Sdh17vBwwRFacMaSfqF4wkWk=
 golang.org/x/tools v0.0.0-20200211045251-2de505fc5306/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -86,7 +86,7 @@ func _1557732988_initialize_dbDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1557732988_initialize_db.down.sql", size: 72, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "1557732988_initialize_db.down.sql", size: 72, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x40, 0x78, 0xb7, 0x71, 0x3c, 0x20, 0x3b, 0xc9, 0xb, 0x2f, 0x49, 0xe4, 0xff, 0x1c, 0x84, 0x54, 0xa1, 0x30, 0xe3, 0x90, 0xf8, 0x73, 0xda, 0xb0, 0x2a, 0xea, 0x8e, 0xf1, 0x82, 0xe7, 0xd2}}
 	return a, nil
 }
@@ -106,7 +106,7 @@ func _1557732988_initialize_dbUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1557732988_initialize_db.up.sql", size: 234, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "1557732988_initialize_db.up.sql", size: 234, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xa, 0x31, 0xf, 0x94, 0xe, 0xd7, 0xd6, 0xaa, 0x22, 0xd6, 0x6c, 0x7a, 0xbc, 0xad, 0x6a, 0xed, 0x2e, 0x7a, 0xf0, 0x24, 0x81, 0x87, 0x14, 0xe, 0x1c, 0x8a, 0xf1, 0x45, 0xaf, 0x9e, 0x85}}
 	return a, nil
 }
@@ -126,7 +126,7 @@ func staticGo() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "static.go", size: 178, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "static.go", size: 178, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x8a, 0xf4, 0x27, 0x24, 0x9d, 0x2a, 0x1, 0x7b, 0x54, 0xea, 0xae, 0x4a, 0x35, 0x40, 0x92, 0xb5, 0xf9, 0xb3, 0x54, 0x3e, 0x3a, 0x1a, 0x2b, 0xae, 0xfb, 0x9e, 0x82, 0xeb, 0x4c, 0xf, 0x6}}
 	return a, nil
 }
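Note that these generated-file hunks, like the similar ones further down, only refresh the modTime baked into each bindataFileInfo; the embedded SQL bytes and their sha256 digests are unchanged. As a rough illustration of what these generated helpers are for, here is a minimal sketch that lists the embedded migration assets, assuming the conventional go-bindata API (AssetNames/Asset); the import path is illustrative only and not taken from this diff.

package main

import (
	"fmt"
	"log"

	// Illustrative import path; any go-bindata-generated migrations package exposes the same helpers.
	migrations "github.com/status-im/status-go/appdatabase/migrations"
)

func main() {
	// AssetNames and Asset are the standard helpers emitted by go-bindata.
	for _, name := range migrations.AssetNames() {
		blob, err := migrations.Asset(name)
		if err != nil {
			log.Fatalf("embedded asset %s: %v", name, err)
		}
		fmt.Printf("%s: %d bytes\n", name, len(blob))
	}
}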


@@ -90,7 +90,7 @@ func _0001_accountsDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "0001_accounts.down.sql", size: 21, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "0001_accounts.down.sql", size: 21, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0x61, 0x4c, 0x18, 0xfc, 0xc, 0xdf, 0x5c, 0x1f, 0x5e, 0xd3, 0xbd, 0xfa, 0x12, 0x5e, 0x8d, 0x8d, 0x8b, 0xb9, 0x5f, 0x99, 0x46, 0x63, 0xa5, 0xe3, 0xa6, 0x8a, 0x4, 0xf1, 0x73, 0x8a, 0xe9}}
 	return a, nil
 }
@@ -110,7 +110,7 @@ func _0001_accountsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "0001_accounts.up.sql", size: 163, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "0001_accounts.up.sql", size: 163, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf2, 0xfa, 0x99, 0x8e, 0x96, 0xb3, 0x13, 0x6c, 0x1f, 0x6, 0x27, 0xc5, 0xd2, 0xd4, 0xe0, 0xa5, 0x26, 0x82, 0xa7, 0x26, 0xf2, 0x68, 0x9d, 0xed, 0x9c, 0x3d, 0xbb, 0xdc, 0x37, 0x28, 0xbc, 0x1}}
 	return a, nil
 }
@@ -130,7 +130,7 @@ func _1605007189_identity_imagesDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1605007189_identity_images.down.sql", size: 29, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1605007189_identity_images.down.sql", size: 29, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2f, 0xcf, 0xa7, 0xae, 0xd5, 0x4f, 0xcd, 0x14, 0x63, 0x9, 0xbe, 0x39, 0x49, 0x18, 0x96, 0xb2, 0xa3, 0x8, 0x7d, 0x41, 0xdb, 0x50, 0x5d, 0xf5, 0x4d, 0xa2, 0xd, 0x8f, 0x57, 0x79, 0x77, 0x67}}
 	return a, nil
 }
@@ -150,7 +150,7 @@ func _1605007189_identity_imagesUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1605007189_identity_images.up.sql", size: 268, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1605007189_identity_images.up.sql", size: 268, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x50, 0xb6, 0xc1, 0x5c, 0x76, 0x72, 0x6b, 0x22, 0x34, 0xdc, 0x96, 0xdc, 0x2b, 0xfd, 0x2d, 0xbe, 0xcc, 0x1e, 0xd4, 0x5, 0x93, 0xd, 0xc2, 0x51, 0xf3, 0x1a, 0xef, 0x2b, 0x26, 0xa4, 0xeb, 0x65}}
 	return a, nil
 }
@@ -170,7 +170,7 @@ func _1606224181_drop_photo_path_from_accountsDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1606224181_drop_photo_path_from_accounts.down.sql", size: 892, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1606224181_drop_photo_path_from_accounts.down.sql", size: 892, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x90, 0x24, 0x17, 0x7, 0x80, 0x93, 0x6f, 0x8d, 0x5d, 0xaa, 0x8c, 0x79, 0x15, 0x5d, 0xb3, 0x19, 0xd7, 0xd8, 0x39, 0xf9, 0x3a, 0x63, 0x8f, 0x81, 0x15, 0xb6, 0xd6, 0x9a, 0x37, 0xa8, 0x8e, 0x9b}}
 	return a, nil
 }
@@ -190,7 +190,7 @@ func _1606224181_drop_photo_path_from_accountsUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1606224181_drop_photo_path_from_accounts.up.sql", size: 866, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1606224181_drop_photo_path_from_accounts.up.sql", size: 866, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xff, 0x4c, 0x97, 0xee, 0xef, 0x82, 0xb8, 0x6c, 0x71, 0xbb, 0x50, 0x7b, 0xe6, 0xd9, 0x22, 0x31, 0x7c, 0x1a, 0xfe, 0x91, 0x28, 0xf6, 0x6, 0x36, 0xe, 0xb1, 0xf1, 0xc8, 0x25, 0xac, 0x7e, 0xd6}}
 	return a, nil
 }
@@ -210,7 +210,7 @@ func docGo() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
 	return a, nil
 }


@ -100,7 +100,7 @@ func _1536754952_initial_schemaDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1536754952_initial_schema.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1536754952_initial_schema.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x44, 0xcf, 0x76, 0x71, 0x1f, 0x5e, 0x9a, 0x43, 0xd8, 0xcd, 0xb8, 0xc3, 0x70, 0xc3, 0x7f, 0xfc, 0x90, 0xb4, 0x25, 0x1e, 0xf4, 0x66, 0x20, 0xb8, 0x33, 0x7e, 0xb0, 0x76, 0x1f, 0xc, 0xc0, 0x75}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x44, 0xcf, 0x76, 0x71, 0x1f, 0x5e, 0x9a, 0x43, 0xd8, 0xcd, 0xb8, 0xc3, 0x70, 0xc3, 0x7f, 0xfc, 0x90, 0xb4, 0x25, 0x1e, 0xf4, 0x66, 0x20, 0xb8, 0x33, 0x7e, 0xb0, 0x76, 0x1f, 0xc, 0xc0, 0x75}}
return a, nil return a, nil
} }
@ -120,7 +120,7 @@ func _1536754952_initial_schemaUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1536754952_initial_schema.up.sql", size: 962, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1536754952_initial_schema.up.sql", size: 962, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x90, 0x5a, 0x59, 0x3e, 0x3, 0xe2, 0x3c, 0x81, 0x42, 0xcd, 0x4c, 0x9a, 0xe8, 0xda, 0x93, 0x2b, 0x70, 0xa4, 0xd5, 0x29, 0x3e, 0xd5, 0xc9, 0x27, 0xb6, 0xb7, 0x65, 0xff, 0x0, 0xcb, 0xde}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x90, 0x5a, 0x59, 0x3e, 0x3, 0xe2, 0x3c, 0x81, 0x42, 0xcd, 0x4c, 0x9a, 0xe8, 0xda, 0x93, 0x2b, 0x70, 0xa4, 0xd5, 0x29, 0x3e, 0xd5, 0xc9, 0x27, 0xb6, 0xb7, 0x65, 0xff, 0x0, 0xcb, 0xde}}
return a, nil return a, nil
} }
@ -140,7 +140,7 @@ func _1539249977_update_ratchet_infoDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1539249977_update_ratchet_info.down.sql", size: 311, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1539249977_update_ratchet_info.down.sql", size: 311, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0xa4, 0xeb, 0xa0, 0xe6, 0xa0, 0xd4, 0x48, 0xbb, 0xad, 0x6f, 0x7d, 0x67, 0x8c, 0xbd, 0x25, 0xde, 0x1f, 0x73, 0x9a, 0xbb, 0xa8, 0xc9, 0x30, 0xb7, 0xa9, 0x7c, 0xaf, 0xb5, 0x1, 0x61, 0xdd}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0xa4, 0xeb, 0xa0, 0xe6, 0xa0, 0xd4, 0x48, 0xbb, 0xad, 0x6f, 0x7d, 0x67, 0x8c, 0xbd, 0x25, 0xde, 0x1f, 0x73, 0x9a, 0xbb, 0xa8, 0xc9, 0x30, 0xb7, 0xa9, 0x7c, 0xaf, 0xb5, 0x1, 0x61, 0xdd}}
return a, nil return a, nil
} }
@ -160,7 +160,7 @@ func _1539249977_update_ratchet_infoUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1539249977_update_ratchet_info.up.sql", size: 368, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1539249977_update_ratchet_info.up.sql", size: 368, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x8e, 0xbf, 0x6f, 0xa, 0xc0, 0xe1, 0x3c, 0x42, 0x28, 0x88, 0x1d, 0xdb, 0xba, 0x1c, 0x83, 0xec, 0xba, 0xd3, 0x5f, 0x5c, 0x77, 0x5e, 0xa7, 0x46, 0x36, 0xec, 0x69, 0xa, 0x4b, 0x17, 0x79}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x8e, 0xbf, 0x6f, 0xa, 0xc0, 0xe1, 0x3c, 0x42, 0x28, 0x88, 0x1d, 0xdb, 0xba, 0x1c, 0x83, 0xec, 0xba, 0xd3, 0x5f, 0x5c, 0x77, 0x5e, 0xa7, 0x46, 0x36, 0xec, 0x69, 0xa, 0x4b, 0x17, 0x79}}
return a, nil return a, nil
} }
@ -180,7 +180,7 @@ func _1540715431_add_versionDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1540715431_add_version.down.sql", size: 127, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1540715431_add_version.down.sql", size: 127, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x9, 0x4, 0xe3, 0x76, 0x2e, 0xb8, 0x9, 0x23, 0xf0, 0x70, 0x93, 0xc4, 0x50, 0xe, 0x9d, 0x84, 0x22, 0x8c, 0x94, 0xd3, 0x24, 0x9, 0x9a, 0xc1, 0xa1, 0x48, 0x45, 0xfd, 0x40, 0x6e, 0xe6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x9, 0x4, 0xe3, 0x76, 0x2e, 0xb8, 0x9, 0x23, 0xf0, 0x70, 0x93, 0xc4, 0x50, 0xe, 0x9d, 0x84, 0x22, 0x8c, 0x94, 0xd3, 0x24, 0x9, 0x9a, 0xc1, 0xa1, 0x48, 0x45, 0xfd, 0x40, 0x6e, 0xe6}}
return a, nil return a, nil
} }
@ -200,7 +200,7 @@ func _1540715431_add_versionUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1540715431_add_version.up.sql", size: 265, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1540715431_add_version.up.sql", size: 265, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc7, 0x4c, 0x36, 0x96, 0xdf, 0x16, 0x10, 0xa6, 0x27, 0x1a, 0x79, 0x8b, 0x42, 0x83, 0x23, 0xc, 0x7e, 0xb6, 0x3d, 0x2, 0xda, 0xa4, 0xb4, 0xd, 0x27, 0x55, 0xba, 0xdc, 0xb2, 0x88, 0x8f, 0xa6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc7, 0x4c, 0x36, 0x96, 0xdf, 0x16, 0x10, 0xa6, 0x27, 0x1a, 0x79, 0x8b, 0x42, 0x83, 0x23, 0xc, 0x7e, 0xb6, 0x3d, 0x2, 0xda, 0xa4, 0xb4, 0xd, 0x27, 0x55, 0xba, 0xdc, 0xb2, 0x88, 0x8f, 0xa6}}
return a, nil return a, nil
} }
@ -220,7 +220,7 @@ func _1541164797_add_installationsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1541164797_add_installations.down.sql", size: 26, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1541164797_add_installations.down.sql", size: 26, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xfd, 0xe6, 0xd8, 0xca, 0x3b, 0x38, 0x18, 0xee, 0x0, 0x5f, 0x36, 0x9e, 0x1e, 0xd, 0x19, 0x3e, 0xb4, 0x73, 0x53, 0xe9, 0xa5, 0xac, 0xdd, 0xa1, 0x2f, 0xc7, 0x6c, 0xa8, 0xd9, 0xa, 0x88}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xfd, 0xe6, 0xd8, 0xca, 0x3b, 0x38, 0x18, 0xee, 0x0, 0x5f, 0x36, 0x9e, 0x1e, 0xd, 0x19, 0x3e, 0xb4, 0x73, 0x53, 0xe9, 0xa5, 0xac, 0xdd, 0xa1, 0x2f, 0xc7, 0x6c, 0xa8, 0xd9, 0xa, 0x88}}
return a, nil return a, nil
} }
@ -240,7 +240,7 @@ func _1541164797_add_installationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1541164797_add_installations.up.sql", size: 216, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1541164797_add_installations.up.sql", size: 216, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x18, 0x26, 0xb8, 0x88, 0x47, 0xdb, 0x83, 0xcc, 0xb6, 0x9d, 0x1c, 0x1, 0xae, 0x2f, 0xde, 0x97, 0x82, 0x3, 0x30, 0xa8, 0x63, 0xa1, 0x78, 0x4b, 0xa5, 0x9, 0x8, 0x75, 0xa2, 0x57, 0x81}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x18, 0x26, 0xb8, 0x88, 0x47, 0xdb, 0x83, 0xcc, 0xb6, 0x9d, 0x1c, 0x1, 0xae, 0x2f, 0xde, 0x97, 0x82, 0x3, 0x30, 0xa8, 0x63, 0xa1, 0x78, 0x4b, 0xa5, 0x9, 0x8, 0x75, 0xa2, 0x57, 0x81}}
return a, nil return a, nil
} }
@ -260,7 +260,7 @@ func _1558084410_add_secretDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558084410_add_secret.down.sql", size: 56, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1558084410_add_secret.down.sql", size: 56, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x49, 0xb, 0x65, 0xdf, 0x59, 0xbf, 0xe9, 0x5, 0x5b, 0x6f, 0xd5, 0x3a, 0xb7, 0x57, 0xe8, 0x78, 0x38, 0x73, 0x53, 0x57, 0xf7, 0x24, 0x4, 0xe4, 0xa2, 0x49, 0x22, 0xa2, 0xc6, 0xfd, 0x80, 0xa4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x49, 0xb, 0x65, 0xdf, 0x59, 0xbf, 0xe9, 0x5, 0x5b, 0x6f, 0xd5, 0x3a, 0xb7, 0x57, 0xe8, 0x78, 0x38, 0x73, 0x53, 0x57, 0xf7, 0x24, 0x4, 0xe4, 0xa2, 0x49, 0x22, 0xa2, 0xc6, 0xfd, 0x80, 0xa4}}
return a, nil return a, nil
} }
@ -280,7 +280,7 @@ func _1558084410_add_secretUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558084410_add_secret.up.sql", size: 301, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1558084410_add_secret.up.sql", size: 301, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x32, 0x36, 0x8e, 0x47, 0xb0, 0x8f, 0xc1, 0xc6, 0xf7, 0xc6, 0x9f, 0x2d, 0x44, 0x75, 0x2b, 0x26, 0xec, 0x6, 0xa0, 0x7b, 0xa5, 0xbd, 0xc8, 0x76, 0x8a, 0x82, 0x68, 0x2, 0x42, 0xb5, 0xf4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x32, 0x36, 0x8e, 0x47, 0xb0, 0x8f, 0xc1, 0xc6, 0xf7, 0xc6, 0x9f, 0x2d, 0x44, 0x75, 0x2b, 0x26, 0xec, 0x6, 0xa0, 0x7b, 0xa5, 0xbd, 0xc8, 0x76, 0x8a, 0x82, 0x68, 0x2, 0x42, 0xb5, 0xf4}}
return a, nil return a, nil
} }
@ -300,7 +300,7 @@ func _1558588866_add_versionDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558588866_add_version.down.sql", size: 47, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1558588866_add_version.down.sql", size: 47, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x52, 0x34, 0x3c, 0x46, 0x4a, 0xf0, 0x72, 0x47, 0x6f, 0x49, 0x5c, 0xc7, 0xf9, 0x32, 0xce, 0xc4, 0x3d, 0xfd, 0x61, 0xa1, 0x8b, 0x8f, 0xf2, 0x31, 0x34, 0xde, 0x15, 0x49, 0xa6, 0xde, 0xb9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x52, 0x34, 0x3c, 0x46, 0x4a, 0xf0, 0x72, 0x47, 0x6f, 0x49, 0x5c, 0xc7, 0xf9, 0x32, 0xce, 0xc4, 0x3d, 0xfd, 0x61, 0xa1, 0x8b, 0x8f, 0xf2, 0x31, 0x34, 0xde, 0x15, 0x49, 0xa6, 0xde, 0xb9}}
return a, nil return a, nil
} }
@ -320,7 +320,7 @@ func _1558588866_add_versionUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558588866_add_version.up.sql", size: 57, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1558588866_add_version.up.sql", size: 57, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2a, 0xea, 0x64, 0x39, 0x61, 0x20, 0x83, 0x83, 0xb, 0x2e, 0x79, 0x64, 0xb, 0x53, 0xfa, 0xfe, 0xc6, 0xf7, 0x67, 0x42, 0xd3, 0x4f, 0xdc, 0x7e, 0x30, 0x32, 0xe8, 0x14, 0x41, 0xe9, 0xe7, 0x3b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2a, 0xea, 0x64, 0x39, 0x61, 0x20, 0x83, 0x83, 0xb, 0x2e, 0x79, 0x64, 0xb, 0x53, 0xfa, 0xfe, 0xc6, 0xf7, 0x67, 0x42, 0xd3, 0x4f, 0xdc, 0x7e, 0x30, 0x32, 0xe8, 0x14, 0x41, 0xe9, 0xe7, 0x3b}}
return a, nil return a, nil
} }
@ -340,7 +340,7 @@ func _1559627659_add_contact_codeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1559627659_add_contact_code.down.sql", size: 32, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1559627659_add_contact_code.down.sql", size: 32, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x64, 0x6d, 0xce, 0x24, 0x42, 0x20, 0x8d, 0x4f, 0x37, 0xaa, 0x9d, 0xc, 0x57, 0x98, 0xc1, 0xd1, 0x1a, 0x34, 0xcd, 0x9f, 0x8f, 0x34, 0x86, 0xb3, 0xd3, 0xdc, 0xf1, 0x7d, 0xe5, 0x1b, 0x6e}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x64, 0x6d, 0xce, 0x24, 0x42, 0x20, 0x8d, 0x4f, 0x37, 0xaa, 0x9d, 0xc, 0x57, 0x98, 0xc1, 0xd1, 0x1a, 0x34, 0xcd, 0x9f, 0x8f, 0x34, 0x86, 0xb3, 0xd3, 0xdc, 0xf1, 0x7d, 0xe5, 0x1b, 0x6e}}
return a, nil return a, nil
} }
@ -360,7 +360,7 @@ func _1559627659_add_contact_codeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1559627659_add_contact_code.up.sql", size: 198, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1559627659_add_contact_code.up.sql", size: 198, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x16, 0xf6, 0xc2, 0x62, 0x9c, 0xd2, 0xc9, 0x1e, 0xd8, 0xea, 0xaa, 0xea, 0x95, 0x8f, 0x89, 0x6a, 0x85, 0x5d, 0x9d, 0x99, 0x78, 0x3c, 0x90, 0x66, 0x99, 0x3e, 0x4b, 0x19, 0x62, 0xfb, 0x31, 0x4d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x16, 0xf6, 0xc2, 0x62, 0x9c, 0xd2, 0xc9, 0x1e, 0xd8, 0xea, 0xaa, 0xea, 0x95, 0x8f, 0x89, 0x6a, 0x85, 0x5d, 0x9d, 0x99, 0x78, 0x3c, 0x90, 0x66, 0x99, 0x3e, 0x4b, 0x19, 0x62, 0xfb, 0x31, 0x4d}}
return a, nil return a, nil
} }
@ -380,7 +380,7 @@ func _1561368210_add_installation_metadataDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561368210_add_installation_metadata.down.sql", size: 35, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1561368210_add_installation_metadata.down.sql", size: 35, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa8, 0xde, 0x3f, 0xd2, 0x4a, 0x50, 0x98, 0x56, 0xe3, 0xc0, 0xcd, 0x9d, 0xb0, 0x34, 0x3b, 0xe5, 0x62, 0x18, 0xb5, 0x20, 0xc9, 0x3e, 0xdc, 0x6a, 0x40, 0x36, 0x66, 0xea, 0x51, 0x8c, 0x71, 0xf5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa8, 0xde, 0x3f, 0xd2, 0x4a, 0x50, 0x98, 0x56, 0xe3, 0xc0, 0xcd, 0x9d, 0xb0, 0x34, 0x3b, 0xe5, 0x62, 0x18, 0xb5, 0x20, 0xc9, 0x3e, 0xdc, 0x6a, 0x40, 0x36, 0x66, 0xea, 0x51, 0x8c, 0x71, 0xf5}}
return a, nil return a, nil
} }
@ -400,7 +400,7 @@ func _1561368210_add_installation_metadataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561368210_add_installation_metadata.up.sql", size: 267, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)} info := bindataFileInfo{name: "1561368210_add_installation_metadata.up.sql", size: 267, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0x71, 0x8f, 0x29, 0xb1, 0xaa, 0xd6, 0xd1, 0x8c, 0x17, 0xef, 0x6c, 0xd5, 0x80, 0xb8, 0x2c, 0xc3, 0xfe, 0xec, 0x24, 0x4d, 0xc8, 0x25, 0xd3, 0xb4, 0xcd, 0xa9, 0xac, 0x63, 0x61, 0xb2, 0x9c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0x71, 0x8f, 0x29, 0xb1, 0xaa, 0xd6, 0xd1, 0x8c, 0x17, 0xef, 0x6c, 0xd5, 0x80, 0xb8, 0x2c, 0xc3, 0xfe, 0xec, 0x24, 0x4d, 0xc8, 0x25, 0xd3, 0xb4, 0xcd, 0xa9, 0xac, 0x63, 0x61, 0xb2, 0x9c}}
return a, nil return a, nil
} }
@ -420,7 +420,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1603694100, 0)} info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
return a, nil return a, nil
} }


@ -111,7 +111,7 @@ func _000001_initDownDbSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000001_init.down.db.sql", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1610007618, 0)} info := bindataFileInfo{name: "000001_init.down.db.sql", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5e, 0xbb, 0x3f, 0x1, 0x75, 0x19, 0x70, 0x86, 0xa7, 0x34, 0x40, 0x17, 0x34, 0x3e, 0x18, 0x51, 0x79, 0xd4, 0x22, 0xad, 0x8f, 0x80, 0xcc, 0xa6, 0xcc, 0x6, 0x2b, 0x62, 0x2, 0x47, 0xba, 0xf9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5e, 0xbb, 0x3f, 0x1, 0x75, 0x19, 0x70, 0x86, 0xa7, 0x34, 0x40, 0x17, 0x34, 0x3e, 0x18, 0x51, 0x79, 0xd4, 0x22, 0xad, 0x8f, 0x80, 0xcc, 0xa6, 0xcc, 0x6, 0x2b, 0x62, 0x2, 0x47, 0xba, 0xf9}}
return a, nil return a, nil
} }
@ -131,7 +131,7 @@ func _000001_initUpDbSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000001_init.up.db.sql", size: 2719, mode: os.FileMode(0644), modTime: time.Unix(1610007618, 0)} info := bindataFileInfo{name: "000001_init.up.db.sql", size: 2719, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x60, 0xdc, 0xeb, 0xe, 0xc2, 0x4f, 0x75, 0xa, 0xf6, 0x3e, 0xc7, 0xc4, 0x4, 0xe2, 0xe1, 0xa4, 0x73, 0x2f, 0x4a, 0xad, 0x1a, 0x0, 0xc3, 0x93, 0x9d, 0x77, 0x3e, 0x31, 0x91, 0x77, 0x2e, 0xc8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x60, 0xdc, 0xeb, 0xe, 0xc2, 0x4f, 0x75, 0xa, 0xf6, 0x3e, 0xc7, 0xc4, 0x4, 0xe2, 0xe1, 0xa4, 0x73, 0x2f, 0x4a, 0xad, 0x1a, 0x0, 0xc3, 0x93, 0x9d, 0x77, 0x3e, 0x31, 0x91, 0x77, 0x2e, 0xc8}}
return a, nil return a, nil
} }
@ -151,7 +151,7 @@ func _000002_add_last_ens_clock_valueUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000002_add_last_ens_clock_value.up.sql", size: 77, mode: os.FileMode(0644), modTime: time.Unix(1610007618, 0)} info := bindataFileInfo{name: "000002_add_last_ens_clock_value.up.sql", size: 77, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x3, 0x8f, 0xd5, 0x85, 0x83, 0x47, 0xbe, 0xf9, 0x82, 0x7e, 0x81, 0xa4, 0xbd, 0xaa, 0xd5, 0x98, 0x18, 0x5, 0x2d, 0x82, 0x42, 0x3b, 0x3, 0x50, 0xc3, 0x1e, 0x84, 0x35, 0xf, 0xb6, 0x2b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x3, 0x8f, 0xd5, 0x85, 0x83, 0x47, 0xbe, 0xf9, 0x82, 0x7e, 0x81, 0xa4, 0xbd, 0xaa, 0xd5, 0x98, 0x18, 0x5, 0x2d, 0x82, 0x42, 0x3b, 0x3, 0x50, 0xc3, 0x1e, 0x84, 0x35, 0xf, 0xb6, 0x2b}}
return a, nil return a, nil
} }
@ -171,7 +171,7 @@ func _1586358095_add_replaceUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1586358095_add_replace.up.sql", size: 224, mode: os.FileMode(0644), modTime: time.Unix(1611588719, 0)} info := bindataFileInfo{name: "1586358095_add_replace.up.sql", size: 224, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xb3, 0xa9, 0xc7, 0x7f, 0x9d, 0x8f, 0x43, 0x8c, 0x9e, 0x58, 0x8d, 0x44, 0xbc, 0xfa, 0x6b, 0x5f, 0x3f, 0x5a, 0xbe, 0xe8, 0xb1, 0x16, 0xf, 0x91, 0x2a, 0xa0, 0x71, 0xbb, 0x8d, 0x6b, 0xcb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xb3, 0xa9, 0xc7, 0x7f, 0x9d, 0x8f, 0x43, 0x8c, 0x9e, 0x58, 0x8d, 0x44, 0xbc, 0xfa, 0x6b, 0x5f, 0x3f, 0x5a, 0xbe, 0xe8, 0xb1, 0x16, 0xf, 0x91, 0x2a, 0xa0, 0x71, 0xbb, 0x8d, 0x6b, 0xcb}}
return a, nil return a, nil
} }
@ -191,7 +191,7 @@ func _1588665364_add_image_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1588665364_add_image_data.up.sql", size: 186, mode: os.FileMode(0644), modTime: time.Unix(1611588719, 0)} info := bindataFileInfo{name: "1588665364_add_image_data.up.sql", size: 186, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd6, 0xc6, 0x35, 0xb4, 0x4c, 0x39, 0x96, 0x29, 0x30, 0xda, 0xf4, 0x8f, 0xcb, 0xf1, 0x9f, 0x84, 0xdc, 0x88, 0xd4, 0xd5, 0xbc, 0xb6, 0x5b, 0x46, 0x78, 0x67, 0x76, 0x1a, 0x5, 0x36, 0xdc, 0xe5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd6, 0xc6, 0x35, 0xb4, 0x4c, 0x39, 0x96, 0x29, 0x30, 0xda, 0xf4, 0x8f, 0xcb, 0xf1, 0x9f, 0x84, 0xdc, 0x88, 0xd4, 0xd5, 0xbc, 0xb6, 0x5b, 0x46, 0x78, 0x67, 0x76, 0x1a, 0x5, 0x36, 0xdc, 0xe5}}
return a, nil return a, nil
} }
@ -211,7 +211,7 @@ func _1589365189_add_pow_targetUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1589365189_add_pow_target.up.sql", size: 66, mode: os.FileMode(0644), modTime: time.Unix(1611588719, 0)} info := bindataFileInfo{name: "1589365189_add_pow_target.up.sql", size: 66, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x3a, 0xe2, 0x2e, 0x7d, 0xaf, 0xbb, 0xcc, 0x21, 0xa1, 0x7a, 0x41, 0x9a, 0xd0, 0xbb, 0xa9, 0xc8, 0x35, 0xf9, 0x32, 0x34, 0x46, 0x44, 0x9a, 0x86, 0x40, 0x7c, 0xb9, 0x23, 0xc7, 0x3, 0x3f}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x3a, 0xe2, 0x2e, 0x7d, 0xaf, 0xbb, 0xcc, 0x21, 0xa1, 0x7a, 0x41, 0x9a, 0xd0, 0xbb, 0xa9, 0xc8, 0x35, 0xf9, 0x32, 0x34, 0x46, 0x44, 0x9a, 0x86, 0x40, 0x7c, 0xb9, 0x23, 0xc7, 0x3, 0x3f}}
return a, nil return a, nil
} }
@ -231,7 +231,7 @@ func _1591277220_add_index_messagesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1591277220_add_index_messages.up.sql", size: 240, mode: os.FileMode(0644), modTime: time.Unix(1614609069, 0)} info := bindataFileInfo{name: "1591277220_add_index_messages.up.sql", size: 240, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0xfe, 0xbe, 0xd5, 0xb8, 0x8f, 0xdd, 0xef, 0xbb, 0xa8, 0xad, 0x7f, 0xed, 0x5b, 0x5b, 0x2f, 0xe6, 0x82, 0x27, 0x78, 0x1f, 0xb9, 0x57, 0xdc, 0x8, 0xc2, 0xb2, 0xa9, 0x9a, 0x4, 0xe1, 0x7a}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0xfe, 0xbe, 0xd5, 0xb8, 0x8f, 0xdd, 0xef, 0xbb, 0xa8, 0xad, 0x7f, 0xed, 0x5b, 0x5b, 0x2f, 0xe6, 0x82, 0x27, 0x78, 0x1f, 0xb9, 0x57, 0xdc, 0x8, 0xc2, 0xb2, 0xa9, 0x9a, 0x4, 0xe1, 0x7a}}
return a, nil return a, nil
} }
@ -251,7 +251,7 @@ func _1593087212_add_mute_chat_and_raw_message_fieldsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.up.sql", size: 215, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.up.sql", size: 215, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x99, 0x61, 0xd1, 0xaa, 0xb4, 0xbf, 0xaf, 0xd7, 0x20, 0x17, 0x40, 0xf9, 0x2, 0xfb, 0xcc, 0x40, 0x2a, 0xd, 0x86, 0x36, 0x30, 0x88, 0x89, 0x25, 0x80, 0x42, 0xb0, 0x5b, 0xe9, 0x73, 0x78}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x99, 0x61, 0xd1, 0xaa, 0xb4, 0xbf, 0xaf, 0xd7, 0x20, 0x17, 0x40, 0xf9, 0x2, 0xfb, 0xcc, 0x40, 0x2a, 0xd, 0x86, 0x36, 0x30, 0x88, 0x89, 0x25, 0x80, 0x42, 0xb0, 0x5b, 0xe9, 0x73, 0x78}}
return a, nil return a, nil
} }
@ -271,7 +271,7 @@ func _1595862781_add_audio_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595862781_add_audio_data.up.sql", size: 246, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1595862781_add_audio_data.up.sql", size: 246, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0xd2, 0xee, 0x55, 0xfb, 0x36, 0xa4, 0x92, 0x66, 0xe, 0x81, 0x62, 0x1e, 0x7a, 0x69, 0xa, 0xd5, 0x4b, 0xa5, 0x6a, 0x8d, 0x1d, 0xce, 0xf3, 0x3e, 0xc0, 0x5f, 0x9c, 0x66, 0x1b, 0xb4, 0xed}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0xd2, 0xee, 0x55, 0xfb, 0x36, 0xa4, 0x92, 0x66, 0xe, 0x81, 0x62, 0x1e, 0x7a, 0x69, 0xa, 0xd5, 0x4b, 0xa5, 0x6a, 0x8d, 0x1d, 0xce, 0xf3, 0x3e, 0xc0, 0x5f, 0x9c, 0x66, 0x1b, 0xb4, 0xed}}
return a, nil return a, nil
} }
@ -291,7 +291,7 @@ func _1595865249_create_emoji_reactions_tableUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.up.sql", size: 300, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.up.sql", size: 300, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xc5, 0x43, 0x5c, 0x3d, 0x53, 0x43, 0x2c, 0x1a, 0xa5, 0xb6, 0xbf, 0x7, 0x4, 0x5a, 0x3e, 0x40, 0x8b, 0xa4, 0x57, 0x12, 0x58, 0xbc, 0x42, 0xe2, 0xc3, 0xde, 0x76, 0x98, 0x80, 0xe2, 0xbe}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xc5, 0x43, 0x5c, 0x3d, 0x53, 0x43, 0x2c, 0x1a, 0xa5, 0xb6, 0xbf, 0x7, 0x4, 0x5a, 0x3e, 0x40, 0x8b, 0xa4, 0x57, 0x12, 0x58, 0xbc, 0x42, 0xe2, 0xc3, 0xde, 0x76, 0x98, 0x80, 0xe2, 0xbe}}
return a, nil return a, nil
} }
@ -311,7 +311,7 @@ func _1596805115_create_group_chat_invitations_tableUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.up.sql", size: 231, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.up.sql", size: 231, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0xb1, 0x14, 0x6d, 0x54, 0x28, 0x67, 0xc3, 0x23, 0x6a, 0xfc, 0x80, 0xdf, 0x9e, 0x4c, 0x35, 0x36, 0xf, 0xf8, 0xf3, 0x5f, 0xae, 0xad, 0xb, 0xc1, 0x51, 0x8e, 0x17, 0x7, 0xe5, 0x7f, 0x91}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0xb1, 0x14, 0x6d, 0x54, 0x28, 0x67, 0xc3, 0x23, 0x6a, 0xfc, 0x80, 0xdf, 0x9e, 0x4c, 0x35, 0x36, 0xf, 0xf8, 0xf3, 0x5f, 0xae, 0xad, 0xb, 0xc1, 0x51, 0x8e, 0x17, 0x7, 0xe5, 0x7f, 0x91}}
return a, nil return a, nil
} }
@ -331,7 +331,7 @@ func _1597322655_add_invitation_admin_chat_fieldUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597322655_add_invitation_admin_chat_field.up.sql", size: 54, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1597322655_add_invitation_admin_chat_field.up.sql", size: 54, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x7a, 0xa0, 0xf2, 0xdb, 0x13, 0x91, 0x91, 0xa8, 0x34, 0x1a, 0xa1, 0x49, 0x68, 0xd5, 0xae, 0x2c, 0xd8, 0xd5, 0xea, 0x8f, 0x8c, 0xc7, 0x2, 0x4e, 0x58, 0x2c, 0x3a, 0x14, 0xd4, 0x4f, 0x2c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x7a, 0xa0, 0xf2, 0xdb, 0x13, 0x91, 0x91, 0xa8, 0x34, 0x1a, 0xa1, 0x49, 0x68, 0xd5, 0xae, 0x2c, 0xd8, 0xd5, 0xea, 0x8f, 0x8c, 0xc7, 0x2, 0x4e, 0x58, 0x2c, 0x3a, 0x14, 0xd4, 0x4f, 0x2c}}
return a, nil return a, nil
} }
@ -351,7 +351,7 @@ func _1597757544_add_nicknameUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597757544_add_nickname.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1597757544_add_nickname.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xa2, 0x64, 0x50, 0xc5, 0x4, 0xb9, 0x8b, 0xd1, 0x18, 0x9b, 0xc3, 0x91, 0x36, 0x2a, 0x1f, 0xc3, 0x6c, 0x2d, 0x92, 0xf8, 0x5e, 0xff, 0xb1, 0x59, 0x61, 0x2, 0x1c, 0xe1, 0x85, 0x90, 0xa4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xa2, 0x64, 0x50, 0xc5, 0x4, 0xb9, 0x8b, 0xd1, 0x18, 0x9b, 0xc3, 0x91, 0x36, 0x2a, 0x1f, 0xc3, 0x6c, 0x2d, 0x92, 0xf8, 0x5e, 0xff, 0xb1, 0x59, 0x61, 0x2, 0x1c, 0xe1, 0x85, 0x90, 0xa4}}
return a, nil return a, nil
} }
@ -371,7 +371,7 @@ func _1598955122_add_mentionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1598955122_add_mentions.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1598955122_add_mentions.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8d, 0x22, 0x17, 0x92, 0xd2, 0x11, 0x4e, 0x7, 0x93, 0x9a, 0x55, 0xfd, 0xb, 0x97, 0xc4, 0x63, 0x6a, 0x81, 0x97, 0xcd, 0xb2, 0xf8, 0x4b, 0x5f, 0x3c, 0xfa, 0x3a, 0x38, 0x53, 0x10, 0xed, 0x9d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8d, 0x22, 0x17, 0x92, 0xd2, 0x11, 0x4e, 0x7, 0x93, 0x9a, 0x55, 0xfd, 0xb, 0x97, 0xc4, 0x63, 0x6a, 0x81, 0x97, 0xcd, 0xb2, 0xf8, 0x4b, 0x5f, 0x3c, 0xfa, 0x3a, 0x38, 0x53, 0x10, 0xed, 0x9d}}
return a, nil return a, nil
} }
@ -391,7 +391,7 @@ func _1599641390_add_emoji_reactions_indexUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.up.sql", size: 126, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.up.sql", size: 126, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0xd8, 0xdc, 0xa7, 0xb, 0x92, 0x7a, 0x61, 0x37, 0x24, 0x1c, 0x77, 0x5e, 0xe, 0x7e, 0xfc, 0x9f, 0x98, 0x7b, 0x65, 0xe7, 0xf9, 0x71, 0x57, 0x89, 0x2d, 0x90, 0x1b, 0xf6, 0x5e, 0x37, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0xd8, 0xdc, 0xa7, 0xb, 0x92, 0x7a, 0x61, 0x37, 0x24, 0x1c, 0x77, 0x5e, 0xe, 0x7e, 0xfc, 0x9f, 0x98, 0x7b, 0x65, 0xe7, 0xf9, 0x71, 0x57, 0x89, 0x2d, 0x90, 0x1b, 0xf6, 0x5e, 0x37, 0xe8}}
return a, nil return a, nil
} }
@ -411,7 +411,7 @@ func _1599720851_add_seen_index_remove_long_messagesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.up.sql", size: 150, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.up.sql", size: 150, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x24, 0x1c, 0xc4, 0x78, 0x91, 0xc7, 0xeb, 0xfe, 0xc8, 0xa0, 0xd8, 0x13, 0x27, 0x97, 0xc8, 0x96, 0x56, 0x97, 0x33, 0x2c, 0x1e, 0x16, 0x8a, 0xd3, 0x49, 0x99, 0x3, 0xe9, 0xbb, 0xc4, 0x5, 0x3c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x24, 0x1c, 0xc4, 0x78, 0x91, 0xc7, 0xeb, 0xfe, 0xc8, 0xa0, 0xd8, 0x13, 0x27, 0x97, 0xc8, 0x96, 0x56, 0x97, 0x33, 0x2c, 0x1e, 0x16, 0x8a, 0xd3, 0x49, 0x99, 0x3, 0xe9, 0xbb, 0xc4, 0x5, 0x3c}}
return a, nil return a, nil
} }
@ -431,7 +431,7 @@ func _1603198582_add_profile_chat_fieldUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603198582_add_profile_chat_field.up.sql", size: 45, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1603198582_add_profile_chat_field.up.sql", size: 45, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaa, 0xca, 0xe, 0x46, 0xa0, 0x9, 0x9d, 0x47, 0x57, 0xe9, 0xfb, 0x17, 0xeb, 0x9c, 0xf6, 0xb8, 0x1d, 0xe9, 0xd, 0x0, 0xd5, 0xe5, 0xd8, 0x9e, 0x60, 0xa, 0xbf, 0x32, 0x2c, 0x52, 0x7f, 0x6a}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaa, 0xca, 0xe, 0x46, 0xa0, 0x9, 0x9d, 0x47, 0x57, 0xe9, 0xfb, 0x17, 0xeb, 0x9c, 0xf6, 0xb8, 0x1d, 0xe9, 0xd, 0x0, 0xd5, 0xe5, 0xd8, 0x9e, 0x60, 0xa, 0xbf, 0x32, 0x2c, 0x52, 0x7f, 0x6a}}
return a, nil return a, nil
} }
@ -451,7 +451,7 @@ func _1603816533_add_linksUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603816533_add_links.up.sql", size: 48, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1603816533_add_links.up.sql", size: 48, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0x24, 0xd6, 0x1d, 0xa, 0x83, 0x1e, 0x4d, 0xf, 0xae, 0x4d, 0x8c, 0x51, 0x32, 0xa8, 0x37, 0xb0, 0x14, 0xfb, 0x32, 0x34, 0xc8, 0xc, 0x4e, 0x5b, 0xc5, 0x15, 0x65, 0x73, 0x0, 0x0, 0x1d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0x24, 0xd6, 0x1d, 0xa, 0x83, 0x1e, 0x4d, 0xf, 0xae, 0x4d, 0x8c, 0x51, 0x32, 0xa8, 0x37, 0xb0, 0x14, 0xfb, 0x32, 0x34, 0xc8, 0xc, 0x4e, 0x5b, 0xc5, 0x15, 0x65, 0x73, 0x0, 0x0, 0x1d}}
return a, nil return a, nil
} }
@ -471,7 +471,7 @@ func _1603888149_create_chat_identity_last_published_tableUpSql() (*asset, error
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603888149_create_chat_identity_last_published_table.up.sql", size: 407, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)} info := bindataFileInfo{name: "1603888149_create_chat_identity_last_published_table.up.sql", size: 407, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x9, 0xf, 0xfb, 0xdb, 0x3c, 0x86, 0x70, 0x82, 0xda, 0x10, 0x25, 0xe2, 0x4e, 0x40, 0x45, 0xab, 0x8b, 0x1c, 0x91, 0x7c, 0xf1, 0x70, 0x2e, 0x81, 0xf3, 0x71, 0x45, 0xda, 0xe2, 0xa4, 0x57}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7f, 0x9, 0xf, 0xfb, 0xdb, 0x3c, 0x86, 0x70, 0x82, 0xda, 0x10, 0x25, 0xe2, 0x4e, 0x40, 0x45, 0xab, 0x8b, 0x1c, 0x91, 0x7c, 0xf1, 0x70, 0x2e, 0x81, 0xf3, 0x71, 0x45, 0xda, 0xe2, 0xa4, 0x57}}
return a, nil return a, nil
} }
@ -491,7 +491,7 @@ func _1605075346_add_communitiesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1605075346_add_communities.up.sql", size: 6971, mode: os.FileMode(0644), modTime: time.Unix(1612251705, 0)} info := bindataFileInfo{name: "1605075346_add_communities.up.sql", size: 6971, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1f, 0x64, 0xea, 0xb4, 0xae, 0x9e, 0xdb, 0x9, 0x58, 0xb6, 0x5c, 0x7a, 0x50, 0xc5, 0xfe, 0x93, 0x5d, 0x36, 0x85, 0x5d, 0x6a, 0xba, 0xc9, 0x7e, 0x84, 0xd7, 0xbf, 0x2a, 0x53, 0xf3, 0x97, 0xf1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1f, 0x64, 0xea, 0xb4, 0xae, 0x9e, 0xdb, 0x9, 0x58, 0xb6, 0x5c, 0x7a, 0x50, 0xc5, 0xfe, 0x93, 0x5d, 0x36, 0x85, 0x5d, 0x6a, 0xba, 0xc9, 0x7e, 0x84, 0xd7, 0xbf, 0x2a, 0x53, 0xf3, 0x97, 0xf1}}
return a, nil return a, nil
} }
@ -511,7 +511,7 @@ func _1610117927_add_message_cacheUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1610117927_add_message_cache.up.sql", size: 142, mode: os.FileMode(0644), modTime: time.Unix(1612251705, 0)} info := bindataFileInfo{name: "1610117927_add_message_cache.up.sql", size: 142, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0xf1, 0xf0, 0x82, 0x79, 0x28, 0x19, 0xc2, 0x39, 0x6a, 0xa5, 0x96, 0x59, 0x23, 0xa0, 0xed, 0x60, 0x58, 0x86, 0x9, 0xb9, 0xad, 0xfb, 0xa, 0xe3, 0x47, 0x6e, 0xa1, 0x18, 0xe8, 0x39, 0x2c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0xf1, 0xf0, 0x82, 0x79, 0x28, 0x19, 0xc2, 0x39, 0x6a, 0xa5, 0x96, 0x59, 0x23, 0xa0, 0xed, 0x60, 0x58, 0x86, 0x9, 0xb9, 0xad, 0xfb, 0xa, 0xe3, 0x47, 0x6e, 0xa1, 0x18, 0xe8, 0x39, 0x2c}}
return a, nil return a, nil
} }
@ -531,7 +531,7 @@ func _1610959908_add_dont_wrap_to_raw_messagesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1610959908_add_dont_wrap_to_raw_messages.up.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1614587915, 0)} info := bindataFileInfo{name: "1610959908_add_dont_wrap_to_raw_messages.up.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1611835527, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x71, 0x2, 0x9a, 0xca, 0xd4, 0x38, 0x44, 0x30, 0x2b, 0xa8, 0x27, 0x32, 0x63, 0x53, 0x22, 0x60, 0x59, 0x84, 0x23, 0x96, 0x77, 0xf0, 0x56, 0xd7, 0x94, 0xe0, 0x95, 0x28, 0x6, 0x1d, 0x4e, 0xb1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x71, 0x2, 0x9a, 0xca, 0xd4, 0x38, 0x44, 0x30, 0x2b, 0xa8, 0x27, 0x32, 0x63, 0x53, 0x22, 0x60, 0x59, 0x84, 0x23, 0x96, 0x77, 0xf0, 0x56, 0xd7, 0x94, 0xe0, 0x95, 0x28, 0x6, 0x1d, 0x4e, 0xb1}}
return a, nil return a, nil
} }
@ -551,7 +551,7 @@ func _1610960912_add_send_on_personal_topicUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1610960912_add_send_on_personal_topic.up.sql", size: 82, mode: os.FileMode(0644), modTime: time.Unix(1614587915, 0)} info := bindataFileInfo{name: "1610960912_add_send_on_personal_topic.up.sql", size: 82, mode: os.FileMode(0644), modTime: time.Unix(1611835527, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0xac, 0x2f, 0xc4, 0xd, 0xa7, 0x1b, 0x37, 0x30, 0xc2, 0x68, 0xee, 0xde, 0x54, 0x5e, 0xbf, 0x3f, 0xa0, 0xd6, 0xc6, 0x9f, 0xd4, 0x34, 0x12, 0x76, 0x1e, 0x66, 0x4a, 0xfc, 0xf, 0xee, 0xc9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0xac, 0x2f, 0xc4, 0xd, 0xa7, 0x1b, 0x37, 0x30, 0xc2, 0x68, 0xee, 0xde, 0x54, 0x5e, 0xbf, 0x3f, 0xa0, 0xd6, 0xc6, 0x9f, 0xd4, 0x34, 0x12, 0x76, 0x1e, 0x66, 0x4a, 0xfc, 0xf, 0xee, 0xc9}}
return a, nil return a, nil
} }
@ -571,7 +571,7 @@ func _1612870480_add_datasync_idUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1612870480_add_datasync_id.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1614587915, 0)} info := bindataFileInfo{name: "1612870480_add_datasync_id.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1614590948, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0x9a, 0xbc, 0xfa, 0xaa, 0x8c, 0x9c, 0x37, 0x67, 0x15, 0x9c, 0x7e, 0x78, 0x75, 0x66, 0x82, 0x18, 0x72, 0x10, 0xbc, 0xd4, 0xab, 0x44, 0xfe, 0x57, 0x85, 0x6d, 0x19, 0xf5, 0x96, 0x8a, 0xbe}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0x9a, 0xbc, 0xfa, 0xaa, 0x8c, 0x9c, 0x37, 0x67, 0x15, 0x9c, 0x7e, 0x78, 0x75, 0x66, 0x82, 0x18, 0x72, 0x10, 0xbc, 0xd4, 0xab, 0x44, 0xfe, 0x57, 0x85, 0x6d, 0x19, 0xf5, 0x96, 0x8a, 0xbe}}
return a, nil return a, nil
} }
@ -591,7 +591,7 @@ func _1614152139_add_communities_request_to_joinUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1614152139_add_communities_request_to_join.up.sql", size: 831, mode: os.FileMode(0644), modTime: time.Unix(1614587915, 0)} info := bindataFileInfo{name: "1614152139_add_communities_request_to_join.up.sql", size: 831, mode: os.FileMode(0644), modTime: time.Unix(1614590948, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x11, 0x3, 0x26, 0xf9, 0x29, 0x50, 0x4f, 0xcd, 0x46, 0xe5, 0xb1, 0x6b, 0xb9, 0x2, 0x40, 0xb1, 0xdf, 0x4a, 0x4c, 0x7a, 0xda, 0x3, 0x35, 0xcd, 0x2d, 0xcc, 0x80, 0x7d, 0x57, 0x5f, 0x3, 0x5c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x11, 0x3, 0x26, 0xf9, 0x29, 0x50, 0x4f, 0xcd, 0x46, 0xe5, 0xb1, 0x6b, 0xb9, 0x2, 0x40, 0xb1, 0xdf, 0x4a, 0x4c, 0x7a, 0xda, 0x3, 0x35, 0xcd, 0x2d, 0xcc, 0x80, 0x7d, 0x57, 0x5f, 0x3, 0x5c}}
return a, nil return a, nil
} }
@ -611,7 +611,7 @@ func _1615374373_add_confirmationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1615374373_add_confirmations.up.sql", size: 227, mode: os.FileMode(0644), modTime: time.Unix(1615889815, 0)} info := bindataFileInfo{name: "1615374373_add_confirmations.up.sql", size: 227, mode: os.FileMode(0644), modTime: time.Unix(1615901672, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdd, 0xa6, 0x65, 0xc5, 0x1d, 0xb2, 0x77, 0x36, 0xe3, 0x79, 0xda, 0xe8, 0x7a, 0xa4, 0xdf, 0x45, 0xae, 0xd8, 0xb4, 0xba, 0x90, 0xfd, 0x74, 0x71, 0x14, 0x75, 0x73, 0x72, 0xb9, 0x9e, 0x1, 0x81}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdd, 0xa6, 0x65, 0xc5, 0x1d, 0xb2, 0x77, 0x36, 0xe3, 0x79, 0xda, 0xe8, 0x7a, 0xa4, 0xdf, 0x45, 0xae, 0xd8, 0xb4, 0xba, 0x90, 0xfd, 0x74, 0x71, 0x14, 0x75, 0x73, 0x72, 0xb9, 0x9e, 0x1, 0x81}}
return a, nil return a, nil
} }
@ -631,7 +631,7 @@ func readmeMd() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "README.md", size: 554, mode: os.FileMode(0644), modTime: time.Unix(1612251705, 0)} info := bindataFileInfo{name: "README.md", size: 554, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1c, 0x6e, 0xfb, 0xcc, 0x81, 0x94, 0x4d, 0x8c, 0xa0, 0x3b, 0x5, 0xb0, 0x18, 0xd6, 0xbb, 0xb3, 0x79, 0xc8, 0x8f, 0xff, 0xc1, 0x10, 0xf9, 0xf, 0x20, 0x1b, 0x4a, 0x74, 0x96, 0x42, 0xd7, 0xa8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1c, 0x6e, 0xfb, 0xcc, 0x81, 0x94, 0x4d, 0x8c, 0xa0, 0x3b, 0x5, 0xb0, 0x18, 0xd6, 0xbb, 0xb3, 0x79, 0xc8, 0x8f, 0xff, 0xc1, 0x10, 0xf9, 0xf, 0x20, 0x1b, 0x4a, 0x74, 0x96, 0x42, 0xd7, 0xa8}}
return a, nil return a, nil
} }
@ -651,7 +651,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 850, mode: os.FileMode(0644), modTime: time.Unix(1611588719, 0)} info := bindataFileInfo{name: "doc.go", size: 850, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa0, 0xcc, 0x41, 0xe1, 0x61, 0x12, 0x97, 0xe, 0x36, 0x8c, 0xa7, 0x9e, 0xe0, 0x6e, 0x59, 0x9e, 0xee, 0xd5, 0x4a, 0xcf, 0x1e, 0x60, 0xd6, 0xc3, 0x3a, 0xc9, 0x6c, 0xf2, 0x86, 0x5a, 0xb4, 0x1e}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa0, 0xcc, 0x41, 0xe1, 0x61, 0x12, 0x97, 0xe, 0x36, 0x8c, 0xa7, 0x9e, 0xe0, 0x6e, 0x59, 0x9e, 0xee, 0xd5, 0x4a, 0xcf, 0x1e, 0x60, 0xd6, 0xc3, 0x3a, 0xc9, 0x6c, 0xf2, 0x86, 0x5a, 0xb4, 0x1e}}
return a, nil return a, nil
} }


@@ -90,7 +90,7 @@ func _1593601729_initial_schemaDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1593601729_initial_schema.down.sql", size: 144, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1593601729_initial_schema.down.sql", size: 144, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x95, 0x55, 0x64, 0x38, 0x40, 0x16, 0xbf, 0x8b, 0x1c, 0x18, 0xb4, 0xc5, 0x7f, 0xd0, 0xb8, 0xf0, 0x3c, 0xa2, 0x82, 0xf8, 0x8d, 0x5a, 0xd3, 0xb6, 0x6e, 0xa3, 0xb4, 0xc, 0x9, 0x33, 0x0}}
 	return a, nil
 }
@@ -110,7 +110,7 @@ func _1593601729_initial_schemaUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1593601729_initial_schema.up.sql", size: 1773, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1593601729_initial_schema.up.sql", size: 1773, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x1e, 0x5, 0x35, 0x9, 0xb2, 0x2d, 0x6f, 0x33, 0x63, 0xa2, 0x7a, 0x5b, 0xd2, 0x2d, 0xcb, 0x79, 0x7e, 0x6, 0xb4, 0x9d, 0x35, 0xd8, 0x9b, 0x55, 0xe5, 0xf8, 0x44, 0xca, 0xa6, 0xf3, 0xd3}}
 	return a, nil
 }
@@ -130,7 +130,7 @@ func _1597909626_add_server_typeDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1597909626_add_server_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1597909626_add_server_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
 	return a, nil
 }
@@ -150,7 +150,7 @@ func _1597909626_add_server_typeUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1597909626_add_server_type.up.sql", size: 145, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1597909626_add_server_type.up.sql", size: 145, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc8, 0x3f, 0xe0, 0xe7, 0x57, 0x0, 0x5d, 0x60, 0xf3, 0x55, 0x64, 0x71, 0x80, 0x3c, 0xca, 0x8, 0x61, 0xb5, 0x3c, 0xe, 0xa1, 0xe4, 0x61, 0xd1, 0x4e, 0xd8, 0xb2, 0x55, 0xdd, 0x87, 0x62, 0x9b}}
 	return a, nil
 }
@@ -170,7 +170,7 @@ func _1599053776_add_chat_id_and_typeDownSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
 	return a, nil
 }
@@ -190,7 +190,7 @@ func _1599053776_add_chat_id_and_typeUpSql() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.up.sql", size: 264, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.up.sql", size: 264, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x7a, 0xf9, 0xc4, 0xa2, 0x96, 0x2e, 0xf9, 0x8f, 0x7, 0xf1, 0x1e, 0x73, 0x8a, 0xa6, 0x3a, 0x13, 0x4, 0x73, 0x82, 0x83, 0xb, 0xe3, 0xb5, 0x3b, 0x7e, 0xd, 0x23, 0xce, 0x98, 0xd4, 0xdc}}
 	return a, nil
 }
@@ -210,7 +210,7 @@ func docGo() (*asset, error) {
 		return nil, err
 	}
-	info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}}
 	return a, nil
 }
@@ -88,7 +88,7 @@ func _1593601728_initial_schemaDownSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1593601728_initial_schema.down.sql", size: 200, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1593601728_initial_schema.down.sql", size: 200, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x88, 0x8a, 0x61, 0x81, 0x57, 0x45, 0x9b, 0x97, 0x9b, 0x1f, 0xf6, 0x94, 0x8a, 0x20, 0xb3, 0x2b, 0xff, 0x69, 0x49, 0xf4, 0x58, 0xcc, 0xd0, 0x55, 0xcc, 0x9a, 0x8b, 0xb6, 0x7f, 0x29, 0x53, 0xc1}}
	return a, nil
}
@@ -108,7 +108,7 @@ func _1593601728_initial_schemaUpSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1593601728_initial_schema.up.sql", size: 675, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1593601728_initial_schema.up.sql", size: 675, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x61, 0x90, 0x79, 0xd9, 0x14, 0x65, 0xe9, 0x96, 0x53, 0x17, 0x33, 0x54, 0xeb, 0x8b, 0x5d, 0x95, 0x99, 0x10, 0x36, 0x58, 0xdd, 0xb2, 0xbf, 0x45, 0xd9, 0xbb, 0xc4, 0x92, 0xe, 0xce, 0x2}}
	return a, nil
}
@@ -128,7 +128,7 @@ func _1598419937_add_push_notifications_tableDownSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1598419937_add_push_notifications_table.down.sql", size: 51, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1598419937_add_push_notifications_table.down.sql", size: 51, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x98, 0xc8, 0x30, 0x45, 0x5b, 0xc5, 0x7d, 0x13, 0x5d, 0xe7, 0xc8, 0x23, 0x43, 0xf7, 0xdc, 0x9c, 0xe2, 0xdd, 0x63, 0xf0, 0xb7, 0x16, 0x40, 0xc, 0xda, 0xb9, 0x16, 0x70, 0x2b, 0x5a, 0x7e}}
	return a, nil
}
@@ -148,7 +148,7 @@ func _1598419937_add_push_notifications_tableUpSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1598419937_add_push_notifications_table.up.sql", size: 104, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "1598419937_add_push_notifications_table.up.sql", size: 104, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2, 0x3e, 0xef, 0xf, 0xc2, 0xdf, 0xbc, 0x99, 0x7a, 0xc2, 0xd3, 0x64, 0x4f, 0x4c, 0x7e, 0xfc, 0x2e, 0x8c, 0xa7, 0x54, 0xd3, 0x4d, 0x25, 0x98, 0x41, 0xbc, 0xea, 0xd7, 0x2, 0xc1, 0xd0, 0x52}}
	return a, nil
}
@@ -168,7 +168,7 @@ func docGo() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}}
	return a, nil
}
@@ -86,7 +86,7 @@ func _1561059284_add_waku_keysDownSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1561059284_add_waku_keys.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "1561059284_add_waku_keys.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0x2a, 0x7e, 0x9, 0xa3, 0xdd, 0xc6, 0x3, 0xfa, 0xaa, 0x98, 0xa0, 0x26, 0x5e, 0x67, 0x43, 0xe6, 0x20, 0xfd, 0x10, 0xfd, 0x60, 0x89, 0x17, 0x13, 0x87, 0x1b, 0x44, 0x36, 0x79, 0xb6, 0x60}}
	return a, nil
}
@@ -106,7 +106,7 @@ func _1561059284_add_waku_keysUpSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1561059284_add_waku_keys.up.sql", size: 109, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "1561059284_add_waku_keys.up.sql", size: 109, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x5c, 0x8, 0x32, 0xef, 0x12, 0x88, 0x21, 0xd, 0x7a, 0x42, 0x4d, 0xe7, 0x2d, 0x6c, 0x99, 0xb6, 0x1, 0xf1, 0xba, 0x2c, 0x40, 0x8d, 0xa9, 0x4b, 0xe6, 0xc4, 0x21, 0xec, 0x47, 0x6b, 0xf7}}
	return a, nil
}
@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1603694101, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}}
	return a, nil
}
@@ -86,7 +86,7 @@ func _1561059285_add_whisper_keysDownSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1561059285_add_whisper_keys.down.sql", size: 25, mode: os.FileMode(0644), modTime: time.Unix(1610115164, 0)}
+	info := bindataFileInfo{name: "1561059285_add_whisper_keys.down.sql", size: 25, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb9, 0x31, 0x3f, 0xce, 0xfa, 0x44, 0x36, 0x1b, 0xb0, 0xec, 0x5d, 0xb, 0x90, 0xb, 0x21, 0x4f, 0xd5, 0xe5, 0x50, 0xed, 0xc7, 0x43, 0xdf, 0x83, 0xb4, 0x3a, 0xc1, 0x55, 0x2e, 0x53, 0x7c, 0x67}}
	return a, nil
}
@@ -106,7 +106,7 @@ func _1561059285_add_whisper_keysUpSql() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "1561059285_add_whisper_keys.up.sql", size: 112, mode: os.FileMode(0644), modTime: time.Unix(1610115164, 0)}
+	info := bindataFileInfo{name: "1561059285_add_whisper_keys.up.sql", size: 112, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x25, 0x41, 0xc, 0x92, 0xdd, 0x9e, 0xff, 0x5d, 0xd0, 0x93, 0xe4, 0x24, 0x50, 0x29, 0xcf, 0xc6, 0xf7, 0x49, 0x3c, 0x73, 0xd9, 0x8c, 0xfa, 0xf2, 0xcf, 0xf6, 0x6f, 0xbc, 0x31, 0xe6, 0xf7, 0xe2}}
	return a, nil
}
@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1610115164, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}}
	return a, nil
}
@@ -0,0 +1,31 @@
package appmetrics

import (
	"context"

	"github.com/ethereum/go-ethereum/log"

	"github.com/status-im/status-go/appmetrics"
)

func NewAPI(db *appmetrics.Database) *API {
	return &API{db: db}
}

type API struct {
	db *appmetrics.Database
}

func (api *API) ValidateAppMetrics(ctx context.Context, appMetrics []appmetrics.AppMetric) error {
	log.Debug("[AppMetricsAPI::ValidateAppMetrics]")
	return api.db.ValidateAppMetrics(appMetrics)
}

func (api *API) SaveAppMetrics(ctx context.Context, appMetrics []appmetrics.AppMetric) error {
	log.Debug("[AppMetricsAPI::SaveAppMetrics]")
	return api.db.SaveAppMetrics(appMetrics)
}

func (api *API) GetAppMetrics(ctx context.Context, limit int, offset int) ([]appmetrics.AppMetric, error) {
	log.Debug("[AppMetricsAPI::GetAppMetrics]")
	return api.db.GetAppMetrics(limit, offset)
}
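For orientation: everything above delegates to appmetrics.Database, which is defined elsewhere in this commit and not shown in this hunk. Below is a rough sketch of the record it handles, inferred from the fields used in the test file that follows and from the commit note about carrying Value as json.RawMessage; the field names and JSON tags are assumptions, not the authoritative definition.

// Sketch only: the concrete struct lives in the status-go appmetrics package
// added by this commit and may carry extra columns (for example a created_at
// timestamp filled in by the database layer).
package appmetrics

import "encoding/json"

type AppMetric struct {
	Event      string          `json:"event"`
	Value      json.RawMessage `json:"value"` // raw JSON so the validator can check it against a schema
	AppVersion string          `json:"app_version"`
	OS         string          `json:"os"`
}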
@@ -0,0 +1,49 @@
package appmetrics

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"os"
	"testing"

	"github.com/status-im/status-go/appdatabase"
	"github.com/status-im/status-go/appmetrics"
	"github.com/stretchr/testify/require"
)

func setupTestDB(t *testing.T) (*appmetrics.Database, func()) {
	tmpfile, err := ioutil.TempFile("", "appmetrics-service")
	require.NoError(t, err)
	db, err := appdatabase.InitializeDB(tmpfile.Name(), "appmetrics-tests")
	require.NoError(t, err)
	return appmetrics.NewDB(db), func() {
		require.NoError(t, db.Close())
		require.NoError(t, os.Remove(tmpfile.Name()))
	}
}

func TestValidateAppMetrics(t *testing.T) {
	db, close := setupTestDB(t)
	defer close()
	api := NewAPI(db)

	validMetrics := []appmetrics.AppMetric{appmetrics.AppMetric{
		Event:      "navigation/navigate-to",
		Value:      json.RawMessage(`{"view_id": "some-view-oid", "params": {"screen": "allowed-screen-name"}}`),
		AppVersion: "1.12",
		OS:         "android"}}
	invalidMetrics := []appmetrics.AppMetric{appmetrics.AppMetric{
		Event:      "navigation/navigate-to",
		Value:      json.RawMessage("{}"),
		AppVersion: "1.12",
		OS:         "android"}}

	err := api.ValidateAppMetrics(context.Background(), validMetrics)
	require.NoError(t, err)
	err = api.ValidateAppMetrics(context.Background(), invalidMetrics)
	require.Error(t, err)
}
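The validation this test exercises is presumably schema-based: the xeipuuv packages vendored further down in this diff are the dependencies of gojsonschema, which suggests each event's Value is checked against a per-event JSON schema. A minimal sketch of that kind of check, using a stand-in schema rather than the ones shipped in status-go:

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Stand-in schema for the "navigation/navigate-to" event; the real
	// schemas live inside the appmetrics package and are stricter.
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {
			"view_id": {"type": "string"},
			"params":  {"type": "object"}
		},
		"required": ["view_id"],
		"additionalProperties": false
	}`)
	value := gojsonschema.NewStringLoader(`{"view_id": "some-view-oid", "params": {"screen": "allowed-screen-name"}}`)

	result, err := gojsonschema.Validate(schema, value)
	if err != nil {
		panic(err) // schema or document could not be parsed
	}
	if result.Valid() {
		fmt.Println("value matches the schema")
		return
	}
	for _, desc := range result.Errors() {
		fmt.Println("-", desc)
	}
}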
@@ -0,0 +1,39 @@
package appmetrics

import (
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"

	"github.com/status-im/status-go/appmetrics"
)

func NewService(db *appmetrics.Database) *Service {
	return &Service{db: db}
}

type Service struct {
	db *appmetrics.Database
}

func (s *Service) Start(*p2p.Server) error {
	return nil
}

func (s *Service) Stop() error {
	return nil
}

func (s *Service) APIs() []rpc.API {
	return []rpc.API{
		{
			Namespace: "appmetrics",
			Version:   "0.1.0",
			Service:   NewAPI(s.db),
			Public:    true,
		},
	}
}

func (s *Service) Protocols() []p2p.Protocol {
	return nil
}
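Because the API is registered under the "appmetrics" namespace and marked Public, its methods surface over JSON-RPC with geth-style names such as appmetrics_saveAppMetrics and appmetrics_getAppMetrics once the node exposes the namespace. A rough client-side sketch, assuming a local HTTP endpoint on 127.0.0.1:8545 and that the namespace is enabled in the node's RPC configuration:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	gethrpc "github.com/ethereum/go-ethereum/rpc"

	"github.com/status-im/status-go/appmetrics"
)

func main() {
	client, err := gethrpc.Dial("http://127.0.0.1:8545") // endpoint is an assumption
	if err != nil {
		panic(err)
	}
	defer client.Close()

	metrics := []appmetrics.AppMetric{{
		Event:      "navigation/navigate-to",
		Value:      json.RawMessage(`{"view_id": "some-view-oid", "params": {"screen": "allowed-screen-name"}}`),
		AppVersion: "1.12",
		OS:         "android",
	}}

	// Save, then page through what was stored (limit 10, offset 0).
	if err := client.CallContext(context.Background(), nil, "appmetrics_saveAppMetrics", metrics); err != nil {
		panic(err)
	}
	var stored []appmetrics.AppMetric
	if err := client.CallContext(context.Background(), &stored, "appmetrics_getAppMetrics", 10, 0); err != nil {
		panic(err)
	}
	fmt.Printf("stored %d metrics\n", len(stored))
}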
@@ -97,7 +97,7 @@ func ConfigReadmeMd() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/README.md", size: 3153, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "../config/README.md", size: 3153, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x84, 0xf1, 0x44, 0x5b, 0x33, 0xe5, 0xfa, 0xe6, 0x38, 0x77, 0x19, 0x91, 0x6e, 0xd8, 0x8e, 0x8d, 0xaa, 0x32, 0xb2, 0x8b, 0xf9, 0x4f, 0x3, 0xe, 0xc0, 0xca, 0x5e, 0x5d, 0xec, 0x37, 0x1d, 0xc3}}
	return a, nil
}
@@ -117,7 +117,7 @@ func ConfigCliFleetEthProdJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/cli/fleet-eth.prod.json", size: 4925, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "../config/cli/fleet-eth.prod.json", size: 4925, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x97, 0x5a, 0x72, 0x64, 0x6b, 0xee, 0xe6, 0x97, 0xda, 0xe8, 0x53, 0x7c, 0x33, 0x25, 0x27, 0x55, 0xa8, 0xe0, 0x9a, 0xc2, 0x16, 0xcf, 0xc0, 0x91, 0x8a, 0xbc, 0x98, 0x5, 0xe5, 0x63, 0x83, 0x77}}
	return a, nil
}
@@ -137,7 +137,7 @@ func ConfigCliFleetEthStagingJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/cli/fleet-eth.staging.json", size: 2307, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "../config/cli/fleet-eth.staging.json", size: 2307, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x55, 0x35, 0x5e, 0xf5, 0x90, 0x37, 0x0, 0xe4, 0x45, 0x8c, 0x19, 0x77, 0x14, 0xa0, 0x94, 0x1c, 0x9b, 0x78, 0xa7, 0x2e, 0x58, 0x45, 0x1b, 0xba, 0xf3, 0xfb, 0x62, 0x87, 0x97, 0xf8, 0x96}}
	return a, nil
}
@@ -157,7 +157,7 @@ func ConfigCliFleetEthTestJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/cli/fleet-eth.test.json", size: 2315, mode: os.FileMode(0644), modTime: time.Unix(1611588835, 0)}
+	info := bindataFileInfo{name: "../config/cli/fleet-eth.test.json", size: 2315, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7, 0x92, 0x14, 0x26, 0xa6, 0x12, 0xb1, 0x0, 0x2d, 0xbe, 0x9a, 0xb7, 0x55, 0xdb, 0xfc, 0x1f, 0x75, 0xe, 0xc2, 0x2b, 0xad, 0xfe, 0x44, 0x78, 0x2e, 0x7f, 0x8, 0x7f, 0xd1, 0x9e, 0x11, 0x2c}}
	return a, nil
}
@@ -177,7 +177,7 @@ func ConfigCliLesEnabledJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/cli/les-enabled.json", size: 58, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "../config/cli/les-enabled.json", size: 58, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0xee, 0x27, 0xa7, 0x74, 0xa0, 0x46, 0xa1, 0x41, 0xed, 0x4d, 0x16, 0x5b, 0xf3, 0xf0, 0x7c, 0xc8, 0x2f, 0x6f, 0x47, 0xa4, 0xbb, 0x5f, 0x43, 0x33, 0xd, 0x9, 0x9d, 0xea, 0x9e, 0x15, 0xee}}
	return a, nil
}
@@ -197,7 +197,7 @@ func ConfigCliMailserverEnabledJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/cli/mailserver-enabled.json", size: 176, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "../config/cli/mailserver-enabled.json", size: 176, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0xec, 0x81, 0x8b, 0x99, 0xb6, 0xdb, 0xc0, 0x8b, 0x46, 0x97, 0x96, 0xc7, 0x58, 0x30, 0x33, 0xef, 0x54, 0x25, 0x87, 0x7b, 0xb9, 0x94, 0x6b, 0x18, 0xa4, 0x5b, 0x58, 0x67, 0x7c, 0x44, 0xa6}}
	return a, nil
}
@@ -217,7 +217,7 @@ func ConfigStatusChainGenesisJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "../config/status-chain-genesis.json", size: 612, mode: os.FileMode(0644), modTime: time.Unix(1599559876, 0)}
+	info := bindataFileInfo{name: "../config/status-chain-genesis.json", size: 612, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb, 0xf0, 0xc, 0x1, 0x95, 0x65, 0x6, 0x55, 0x48, 0x8f, 0x83, 0xa0, 0xb4, 0x81, 0xda, 0xad, 0x30, 0x6d, 0xb2, 0x78, 0x1b, 0x26, 0x4, 0x13, 0x12, 0x9, 0x6, 0xae, 0x3a, 0x2c, 0x1, 0x71}}
	return a, nil
}
@@ -237,7 +237,7 @@ func keysBootnodeKey() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/bootnode.key", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/bootnode.key", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x31, 0xcf, 0x27, 0xd4, 0x96, 0x2e, 0x32, 0xcd, 0x58, 0x96, 0x2a, 0xe5, 0x8c, 0xa0, 0xf1, 0x73, 0x1f, 0xd6, 0xd6, 0x8b, 0xb, 0x73, 0xd3, 0x2c, 0x84, 0x1a, 0x56, 0xa4, 0x74, 0xb6, 0x95, 0x20}}
	return a, nil
}
@@ -257,7 +257,7 @@ func keysFirebaseauthkey() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/firebaseauthkey", size: 153, mode: os.FileMode(0644), modTime: time.Unix(1536843582, 0)}
+	info := bindataFileInfo{name: "keys/firebaseauthkey", size: 153, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe, 0x69, 0x23, 0x64, 0x7d, 0xf9, 0x14, 0x37, 0x6f, 0x2b, 0x1, 0xf0, 0xb0, 0xa4, 0xb2, 0xd0, 0x18, 0xcd, 0xf9, 0xeb, 0x57, 0xa3, 0xfd, 0x79, 0x25, 0xa7, 0x9c, 0x3, 0xce, 0x26, 0xec, 0xe1}}
	return a, nil
}
@@ -277,7 +277,7 @@ func keysTestAccount1StatusChainPk() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/test-account1-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/test-account1-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xba, 0x35, 0x1, 0x2b, 0x9d, 0xad, 0xf0, 0x2d, 0x3c, 0x4d, 0x6, 0xb5, 0x22, 0x2, 0x47, 0xd4, 0x1c, 0xf4, 0x31, 0x2f, 0xb, 0x5b, 0x27, 0x5d, 0x43, 0x97, 0x58, 0x2d, 0xf0, 0xe1, 0xbe}}
	return a, nil
}
@@ -297,7 +297,7 @@ func keysTestAccount1Pk() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/test-account1.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/test-account1.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0x43, 0xc2, 0xf4, 0x8c, 0xc6, 0x64, 0x25, 0x8c, 0x7, 0x8c, 0xa8, 0x89, 0x2b, 0x7b, 0x9b, 0x4f, 0x81, 0xcb, 0xce, 0x3d, 0xef, 0x82, 0x9c, 0x27, 0x27, 0xa9, 0xc5, 0x46, 0x70, 0x30, 0x38}}
	return a, nil
}
@@ -317,7 +317,7 @@ func keysTestAccount2StatusChainPk() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/test-account2-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/test-account2-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0xf8, 0x5c, 0xe9, 0x92, 0x96, 0x2d, 0x88, 0x2b, 0x8e, 0x42, 0x3f, 0xa4, 0x93, 0x6c, 0xad, 0xe9, 0xc0, 0x1b, 0x8a, 0x8, 0x8c, 0x5e, 0x7a, 0x84, 0xa2, 0xf, 0x9f, 0x77, 0x58, 0x2c, 0x2c}}
	return a, nil
}
@@ -337,7 +337,7 @@ func keysTestAccount2Pk() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/test-account2.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/test-account2.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9f, 0x72, 0xd5, 0x95, 0x5c, 0x5a, 0x99, 0x9d, 0x2f, 0x21, 0x83, 0xd7, 0x10, 0x17, 0x4a, 0x3d, 0x65, 0xc9, 0x26, 0x1a, 0x2c, 0x9d, 0x65, 0x63, 0xd2, 0xa0, 0xfc, 0x7c, 0x0, 0x87, 0x38, 0x9f}}
	return a, nil
}
@@ -357,7 +357,7 @@ func keysTestAccount3BeforeEip55Pk() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "keys/test-account3-before-eip55.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "keys/test-account3-before-eip55.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0x40, 0x56, 0xc1, 0x5e, 0x10, 0x6e, 0x28, 0x15, 0x3, 0x4e, 0xc4, 0xc4, 0x71, 0x4d, 0x16, 0x99, 0xcc, 0x1b, 0x63, 0xee, 0x10, 0x20, 0xe4, 0x59, 0x52, 0x3f, 0xc0, 0xad, 0x15, 0x13, 0x72}}
	return a, nil
}
@@ -86,7 +86,7 @@ func configPublicChainAccountsJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "config/public-chain-accounts.json", size: 307, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "config/public-chain-accounts.json", size: 307, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0x5d, 0xc0, 0xfe, 0x57, 0x50, 0x18, 0xec, 0x2d, 0x61, 0x1b, 0xa9, 0x81, 0x11, 0x5f, 0x77, 0xf7, 0xb6, 0x67, 0x82, 0x1, 0x40, 0x68, 0x9d, 0xc5, 0x41, 0xaf, 0xce, 0x43, 0x81, 0x92, 0x96}}
	return a, nil
}
@@ -106,7 +106,7 @@ func configStatusChainAccountsJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "config/status-chain-accounts.json", size: 543, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "config/status-chain-accounts.json", size: 543, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8e, 0xb3, 0x61, 0x51, 0x70, 0x3c, 0x12, 0x3e, 0xf1, 0x1c, 0x81, 0xfb, 0x9a, 0x7c, 0xe3, 0x63, 0xd0, 0x8f, 0x12, 0xc5, 0x2d, 0xf4, 0xea, 0x27, 0x33, 0xef, 0xca, 0xf9, 0x3f, 0x72, 0x44, 0xbf}}
	return a, nil
}
@@ -126,7 +126,7 @@ func configTestDataJson() (*asset, error) {
		return nil, err
	}
-	info := bindataFileInfo{name: "config/test-data.json", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1599559877, 0)}
+	info := bindataFileInfo{name: "config/test-data.json", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1610969514, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xce, 0x9d, 0x80, 0xf5, 0x87, 0xfa, 0x57, 0x1d, 0xa1, 0xd5, 0x7a, 0x10, 0x3, 0xac, 0xd7, 0xf4, 0x64, 0x32, 0x96, 0x2b, 0xb7, 0x21, 0xb7, 0xa6, 0x80, 0x40, 0xe9, 0x65, 0xe3, 0xd6, 0xbd, 0x40}}
	return a, nil
}
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/xeipuuv/gojsonpointer/README.md generated vendored Normal file
@ -0,0 +1,41 @@
# gojsonpointer
An implementation of JSON Pointer - Go language
## Usage
    jsonText := `{
        "name": "Bobby B",
        "occupation": {
            "title" : "King",
            "years" : 15,
            "heir" : "Joffrey B"
        }
    }`

    var jsonDocument map[string]interface{}
    json.Unmarshal([]byte(jsonText), &jsonDocument)

    // create a JSON pointer
    pointerString := "/occupation/title"
    pointer, _ := NewJsonPointer(pointerString)

    // SET a new value for the "title" in the document
    pointer.Set(jsonDocument, "Supreme Leader of Westeros")

    // GET the new "title" from the document
    title, _, _ := pointer.Get(jsonDocument)
    fmt.Println(title) // outputs "Supreme Leader of Westeros"

    // DELETE the "heir" from the document (NewJsonPointer returns a pointer and an error)
    deletePointer, _ := NewJsonPointer("/occupation/heir")
    deletePointer.Delete(jsonDocument)

    b, _ := json.Marshal(jsonDocument)
    fmt.Println(string(b))
    // outputs `{"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}`
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
### Note
Section 4 ("Evaluation") of the reference above, starting with "If the currently referenced value is a JSON array, the reference token MUST contain either...", is not implemented.
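Read as a complete program (package clause, imports, and the error returns the abridged Usage snippet drops), the example above looks roughly like this; the API matches the pointer.go source that follows.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	jsonText := `{"name": "Bobby B", "occupation": {"title": "King", "years": 15, "heir": "Joffrey B"}}`

	var jsonDocument map[string]interface{}
	if err := json.Unmarshal([]byte(jsonText), &jsonDocument); err != nil {
		panic(err)
	}

	// SET a new value for the "title" in the document.
	pointer, err := gojsonpointer.NewJsonPointer("/occupation/title")
	if err != nil {
		panic(err)
	}
	if _, err := pointer.Set(jsonDocument, "Supreme Leader of Westeros"); err != nil {
		panic(err)
	}

	// GET the new "title" back; Get also reports the node's reflect.Kind.
	title, _, err := pointer.Get(jsonDocument)
	if err != nil {
		panic(err)
	}
	fmt.Println(title) // Supreme Leader of Westeros

	// DELETE the "heir" key.
	deletePointer, err := gojsonpointer.NewJsonPointer("/occupation/heir")
	if err != nil {
		panic(err)
	}
	if _, err := deletePointer.Delete(jsonDocument); err != nil {
		panic(err)
	}

	b, _ := json.Marshal(jsonDocument)
	fmt.Println(string(b)) // {"name":"Bobby B","occupation":{"title":"Supreme Leader of Westeros","years":15}}
}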
vendor/github.com/xeipuuv/gojsonpointer/pointer.go generated vendored Normal file
@ -0,0 +1,211 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonpointer
// repository-desc An implementation of JSON Pointer - Go language
//
// description Main and unique file.
//
// created 25-02-2013
package gojsonpointer
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
const (
const_empty_pointer = ``
const_pointer_separator = `/`
const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
)
type implStruct struct {
mode string // "SET" or "GET"
inDocument interface{}
setInValue interface{}
getOutNode interface{}
getOutKind reflect.Kind
outError error
}
type JsonPointer struct {
referenceTokens []string
}
// NewJsonPointer parses the given string JSON pointer and returns an object
func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {
// Pointer to the root of the document
if len(jsonPointerString) == 0 {
// Keep referenceTokens nil
return
}
if jsonPointerString[0] != '/' {
return p, errors.New(const_invalid_start)
}
p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
return
}
// Uses the pointer to retrieve a value from a JSON document
func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
is := &implStruct{mode: "GET", inDocument: document}
p.implementation(is)
return is.getOutNode, is.getOutKind, is.outError
}
// Uses the pointer to update a value from a JSON document
func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
p.implementation(is)
return document, is.outError
}
// Uses the pointer to delete a value from a JSON document
func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
is := &implStruct{mode: "DEL", inDocument: document}
p.implementation(is)
return document, is.outError
}
// Both Get and Set functions use the same implementation to avoid code duplication
func (p *JsonPointer) implementation(i *implStruct) {
kind := reflect.Invalid
// Full document when empty
if len(p.referenceTokens) == 0 {
i.getOutNode = i.inDocument
i.outError = nil
i.getOutKind = kind
i.outError = nil
return
}
node := i.inDocument
previousNodes := make([]interface{}, len(p.referenceTokens))
previousTokens := make([]string, len(p.referenceTokens))
for ti, token := range p.referenceTokens {
isLastToken := ti == len(p.referenceTokens)-1
previousNodes[ti] = node
previousTokens[ti] = token
switch v := node.(type) {
case map[string]interface{}:
decodedToken := decodeReferenceToken(token)
if _, ok := v[decodedToken]; ok {
node = v[decodedToken]
if isLastToken && i.mode == "SET" {
v[decodedToken] = i.setInValue
} else if isLastToken && i.mode =="DEL" {
delete(v,decodedToken)
}
} else if (isLastToken && i.mode == "SET") {
v[decodedToken] = i.setInValue
} else {
i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
i.getOutKind = reflect.Map
i.getOutNode = nil
return
}
case []interface{}:
tokenIndex, err := strconv.Atoi(token)
if err != nil {
i.outError = fmt.Errorf("Invalid array index '%s'", token)
i.getOutKind = reflect.Slice
i.getOutNode = nil
return
}
if tokenIndex < 0 || tokenIndex >= len(v) {
i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex)
i.getOutKind = reflect.Slice
i.getOutNode = nil
return
}
node = v[tokenIndex]
if isLastToken && i.mode == "SET" {
v[tokenIndex] = i.setInValue
} else if isLastToken && i.mode =="DEL" {
v[tokenIndex] = v[len(v)-1]
v[len(v)-1] = nil
v = v[:len(v)-1]
previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
}
default:
i.outError = fmt.Errorf("Invalid token reference '%s'", token)
i.getOutKind = reflect.ValueOf(node).Kind()
i.getOutNode = nil
return
}
}
i.getOutNode = node
i.getOutKind = reflect.ValueOf(node).Kind()
i.outError = nil
}
// Pointer to string representation function
func (p *JsonPointer) String() string {
if len(p.referenceTokens) == 0 {
return const_empty_pointer
}
pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
return pointerString
}
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
// ... and vice versa
func decodeReferenceToken(token string) string {
step1 := strings.Replace(token, `~1`, `/`, -1)
step2 := strings.Replace(step1, `~0`, `~`, -1)
return step2
}
func encodeReferenceToken(token string) string {
step1 := strings.Replace(token, `~`, `~0`, -1)
step2 := strings.Replace(step1, `/`, `~1`, -1)
return step2
}
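A small companion illustration (not part of the vendored package) for the decodeReferenceToken/encodeReferenceToken pair above: keys containing "/" or "~" must be escaped as "~1" and "~0" in the pointer string.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	// The key "a/b~c" is addressed as "/a~1b~0c": "~1" decodes to "/", "~0" to "~".
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(`{"a/b~c": 42}`), &doc); err != nil {
		panic(err)
	}

	pointer, err := gojsonpointer.NewJsonPointer("/a~1b~0c")
	if err != nil {
		panic(err)
	}
	value, kind, err := pointer.Get(doc)
	fmt.Println(value, kind, err) // 42 float64 <nil>
}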
10
vendor/github.com/xeipuuv/gojsonreference/README.md generated vendored Normal file
View File
@ -0,0 +1,10 @@
# gojsonreference
An implementation of JSON Reference - Go language
## Dependencies
https://github.com/xeipuuv/gojsonpointer
## References
http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
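## Example
A minimal usage sketch of the API implemented in `reference.go` (the URLs are placeholders):
```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonreference"
)

func main() {
	// A reference made of a document URL plus a JSON Pointer fragment.
	ref, err := gojsonreference.NewJsonReference("http://example.com/schema.json#/definitions/address")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.GetUrl().Host)         // example.com
	fmt.Println(ref.GetPointer().String()) // /definitions/address
	fmt.Println(ref.IsCanonical())         // true: full URL with scheme and host

	// Resolve a relative child reference against the parent document.
	child, err := gojsonreference.NewJsonReference("other.json#/foo")
	if err != nil {
		panic(err)
	}
	resolved, err := ref.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // http://example.com/other.json#/foo
}
```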
147
vendor/github.com/xeipuuv/gojsonreference/reference.go generated vendored Normal file
View File
@ -0,0 +1,147 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonreference
// repository-desc An implementation of JSON Reference - Go language
//
// description Main and unique file.
//
// created 26-02-2013
package gojsonreference
import (
"errors"
"net/url"
"path/filepath"
"runtime"
"strings"
"github.com/xeipuuv/gojsonpointer"
)
const (
const_fragment_char = `#`
)
func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
var r JsonReference
err := r.parse(jsonReferenceString)
return r, err
}
type JsonReference struct {
referenceUrl *url.URL
referencePointer gojsonpointer.JsonPointer
HasFullUrl bool
HasUrlPathOnly bool
HasFragmentOnly bool
HasFileScheme bool
HasFullFilePath bool
}
func (r *JsonReference) GetUrl() *url.URL {
return r.referenceUrl
}
func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
return &r.referencePointer
}
func (r *JsonReference) String() string {
if r.referenceUrl != nil {
return r.referenceUrl.String()
}
if r.HasFragmentOnly {
return const_fragment_char + r.referencePointer.String()
}
return r.referencePointer.String()
}
func (r *JsonReference) IsCanonical() bool {
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
}
// "Constructor", parses the given string JSON reference
func (r *JsonReference) parse(jsonReferenceString string) (err error) {
r.referenceUrl, err = url.Parse(jsonReferenceString)
if err != nil {
return
}
refUrl := r.referenceUrl
if refUrl.Scheme != "" && refUrl.Host != "" {
r.HasFullUrl = true
} else {
if refUrl.Path != "" {
r.HasUrlPathOnly = true
} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
r.HasFragmentOnly = true
}
}
r.HasFileScheme = refUrl.Scheme == "file"
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, and if it
// doesn't then its first component will be treated as the host by the
// Go runtime
if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
}
} else {
r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
}
// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
return
}
// Creates a new reference from a parent and a child
// If the child cannot inherit from the parent, an error is returned
func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
if child.GetUrl() == nil {
return nil, errors.New("childUrl is nil!")
}
if r.GetUrl() == nil {
return nil, errors.New("parentUrl is nil!")
}
// Get a copy of the parent url to make sure we do not modify the original.
// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
// The fragment of the child must be used, so the fragment of the parent is manually removed.
parentUrl := *r.GetUrl()
parentUrl.Fragment = ""
ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String())
if err != nil {
return nil, err
}
return &ref, err
}
3
vendor/github.com/xeipuuv/gojsonschema/.gitignore generated vendored Normal file
View File
@ -0,0 +1,3 @@
*.sw[nop]
*.iml
.vscode/
9
vendor/github.com/xeipuuv/gojsonschema/.travis.yml generated vendored Normal file
View File
@ -0,0 +1,9 @@
language: go
go:
- "1.11"
- "1.12"
- "1.13"
before_install:
- go get github.com/xeipuuv/gojsonreference
- go get github.com/xeipuuv/gojsonpointer
- go get github.com/stretchr/testify/assert
View File
@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 xeipuuv
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
466
vendor/github.com/xeipuuv/gojsonschema/README.md generated vendored Normal file
View File
@ -0,0 +1,466 @@
[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema)
[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
[![Go Report Card](https://goreportcard.com/badge/github.com/xeipuuv/gojsonschema)](https://goreportcard.com/report/github.com/xeipuuv/gojsonschema)
# gojsonschema
## Description
An implementation of JSON Schema for the Go programming language. Supports draft-04, draft-06 and draft-07.
References :
* http://json-schema.org
* http://json-schema.org/latest/json-schema-core.html
* http://json-schema.org/latest/json-schema-validation.html
## Installation
```
go get github.com/xeipuuv/gojsonschema
```
Dependencies :
* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
## Usage
### Example
```go
package main
import (
"fmt"
"github.com/xeipuuv/gojsonschema"
)
func main() {
schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
panic(err.Error())
}
if result.Valid() {
fmt.Printf("The document is valid\n")
} else {
fmt.Printf("The document is not valid. see errors :\n")
for _, desc := range result.Errors() {
fmt.Printf("- %s\n", desc)
}
}
}
```
#### Loaders
There are various ways to load your JSON data.
In order to load your schemas and documents,
first declare an appropriate loader :
* Web / HTTP, using a reference :
```go
loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
```
* Local file, using a reference :
```go
loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
```
References use the URI scheme; the `file://` prefix and a full path to the file are required.
* JSON strings :
```go
loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
```
* Custom Go types :
```go
m := map[string]interface{}{"type": "string"}
loader := gojsonschema.NewGoLoader(m)
```
And
```go
type Root struct {
Users []User `json:"users"`
}
type User struct {
Name string `json:"name"`
}
...
data := Root{}
data.Users = append(data.Users, User{"John"})
data.Users = append(data.Users, User{"Sophia"})
data.Users = append(data.Users, User{"Bill"})
loader := gojsonschema.NewGoLoader(data)
```
#### Validation
Once the loaders are set, validation is easy :
```go
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
```
Alternatively, you might want to load a schema only once and reuse it for multiple validations :
```go
schema, err := gojsonschema.NewSchema(schemaLoader)
...
result1, err := schema.Validate(documentLoader1)
...
result2, err := schema.Validate(documentLoader2)
...
// etc ...
```
To check the result :
```go
if result.Valid() {
fmt.Printf("The document is valid\n")
} else {
fmt.Printf("The document is not valid. see errors :\n")
for _, err := range result.Errors() {
// Err implements the ResultError interface
fmt.Printf("- %s\n", err)
}
}
```
## Loading local schemas
By default `file` and `http(s)` references to external schemas are loaded automatically via the file system or via http(s). An external schema can also be loaded using a `SchemaLoader`.
```go
sl := gojsonschema.NewSchemaLoader()
loader1 := gojsonschema.NewStringLoader(`{ "type" : "string" }`)
err := sl.AddSchema("http://some_host.com/string.json", loader1)
```
Alternatively if your schema already has an `$id` you can use the `AddSchemas` function
```go
loader2 := gojsonschema.NewStringLoader(`{
"$id" : "http://some_host.com/maxlength.json",
"maxLength" : 5
}`)
err = sl.AddSchemas(loader2)
```
The main schema should be passed to the `Compile` function. This main schema can then directly reference the added schemas without needing to download them.
```go
loader3 := gojsonschema.NewStringLoader(`{
"$id" : "http://some_host.com/main.json",
"allOf" : [
{ "$ref" : "http://some_host.com/string.json" },
{ "$ref" : "http://some_host.com/maxlength.json" }
]
}`)
schema, err := sl.Compile(loader3)
documentLoader := gojsonschema.NewStringLoader(`"hello world"`)
result, err := schema.Validate(documentLoader)
```
It's also possible to pass a `ReferenceLoader` to the `Compile` function that references a loaded schema.
```go
err = sl.AddSchemas(loader3)
schema, err := sl.Compile(gojsonschema.NewReferenceLoader("http://some_host.com/main.json"))
```
Schemas added by `AddSchema` and `AddSchemas` are only validated when the entire schema is compiled, unless meta-schema validation is used.
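As an illustration of this deferred validation, a sketch (host and file names are placeholders): a malformed schema is accepted by `AddSchema` and the error only surfaces at `Compile`.
```go
sl := gojsonschema.NewSchemaLoader()

// Valid JSON, but not a valid schema: "type" must be a string or an array of strings.
broken := gojsonschema.NewStringLoader(`{ "type" : 123 }`)
err := sl.AddSchema("http://some_host.com/broken.json", broken)
// typically nil: the added schema has not been validated yet

// The error is reported once a schema referencing it is compiled.
root := gojsonschema.NewStringLoader(`{ "$ref" : "http://some_host.com/broken.json" }`)
_, err = sl.Compile(root)
fmt.Println(err) // non-nil: the referenced schema is invalid
```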
## Using a specific draft
By default `gojsonschema` will try to detect the draft of a schema by using the `$schema` keyword and parse it in a strict draft-04, draft-06 or draft-07 mode. If `$schema` is missing, or the draft version is not explicitly set, a hybrid mode is used which merges together functionality of all drafts into one mode.
Autodetection can be turned off with the `AutoDetect` property. Specific draft versions can be specified with the `Draft` property.
```go
sl := gojsonschema.NewSchemaLoader()
sl.Draft = gojsonschema.Draft7
sl.AutoDetect = false
```
If autodetection is on (default), a draft-07 schema can safely reference draft-04 schemas and vice versa, as long as `$schema` is specified in all schemas.
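A sketch of that cross-draft case (host and file names are placeholders): with `AutoDetect` left at its default, each schema is parsed according to its own `$schema`, so a draft-07 schema can reference a draft-04 one.
```go
sl := gojsonschema.NewSchemaLoader() // AutoDetect is true by default

// A draft-04 schema, registered under an explicit URL.
length := gojsonschema.NewStringLoader(`{
    "$schema" : "http://json-schema.org/draft-04/schema#",
    "type" : "string",
    "maxLength" : 5
}`)
err := sl.AddSchema("http://some_host.com/length.json", length)

// A draft-07 schema referencing the draft-04 schema above.
root := gojsonschema.NewStringLoader(`{
    "$schema" : "http://json-schema.org/draft-07/schema#",
    "allOf" : [ { "$ref" : "http://some_host.com/length.json" } ]
}`)
schema, err := sl.Compile(root)
result, err := schema.Validate(gojsonschema.NewStringLoader(`"hello"`))
```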
## Meta-schema validation
Schemas that are added using the `AddSchema`, `AddSchemas` and `Compile` can be validated against their meta-schema by setting the `Validate` property.
The following example will produce an error as `multipleOf` must be a number. If `Validate` is off (default), this error is only returned at the `Compile` step.
```go
sl := gojsonschema.NewSchemaLoader()
sl.Validate = true
err := sl.AddSchemas(gojsonschema.NewStringLoader(`{
$id" : "http://some_host.com/invalid.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"multipleOf" : true
}`))
```
Errors returned by meta-schema validation are more readable and contain more information, which helps significantly if you are developing a schema.
Meta-schema validation also works with a custom `$schema`. In case `$schema` is missing, or `AutoDetect` is set to `false`, the meta-schema of the used draft is used.
## Working with Errors
The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it
```go
gojsonschema.Locale = YourCustomLocale{}
```
However, each error contains additional contextual information.
Newer versions of `gojsonschema` may have new additional errors, so code that uses a custom locale will need to be updated when this happens.
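A sketch of one way to keep such a custom locale small, assuming you embed the exported `DefaultLocale` so that every message you do not override keeps its default:
```go
type MyLocale struct {
	gojsonschema.DefaultLocale // fall back to the built-in messages
}

// Override a single message; the value is a text/template, see the details section below.
func (l MyLocale) Required() string {
	return `{{.property}} is mandatory`
}

func init() {
	gojsonschema.Locale = MyLocale{}
}
```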
**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. See below
Note: An error of type RequiredError has an err.Type() return value of "required"
"required": RequiredError
"invalid_type": InvalidTypeError
"number_any_of": NumberAnyOfError
"number_one_of": NumberOneOfError
"number_all_of": NumberAllOfError
"number_not": NumberNotError
"missing_dependency": MissingDependencyError
"internal": InternalError
"const": ConstEror
"enum": EnumError
"array_no_additional_items": ArrayNoAdditionalItemsError
"array_min_items": ArrayMinItemsError
"array_max_items": ArrayMaxItemsError
"unique": ItemsMustBeUniqueError
"contains" : ArrayContainsError
"array_min_properties": ArrayMinPropertiesError
"array_max_properties": ArrayMaxPropertiesError
"additional_property_not_allowed": AdditionalPropertyNotAllowedError
"invalid_property_pattern": InvalidPropertyPatternError
"invalid_property_name": InvalidPropertyNameError
"string_gte": StringLengthGTEError
"string_lte": StringLengthLTEError
"pattern": DoesNotMatchPatternError
"multiple_of": MultipleOfError
"number_gte": NumberGTEError
"number_gt": NumberGTError
"number_lte": NumberLTEError
"number_lt": NumberLTError
"condition_then" : ConditionThenError
"condition_else" : ConditionElseError
**err.Value()**: *interface{}* Returns the value given
**err.Context()**: *gojsonschema.JsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation.
**err.DescriptionFormat()**: *string* The error description format. This is relevant if you are adding custom validation errors afterwards to the result.
**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
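Putting these accessors together, a short sketch of inspecting the errors of a `Result` (field names are illustrative):
```go
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err)
}
for _, e := range result.Errors() {
	fmt.Println(e.Type())        // e.g. "required"
	fmt.Println(e.Field())       // e.g. "person.firstName"
	fmt.Println(e.Description()) // rendered, locale-based message

	// Details() exposes the raw replacement values.
	if e.Type() == "number_gte" {
		fmt.Println("minimum was", e.Details()["min"])
	}

	// Type checks work as well, since errors are concrete types.
	if _, ok := e.(*gojsonschema.RequiredError); ok {
		fmt.Println("missing property:", e.Details()["property"])
	}
}
```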
Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e.
```
{{.field}} must be greater than or equal to {{.min}}
```
The library allows you to specify custom template functions, should you require more complex error message handling.
```go
gojsonschema.ErrorTemplateFuncs = map[string]interface{}{
"allcaps": func(s string) string {
return strings.ToUpper(s)
},
}
```
Given the above definition, you can use the custom function `"allcaps"` in your localization templates:
```
{{allcaps .field}} must be greater than or equal to {{.min}}
```
The above error message would then be rendered with the `field` value in capital letters. For example:
```
"PASSWORD must be greater than or equal to 8"
```
Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type.
## Formats
JSON Schema allows for an optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this:
````json
{"type": "string", "format": "email"}
````
Not all formats defined in draft-07 are available. Implemented formats are:
* `date`
* `time`
* `date-time`
* `hostname`. Subdomains that start with a number are also supported, but this means that it doesn't strictly follow [RFC1034](http://tools.ietf.org/html/rfc1034#section-3.5) and has the implication that ipv4 addresses are also recognized as valid hostnames.
* `email`. Go's email parser deviates slightly from [RFC5322](https://tools.ietf.org/html/rfc5322). Includes unicode support.
* `idn-email`. Same caveat as `email`.
* `ipv4`
* `ipv6`
* `uri`. Includes unicode support.
* `uri-reference`. Includes unicode support.
* `iri`
* `iri-reference`
* `uri-template`
* `uuid`
* `regex`. Go uses the [RE2](https://github.com/google/re2/wiki/Syntax) engine and is not [ECMA262](http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf) compatible.
* `json-pointer`
* `relative-json-pointer`
`email`, `uri` and `uri-reference` use the same validation code as their unicode counterparts `idn-email`, `iri` and `iri-reference`. If you rely on unicode support you should use the specific
unicode enabled formats for the sake of interoperability as other implementations might not support unicode in the regular formats.
The validation code for `uri`, `idn-email` and their relatives use mostly standard library code.
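For instance, a short end-to-end sketch using the built-in `email` format (schema and document are made up):
```go
schemaLoader := gojsonschema.NewStringLoader(`{ "type" : "string", "format" : "email" }`)
documentLoader := gojsonschema.NewStringLoader(`"not-an-email"`)

result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
	panic(err)
}
if !result.Valid() {
	for _, desc := range result.Errors() {
		fmt.Printf("- %s\n", desc) // the string does not match the email format
	}
}
```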
For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
```go
// Define the format checker
type RoleFormatChecker struct {}
// Ensure it meets the gojsonschema.FormatChecker interface
func (f RoleFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
return strings.HasPrefix(asString, "ROLE_")
}
// Add it to the library
gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
```
Now to use it in your json schema:
````json
{"type": "string", "format": "role"}
````
Another example would be to check if the provided integer matches an id in the database:
JSON schema:
```json
{"type": "integer", "format": "ValidUserId"}
```
```go
// Define the format checker
type ValidUserIdFormatChecker struct {}
// Ensure it meets the gojsonschema.FormatChecker interface
func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool {
asFloat64, ok := input.(float64) // Numbers are always float64 here
if !ok {
return false
}
// XXX
// do the magic on the database looking for the int(asFloat64)
return true
}
// Add it to the library
gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{})
```
Formats can also be removed, for example if you want to override one of the formats that is defined by default.
```go
gojsonschema.FormatCheckers.Remove("hostname")
```
## Additional custom validation
After the validation has run and you have the results, you may add additional
errors using `Result.AddError`. This is useful to maintain the same format within the resultset instead
of having to add special exceptions for your own errors. Below is an example.
```go
type AnswerInvalidError struct {
gojsonschema.ResultErrorFields
}
func newAnswerInvalidError(context *gojsonschema.JsonContext, value interface{}, details gojsonschema.ErrorDetails) *AnswerInvalidError {
err := AnswerInvalidError{}
err.SetContext(context)
err.SetType("custom_invalid_error")
// it is important to use SetDescriptionFormat() as this is used to call SetDescription() after it has been parsed
// using the description of err will be overridden by this.
err.SetDescriptionFormat("Answer to the Ultimate Question of Life, the Universe, and Everything is {{.answer}}")
err.SetValue(value)
err.SetDetails(details)
return &err
}
func validate(schemaLoader, documentLoader gojsonschema.JSONLoader) (*gojsonschema.Result, error) {
// ...
schema, err := gojsonschema.NewSchema(schemaLoader)
if err != nil {
return nil, err
}
result, err := schema.Validate(documentLoader)
if true { // some validation
jsonContext := gojsonschema.NewJsonContext("question", nil)
errDetail := gojsonschema.ErrorDetails{
"answer": 42,
}
result.AddError(
newAnswerInvalidError(
gojsonschema.NewJsonContext("answer", jsonContext),
52,
errDetail,
),
errDetail,
)
}
return result, err
}
```
This is especially useful if you want to add validation beyond what the
json schema drafts can provide, such as business-specific logic.
## Uses
gojsonschema uses the following test suite :
https://github.com/json-schema/JSON-Schema-Test-Suite
125
vendor/github.com/xeipuuv/gojsonschema/draft.go generated vendored Normal file
View File
@ -0,0 +1,125 @@
// Copyright 2018 johandorland ( https://github.com/johandorland )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gojsonschema
import (
"errors"
"math"
"reflect"
"github.com/xeipuuv/gojsonreference"
)
// Draft is a JSON-schema draft version
type Draft int
// Supported Draft versions
const (
Draft4 Draft = 4
Draft6 Draft = 6
Draft7 Draft = 7
Hybrid Draft = math.MaxInt32
)
type draftConfig struct {
Version Draft
MetaSchemaURL string
MetaSchema string
}
type draftConfigs []draftConfig
var drafts draftConfigs
func init() {
drafts = []draftConfig{
{
Version: Draft4,
MetaSchemaURL: "http://json-schema.org/draft-04/schema",
MetaSchema: `{"id":"http://json-schema.org/draft-04/schema#","$schema":"http://json-schema.org/draft-04/schema#","description":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"positiveInteger":{"type":"integer","minimum":0},"positiveIntegerDefault0":{"allOf":[{"$ref":"#/definitions/positiveInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"minItems":1,"uniqueItems":true}},"type":"object","properties":{"id":{"type":"string"},"$schema":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"multipleOf":{"type":"number","minimum":0,"exclusiveMinimum":true},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"boolean","default":false},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"boolean","default":false},"maxLength":{"$ref":"#/definitions/positiveInteger"},"minLength":{"$ref":"#/definitions/positiveIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/positiveInteger"},"minItems":{"$ref":"#/definitions/positiveIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"maxProperties":{"$ref":"#/definitions/positiveInteger"},"minProperties":{"$ref":"#/definitions/positiveIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"anyOf":[{"type":"boolean"},{"$ref":"#"}],"default":{}},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"dependencies":{"exclusiveMaximum":["maximum"],"exclusiveMinimum":["minimum"]},"default":{}}`,
},
{
Version: Draft6,
MetaSchemaURL: "http://json-schema.org/draft-06/schema",
MetaSchema: `{"$schema":"http://json-schema.org/draft-06/schema#","$id":"http://json-schema.org/draft-06/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"title":{"type":"string"},"description":{"type":"string"},"default":{},"examples":{"type":"array","items":{}},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":{}},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":{},"enum":{"type":"array","minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":{}}`,
},
{
Version: Draft7,
MetaSchemaURL: "http://json-schema.org/draft-07/schema",
MetaSchema: `{"$schema":"http://json-schema.org/draft-07/schema#","$id":"http://json-schema.org/draft-07/schema#","title":"Core schema meta-schema","definitions":{"schemaArray":{"type":"array","minItems":1,"items":{"$ref":"#"}},"nonNegativeInteger":{"type":"integer","minimum":0},"nonNegativeIntegerDefault0":{"allOf":[{"$ref":"#/definitions/nonNegativeInteger"},{"default":0}]},"simpleTypes":{"enum":["array","boolean","integer","null","number","object","string"]},"stringArray":{"type":"array","items":{"type":"string"},"uniqueItems":true,"default":[]}},"type":["object","boolean"],"properties":{"$id":{"type":"string","format":"uri-reference"},"$schema":{"type":"string","format":"uri"},"$ref":{"type":"string","format":"uri-reference"},"$comment":{"type":"string"},"title":{"type":"string"},"description":{"type":"string"},"default":true,"readOnly":{"type":"boolean","default":false},"examples":{"type":"array","items":true},"multipleOf":{"type":"number","exclusiveMinimum":0},"maximum":{"type":"number"},"exclusiveMaximum":{"type":"number"},"minimum":{"type":"number"},"exclusiveMinimum":{"type":"number"},"maxLength":{"$ref":"#/definitions/nonNegativeInteger"},"minLength":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"pattern":{"type":"string","format":"regex"},"additionalItems":{"$ref":"#"},"items":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/schemaArray"}],"default":true},"maxItems":{"$ref":"#/definitions/nonNegativeInteger"},"minItems":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"uniqueItems":{"type":"boolean","default":false},"contains":{"$ref":"#"},"maxProperties":{"$ref":"#/definitions/nonNegativeInteger"},"minProperties":{"$ref":"#/definitions/nonNegativeIntegerDefault0"},"required":{"$ref":"#/definitions/stringArray"},"additionalProperties":{"$ref":"#"},"definitions":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"properties":{"type":"object","additionalProperties":{"$ref":"#"},"default":{}},"patternProperties":{"type":"object","additionalProperties":{"$ref":"#"},"propertyNames":{"format":"regex"},"default":{}},"dependencies":{"type":"object","additionalProperties":{"anyOf":[{"$ref":"#"},{"$ref":"#/definitions/stringArray"}]}},"propertyNames":{"$ref":"#"},"const":true,"enum":{"type":"array","items":true,"minItems":1,"uniqueItems":true},"type":{"anyOf":[{"$ref":"#/definitions/simpleTypes"},{"type":"array","items":{"$ref":"#/definitions/simpleTypes"},"minItems":1,"uniqueItems":true}]},"format":{"type":"string"},"contentMediaType":{"type":"string"},"contentEncoding":{"type":"string"},"if":{"$ref":"#"},"then":{"$ref":"#"},"else":{"$ref":"#"},"allOf":{"$ref":"#/definitions/schemaArray"},"anyOf":{"$ref":"#/definitions/schemaArray"},"oneOf":{"$ref":"#/definitions/schemaArray"},"not":{"$ref":"#"}},"default":true}`,
},
}
}
func (dc draftConfigs) GetMetaSchema(url string) string {
for _, config := range dc {
if config.MetaSchemaURL == url {
return config.MetaSchema
}
}
return ""
}
func (dc draftConfigs) GetDraftVersion(url string) *Draft {
for _, config := range dc {
if config.MetaSchemaURL == url {
return &config.Version
}
}
return nil
}
func (dc draftConfigs) GetSchemaURL(draft Draft) string {
for _, config := range dc {
if config.Version == draft {
return config.MetaSchemaURL
}
}
return ""
}
func parseSchemaURL(documentNode interface{}) (string, *Draft, error) {
if isKind(documentNode, reflect.Bool) {
return "", nil, nil
}
if !isKind(documentNode, reflect.Map) {
return "", nil, errors.New("schema is invalid")
}
m := documentNode.(map[string]interface{})
if existsMapKey(m, KEY_SCHEMA) {
if !isKind(m[KEY_SCHEMA], reflect.String) {
return "", nil, errors.New(formatErrorDescription(
Locale.MustBeOfType(),
ErrorDetails{
"key": KEY_SCHEMA,
"type": TYPE_STRING,
},
))
}
schemaReference, err := gojsonreference.NewJsonReference(m[KEY_SCHEMA].(string))
if err != nil {
return "", nil, err
}
schema := schemaReference.String()
return schema, drafts.GetDraftVersion(schema), nil
}
return "", nil, nil
}
364
vendor/github.com/xeipuuv/gojsonschema/errors.go generated vendored Normal file
View File
@ -0,0 +1,364 @@
package gojsonschema
import (
"bytes"
"sync"
"text/template"
)
var errorTemplates = errorTemplate{template.New("errors-new"), sync.RWMutex{}}
// template.Template is not thread-safe for writing, so some locking is done
// sync.RWMutex is used for efficiently locking when new templates are created
type errorTemplate struct {
*template.Template
sync.RWMutex
}
type (
// FalseError. ErrorDetails: -
FalseError struct {
ResultErrorFields
}
// RequiredError indicates that a required field is missing
// ErrorDetails: property string
RequiredError struct {
ResultErrorFields
}
// InvalidTypeError indicates that a field has the incorrect type
// ErrorDetails: expected, given
InvalidTypeError struct {
ResultErrorFields
}
// NumberAnyOfError is produced in case of a failing "anyOf" validation
// ErrorDetails: -
NumberAnyOfError struct {
ResultErrorFields
}
// NumberOneOfError is produced in case of a failing "oneOf" validation
// ErrorDetails: -
NumberOneOfError struct {
ResultErrorFields
}
// NumberAllOfError is produced in case of a failing "allOf" validation
// ErrorDetails: -
NumberAllOfError struct {
ResultErrorFields
}
// NumberNotError is produced if a "not" validation failed
// ErrorDetails: -
NumberNotError struct {
ResultErrorFields
}
// MissingDependencyError is produced in case of a "missing dependency" problem
// ErrorDetails: dependency
MissingDependencyError struct {
ResultErrorFields
}
// InternalError indicates an internal error
// ErrorDetails: error
InternalError struct {
ResultErrorFields
}
// ConstError indicates a const error
// ErrorDetails: allowed
ConstError struct {
ResultErrorFields
}
// EnumError indicates an enum error
// ErrorDetails: allowed
EnumError struct {
ResultErrorFields
}
// ArrayNoAdditionalItemsError is produced if additional items were found, but not allowed
// ErrorDetails: -
ArrayNoAdditionalItemsError struct {
ResultErrorFields
}
// ArrayMinItemsError is produced if an array contains less items than the allowed minimum
// ErrorDetails: min
ArrayMinItemsError struct {
ResultErrorFields
}
// ArrayMaxItemsError is produced if an array contains more items than the allowed maximum
// ErrorDetails: max
ArrayMaxItemsError struct {
ResultErrorFields
}
// ItemsMustBeUniqueError is produced if an array requires unique items, but contains non-unique items
// ErrorDetails: type, i, j
ItemsMustBeUniqueError struct {
ResultErrorFields
}
// ArrayContainsError is produced if an array contains invalid items
// ErrorDetails:
ArrayContainsError struct {
ResultErrorFields
}
// ArrayMinPropertiesError is produced if an object contains less properties than the allowed minimum
// ErrorDetails: min
ArrayMinPropertiesError struct {
ResultErrorFields
}
// ArrayMaxPropertiesError is produced if an object contains more properties than the allowed maximum
// ErrorDetails: max
ArrayMaxPropertiesError struct {
ResultErrorFields
}
// AdditionalPropertyNotAllowedError is produced if an object has additional properties, but not allowed
// ErrorDetails: property
AdditionalPropertyNotAllowedError struct {
ResultErrorFields
}
// InvalidPropertyPatternError is produced if a property does not match the defined pattern
// ErrorDetails: property, pattern
InvalidPropertyPatternError struct {
ResultErrorFields
}
// InvalidPropertyNameError is produced if an invalid-named property was found
// ErrorDetails: property
InvalidPropertyNameError struct {
ResultErrorFields
}
// StringLengthGTEError is produced if a string is shorter than the minimum required length
// ErrorDetails: min
StringLengthGTEError struct {
ResultErrorFields
}
// StringLengthLTEError is produced if a string is longer than the maximum allowed length
// ErrorDetails: max
StringLengthLTEError struct {
ResultErrorFields
}
// DoesNotMatchPatternError is produced if a string does not match the defined pattern
// ErrorDetails: pattern
DoesNotMatchPatternError struct {
ResultErrorFields
}
// DoesNotMatchFormatError is produced if a string does not match the defined format
// ErrorDetails: format
DoesNotMatchFormatError struct {
ResultErrorFields
}
// MultipleOfError is produced if a number is not a multiple of the defined multipleOf
// ErrorDetails: multiple
MultipleOfError struct {
ResultErrorFields
}
// NumberGTEError is produced if a number is lower than the allowed minimum
// ErrorDetails: min
NumberGTEError struct {
ResultErrorFields
}
// NumberGTError is produced if a number is lower than, or equal to the specified minimum, and exclusiveMinimum is set
// ErrorDetails: min
NumberGTError struct {
ResultErrorFields
}
// NumberLTEError is produced if a number is higher than the allowed maximum
// ErrorDetails: max
NumberLTEError struct {
ResultErrorFields
}
// NumberLTError is produced if a number is higher than, or equal to the specified maximum, and exclusiveMaximum is set
// ErrorDetails: max
NumberLTError struct {
ResultErrorFields
}
// ConditionThenError is produced if a condition's "then" validation is invalid
// ErrorDetails: -
ConditionThenError struct {
ResultErrorFields
}
// ConditionElseError is produced if a condition's "else" condition is invalid
// ErrorDetails: -
ConditionElseError struct {
ResultErrorFields
}
)
// newError takes a ResultError type and sets the type, context, description, details, value, and field
func newError(err ResultError, context *JsonContext, value interface{}, locale locale, details ErrorDetails) {
var t string
var d string
switch err.(type) {
case *FalseError:
t = "false"
d = locale.False()
case *RequiredError:
t = "required"
d = locale.Required()
case *InvalidTypeError:
t = "invalid_type"
d = locale.InvalidType()
case *NumberAnyOfError:
t = "number_any_of"
d = locale.NumberAnyOf()
case *NumberOneOfError:
t = "number_one_of"
d = locale.NumberOneOf()
case *NumberAllOfError:
t = "number_all_of"
d = locale.NumberAllOf()
case *NumberNotError:
t = "number_not"
d = locale.NumberNot()
case *MissingDependencyError:
t = "missing_dependency"
d = locale.MissingDependency()
case *InternalError:
t = "internal"
d = locale.Internal()
case *ConstError:
t = "const"
d = locale.Const()
case *EnumError:
t = "enum"
d = locale.Enum()
case *ArrayNoAdditionalItemsError:
t = "array_no_additional_items"
d = locale.ArrayNoAdditionalItems()
case *ArrayMinItemsError:
t = "array_min_items"
d = locale.ArrayMinItems()
case *ArrayMaxItemsError:
t = "array_max_items"
d = locale.ArrayMaxItems()
case *ItemsMustBeUniqueError:
t = "unique"
d = locale.Unique()
case *ArrayContainsError:
t = "contains"
d = locale.ArrayContains()
case *ArrayMinPropertiesError:
t = "array_min_properties"
d = locale.ArrayMinProperties()
case *ArrayMaxPropertiesError:
t = "array_max_properties"
d = locale.ArrayMaxProperties()
case *AdditionalPropertyNotAllowedError:
t = "additional_property_not_allowed"
d = locale.AdditionalPropertyNotAllowed()
case *InvalidPropertyPatternError:
t = "invalid_property_pattern"
d = locale.InvalidPropertyPattern()
case *InvalidPropertyNameError:
t = "invalid_property_name"
d = locale.InvalidPropertyName()
case *StringLengthGTEError:
t = "string_gte"
d = locale.StringGTE()
case *StringLengthLTEError:
t = "string_lte"
d = locale.StringLTE()
case *DoesNotMatchPatternError:
t = "pattern"
d = locale.DoesNotMatchPattern()
case *DoesNotMatchFormatError:
t = "format"
d = locale.DoesNotMatchFormat()
case *MultipleOfError:
t = "multiple_of"
d = locale.MultipleOf()
case *NumberGTEError:
t = "number_gte"
d = locale.NumberGTE()
case *NumberGTError:
t = "number_gt"
d = locale.NumberGT()
case *NumberLTEError:
t = "number_lte"
d = locale.NumberLTE()
case *NumberLTError:
t = "number_lt"
d = locale.NumberLT()
case *ConditionThenError:
t = "condition_then"
d = locale.ConditionThen()
case *ConditionElseError:
t = "condition_else"
d = locale.ConditionElse()
}
err.SetType(t)
err.SetContext(context)
err.SetValue(value)
err.SetDetails(details)
err.SetDescriptionFormat(d)
details["field"] = err.Field()
if _, exists := details["context"]; !exists && context != nil {
details["context"] = context.String()
}
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
}
// formatErrorDescription takes a string in the default text/template
// format and converts it to a string with replacements. The fields come
// from the ErrorDetails struct and vary for each type of error.
func formatErrorDescription(s string, details ErrorDetails) string {
var tpl *template.Template
var descrAsBuffer bytes.Buffer
var err error
errorTemplates.RLock()
tpl = errorTemplates.Lookup(s)
errorTemplates.RUnlock()
if tpl == nil {
errorTemplates.Lock()
tpl = errorTemplates.New(s)
if ErrorTemplateFuncs != nil {
tpl.Funcs(ErrorTemplateFuncs)
}
tpl, err = tpl.Parse(s)
errorTemplates.Unlock()
if err != nil {
return err.Error()
}
}
err = tpl.Execute(&descrAsBuffer, details)
if err != nil {
return err.Error()
}
return descrAsBuffer.String()
}
368
vendor/github.com/xeipuuv/gojsonschema/format_checkers.go generated vendored Normal file
View File
@ -0,0 +1,368 @@
package gojsonschema
import (
"net"
"net/mail"
"net/url"
"regexp"
"strings"
"sync"
"time"
)
type (
// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
FormatChecker interface {
// IsFormat checks if input has the correct format and type
IsFormat(input interface{}) bool
}
// FormatCheckerChain holds the formatters
FormatCheckerChain struct {
formatters map[string]FormatChecker
}
// EmailFormatChecker verifies email address formats
EmailFormatChecker struct{}
// IPV4FormatChecker verifies IP addresses in the IPv4 format
IPV4FormatChecker struct{}
// IPV6FormatChecker verifies IP addresses in the IPv6 format
IPV6FormatChecker struct{}
// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
//
// Valid formats:
// Partial Time: HH:MM:SS
// Full Date: YYYY-MM-DD
// Full Time: HH:MM:SSZ-07:00
// Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
//
// Where
// YYYY = 4DIGIT year
// MM = 2DIGIT month ; 01-12
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
// HH = 2DIGIT hour ; 00-23
// MM = 2DIGIT ; 00-59
// SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
// T = Literal
// Z = Literal
//
// Note: Nanoseconds are also supported in all formats
//
// http://tools.ietf.org/html/rfc3339#section-5.6
DateTimeFormatChecker struct{}
// DateFormatChecker verifies date formats
//
// Valid format:
// Full Date: YYYY-MM-DD
//
// Where
// YYYY = 4DIGIT year
// MM = 2DIGIT month ; 01-12
// DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
DateFormatChecker struct{}
// TimeFormatChecker verifies time formats
//
// Valid formats:
// Partial Time: HH:MM:SS
// Full Time: HH:MM:SSZ-07:00
//
// Where
// HH = 2DIGIT hour ; 00-23
// MM = 2DIGIT ; 00-59
// SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
// T = Literal
// Z = Literal
TimeFormatChecker struct{}
// URIFormatChecker validates a URI with a valid Scheme per RFC3986
URIFormatChecker struct{}
// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
URIReferenceFormatChecker struct{}
// URITemplateFormatChecker validates a URI template per RFC6570
URITemplateFormatChecker struct{}
// HostnameFormatChecker validates a hostname is in the correct format
HostnameFormatChecker struct{}
// UUIDFormatChecker validates a UUID is in the correct format
UUIDFormatChecker struct{}
// RegexFormatChecker validates a regex is in the correct format
RegexFormatChecker struct{}
// JSONPointerFormatChecker validates a JSON Pointer per RFC6901
JSONPointerFormatChecker struct{}
// RelativeJSONPointerFormatChecker validates a relative JSON Pointer is in the correct format
RelativeJSONPointerFormatChecker struct{}
)
var (
// FormatCheckers holds the valid formatters, and is a public variable
// so library users can add custom formatters
FormatCheckers = FormatCheckerChain{
formatters: map[string]FormatChecker{
"date": DateFormatChecker{},
"time": TimeFormatChecker{},
"date-time": DateTimeFormatChecker{},
"hostname": HostnameFormatChecker{},
"email": EmailFormatChecker{},
"idn-email": EmailFormatChecker{},
"ipv4": IPV4FormatChecker{},
"ipv6": IPV6FormatChecker{},
"uri": URIFormatChecker{},
"uri-reference": URIReferenceFormatChecker{},
"iri": URIFormatChecker{},
"iri-reference": URIReferenceFormatChecker{},
"uri-template": URITemplateFormatChecker{},
"uuid": UUIDFormatChecker{},
"regex": RegexFormatChecker{},
"json-pointer": JSONPointerFormatChecker{},
"relative-json-pointer": RelativeJSONPointerFormatChecker{},
},
}
// Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname
rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`)
// Use a regex to make sure curly brackets are balanced properly after validating it as a URI
rxURITemplate = regexp.MustCompile("^([^{]*({[^}]*})?)*$")
rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$")
rxJSONPointer = regexp.MustCompile("^(?:/(?:[^~/]|~0|~1)*)*$")
rxRelJSONPointer = regexp.MustCompile("^(?:0|[1-9][0-9]*)(?:#|(?:/(?:[^~/]|~0|~1)*)*)$")
lock = new(sync.RWMutex)
)
// Add adds a FormatChecker to the FormatCheckerChain
// The name used will be the value used for the format key in your json schema
func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain {
lock.Lock()
c.formatters[name] = f
lock.Unlock()
return c
}
// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists)
func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain {
lock.Lock()
delete(c.formatters, name)
lock.Unlock()
return c
}
// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name
func (c *FormatCheckerChain) Has(name string) bool {
lock.RLock()
_, ok := c.formatters[name]
lock.RUnlock()
return ok
}
// IsFormat will check an input against a FormatChecker with the given name
// to see if it is the correct format
func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool {
lock.RLock()
f, ok := c.formatters[name]
lock.RUnlock()
// If a format is unrecognized it should always pass validation
if !ok {
return true
}
return f.IsFormat(input)
}
// IsFormat checks if input is a correctly formatted e-mail address
func (f EmailFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
_, err := mail.ParseAddress(asString)
return err == nil
}
// IsFormat checks if input is a correctly formatted IPv4-address
func (f IPV4FormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
// Credit: https://github.com/asaskevich/govalidator
ip := net.ParseIP(asString)
return ip != nil && strings.Contains(asString, ".")
}
// IsFormat checks if input is a correctly formatted IPv6 address
func (f IPV6FormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
// Credit: https://github.com/asaskevich/govalidator
ip := net.ParseIP(asString)
return ip != nil && strings.Contains(asString, ":")
}
// IsFormat checks if input is a correctly formatted date/time per RFC3339 5.6
func (f DateTimeFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
formats := []string{
"15:04:05",
"15:04:05Z07:00",
"2006-01-02",
time.RFC3339,
time.RFC3339Nano,
}
for _, format := range formats {
if _, err := time.Parse(format, asString); err == nil {
return true
}
}
return false
}
// IsFormat checks if input is a correctly formatted date (YYYY-MM-DD)
func (f DateFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
_, err := time.Parse("2006-01-02", asString)
return err == nil
}
// IsFormat checks if input is a correctly formatted time (HH:MM:SS or HH:MM:SSZ-07:00)
func (f TimeFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
if _, err := time.Parse("15:04:05Z07:00", asString); err == nil {
return true
}
_, err := time.Parse("15:04:05", asString)
return err == nil
}
// IsFormat checks if input is correctly formatted URI with a valid Scheme per RFC3986
func (f URIFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
u, err := url.Parse(asString)
if err != nil || u.Scheme == "" {
return false
}
return !strings.Contains(asString, `\`)
}
// IsFormat checks if input is a correctly formatted URI or relative-reference per RFC3986
func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
_, err := url.Parse(asString)
return err == nil && !strings.Contains(asString, `\`)
}
// IsFormat checks if input is a correctly formatted URI template per RFC6570
func (f URITemplateFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
u, err := url.Parse(asString)
if err != nil || strings.Contains(asString, `\`) {
return false
}
return rxURITemplate.MatchString(u.Path)
}
// IsFormat checks if input is a correctly formatted hostname
func (f HostnameFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
return rxHostname.MatchString(asString) && len(asString) < 256
}
// IsFormat checks if input is a correctly formatted UUID
func (f UUIDFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
return rxUUID.MatchString(asString)
}
// IsFormat checks if input is a correctly formatted regular expression
func (f RegexFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
if asString == "" {
return true
}
_, err := regexp.Compile(asString)
return err == nil
}
// IsFormat checks if input is a correctly formatted JSON Pointer per RFC6901
func (f JSONPointerFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
return rxJSONPointer.MatchString(asString)
}
// IsFormat checks if input is a correctly formatted relative JSON Pointer
func (f RelativeJSONPointerFormatChecker) IsFormat(input interface{}) bool {
asString, ok := input.(string)
if !ok {
return false
}
return rxRelJSONPointer.MatchString(asString)
}

13
vendor/github.com/xeipuuv/gojsonschema/glide.yaml generated vendored Normal file
View File

@ -0,0 +1,13 @@
package: github.com/xeipuuv/gojsonschema
license: Apache 2.0
import:
- package: github.com/xeipuuv/gojsonschema
- package: github.com/xeipuuv/gojsonpointer
- package: github.com/xeipuuv/gojsonreference
testImport:
- package: github.com/stretchr/testify
subpackages:
- assert

7
vendor/github.com/xeipuuv/gojsonschema/go.mod generated vendored Normal file
View File

@ -0,0 +1,7 @@
module github.com/xeipuuv/gojsonschema
require (
github.com/stretchr/testify v1.3.0
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
)

11
vendor/github.com/xeipuuv/gojsonschema/go.sum generated vendored Normal file
View File

@ -0,0 +1,11 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=

37
vendor/github.com/xeipuuv/gojsonschema/internalLog.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Very simple log wrapper.
// Used for debugging/testing purposes.
//
// created 01-01-2015
package gojsonschema
import (
"log"
)
const internalLogEnabled = false
func internalLog(format string, v ...interface{}) {
log.Printf(format, v...)
}

73
vendor/github.com/xeipuuv/gojsonschema/jsonContext.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2013 MongoDB, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author tolsen
// author-github https://github.com/tolsen
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context
//
// created 04-09-2013
package gojsonschema
import "bytes"
// JsonContext implements a persistent linked-list of strings
type JsonContext struct {
head string
tail *JsonContext
}
// NewJsonContext creates a new JsonContext
func NewJsonContext(head string, tail *JsonContext) *JsonContext {
return &JsonContext{head, tail}
}
// String displays the context in reverse.
// This plays well with the data structure's persistent nature with
// Cons and a json document's tree structure.
func (c *JsonContext) String(del ...string) string {
byteArr := make([]byte, 0, c.stringLen())
buf := bytes.NewBuffer(byteArr)
c.writeStringToBuffer(buf, del)
return buf.String()
}
func (c *JsonContext) stringLen() int {
length := 0
if c.tail != nil {
length = c.tail.stringLen() + 1 // add 1 for "."
}
length += len(c.head)
return length
}
func (c *JsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) {
if c.tail != nil {
c.tail.writeStringToBuffer(buf, del)
if len(del) > 0 {
buf.WriteString(del[0])
} else {
buf.WriteString(".")
}
}
buf.WriteString(c.head)
}
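JsonContext is the persistent list used to build the dotted field path reported in validation errors. A small sketch of how it composes, using only the exported constructor and String shown above:
package main
import (
    "fmt"
    "github.com/xeipuuv/gojsonschema"
)
func main() {
    // Build the context root-first; String() walks the tail before the head,
    // so the output reads from the root down to the leaf.
    root := gojsonschema.NewJsonContext("(root)", nil)
    person := gojsonschema.NewJsonContext("person", root)
    name := gojsonschema.NewJsonContext("firstName", person)
    fmt.Println(name.String())    // (root).person.firstName
    fmt.Println(name.String("/")) // (root)/person/firstName
}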

386
vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go generated vendored Normal file
View File

@ -0,0 +1,386 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Different strategies to load JSON files.
// Includes References (file and HTTP), JSON strings and Go types.
//
// created 01-02-2015
package gojsonschema
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/xeipuuv/gojsonreference"
)
var osFS = osFileSystem(os.Open)
// JSONLoader defines the JSON loader interface
type JSONLoader interface {
JsonSource() interface{}
LoadJSON() (interface{}, error)
JsonReference() (gojsonreference.JsonReference, error)
LoaderFactory() JSONLoaderFactory
}
// JSONLoaderFactory defines the JSON loader factory interface
type JSONLoaderFactory interface {
// New creates a new JSON loader for the given source
New(source string) JSONLoader
}
// DefaultJSONLoaderFactory is the default JSON loader factory
type DefaultJSONLoaderFactory struct {
}
// FileSystemJSONLoaderFactory is a JSON loader factory that uses http.FileSystem
type FileSystemJSONLoaderFactory struct {
fs http.FileSystem
}
// New creates a new JSON loader for the given source
func (d DefaultJSONLoaderFactory) New(source string) JSONLoader {
return &jsonReferenceLoader{
fs: osFS,
source: source,
}
}
// New creates a new JSON loader for the given source
func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader {
return &jsonReferenceLoader{
fs: f.fs,
source: source,
}
}
// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem.
type osFileSystem func(string) (*os.File, error)
// Opens a file with the given name
func (o osFileSystem) Open(name string) (http.File, error) {
return o(name)
}
// JSON Reference loader
// references are used to load JSONs from files and HTTP
type jsonReferenceLoader struct {
fs http.FileSystem
source string
}
func (l *jsonReferenceLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference(l.JsonSource().(string))
}
func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory {
return &FileSystemJSONLoaderFactory{
fs: l.fs,
}
}
// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system.
func NewReferenceLoader(source string) JSONLoader {
return &jsonReferenceLoader{
fs: osFS,
source: source,
}
}
// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system.
func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) JSONLoader {
return &jsonReferenceLoader{
fs: fs,
source: source,
}
}
func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) {
var err error
reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string))
if err != nil {
return nil, err
}
refToURL := reference
refToURL.GetUrl().Fragment = ""
var document interface{}
if reference.HasFileScheme {
filename := strings.TrimPrefix(refToURL.String(), "file://")
filename, err = url.QueryUnescape(filename)
if err != nil {
return nil, err
}
if runtime.GOOS == "windows" {
// on Windows, a file URL may have an extra leading slash, use slashes
// instead of backslashes, and have spaces escaped
filename = strings.TrimPrefix(filename, "/")
filename = filepath.FromSlash(filename)
}
document, err = l.loadFromFile(filename)
if err != nil {
return nil, err
}
} else {
document, err = l.loadFromHTTP(refToURL.String())
if err != nil {
return nil, err
}
}
return document, nil
}
func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) {
// return cached versions of the metaschemas for drafts 4, 6 and 7
// for performance and to allow easier offline use
if metaSchema := drafts.GetMetaSchema(address); metaSchema != "" {
return decodeJSONUsingNumber(strings.NewReader(metaSchema))
}
resp, err := http.Get(address)
if err != nil {
return nil, err
}
// must return HTTP Status 200 OK
if resp.StatusCode != http.StatusOK {
return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status}))
}
bodyBuff, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
}
func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) {
f, err := l.fs.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
bodyBuff, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return decodeJSONUsingNumber(bytes.NewReader(bodyBuff))
}
// JSON string loader
type jsonStringLoader struct {
source string
}
func (l *jsonStringLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
// NewStringLoader creates a new JSONLoader, taking a string as source
func NewStringLoader(source string) JSONLoader {
return &jsonStringLoader{source: source}
}
func (l *jsonStringLoader) LoadJSON() (interface{}, error) {
return decodeJSONUsingNumber(strings.NewReader(l.JsonSource().(string)))
}
// JSON bytes loader
type jsonBytesLoader struct {
source []byte
}
func (l *jsonBytesLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
// NewBytesLoader creates a new JSONLoader, taking a `[]byte` as source
func NewBytesLoader(source []byte) JSONLoader {
return &jsonBytesLoader{source: source}
}
func (l *jsonBytesLoader) LoadJSON() (interface{}, error) {
return decodeJSONUsingNumber(bytes.NewReader(l.JsonSource().([]byte)))
}
// JSON Go (types) loader
// used to load JSONs from the code as maps, interface{}, structs ...
type jsonGoLoader struct {
source interface{}
}
func (l *jsonGoLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
// NewGoLoader creates a new JSONLoader from a given Go struct
func NewGoLoader(source interface{}) JSONLoader {
return &jsonGoLoader{source: source}
}
func (l *jsonGoLoader) LoadJSON() (interface{}, error) {
// convert it to compliant JSON first to avoid type mismatches
jsonBytes, err := json.Marshal(l.JsonSource())
if err != nil {
return nil, err
}
return decodeJSONUsingNumber(bytes.NewReader(jsonBytes))
}
type jsonIOLoader struct {
buf *bytes.Buffer
}
// NewReaderLoader creates a new JSON loader using the provided io.Reader
func NewReaderLoader(source io.Reader) (JSONLoader, io.Reader) {
buf := &bytes.Buffer{}
return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf)
}
// NewWriterLoader creates a new JSON loader using the provided io.Writer
func NewWriterLoader(source io.Writer) (JSONLoader, io.Writer) {
buf := &bytes.Buffer{}
return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf)
}
func (l *jsonIOLoader) JsonSource() interface{} {
return l.buf.String()
}
func (l *jsonIOLoader) LoadJSON() (interface{}, error) {
return decodeJSONUsingNumber(l.buf)
}
func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
// JSON raw loader
// In case the JSON is already marshalled to interface{} use this loader
// This is used for testing as otherwise there is no guarantee the JSON is marshalled
// "properly" by using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
type jsonRawLoader struct {
source interface{}
}
// NewRawLoader creates a new JSON raw loader for the given source
func NewRawLoader(source interface{}) JSONLoader {
return &jsonRawLoader{source: source}
}
func (l *jsonRawLoader) JsonSource() interface{} {
return l.source
}
func (l *jsonRawLoader) LoadJSON() (interface{}, error) {
return l.source, nil
}
func (l *jsonRawLoader) JsonReference() (gojsonreference.JsonReference, error) {
return gojsonreference.NewJsonReference("#")
}
func (l *jsonRawLoader) LoaderFactory() JSONLoaderFactory {
return &DefaultJSONLoaderFactory{}
}
func decodeJSONUsingNumber(r io.Reader) (interface{}, error) {
var document interface{}
decoder := json.NewDecoder(r)
decoder.UseNumber()
err := decoder.Decode(&document)
if err != nil {
return nil, err
}
return document, nil
}
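These loaders are the entry points for handing schemas and documents to the library. A brief sketch of the common constructors (the file URL is illustrative):
package main
import (
    "fmt"
    "log"
    "github.com/xeipuuv/gojsonschema"
)
func main() {
    // From a raw JSON string.
    stringLoader := gojsonschema.NewStringLoader(`{"type": "string", "minLength": 1}`)
    // From a Go value; NewGoLoader marshals it to JSON first so number types line up.
    goLoader := gojsonschema.NewGoLoader(map[string]interface{}{"type": "string"})
    // From a file or HTTP URL (resolved when the schema is loaded or compiled).
    refLoader := gojsonschema.NewReferenceLoader("file:///tmp/schema.json")
    _ = goLoader
    _ = refLoader
    doc, err := stringLoader.LoadJSON()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%T %v\n", doc, doc) // map[string]interface{} with json.Number values (UseNumber)
}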

472
vendor/github.com/xeipuuv/gojsonschema/locales.go generated vendored Normal file
View File

@ -0,0 +1,472 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Contains const string and messages.
//
// created 01-01-2015
package gojsonschema
type (
// locale is an interface for defining custom error strings
locale interface {
// False returns a format-string for "false" schema validation errors
False() string
// Required returns a format-string for "required" schema validation errors
Required() string
// InvalidType returns a format-string for "invalid type" schema validation errors
InvalidType() string
// NumberAnyOf returns a format-string for "anyOf" schema validation errors
NumberAnyOf() string
// NumberOneOf returns a format-string for "oneOf" schema validation errors
NumberOneOf() string
// NumberAllOf returns a format-string for "allOf" schema validation errors
NumberAllOf() string
// NumberNot returns a format-string to format a NumberNotError
NumberNot() string
// MissingDependency returns a format-string for "missing dependency" schema validation errors
MissingDependency() string
// Internal returns a format-string for internal errors
Internal() string
// Const returns a format-string to format a ConstError
Const() string
// Enum returns a format-string to format an EnumError
Enum() string
// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema
ArrayNotEnoughItems() string
// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
ArrayNoAdditionalItems() string
// ArrayMinItems returns a format-string to format an ArrayMinItemsError
ArrayMinItems() string
// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
ArrayMaxItems() string
// Unique returns a format-string to format an ItemsMustBeUniqueError
Unique() string
// ArrayContains returns a format-string to format an ArrayContainsError
ArrayContains() string
// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
ArrayMinProperties() string
// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
ArrayMaxProperties() string
// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
AdditionalPropertyNotAllowed() string
// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
InvalidPropertyPattern() string
// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
InvalidPropertyName() string
// StringGTE returns a format-string to format an StringLengthGTEError
StringGTE() string
// StringLTE returns a format-string to format an StringLengthLTEError
StringLTE() string
// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
DoesNotMatchPattern() string
// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
DoesNotMatchFormat() string
// MultipleOf returns a format-string to format an MultipleOfError
MultipleOf() string
// NumberGTE returns a format-string to format an NumberGTEError
NumberGTE() string
// NumberGT returns a format-string to format an NumberGTError
NumberGT() string
// NumberLTE returns a format-string to format an NumberLTEError
NumberLTE() string
// NumberLT returns a format-string to format an NumberLTError
NumberLT() string
// Schema validations
// RegexPattern returns a format-string to format a regex-pattern error
RegexPattern() string
// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
GreaterThanZero() string
// MustBeOfA returns a format-string to format an error where a value is of the wrong type
MustBeOfA() string
// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
MustBeOfAn() string
// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
CannotBeUsedWithout() string
// CannotBeGT returns a format-string to format an error where a value is greater than allowed
CannotBeGT() string
// MustBeOfType returns a format-string to format an error where a value does not match the required type
MustBeOfType() string
// MustBeValidRegex returns a format-string to format an error where a regex is invalid
MustBeValidRegex() string
// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
MustBeValidFormat() string
// MustBeGTEZero returns a format-string to format an error where a value must be greater than or equal to 0
MustBeGTEZero() string
// KeyCannotBeGreaterThan returns a format-string to format an error where a key is greater than the maximum allowed
KeyCannotBeGreaterThan() string
// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
KeyItemsMustBeOfType() string
// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
KeyItemsMustBeUnique() string
// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
ReferenceMustBeCanonical() string
// NotAValidType returns a format-string to format an invalid type error
NotAValidType() string
// Duplicated returns a format-string to format an error where types are duplicated
Duplicated() string
// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
HttpBadStatus() string
// ParseError returns a format-string for JSON parsing errors
ParseError() string
// ConditionThen returns a format-string for ConditionThenError errors
ConditionThen() string
// ConditionElse returns a format-string for ConditionElseError errors
ConditionElse() string
// ErrorFormat returns a format string for errors
ErrorFormat() string
}
// DefaultLocale is the default locale for this package
DefaultLocale struct{}
)
// False returns a format-string for "false" schema validation errors
func (l DefaultLocale) False() string {
return "False always fails validation"
}
// Required returns a format-string for "required" schema validation errors
func (l DefaultLocale) Required() string {
return `{{.property}} is required`
}
// InvalidType returns a format-string for "invalid type" schema validation errors
func (l DefaultLocale) InvalidType() string {
return `Invalid type. Expected: {{.expected}}, given: {{.given}}`
}
// NumberAnyOf returns a format-string for "anyOf" schema validation errors
func (l DefaultLocale) NumberAnyOf() string {
return `Must validate at least one schema (anyOf)`
}
// NumberOneOf returns a format-string for "oneOf" schema validation errors
func (l DefaultLocale) NumberOneOf() string {
return `Must validate one and only one schema (oneOf)`
}
// NumberAllOf returns a format-string for "allOf" schema validation errors
func (l DefaultLocale) NumberAllOf() string {
return `Must validate all the schemas (allOf)`
}
// NumberNot returns a format-string to format a NumberNotError
func (l DefaultLocale) NumberNot() string {
return `Must not validate the schema (not)`
}
// MissingDependency returns a format-string for "missing dependency" schema validation errors
func (l DefaultLocale) MissingDependency() string {
return `Has a dependency on {{.dependency}}`
}
// Internal returns a format-string for internal errors
func (l DefaultLocale) Internal() string {
return `Internal Error {{.error}}`
}
// Const returns a format-string to format a ConstError
func (l DefaultLocale) Const() string {
return `{{.field}} does not match: {{.allowed}}`
}
// Enum returns a format-string to format an EnumError
func (l DefaultLocale) Enum() string {
return `{{.field}} must be one of the following: {{.allowed}}`
}
// ArrayNoAdditionalItems returns a format-string to format an ArrayNoAdditionalItemsError
func (l DefaultLocale) ArrayNoAdditionalItems() string {
return `No additional items allowed on array`
}
// ArrayNotEnoughItems returns a format-string to format an error for arrays having not enough items to match positional list of schema
func (l DefaultLocale) ArrayNotEnoughItems() string {
return `Not enough items on array to match positional list of schema`
}
// ArrayMinItems returns a format-string to format an ArrayMinItemsError
func (l DefaultLocale) ArrayMinItems() string {
return `Array must have at least {{.min}} items`
}
// ArrayMaxItems returns a format-string to format an ArrayMaxItemsError
func (l DefaultLocale) ArrayMaxItems() string {
return `Array must have at most {{.max}} items`
}
// Unique returns a format-string to format an ItemsMustBeUniqueError
func (l DefaultLocale) Unique() string {
return `{{.type}} items[{{.i}},{{.j}}] must be unique`
}
// ArrayContains returns a format-string to format an ArrayContainsError
func (l DefaultLocale) ArrayContains() string {
return `At least one of the items must match`
}
// ArrayMinProperties returns a format-string to format an ArrayMinPropertiesError
func (l DefaultLocale) ArrayMinProperties() string {
return `Must have at least {{.min}} properties`
}
// ArrayMaxProperties returns a format-string to format an ArrayMaxPropertiesError
func (l DefaultLocale) ArrayMaxProperties() string {
return `Must have at most {{.max}} properties`
}
// AdditionalPropertyNotAllowed returns a format-string to format an AdditionalPropertyNotAllowedError
func (l DefaultLocale) AdditionalPropertyNotAllowed() string {
return `Additional property {{.property}} is not allowed`
}
// InvalidPropertyPattern returns a format-string to format an InvalidPropertyPatternError
func (l DefaultLocale) InvalidPropertyPattern() string {
return `Property "{{.property}}" does not match pattern {{.pattern}}`
}
// InvalidPropertyName returns a format-string to format an InvalidPropertyNameError
func (l DefaultLocale) InvalidPropertyName() string {
return `Property name of "{{.property}}" does not match`
}
// StringGTE returns a format-string to format an StringLengthGTEError
func (l DefaultLocale) StringGTE() string {
return `String length must be greater than or equal to {{.min}}`
}
// StringLTE returns a format-string to format an StringLengthLTEError
func (l DefaultLocale) StringLTE() string {
return `String length must be less than or equal to {{.max}}`
}
// DoesNotMatchPattern returns a format-string to format an DoesNotMatchPatternError
func (l DefaultLocale) DoesNotMatchPattern() string {
return `Does not match pattern '{{.pattern}}'`
}
// DoesNotMatchFormat returns a format-string to format an DoesNotMatchFormatError
func (l DefaultLocale) DoesNotMatchFormat() string {
return `Does not match format '{{.format}}'`
}
// MultipleOf returns a format-string to format an MultipleOfError
func (l DefaultLocale) MultipleOf() string {
return `Must be a multiple of {{.multiple}}`
}
// NumberGTE returns the format string to format a NumberGTEError
func (l DefaultLocale) NumberGTE() string {
return `Must be greater than or equal to {{.min}}`
}
// NumberGT returns the format string to format a NumberGTError
func (l DefaultLocale) NumberGT() string {
return `Must be greater than {{.min}}`
}
// NumberLTE returns the format string to format a NumberLTEError
func (l DefaultLocale) NumberLTE() string {
return `Must be less than or equal to {{.max}}`
}
// NumberLT returns the format string to format a NumberLTError
func (l DefaultLocale) NumberLT() string {
return `Must be less than {{.max}}`
}
// Schema validators
// RegexPattern returns a format-string to format a regex-pattern error
func (l DefaultLocale) RegexPattern() string {
return `Invalid regex pattern '{{.pattern}}'`
}
// GreaterThanZero returns a format-string to format an error where a number must be greater than zero
func (l DefaultLocale) GreaterThanZero() string {
return `{{.number}} must be strictly greater than 0`
}
// MustBeOfA returns a format-string to format an error where a value is of the wrong type
func (l DefaultLocale) MustBeOfA() string {
return `{{.x}} must be of a {{.y}}`
}
// MustBeOfAn returns a format-string to format an error where a value is of the wrong type
func (l DefaultLocale) MustBeOfAn() string {
return `{{.x}} must be of an {{.y}}`
}
// CannotBeUsedWithout returns a format-string to format a "cannot be used without" error
func (l DefaultLocale) CannotBeUsedWithout() string {
return `{{.x}} cannot be used without {{.y}}`
}
// CannotBeGT returns a format-string to format an error where a value is greater than allowed
func (l DefaultLocale) CannotBeGT() string {
return `{{.x}} cannot be greater than {{.y}}`
}
// MustBeOfType returns a format-string to format an error where a value does not match the required type
func (l DefaultLocale) MustBeOfType() string {
return `{{.key}} must be of type {{.type}}`
}
// MustBeValidRegex returns a format-string to format an error where a regex is invalid
func (l DefaultLocale) MustBeValidRegex() string {
return `{{.key}} must be a valid regex`
}
// MustBeValidFormat returns a format-string to format an error where a value does not match the expected format
func (l DefaultLocale) MustBeValidFormat() string {
return `{{.key}} must be a valid format {{.given}}`
}
// MustBeGTEZero returns a format-string to format an error where a value must be greater than or equal to 0
func (l DefaultLocale) MustBeGTEZero() string {
return `{{.key}} must be greater than or equal to 0`
}
// KeyCannotBeGreaterThan returns a format-string to format an error where a value is greater than the maximum allowed
func (l DefaultLocale) KeyCannotBeGreaterThan() string {
return `{{.key}} cannot be greater than {{.y}}`
}
// KeyItemsMustBeOfType returns a format-string to format an error where a key is of the wrong type
func (l DefaultLocale) KeyItemsMustBeOfType() string {
return `{{.key}} items must be {{.type}}`
}
// KeyItemsMustBeUnique returns a format-string to format an error where keys are not unique
func (l DefaultLocale) KeyItemsMustBeUnique() string {
return `{{.key}} items must be unique`
}
// ReferenceMustBeCanonical returns a format-string to format a "reference must be canonical" error
func (l DefaultLocale) ReferenceMustBeCanonical() string {
return `Reference {{.reference}} must be canonical`
}
// NotAValidType returns a format-string to format an invalid type error
func (l DefaultLocale) NotAValidType() string {
return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}`
}
// Duplicated returns a format-string to format an error where types are duplicated
func (l DefaultLocale) Duplicated() string {
return `{{.type}} type is duplicated`
}
// HttpBadStatus returns a format-string for errors when loading a schema using HTTP
func (l DefaultLocale) HttpBadStatus() string {
return `Could not read schema from HTTP, response status is {{.status}}`
}
// ErrorFormat returns a format string for errors
// Replacement options: field, description, context, value
func (l DefaultLocale) ErrorFormat() string {
return `{{.field}}: {{.description}}`
}
// ParseError returns a format-string for JSON parsing errors
func (l DefaultLocale) ParseError() string {
return `Expected: {{.expected}}, given: Invalid JSON`
}
// ConditionThen returns a format-string for ConditionThenError errors
// If/Else
func (l DefaultLocale) ConditionThen() string {
return `Must validate "then" as "if" was valid`
}
// ConditionElse returns a format-string for ConditionElseError errors
func (l DefaultLocale) ConditionElse() string {
return `Must validate "else" as "if" was not valid`
}
// constants
const (
STRING_NUMBER = "number"
STRING_ARRAY_OF_STRINGS = "array of strings"
STRING_ARRAY_OF_SCHEMAS = "array of schemas"
STRING_SCHEMA = "valid schema"
STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings"
STRING_PROPERTIES = "properties"
STRING_DEPENDENCY = "dependency"
STRING_PROPERTY = "property"
STRING_UNDEFINED = "undefined"
STRING_CONTEXT_ROOT = "(root)"
STRING_ROOT_SCHEMA_PROPERTY = "(root)"
)
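The locale strings above are Go text/template bodies that the package renders against an ErrorDetails map via formatErrorDescription (defined elsewhere in the package, not in this diff). A standard-library-only sketch of that template style, with illustrative detail keys:
package main
import (
    "bytes"
    "fmt"
    "text/template"
)
func main() {
    // DefaultLocale.Enum() returns this template body in the vendored code above.
    const enumTemplate = `{{.field}} must be one of the following: {{.allowed}}`
    // ErrorDetails is a map[string]interface{}; these keys are illustrative.
    details := map[string]interface{}{
        "field":   "status",
        "allowed": `"active", "archived"`,
    }
    tmpl := template.Must(template.New("enum").Parse(enumTemplate))
    var out bytes.Buffer
    if err := tmpl.Execute(&out, details); err != nil {
        panic(err)
    }
    fmt.Println(out.String()) // status must be one of the following: "active", "archived"
}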

220
vendor/github.com/xeipuuv/gojsonschema/result.go generated vendored Normal file
View File

@ -0,0 +1,220 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Result and ResultError implementations.
//
// created 01-01-2015
package gojsonschema
import (
"fmt"
"strings"
)
type (
// ErrorDetails is a map of details specific to each error.
// While the values will vary, every error will contain a "field" value
ErrorDetails map[string]interface{}
// ResultError is the interface that library errors must implement
ResultError interface {
// Field returns the field name without the root context
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
Field() string
// SetType sets the error-type
SetType(string)
// Type returns the error-type
Type() string
// SetContext sets the JSON-context for the error
SetContext(*JsonContext)
// Context returns the JSON-context of the error
Context() *JsonContext
// SetDescription sets a description for the error
SetDescription(string)
// Description returns the description of the error
Description() string
// SetDescriptionFormat sets the format for the description in the default text/template format
SetDescriptionFormat(string)
// DescriptionFormat returns the format for the description in the default text/template format
DescriptionFormat() string
// SetValue sets the value related to the error
SetValue(interface{})
// Value returns the value related to the error
Value() interface{}
// SetDetails sets the details specific to the error
SetDetails(ErrorDetails)
// Details returns details about the error
Details() ErrorDetails
// String returns a string representation of the error
String() string
}
// ResultErrorFields holds the fields for each ResultError implementation.
// ResultErrorFields implements the ResultError interface, so custom errors
// can be defined by just embedding this type
ResultErrorFields struct {
errorType string // A string with the type of error (i.e. invalid_type)
context *JsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ...
description string // A human readable error message
descriptionFormat string // A format for human readable error message
value interface{} // Value given by the JSON file that is the source of the error
details ErrorDetails
}
// Result holds the result of a validation
Result struct {
errors []ResultError
// Scores how well the validation matched. Useful in generating
// better error messages for anyOf and oneOf.
score int
}
)
// Field returns the field name without the root context
// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName
func (v *ResultErrorFields) Field() string {
return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".")
}
// SetType sets the error-type
func (v *ResultErrorFields) SetType(errorType string) {
v.errorType = errorType
}
// Type returns the error-type
func (v *ResultErrorFields) Type() string {
return v.errorType
}
// SetContext sets the JSON-context for the error
func (v *ResultErrorFields) SetContext(context *JsonContext) {
v.context = context
}
// Context returns the JSON-context of the error
func (v *ResultErrorFields) Context() *JsonContext {
return v.context
}
// SetDescription sets a description for the error
func (v *ResultErrorFields) SetDescription(description string) {
v.description = description
}
// Description returns the description of the error
func (v *ResultErrorFields) Description() string {
return v.description
}
// SetDescriptionFormat sets the format for the description in the default text/template format
func (v *ResultErrorFields) SetDescriptionFormat(descriptionFormat string) {
v.descriptionFormat = descriptionFormat
}
// DescriptionFormat returns the format for the description in the default text/template format
func (v *ResultErrorFields) DescriptionFormat() string {
return v.descriptionFormat
}
// SetValue sets the value related to the error
func (v *ResultErrorFields) SetValue(value interface{}) {
v.value = value
}
// Value returns the value related to the error
func (v *ResultErrorFields) Value() interface{} {
return v.value
}
// SetDetails sets the details specific to the error
func (v *ResultErrorFields) SetDetails(details ErrorDetails) {
v.details = details
}
// Details returns details about the error
func (v *ResultErrorFields) Details() ErrorDetails {
return v.details
}
// String returns a string representation of the error
func (v ResultErrorFields) String() string {
// as a fallback, the value is displayed go style
valueString := fmt.Sprintf("%v", v.value)
// marshal the Go value to JSON
if v.value == nil {
valueString = TYPE_NULL
} else {
if vs, err := marshalToJSONString(v.value); err == nil {
if vs == nil {
valueString = TYPE_NULL
} else {
valueString = *vs
}
}
}
return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{
"context": v.context.String(),
"description": v.description,
"value": valueString,
"field": v.Field(),
})
}
// Valid indicates if no errors were found
func (v *Result) Valid() bool {
return len(v.errors) == 0
}
// Errors returns the errors that were found
func (v *Result) Errors() []ResultError {
return v.errors
}
// AddError appends a fully filled error to the error set
// SetDescription() will be called with the result of the parsed err.DescriptionFormat()
func (v *Result) AddError(err ResultError, details ErrorDetails) {
if _, exists := details["context"]; !exists && err.Context() != nil {
details["context"] = err.Context().String()
}
err.SetDescription(formatErrorDescription(err.DescriptionFormat(), details))
v.errors = append(v.errors, err)
}
func (v *Result) addInternalError(err ResultError, context *JsonContext, value interface{}, details ErrorDetails) {
newError(err, context, value, Locale, details)
v.errors = append(v.errors, err)
v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function
}
// Used to copy errors from a sub-schema to the main one
func (v *Result) mergeErrors(otherResult *Result) {
v.errors = append(v.errors, otherResult.Errors()...)
v.score += otherResult.score
}
func (v *Result) incrementScore() {
v.score++
}
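Result is what callers inspect after validating a document. A minimal sketch, assuming the package's top-level Validate helper (defined in validation.go, which is not part of this excerpt):
package main
import (
    "fmt"
    "log"
    "github.com/xeipuuv/gojsonschema"
)
func main() {
    schema := gojsonschema.NewStringLoader(`{
        "type": "object",
        "properties": {"event": {"type": "string"}},
        "required": ["event"]
    }`)
    document := gojsonschema.NewStringLoader(`{"event": 42}`)
    result, err := gojsonschema.Validate(schema, document)
    if err != nil {
        log.Fatal(err) // the schema or document could not be loaded
    }
    if result.Valid() {
        fmt.Println("document is valid")
        return
    }
    for _, resErr := range result.Errors() {
        // Field() is the dotted path, Description() the rendered locale string.
        fmt.Printf("- %s: %s\n", resErr.Field(), resErr.Description())
    }
}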

1087
vendor/github.com/xeipuuv/gojsonschema/schema.go generated vendored Normal file

File diff suppressed because it is too large

206
vendor/github.com/xeipuuv/gojsonschema/schemaLoader.go generated vendored Normal file
View File

@ -0,0 +1,206 @@
// Copyright 2018 johandorland ( https://github.com/johandorland )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gojsonschema
import (
"bytes"
"errors"
"github.com/xeipuuv/gojsonreference"
)
// SchemaLoader is used to load schemas
type SchemaLoader struct {
pool *schemaPool
AutoDetect bool
Validate bool
Draft Draft
}
// NewSchemaLoader creates a new NewSchemaLoader
func NewSchemaLoader() *SchemaLoader {
ps := &SchemaLoader{
pool: &schemaPool{
schemaPoolDocuments: make(map[string]*schemaPoolDocument),
},
AutoDetect: true,
Validate: false,
Draft: Hybrid,
}
ps.pool.autoDetect = &ps.AutoDetect
return ps
}
func (sl *SchemaLoader) validateMetaschema(documentNode interface{}) error {
var (
schema string
err error
)
if sl.AutoDetect {
schema, _, err = parseSchemaURL(documentNode)
if err != nil {
return err
}
}
// If no explicit "$schema" is used, use the default metaschema associated with the draft used
if schema == "" {
if sl.Draft == Hybrid {
return nil
}
schema = drafts.GetSchemaURL(sl.Draft)
}
// Disable validation when loading the metaschema to prevent an infinite recursive loop
sl.Validate = false
metaSchema, err := sl.Compile(NewReferenceLoader(schema))
if err != nil {
return err
}
sl.Validate = true
result := metaSchema.validateDocument(documentNode)
if !result.Valid() {
var res bytes.Buffer
for _, err := range result.Errors() {
res.WriteString(err.String())
res.WriteString("\n")
}
return errors.New(res.String())
}
return nil
}
// AddSchemas adds an arbitrary number of schemas to the schema cache. As this function does not require
// an explicit URL, every schema should contain an $id, so that it can be referenced by the main schema
func (sl *SchemaLoader) AddSchemas(loaders ...JSONLoader) error {
emptyRef, _ := gojsonreference.NewJsonReference("")
for _, loader := range loaders {
doc, err := loader.LoadJSON()
if err != nil {
return err
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return err
}
}
// Directly use the recursive function, so that it only gets added to the schema pool by $id
// and not by the ref of the document as it's empty
if err = sl.pool.parseReferences(doc, emptyRef, false); err != nil {
return err
}
}
return nil
}
// AddSchema adds a schema under the provided URL to the schema cache
func (sl *SchemaLoader) AddSchema(url string, loader JSONLoader) error {
ref, err := gojsonreference.NewJsonReference(url)
if err != nil {
return err
}
doc, err := loader.LoadJSON()
if err != nil {
return err
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return err
}
}
return sl.pool.parseReferences(doc, ref, true)
}
// Compile loads and compiles a schema
func (sl *SchemaLoader) Compile(rootSchema JSONLoader) (*Schema, error) {
ref, err := rootSchema.JsonReference()
if err != nil {
return nil, err
}
d := Schema{}
d.pool = sl.pool
d.pool.jsonLoaderFactory = rootSchema.LoaderFactory()
d.documentReference = ref
d.referencePool = newSchemaReferencePool()
var doc interface{}
if ref.String() != "" {
// Get document from schema pool
spd, err := d.pool.GetDocument(d.documentReference)
if err != nil {
return nil, err
}
doc = spd.Document
} else {
// Load JSON directly
doc, err = rootSchema.LoadJSON()
if err != nil {
return nil, err
}
// References need only be parsed if loading JSON directly
// as pool.GetDocument already does this for us if loading by reference
err = sl.pool.parseReferences(doc, ref, true)
if err != nil {
return nil, err
}
}
if sl.Validate {
if err := sl.validateMetaschema(doc); err != nil {
return nil, err
}
}
draft := sl.Draft
if sl.AutoDetect {
_, detectedDraft, err := parseSchemaURL(doc)
if err != nil {
return nil, err
}
if detectedDraft != nil {
draft = *detectedDraft
}
}
err = d.parse(doc, draft)
if err != nil {
return nil, err
}
return &d, nil
}
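SchemaLoader is the stateful way to pre-register shared schemas by $id and compile a root schema once. A sketch with illustrative $id/$ref URLs; Draft7 is assumed to be one of the package's exported Draft constants:
package main
import (
    "log"
    "github.com/xeipuuv/gojsonschema"
)
func main() {
    sl := gojsonschema.NewSchemaLoader()
    sl.Validate = true             // validate schemas against their metaschema on load
    sl.Draft = gojsonschema.Draft7 // pin the draft instead of relying purely on auto-detection
    // A shared definitions schema, referenced by $id (the URL is illustrative).
    shared := gojsonschema.NewStringLoader(`{
        "$id": "https://example.com/defs.json",
        "definitions": {"uuid": {"type": "string", "format": "uuid"}}
    }`)
    if err := sl.AddSchemas(shared); err != nil {
        log.Fatal(err)
    }
    root := gojsonschema.NewStringLoader(`{
        "type": "object",
        "properties": {"id": {"$ref": "https://example.com/defs.json#/definitions/uuid"}}
    }`)
    schema, err := sl.Compile(root)
    if err != nil {
        log.Fatal(err)
    }
    _ = schema // schema.Validate(documentLoader) can now be called repeatedly
}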

215
vendor/github.com/xeipuuv/gojsonschema/schemaPool.go generated vendored Normal file
View File

@ -0,0 +1,215 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Defines resources pooling.
// Eases referencing and avoids downloading the same resource twice.
//
// created 26-02-2013
package gojsonschema
import (
"errors"
"fmt"
"reflect"
"github.com/xeipuuv/gojsonreference"
)
type schemaPoolDocument struct {
Document interface{}
Draft *Draft
}
type schemaPool struct {
schemaPoolDocuments map[string]*schemaPoolDocument
jsonLoaderFactory JSONLoaderFactory
autoDetect *bool
}
func (p *schemaPool) parseReferences(document interface{}, ref gojsonreference.JsonReference, pooled bool) error {
var (
draft *Draft
err error
reference = ref.String()
)
// Only the root document should be added to the schema pool if pooled is true
if _, ok := p.schemaPoolDocuments[reference]; pooled && ok {
return fmt.Errorf("Reference already exists: \"%s\"", reference)
}
if *p.autoDetect {
_, draft, err = parseSchemaURL(document)
if err != nil {
return err
}
}
err = p.parseReferencesRecursive(document, ref, draft)
if pooled {
p.schemaPoolDocuments[reference] = &schemaPoolDocument{Document: document, Draft: draft}
}
return err
}
func (p *schemaPool) parseReferencesRecursive(document interface{}, ref gojsonreference.JsonReference, draft *Draft) error {
// parseReferencesRecursive parses a JSON document and resolves all $id and $ref references.
// For $ref references it takes into account the $id scope it is in and replaces
// the reference by the absolute resolved reference
// When encountering errors it fails silently. Error handling is done when the schema
// is syntactically parsed and any error encountered here should also come up there.
switch m := document.(type) {
case []interface{}:
for _, v := range m {
p.parseReferencesRecursive(v, ref, draft)
}
case map[string]interface{}:
localRef := &ref
keyID := KEY_ID_NEW
if existsMapKey(m, KEY_ID) {
keyID = KEY_ID
}
if existsMapKey(m, keyID) && isKind(m[keyID], reflect.String) {
jsonReference, err := gojsonreference.NewJsonReference(m[keyID].(string))
if err == nil {
localRef, err = ref.Inherits(jsonReference)
if err == nil {
if _, ok := p.schemaPoolDocuments[localRef.String()]; ok {
return fmt.Errorf("Reference already exists: \"%s\"", localRef.String())
}
p.schemaPoolDocuments[localRef.String()] = &schemaPoolDocument{Document: document, Draft: draft}
}
}
}
if existsMapKey(m, KEY_REF) && isKind(m[KEY_REF], reflect.String) {
jsonReference, err := gojsonreference.NewJsonReference(m[KEY_REF].(string))
if err == nil {
absoluteRef, err := localRef.Inherits(jsonReference)
if err == nil {
m[KEY_REF] = absoluteRef.String()
}
}
}
for k, v := range m {
// const and enums should be interpreted literally, so ignore them
if k == KEY_CONST || k == KEY_ENUM {
continue
}
// Something like a property or a dependency is not a valid schema, as it might describe properties named "$ref", "$id" or "const", etc
// Therefore don't treat it like a schema.
if k == KEY_PROPERTIES || k == KEY_DEPENDENCIES || k == KEY_PATTERN_PROPERTIES {
if child, ok := v.(map[string]interface{}); ok {
for _, v := range child {
p.parseReferencesRecursive(v, *localRef, draft)
}
}
} else {
p.parseReferencesRecursive(v, *localRef, draft)
}
}
}
return nil
}
func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) {
var (
spd *schemaPoolDocument
draft *Draft
ok bool
err error
)
if internalLogEnabled {
internalLog("Get Document ( %s )", reference.String())
}
// Create a deep copy, so we can remove the fragment part later on without altering the original
refToURL, _ := gojsonreference.NewJsonReference(reference.String())
// First check if the given fragment is a location independent identifier
// http://json-schema.org/latest/json-schema-core.html#rfc.section.8.2.3
if spd, ok = p.schemaPoolDocuments[refToURL.String()]; ok {
if internalLogEnabled {
internalLog(" From pool")
}
return spd, nil
}
// If the given reference is not a location independent identifier,
// strip the fragment and look for a document with its base URI
refToURL.GetUrl().Fragment = ""
if cachedSpd, ok := p.schemaPoolDocuments[refToURL.String()]; ok {
document, _, err := reference.GetPointer().Get(cachedSpd.Document)
if err != nil {
return nil, err
}
if internalLogEnabled {
internalLog(" From pool")
}
spd = &schemaPoolDocument{Document: document, Draft: cachedSpd.Draft}
p.schemaPoolDocuments[reference.String()] = spd
return spd, nil
}
// It is not possible to load anything remotely that is not canonical...
if !reference.IsCanonical() {
return nil, errors.New(formatErrorDescription(
Locale.ReferenceMustBeCanonical(),
ErrorDetails{"reference": reference.String()},
))
}
jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String())
document, err := jsonReferenceLoader.LoadJSON()
if err != nil {
return nil, err
}
// add the whole document to the pool for potential re-use
p.parseReferences(document, refToURL, true)
_, draft, _ = parseSchemaURL(document)
// resolve the potential fragment and also cache it
document, _, err = reference.GetPointer().Get(document)
if err != nil {
return nil, err
}
return &schemaPoolDocument{Document: document, Draft: draft}, nil
}

68
vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Pool of referenced schemas.
//
// created 25-06-2013
package gojsonschema
import (
"fmt"
)
type schemaReferencePool struct {
documents map[string]*subSchema
}
func newSchemaReferencePool() *schemaReferencePool {
p := &schemaReferencePool{}
p.documents = make(map[string]*subSchema)
return p
}
func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) {
if internalLogEnabled {
internalLog(fmt.Sprintf("Schema Reference ( %s )", ref))
}
if sch, ok := p.documents[ref]; ok {
if internalLogEnabled {
internalLog(fmt.Sprintf(" From pool"))
}
return sch, true
}
return nil, false
}
func (p *schemaReferencePool) Add(ref string, sch *subSchema) {
if internalLogEnabled {
internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref))
}
if _, ok := p.documents[ref]; !ok {
p.documents[ref] = sch
}
}

83
vendor/github.com/xeipuuv/gojsonschema/schemaType.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Helper structure to handle schema types, and the combination of them.
//
// created 28-02-2013
package gojsonschema
import (
"errors"
"fmt"
"strings"
)
type jsonSchemaType struct {
types []string
}
// Is the schema typed? That is, does it contain at least one type?
// When not typed, the schema does not need any type validation
func (t *jsonSchemaType) IsTyped() bool {
return len(t.types) > 0
}
func (t *jsonSchemaType) Add(etype string) error {
if !isStringInSlice(JSON_TYPES, etype) {
return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES}))
}
if t.Contains(etype) {
return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype}))
}
t.types = append(t.types, etype)
return nil
}
func (t *jsonSchemaType) Contains(etype string) bool {
for _, v := range t.types {
if v == etype {
return true
}
}
return false
}
func (t *jsonSchemaType) String() string {
if len(t.types) == 0 {
return STRING_UNDEFINED // should never happen
}
// Displayed as a list [type1,type2,...]
if len(t.types) > 1 {
return fmt.Sprintf("[%s]", strings.Join(t.types, ","))
}
// Only one type: name only
return t.types[0]
}

149
vendor/github.com/xeipuuv/gojsonschema/subSchema.go generated vendored Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Defines the structure of a sub-subSchema.
// A sub-subSchema can contain other sub-schemas.
//
// created 27-02-2013
package gojsonschema
import (
"github.com/xeipuuv/gojsonreference"
"math/big"
"regexp"
)
// Constants
const (
KEY_SCHEMA = "$schema"
KEY_ID = "id"
KEY_ID_NEW = "$id"
KEY_REF = "$ref"
KEY_TITLE = "title"
KEY_DESCRIPTION = "description"
KEY_TYPE = "type"
KEY_ITEMS = "items"
KEY_ADDITIONAL_ITEMS = "additionalItems"
KEY_PROPERTIES = "properties"
KEY_PATTERN_PROPERTIES = "patternProperties"
KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
KEY_PROPERTY_NAMES = "propertyNames"
KEY_DEFINITIONS = "definitions"
KEY_MULTIPLE_OF = "multipleOf"
KEY_MINIMUM = "minimum"
KEY_MAXIMUM = "maximum"
KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum"
KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum"
KEY_MIN_LENGTH = "minLength"
KEY_MAX_LENGTH = "maxLength"
KEY_PATTERN = "pattern"
KEY_FORMAT = "format"
KEY_MIN_PROPERTIES = "minProperties"
KEY_MAX_PROPERTIES = "maxProperties"
KEY_DEPENDENCIES = "dependencies"
KEY_REQUIRED = "required"
KEY_MIN_ITEMS = "minItems"
KEY_MAX_ITEMS = "maxItems"
KEY_UNIQUE_ITEMS = "uniqueItems"
KEY_CONTAINS = "contains"
KEY_CONST = "const"
KEY_ENUM = "enum"
KEY_ONE_OF = "oneOf"
KEY_ANY_OF = "anyOf"
KEY_ALL_OF = "allOf"
KEY_NOT = "not"
KEY_IF = "if"
KEY_THEN = "then"
KEY_ELSE = "else"
)
type subSchema struct {
draft *Draft
// basic subSchema meta properties
id *gojsonreference.JsonReference
title *string
description *string
property string
// Quick pass/fail for boolean schemas
pass *bool
// Types associated with the subSchema
types jsonSchemaType
// Reference url
ref *gojsonreference.JsonReference
// Schema referenced
refSchema *subSchema
// hierarchy
parent *subSchema
itemsChildren []*subSchema
itemsChildrenIsSingleSchema bool
propertiesChildren []*subSchema
// validation : number / integer
multipleOf *big.Rat
maximum *big.Rat
exclusiveMaximum *big.Rat
minimum *big.Rat
exclusiveMinimum *big.Rat
// validation : string
minLength *int
maxLength *int
pattern *regexp.Regexp
format string
// validation : object
minProperties *int
maxProperties *int
required []string
dependencies map[string]interface{}
additionalProperties interface{}
patternProperties map[string]*subSchema
propertyNames *subSchema
// validation : array
minItems *int
maxItems *int
uniqueItems bool
contains *subSchema
additionalItems interface{}
// validation : all
_const *string //const is a golang keyword
enum []string
// validation : subSchema
oneOf []*subSchema
anyOf []*subSchema
allOf []*subSchema
not *subSchema
_if *subSchema // if/else are golang keywords
_then *subSchema
_else *subSchema
}
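The _if/_then/_else, oneOf/anyOf/allOf and _const fields declared above are all driven through the public loaders. A hedged sketch of a conditional schema follows; the property names and payload are purely illustrative, not the ones used by status-go:

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    // "if"/"then"/"else" land in the _if/_then/_else fields of subSchema.
    schemaJSON := `{
        "type": "object",
        "properties": {"event": {"type": "string"}},
        "if":   {"properties": {"event": {"const": "navigation"}}},
        "then": {"required": ["screen"]},
        "else": {"required": ["value"]}
    }`
    documentJSON := `{"event": "navigation"}` // matches "if", so "then" applies

    result, err := gojsonschema.Validate(
        gojsonschema.NewStringLoader(schemaJSON),
        gojsonschema.NewStringLoader(documentJSON),
    )
    if err != nil {
        panic(err)
    }
    fmt.Println("valid:", result.Valid()) // false: "screen" is required but missing
    for _, e := range result.Errors() {
        fmt.Println(e.Description())
    }
}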

62
vendor/github.com/xeipuuv/gojsonschema/types.go generated vendored Normal file

@@ -0,0 +1,62 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Contains const types for schema and JSON.
//
// created 28-02-2013
package gojsonschema
// Type constants
const (
TYPE_ARRAY = `array`
TYPE_BOOLEAN = `boolean`
TYPE_INTEGER = `integer`
TYPE_NUMBER = `number`
TYPE_NULL = `null`
TYPE_OBJECT = `object`
TYPE_STRING = `string`
)
// JSON_TYPES hosts the list of types that are supported in JSON
var JSON_TYPES []string
// SCHEMA_TYPES hosts the list of types that are supported in schemas
var SCHEMA_TYPES []string
func init() {
JSON_TYPES = []string{
TYPE_ARRAY,
TYPE_BOOLEAN,
TYPE_INTEGER,
TYPE_NUMBER,
TYPE_NULL,
TYPE_OBJECT,
TYPE_STRING}
SCHEMA_TYPES = []string{
TYPE_ARRAY,
TYPE_BOOLEAN,
TYPE_INTEGER,
TYPE_NUMBER,
TYPE_OBJECT,
TYPE_STRING}
}
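Because documents are decoded with json.Number and integrality is checked via big.Rat (see checkJSONInteger in utils.go below), a value such as 1.0 satisfies "type": "integer". A short sketch, assuming the vendored v1.2.0 behaviour:

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    schemaLoader := gojsonschema.NewStringLoader(`{"type": "integer"}`)

    for _, doc := range []string{`42`, `1.0`, `1.5`} {
        result, err := gojsonschema.Validate(schemaLoader, gojsonschema.NewStringLoader(doc))
        if err != nil {
            panic(err)
        }
        // 42 and 1.0 validate (their big.Rat form is integral); 1.5 does not.
        fmt.Printf("%-3s valid=%v\n", doc, result.Valid())
    }
}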

197
vendor/github.com/xeipuuv/gojsonschema/utils.go generated vendored Normal file

@@ -0,0 +1,197 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Various utility functions.
//
// created 26-02-2013
package gojsonschema
import (
"encoding/json"
"math/big"
"reflect"
)
func isKind(what interface{}, kinds ...reflect.Kind) bool {
target := what
if isJSONNumber(what) {
// JSON Numbers are strings!
target = *mustBeNumber(what)
}
targetKind := reflect.ValueOf(target).Kind()
for _, kind := range kinds {
if targetKind == kind {
return true
}
}
return false
}
func existsMapKey(m map[string]interface{}, k string) bool {
_, ok := m[k]
return ok
}
func isStringInSlice(s []string, what string) bool {
for i := range s {
if s[i] == what {
return true
}
}
return false
}
// indexStringInSlice returns the index of the first instance of 'what' in s or -1 if it is not found in s.
func indexStringInSlice(s []string, what string) int {
for i := range s {
if s[i] == what {
return i
}
}
return -1
}
func marshalToJSONString(value interface{}) (*string, error) {
mBytes, err := json.Marshal(value)
if err != nil {
return nil, err
}
sBytes := string(mBytes)
return &sBytes, nil
}
func marshalWithoutNumber(value interface{}) (*string, error) {
// The JSON is decoded using https://golang.org/pkg/encoding/json/#Decoder.UseNumber
// This means the numbers are internally still represented as strings and therefore 1.00 is unequal to 1
// One way to eliminate these differences is to decode and encode the JSON one more time without Decoder.UseNumber
// so that these differences in representation are removed
jsonString, err := marshalToJSONString(value)
if err != nil {
return nil, err
}
var document interface{}
err = json.Unmarshal([]byte(*jsonString), &document)
if err != nil {
return nil, err
}
return marshalToJSONString(document)
}
func isJSONNumber(what interface{}) bool {
switch what.(type) {
case json.Number:
return true
}
return false
}
func checkJSONInteger(what interface{}) (isInt bool) {
jsonNumber := what.(json.Number)
bigFloat, isValidNumber := new(big.Rat).SetString(string(jsonNumber))
return isValidNumber && bigFloat.IsInt()
}
// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
const (
maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -(2^53 - 1)
)
func mustBeInteger(what interface{}) *int {
if isJSONNumber(what) {
number := what.(json.Number)
isInt := checkJSONInteger(number)
if isInt {
int64Value, err := number.Int64()
if err != nil {
return nil
}
int32Value := int(int64Value)
return &int32Value
}
}
return nil
}
func mustBeNumber(what interface{}) *big.Rat {
if isJSONNumber(what) {
number := what.(json.Number)
float64Value, success := new(big.Rat).SetString(string(number))
if success {
return float64Value
}
}
return nil
}
func convertDocumentNode(val interface{}) interface{} {
if lval, ok := val.([]interface{}); ok {
res := []interface{}{}
for _, v := range lval {
res = append(res, convertDocumentNode(v))
}
return res
}
if mval, ok := val.(map[interface{}]interface{}); ok {
res := map[string]interface{}{}
for k, v := range mval {
res[k.(string)] = convertDocumentNode(v)
}
return res
}
return val
}
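marshalWithoutNumber exists because the loaders decode with json.Decoder.UseNumber, so 1.00 and 1 keep different textual forms until they are re-marshalled. A standard-library-only sketch of the same normalisation idea (marshalNormalized is a hypothetical stand-in, not part of this package):

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

// marshalNormalized mirrors the idea behind marshalWithoutNumber: marshal, then
// decode again *without* UseNumber, so 1.00 and 1 end up with the same text.
func marshalNormalized(value interface{}) (string, error) {
    raw, err := json.Marshal(value)
    if err != nil {
        return "", err
    }
    var doc interface{}
    if err := json.Unmarshal(raw, &doc); err != nil {
        return "", err
    }
    out, err := json.Marshal(doc)
    return string(out), err
}

func main() {
    // Decode with UseNumber, the way gojsonschema's loaders do.
    decode := func(s string) interface{} {
        dec := json.NewDecoder(strings.NewReader(s))
        dec.UseNumber()
        var v interface{}
        if err := dec.Decode(&v); err != nil {
            panic(err)
        }
        return v
    }
    a := decode(`1.00`)
    b := decode(`1`)

    fmt.Printf("as json.Number: %q vs %q\n", a, b) // "1.00" vs "1" – different strings

    na, _ := marshalNormalized(a)
    nb, _ := marshalNormalized(b)
    fmt.Printf("normalized:     %q vs %q\n", na, nb) // "1" vs "1" – now comparable
}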

858
vendor/github.com/xeipuuv/gojsonschema/validation.go generated vendored Normal file

@@ -0,0 +1,858 @@
// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail xeipuuv@gmail.com
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Extends Schema and subSchema, implements the validation phase.
//
// created 28-02-2013
package gojsonschema
import (
"encoding/json"
"math/big"
"reflect"
"regexp"
"strconv"
"strings"
"unicode/utf8"
)
// Validate loads and validates a JSON schema
func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
// load schema
schema, err := NewSchema(ls)
if err != nil {
return nil, err
}
return schema.Validate(ld)
}
// Validate loads and validates a JSON document
func (v *Schema) Validate(l JSONLoader) (*Result, error) {
root, err := l.LoadJSON()
if err != nil {
return nil, err
}
return v.validateDocument(root), nil
}
func (v *Schema) validateDocument(root interface{}) *Result {
result := &Result{}
context := NewJsonContext(STRING_CONTEXT_ROOT, nil)
v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
return result
}
func (v *subSchema) subValidateWithContext(document interface{}, context *JsonContext) *Result {
result := &Result{}
v.validateRecursive(v, document, result, context)
return result
}
// Walker function to validate the json recursively against the subSchema
func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateRecursive %s", context.String())
internalLog(" %v", currentNode)
}
// Handle true/false schema as early as possible as all other fields will be nil
if currentSubSchema.pass != nil {
if !*currentSubSchema.pass {
result.addInternalError(
new(FalseError),
context,
currentNode,
ErrorDetails{},
)
}
return
}
// Handle referenced schemas, returns directly when a $ref is found
if currentSubSchema.refSchema != nil {
v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
return
}
// Check for null value
if currentNode == nil {
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": TYPE_NULL,
},
)
return
}
currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
v.validateCommon(currentSubSchema, currentNode, result, context)
} else { // Not a null value
if isJSONNumber(currentNode) {
value := currentNode.(json.Number)
isInt := checkJSONInteger(value)
validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isInt && currentSubSchema.types.Contains(TYPE_INTEGER))
if currentSubSchema.types.IsTyped() && !validType {
givenType := TYPE_INTEGER
if !isInt {
givenType = TYPE_NUMBER
}
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": givenType,
},
)
return
}
currentSubSchema.validateSchema(currentSubSchema, value, result, context)
v.validateNumber(currentSubSchema, value, result, context)
v.validateCommon(currentSubSchema, value, result, context)
v.validateString(currentSubSchema, value, result, context)
} else {
rValue := reflect.ValueOf(currentNode)
rKind := rValue.Kind()
switch rKind {
// Slice => JSON array
case reflect.Slice:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": TYPE_ARRAY,
},
)
return
}
castCurrentNode := currentNode.([]interface{})
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
v.validateArray(currentSubSchema, castCurrentNode, result, context)
v.validateCommon(currentSubSchema, castCurrentNode, result, context)
// Map => JSON object
case reflect.Map:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": TYPE_OBJECT,
},
)
return
}
castCurrentNode, ok := currentNode.(map[string]interface{})
if !ok {
castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
}
currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
v.validateObject(currentSubSchema, castCurrentNode, result, context)
v.validateCommon(currentSubSchema, castCurrentNode, result, context)
for _, pSchema := range currentSubSchema.propertiesChildren {
nextNode, ok := castCurrentNode[pSchema.property]
if ok {
subContext := NewJsonContext(pSchema.property, context)
v.validateRecursive(pSchema, nextNode, result, subContext)
}
}
// Simple JSON values: string, number, boolean
case reflect.Bool:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": TYPE_BOOLEAN,
},
)
return
}
value := currentNode.(bool)
currentSubSchema.validateSchema(currentSubSchema, value, result, context)
v.validateNumber(currentSubSchema, value, result, context)
v.validateCommon(currentSubSchema, value, result, context)
v.validateString(currentSubSchema, value, result, context)
case reflect.String:
if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
result.addInternalError(
new(InvalidTypeError),
context,
currentNode,
ErrorDetails{
"expected": currentSubSchema.types.String(),
"given": TYPE_STRING,
},
)
return
}
value := currentNode.(string)
currentSubSchema.validateSchema(currentSubSchema, value, result, context)
v.validateNumber(currentSubSchema, value, result, context)
v.validateCommon(currentSubSchema, value, result, context)
v.validateString(currentSubSchema, value, result, context)
}
}
}
result.incrementScore()
}
// Different kinds of validation here: subSchema / common / array / object / string...
func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateSchema %s", context.String())
internalLog(" %v", currentNode)
}
if len(currentSubSchema.anyOf) > 0 {
validatedAnyOf := false
var bestValidationResult *Result
for _, anyOfSchema := range currentSubSchema.anyOf {
if !validatedAnyOf {
validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
validatedAnyOf = validationResult.Valid()
if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
bestValidationResult = validationResult
}
}
}
if !validatedAnyOf {
result.addInternalError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
if bestValidationResult != nil {
// add error messages of closest matching subSchema as
// that's probably the one the user was trying to match
result.mergeErrors(bestValidationResult)
}
}
}
if len(currentSubSchema.oneOf) > 0 {
nbValidated := 0
var bestValidationResult *Result
for _, oneOfSchema := range currentSubSchema.oneOf {
validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
if validationResult.Valid() {
nbValidated++
} else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
bestValidationResult = validationResult
}
}
if nbValidated != 1 {
result.addInternalError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
if nbValidated == 0 {
// add error messages of closest matching subSchema as
// that's probably the one the user was trying to match
result.mergeErrors(bestValidationResult)
}
}
}
if len(currentSubSchema.allOf) > 0 {
nbValidated := 0
for _, allOfSchema := range currentSubSchema.allOf {
validationResult := allOfSchema.subValidateWithContext(currentNode, context)
if validationResult.Valid() {
nbValidated++
}
result.mergeErrors(validationResult)
}
if nbValidated != len(currentSubSchema.allOf) {
result.addInternalError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
}
}
if currentSubSchema.not != nil {
validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
if validationResult.Valid() {
result.addInternalError(new(NumberNotError), context, currentNode, ErrorDetails{})
}
}
if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 {
if isKind(currentNode, reflect.Map) {
for elementKey := range currentNode.(map[string]interface{}) {
if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
switch dependency := dependency.(type) {
case []string:
for _, dependOnKey := range dependency {
if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
result.addInternalError(
new(MissingDependencyError),
context,
currentNode,
ErrorDetails{"dependency": dependOnKey},
)
}
}
case *subSchema:
dependency.validateRecursive(dependency, currentNode, result, context)
}
}
}
}
}
if currentSubSchema._if != nil {
validationResultIf := currentSubSchema._if.subValidateWithContext(currentNode, context)
if currentSubSchema._then != nil && validationResultIf.Valid() {
validationResultThen := currentSubSchema._then.subValidateWithContext(currentNode, context)
if !validationResultThen.Valid() {
result.addInternalError(new(ConditionThenError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultThen)
}
}
if currentSubSchema._else != nil && !validationResultIf.Valid() {
validationResultElse := currentSubSchema._else.subValidateWithContext(currentNode, context)
if !validationResultElse.Valid() {
result.addInternalError(new(ConditionElseError), context, currentNode, ErrorDetails{})
result.mergeErrors(validationResultElse)
}
}
}
result.incrementScore()
}
func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateCommon %s", context.String())
internalLog(" %v", value)
}
// const:
if currentSubSchema._const != nil {
vString, err := marshalWithoutNumber(value)
if err != nil {
result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
}
if *vString != *currentSubSchema._const {
result.addInternalError(new(ConstError),
context,
value,
ErrorDetails{
"allowed": *currentSubSchema._const,
},
)
}
}
// enum:
if len(currentSubSchema.enum) > 0 {
vString, err := marshalWithoutNumber(value)
if err != nil {
result.addInternalError(new(InternalError), context, value, ErrorDetails{"error": err})
}
if !isStringInSlice(currentSubSchema.enum, *vString) {
result.addInternalError(
new(EnumError),
context,
value,
ErrorDetails{
"allowed": strings.Join(currentSubSchema.enum, ", "),
},
)
}
}
result.incrementScore()
}
func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateArray %s", context.String())
internalLog(" %v", value)
}
nbValues := len(value)
// If "items" is a single schema, every element of the array is validated against it
if currentSubSchema.itemsChildrenIsSingleSchema {
for i := range value {
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
} else {
if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 {
nbItems := len(currentSubSchema.itemsChildren)
// while we have both schemas and values, check them against each other
for i := 0; i != nbItems && i != nbValues; i++ {
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
if nbItems < nbValues {
// we have fewer schemas than elements in the instance array,
// but that might be ok if "additionalItems" is specified.
switch currentSubSchema.additionalItems.(type) {
case bool:
if !currentSubSchema.additionalItems.(bool) {
result.addInternalError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
}
case *subSchema:
additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
for i := nbItems; i != nbValues; i++ {
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
result.mergeErrors(validationResult)
}
}
}
}
}
// minItems & maxItems
if currentSubSchema.minItems != nil {
if nbValues < int(*currentSubSchema.minItems) {
result.addInternalError(
new(ArrayMinItemsError),
context,
value,
ErrorDetails{"min": *currentSubSchema.minItems},
)
}
}
if currentSubSchema.maxItems != nil {
if nbValues > int(*currentSubSchema.maxItems) {
result.addInternalError(
new(ArrayMaxItemsError),
context,
value,
ErrorDetails{"max": *currentSubSchema.maxItems},
)
}
}
// uniqueItems:
if currentSubSchema.uniqueItems {
var stringifiedItems = make(map[string]int)
for j, v := range value {
vString, err := marshalWithoutNumber(v)
if err != nil {
result.addInternalError(new(InternalError), context, value, ErrorDetails{"err": err})
}
if i, ok := stringifiedItems[*vString]; ok {
result.addInternalError(
new(ItemsMustBeUniqueError),
context,
value,
ErrorDetails{"type": TYPE_ARRAY, "i": i, "j": j},
)
}
stringifiedItems[*vString] = j
}
}
// contains:
if currentSubSchema.contains != nil {
validatedOne := false
var bestValidationResult *Result
for i, v := range value {
subContext := NewJsonContext(strconv.Itoa(i), context)
validationResult := currentSubSchema.contains.subValidateWithContext(v, subContext)
if validationResult.Valid() {
validatedOne = true
break
} else {
if bestValidationResult == nil || validationResult.score > bestValidationResult.score {
bestValidationResult = validationResult
}
}
}
if !validatedOne {
result.addInternalError(
new(ArrayContainsError),
context,
value,
ErrorDetails{},
)
if bestValidationResult != nil {
result.mergeErrors(bestValidationResult)
}
}
}
result.incrementScore()
}
func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *JsonContext) {
if internalLogEnabled {
internalLog("validateObject %s", context.String())
internalLog(" %v", value)
}
// minProperties & maxProperties:
if currentSubSchema.minProperties != nil {
if len(value) < int(*currentSubSchema.minProperties) {
result.addInternalError(
new(ArrayMinPropertiesError),
context,
value,
ErrorDetails{"min": *currentSubSchema.minProperties},
)
}
}
if currentSubSchema.maxProperties != nil {
if len(value) > int(*currentSubSchema.maxProperties) {
result.addInternalError(
new(ArrayMaxPropertiesError),
context,
value,
ErrorDetails{"max": *currentSubSchema.maxProperties},
)
}
}
// required:
for _, requiredProperty := range currentSubSchema.required {
_, ok := value[requiredProperty]
if ok {
result.incrementScore()
} else {
result.addInternalError(
new(RequiredError),
context,
value,
ErrorDetails{"property": requiredProperty},
)
}
}
// additionalProperty & patternProperty:
for pk := range value {
// Check whether this property is described by "properties"
found := false
for _, spValue := range currentSubSchema.propertiesChildren {
if pk == spValue.property {
found = true
}
}
// Check whether this property is described by "patternProperties"
ppMatch := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
// If it is described by neither "properties" nor "patternProperties", it must pass "additionalProperties"
if !found && !ppMatch {
switch ap := currentSubSchema.additionalProperties.(type) {
case bool:
// Handle the boolean case separately as it's cleaner to return a specific error than failing to pass the false schema
if !ap {
result.addInternalError(
new(AdditionalPropertyNotAllowedError),
context,
value[pk],
ErrorDetails{"property": pk},
)
}
case *subSchema:
validationResult := ap.subValidateWithContext(value[pk], NewJsonContext(pk, context))
result.mergeErrors(validationResult)
}
}
}
// propertyNames:
if currentSubSchema.propertyNames != nil {
for pk := range value {
validationResult := currentSubSchema.propertyNames.subValidateWithContext(pk, context)
if !validationResult.Valid() {
result.addInternalError(new(InvalidPropertyNameError),
context,
value, ErrorDetails{
"property": pk,
})
result.mergeErrors(validationResult)
}
}
}
result.incrementScore()
}
func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *JsonContext) bool {
if internalLogEnabled {
internalLog("validatePatternProperty %s", context.String())
internalLog(" %s %v", key, value)
}
validated := false
for pk, pv := range currentSubSchema.patternProperties {
if matches, _ := regexp.MatchString(pk, key); matches {
validated = true
subContext := NewJsonContext(key, context)
validationResult := pv.subValidateWithContext(value, subContext)
result.mergeErrors(validationResult)
}
}
if !validated {
return false
}
result.incrementScore()
return true
}
func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore JSON numbers
if isJSONNumber(value) {
return
}
// Ignore non-strings
if !isKind(value, reflect.String) {
return
}
if internalLogEnabled {
internalLog("validateString %s", context.String())
internalLog(" %v", value)
}
stringValue := value.(string)
// minLength & maxLength:
if currentSubSchema.minLength != nil {
if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
result.addInternalError(
new(StringLengthGTEError),
context,
value,
ErrorDetails{"min": *currentSubSchema.minLength},
)
}
}
if currentSubSchema.maxLength != nil {
if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
result.addInternalError(
new(StringLengthLTEError),
context,
value,
ErrorDetails{"max": *currentSubSchema.maxLength},
)
}
}
// pattern:
if currentSubSchema.pattern != nil {
if !currentSubSchema.pattern.MatchString(stringValue) {
result.addInternalError(
new(DoesNotMatchPatternError),
context,
value,
ErrorDetails{"pattern": currentSubSchema.pattern},
)
}
}
// format
if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
result.addInternalError(
new(DoesNotMatchFormatError),
context,
value,
ErrorDetails{"format": currentSubSchema.format},
)
}
}
result.incrementScore()
}
func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *JsonContext) {
// Ignore non-numbers
if !isJSONNumber(value) {
return
}
if internalLogEnabled {
internalLog("validateNumber %s", context.String())
internalLog(" %v", value)
}
number := value.(json.Number)
float64Value, _ := new(big.Rat).SetString(string(number))
// multipleOf:
if currentSubSchema.multipleOf != nil {
if q := new(big.Rat).Quo(float64Value, currentSubSchema.multipleOf); !q.IsInt() {
result.addInternalError(
new(MultipleOfError),
context,
number,
ErrorDetails{
"multiple": new(big.Float).SetRat(currentSubSchema.multipleOf),
},
)
}
}
//maximum & exclusiveMaximum:
if currentSubSchema.maximum != nil {
if float64Value.Cmp(currentSubSchema.maximum) == 1 {
result.addInternalError(
new(NumberLTEError),
context,
number,
ErrorDetails{
"max": new(big.Float).SetRat(currentSubSchema.maximum),
},
)
}
}
if currentSubSchema.exclusiveMaximum != nil {
if float64Value.Cmp(currentSubSchema.exclusiveMaximum) >= 0 {
result.addInternalError(
new(NumberLTError),
context,
number,
ErrorDetails{
"max": new(big.Float).SetRat(currentSubSchema.exclusiveMaximum),
},
)
}
}
//minimum & exclusiveMinimum:
if currentSubSchema.minimum != nil {
if float64Value.Cmp(currentSubSchema.minimum) == -1 {
result.addInternalError(
new(NumberGTEError),
context,
number,
ErrorDetails{
"min": new(big.Float).SetRat(currentSubSchema.minimum),
},
)
}
}
if currentSubSchema.exclusiveMinimum != nil {
if float64Value.Cmp(currentSubSchema.exclusiveMinimum) <= 0 {
result.addInternalError(
new(NumberGTError),
context,
number,
ErrorDetails{
"min": new(big.Float).SetRat(currentSubSchema.exclusiveMinimum),
},
)
}
}
// format
if currentSubSchema.format != "" {
if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) {
result.addInternalError(
new(DoesNotMatchFormatError),
context,
value,
ErrorDetails{"format": currentSubSchema.format},
)
}
}
result.incrementScore()
}
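To tie this back to why the package is vendored at all (the appmetrics validation layer), here is a hedged end-to-end sketch; the schema below is purely illustrative and is not the schema shipped in the status-go appmetrics package:

package main

import (
    "fmt"

    "github.com/xeipuuv/gojsonschema"
)

func main() {
    schemaJSON := `{
        "type": "object",
        "required": ["event", "platform"],
        "properties": {
            "event":    {"type": "string", "minLength": 1},
            "platform": {"enum": ["ios", "android", "desktop"]}
        },
        "additionalProperties": false
    }`

    documentJSON := `{"event": "", "platform": "windows", "extra": 1}`

    result, err := gojsonschema.Validate(
        gojsonschema.NewStringLoader(schemaJSON),
        gojsonschema.NewStringLoader(documentJSON),
    )
    if err != nil {
        panic(err) // the schema or document could not be loaded at all
    }
    if !result.Valid() {
        // One line per violated constraint: minLength on "event",
        // enum on "platform", additionalProperties on "extra".
        for _, e := range result.Errors() {
            fmt.Printf("%s: %s\n", e.Field(), e.Description())
        }
    }
}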

6
vendor/modules.txt vendored

@@ -469,6 +469,12 @@ github.com/whyrusleeping/mafmt
github.com/whyrusleeping/multiaddr-filter
# github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208
github.com/wsddn/go-ecdh
# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
github.com/xeipuuv/gojsonreference
# github.com/xeipuuv/gojsonschema v1.2.0
github.com/xeipuuv/gojsonschema
# go.opencensus.io v0.22.1
go.opencensus.io/internal/tagencoding
go.opencensus.io/metric/metricdata