feat: add bookmarks table for browser bookmarks

Also fetch the bookmark's icon using a library (github.com/mat/besticon).
Jonathan Rainville 2020-11-16 13:31:34 -05:00 committed by GitHub
parent e8dbc66227
commit b2d8f7f416
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
101 changed files with 24138 additions and 342 deletions

@@ -1 +1 @@
-0.63.8
+0.63.9

@@ -28,7 +28,9 @@
 // 0015_link_previews.down.sql (0)
 // 0015_link_previews.up.sql (203B)
 // 0016_local_notifications_preferences.down.sql (43B)
-// 0016_local_notifications_preferences.up.sql (213B)
+// 0016_local_notifications_preferences.up.sql (204B)
+// 0017_bookmarks.down.sql (22B)
+// 0017_bookmarks.up.sql (147B)
 // doc.go (74B)
 package migrations
@ -113,7 +115,7 @@ func _0001_appDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_app.down.sql", size: 356, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0001_app.down.sql", size: 356, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0x25, 0xa0, 0xf8, 0x7d, 0x2d, 0xd, 0xcf, 0x18, 0xe4, 0x73, 0xc3, 0x95, 0xf5, 0x24, 0x20, 0xa9, 0xe6, 0x9e, 0x1d, 0x93, 0xe5, 0xc5, 0xad, 0x93, 0x8f, 0x5e, 0x40, 0xb5, 0x30, 0xaa, 0x25}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0x25, 0xa0, 0xf8, 0x7d, 0x2d, 0xd, 0xcf, 0x18, 0xe4, 0x73, 0xc3, 0x95, 0xf5, 0x24, 0x20, 0xa9, 0xe6, 0x9e, 0x1d, 0x93, 0xe5, 0xc5, 0xad, 0x93, 0x8f, 0x5e, 0x40, 0xb5, 0x30, 0xaa, 0x25}}
return a, nil return a, nil
} }
@ -133,7 +135,7 @@ func _0001_appUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_app.up.sql", size: 2967, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0001_app.up.sql", size: 2967, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0x3a, 0xa7, 0xf2, 0x8f, 0xfa, 0x82, 0x7c, 0xc5, 0x49, 0xac, 0xac, 0xf, 0xc, 0x77, 0xe2, 0xba, 0xe8, 0x4d, 0xe, 0x6f, 0x5d, 0x2c, 0x2c, 0x18, 0x80, 0xc2, 0x1d, 0xe, 0x25, 0xe, 0x18}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0x3a, 0xa7, 0xf2, 0x8f, 0xfa, 0x82, 0x7c, 0xc5, 0x49, 0xac, 0xac, 0xf, 0xc, 0x77, 0xe2, 0xba, 0xe8, 0x4d, 0xe, 0x6f, 0x5d, 0x2c, 0x2c, 0x18, 0x80, 0xc2, 0x1d, 0xe, 0x25, 0xe, 0x18}}
return a, nil return a, nil
} }
@ -153,7 +155,7 @@ func _0002_tokensDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0002_tokens.down.sql", size: 19, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0002_tokens.down.sql", size: 19, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0x31, 0x2, 0xcc, 0x2f, 0x38, 0x90, 0xf7, 0x58, 0x37, 0x47, 0xf4, 0x18, 0xf7, 0x72, 0x74, 0x67, 0x14, 0x7e, 0xf3, 0xb1, 0xd6, 0x5f, 0xb0, 0xd5, 0xe7, 0x91, 0xf4, 0x26, 0x77, 0x8e, 0x68}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0x31, 0x2, 0xcc, 0x2f, 0x38, 0x90, 0xf7, 0x58, 0x37, 0x47, 0xf4, 0x18, 0xf7, 0x72, 0x74, 0x67, 0x14, 0x7e, 0xf3, 0xb1, 0xd6, 0x5f, 0xb0, 0xd5, 0xe7, 0x91, 0xf4, 0x26, 0x77, 0x8e, 0x68}}
return a, nil return a, nil
} }
@ -173,7 +175,7 @@ func _0002_tokensUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0002_tokens.up.sql", size: 248, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0002_tokens.up.sql", size: 248, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcc, 0xd6, 0xde, 0xd3, 0x7b, 0xee, 0x92, 0x11, 0x38, 0xa4, 0xeb, 0x84, 0xca, 0xcb, 0x37, 0x75, 0x5, 0x77, 0x7f, 0x14, 0x39, 0xee, 0xa1, 0x8b, 0xd4, 0x5c, 0x6e, 0x55, 0x6, 0x50, 0x16, 0xd4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcc, 0xd6, 0xde, 0xd3, 0x7b, 0xee, 0x92, 0x11, 0x38, 0xa4, 0xeb, 0x84, 0xca, 0xcb, 0x37, 0x75, 0x5, 0x77, 0x7f, 0x14, 0x39, 0xee, 0xa1, 0x8b, 0xd4, 0x5c, 0x6e, 0x55, 0x6, 0x50, 0x16, 0xd4}}
return a, nil return a, nil
} }
@ -193,7 +195,7 @@ func _0003_settingsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0003_settings.down.sql", size: 118, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0003_settings.down.sql", size: 118, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0xa6, 0xf5, 0xc0, 0x60, 0x64, 0x77, 0xe2, 0xe7, 0x3c, 0x9b, 0xb1, 0x52, 0xa9, 0x95, 0x16, 0xf8, 0x60, 0x2f, 0xa5, 0xeb, 0x46, 0xb9, 0xb9, 0x8f, 0x4c, 0xf4, 0xfd, 0xbb, 0xe7, 0xe5, 0xe5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0xa6, 0xf5, 0xc0, 0x60, 0x64, 0x77, 0xe2, 0xe7, 0x3c, 0x9b, 0xb1, 0x52, 0xa9, 0x95, 0x16, 0xf8, 0x60, 0x2f, 0xa5, 0xeb, 0x46, 0xb9, 0xb9, 0x8f, 0x4c, 0xf4, 0xfd, 0xbb, 0xe7, 0xe5, 0xe5}}
return a, nil return a, nil
} }
@ -213,7 +215,7 @@ func _0003_settingsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0003_settings.up.sql", size: 1311, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0003_settings.up.sql", size: 1311, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x35, 0x0, 0xeb, 0xe2, 0x33, 0x68, 0xb9, 0xf4, 0xf6, 0x8e, 0x9e, 0x10, 0xe9, 0x58, 0x68, 0x28, 0xb, 0xcd, 0xec, 0x74, 0x71, 0xa7, 0x9a, 0x5a, 0x77, 0x59, 0xb1, 0x13, 0x1c, 0xa1, 0x5b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x35, 0x0, 0xeb, 0xe2, 0x33, 0x68, 0xb9, 0xf4, 0xf6, 0x8e, 0x9e, 0x10, 0xe9, 0x58, 0x68, 0x28, 0xb, 0xcd, 0xec, 0x74, 0x71, 0xa7, 0x9a, 0x5a, 0x77, 0x59, 0xb1, 0x13, 0x1c, 0xa1, 0x5b}}
return a, nil return a, nil
} }
@ -233,7 +235,7 @@ func _0004_pending_stickersDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0004_pending_stickers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0004_pending_stickers.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -253,7 +255,7 @@ func _0004_pending_stickersUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0004_pending_stickers.up.sql", size: 61, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0004_pending_stickers.up.sql", size: 61, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3c, 0xed, 0x25, 0xdf, 0x75, 0x2, 0x6c, 0xf0, 0xa2, 0xa8, 0x37, 0x62, 0x65, 0xad, 0xfd, 0x98, 0xa0, 0x9d, 0x63, 0x94, 0xdf, 0x6b, 0x46, 0xe0, 0x68, 0xec, 0x9c, 0x7f, 0x77, 0xdd, 0xb3, 0x6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3c, 0xed, 0x25, 0xdf, 0x75, 0x2, 0x6c, 0xf0, 0xa2, 0xa8, 0x37, 0x62, 0x65, 0xad, 0xfd, 0x98, 0xa0, 0x9d, 0x63, 0x94, 0xdf, 0x6b, 0x46, 0xe0, 0x68, 0xec, 0x9c, 0x7f, 0x77, 0xdd, 0xb3, 0x6}}
return a, nil return a, nil
} }
@ -273,7 +275,7 @@ func _0005_waku_modeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0005_waku_mode.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0005_waku_mode.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -293,7 +295,7 @@ func _0005_waku_modeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0005_waku_mode.up.sql", size: 146, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0005_waku_mode.up.sql", size: 146, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa6, 0x91, 0xc, 0xd7, 0x89, 0x61, 0x2e, 0x4c, 0x5a, 0xb6, 0x67, 0xd1, 0xc1, 0x42, 0x24, 0x38, 0xd6, 0x1b, 0x75, 0x41, 0x9c, 0x23, 0xb0, 0xca, 0x5c, 0xf1, 0x5c, 0xd0, 0x13, 0x92, 0x3e, 0xe1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa6, 0x91, 0xc, 0xd7, 0x89, 0x61, 0x2e, 0x4c, 0x5a, 0xb6, 0x67, 0xd1, 0xc1, 0x42, 0x24, 0x38, 0xd6, 0x1b, 0x75, 0x41, 0x9c, 0x23, 0xb0, 0xca, 0x5c, 0xf1, 0x5c, 0xd0, 0x13, 0x92, 0x3e, 0xe1}}
return a, nil return a, nil
} }
@ -313,7 +315,7 @@ func _0006_appearanceUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0006_appearance.up.sql", size: 67, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0006_appearance.up.sql", size: 67, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x6, 0x25, 0x6c, 0xe4, 0x9d, 0xa7, 0x72, 0xe8, 0xbc, 0xe4, 0x1f, 0x1e, 0x2d, 0x7c, 0xb7, 0xf6, 0xa3, 0xec, 0x3b, 0x4e, 0x93, 0x2e, 0xa4, 0xec, 0x6f, 0xe5, 0x95, 0x94, 0xe8, 0x4, 0xfb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x6, 0x25, 0x6c, 0xe4, 0x9d, 0xa7, 0x72, 0xe8, 0xbc, 0xe4, 0x1f, 0x1e, 0x2d, 0x7c, 0xb7, 0xf6, 0xa3, 0xec, 0x3b, 0x4e, 0x93, 0x2e, 0xa4, 0xec, 0x6f, 0xe5, 0x95, 0x94, 0xe8, 0x4, 0xfb}}
return a, nil return a, nil
} }
@ -333,7 +335,7 @@ func _0007_enable_waku_defaultUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0007_enable_waku_default.up.sql", size: 38, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "0007_enable_waku_default.up.sql", size: 38, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x42, 0xb6, 0xe5, 0x48, 0x41, 0xeb, 0xc0, 0x7e, 0x3b, 0xe6, 0x8e, 0x96, 0x33, 0x20, 0x92, 0x24, 0x5a, 0x60, 0xfa, 0xa0, 0x3, 0x5e, 0x76, 0x4b, 0x89, 0xaa, 0x37, 0x66, 0xbc, 0x26, 0x11}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x42, 0xb6, 0xe5, 0x48, 0x41, 0xeb, 0xc0, 0x7e, 0x3b, 0xe6, 0x8e, 0x96, 0x33, 0x20, 0x92, 0x24, 0x5a, 0x60, 0xfa, 0xa0, 0x3, 0x5e, 0x76, 0x4b, 0x89, 0xaa, 0x37, 0x66, 0xbc, 0x26, 0x11}}
return a, nil return a, nil
} }
@ -353,7 +355,7 @@ func _0008_add_push_notificationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0008_add_push_notifications.up.sql", size: 349, mode: os.FileMode(0644), modTime: time.Unix(1598257438, 0)} info := bindataFileInfo{name: "0008_add_push_notifications.up.sql", size: 349, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5a, 0x0, 0xbf, 0xd0, 0xdd, 0xcd, 0x73, 0xe0, 0x7c, 0x56, 0xef, 0xdc, 0x57, 0x61, 0x94, 0x64, 0x70, 0xb9, 0xfa, 0xa1, 0x2a, 0x36, 0xc, 0x2f, 0xf8, 0x95, 0xa, 0x57, 0x3e, 0x7a, 0xd7, 0x12}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5a, 0x0, 0xbf, 0xd0, 0xdd, 0xcd, 0x73, 0xe0, 0x7c, 0x56, 0xef, 0xdc, 0x57, 0x61, 0x94, 0x64, 0x70, 0xb9, 0xfa, 0xa1, 0x2a, 0x36, 0xc, 0x2f, 0xf8, 0x95, 0xa, 0x57, 0x3e, 0x7a, 0xd7, 0x12}}
return a, nil return a, nil
} }
@ -373,7 +375,7 @@ func _0009_enable_sending_push_notificationsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0009_enable_sending_push_notifications.down.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0009_enable_sending_push_notifications.down.sql", size: 49, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xae, 0x1b, 0x41, 0xcb, 0x9c, 0x2c, 0x93, 0xc6, 0x2a, 0x77, 0x3, 0xb9, 0x51, 0xe0, 0x68, 0x68, 0x0, 0xf7, 0x5b, 0xb3, 0x1e, 0x94, 0x44, 0xba, 0x9c, 0xd0, 0x3b, 0x80, 0x21, 0x6f, 0xb5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0xae, 0x1b, 0x41, 0xcb, 0x9c, 0x2c, 0x93, 0xc6, 0x2a, 0x77, 0x3, 0xb9, 0x51, 0xe0, 0x68, 0x68, 0x0, 0xf7, 0x5b, 0xb3, 0x1e, 0x94, 0x44, 0xba, 0x9c, 0xd0, 0x3b, 0x80, 0x21, 0x6f, 0xb5}}
return a, nil return a, nil
} }
@ -393,7 +395,7 @@ func _0009_enable_sending_push_notificationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0009_enable_sending_push_notifications.up.sql", size: 49, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0009_enable_sending_push_notifications.up.sql", size: 49, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1b, 0x80, 0xe4, 0x9c, 0xc8, 0xb8, 0xd5, 0xef, 0xce, 0x74, 0x9b, 0x7b, 0xdd, 0xa, 0x99, 0x1e, 0xef, 0x7f, 0xb8, 0x99, 0x84, 0x4, 0x0, 0x6b, 0x1d, 0x2c, 0xa, 0xf8, 0x2c, 0x4f, 0xb5, 0x44}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1b, 0x80, 0xe4, 0x9c, 0xc8, 0xb8, 0xd5, 0xef, 0xce, 0x74, 0x9b, 0x7b, 0xdd, 0xa, 0x99, 0x1e, 0xef, 0x7f, 0xb8, 0x99, 0x84, 0x4, 0x0, 0x6b, 0x1d, 0x2c, 0xa, 0xf8, 0x2c, 0x4f, 0xb5, 0x44}}
return a, nil return a, nil
} }
@ -413,7 +415,7 @@ func _0010_add_block_mentionsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0010_add_block_mentions.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0010_add_block_mentions.down.sql", size: 83, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x9e, 0x27, 0x1e, 0xba, 0x9f, 0xca, 0xae, 0x98, 0x2e, 0x6e, 0xe3, 0xdd, 0xac, 0x73, 0x34, 0x4e, 0x69, 0x92, 0xb5, 0xf6, 0x9, 0xab, 0x50, 0x35, 0xd, 0xee, 0xeb, 0x3e, 0xcc, 0x7e, 0xce}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x9e, 0x27, 0x1e, 0xba, 0x9f, 0xca, 0xae, 0x98, 0x2e, 0x6e, 0xe3, 0xdd, 0xac, 0x73, 0x34, 0x4e, 0x69, 0x92, 0xb5, 0xf6, 0x9, 0xab, 0x50, 0x35, 0xd, 0xee, 0xeb, 0x3e, 0xcc, 0x7e, 0xce}}
return a, nil return a, nil
} }
@ -433,7 +435,7 @@ func _0010_add_block_mentionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0010_add_block_mentions.up.sql", size: 89, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0010_add_block_mentions.up.sql", size: 89, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd7, 0x23, 0x85, 0xa2, 0xb5, 0xb6, 0xb4, 0x3f, 0xdc, 0x4e, 0xff, 0xe2, 0x6b, 0x66, 0x68, 0x5e, 0xb2, 0xb4, 0x14, 0xb2, 0x1b, 0x4d, 0xb1, 0xce, 0xf7, 0x6, 0x58, 0xa7, 0xaf, 0x93, 0x3f, 0x25}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd7, 0x23, 0x85, 0xa2, 0xb5, 0xb6, 0xb4, 0x3f, 0xdc, 0x4e, 0xff, 0xe2, 0x6b, 0x66, 0x68, 0x5e, 0xb2, 0xb4, 0x14, 0xb2, 0x1b, 0x4d, 0xb1, 0xce, 0xf7, 0x6, 0x58, 0xa7, 0xaf, 0x93, 0x3f, 0x25}}
return a, nil return a, nil
} }
@ -453,7 +455,7 @@ func _0011_allow_webview_permission_requestsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0011_allow_webview_permission_requests.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0011_allow_webview_permission_requests.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -473,7 +475,7 @@ func _0011_allow_webview_permission_requestsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0011_allow_webview_permission_requests.up.sql", size: 88, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0011_allow_webview_permission_requests.up.sql", size: 88, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x43, 0x5f, 0x22, 0x4c, 0x98, 0x1d, 0xc6, 0xf4, 0x89, 0xaf, 0xf4, 0x44, 0xba, 0xf8, 0x28, 0xa7, 0xb5, 0xb9, 0xf0, 0xf2, 0xcb, 0x5, 0x59, 0x7a, 0xc, 0xdf, 0xd3, 0x38, 0xa4, 0xb8, 0x98, 0xc2}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x43, 0x5f, 0x22, 0x4c, 0x98, 0x1d, 0xc6, 0xf4, 0x89, 0xaf, 0xf4, 0x44, 0xba, 0xf8, 0x28, 0xa7, 0xb5, 0xb9, 0xf0, 0xf2, 0xcb, 0x5, 0x59, 0x7a, 0xc, 0xdf, 0xd3, 0x38, 0xa4, 0xb8, 0x98, 0xc2}}
return a, nil return a, nil
} }
@ -493,7 +495,7 @@ func _0012_pending_transactionsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0012_pending_transactions.down.sql", size: 33, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0012_pending_transactions.down.sql", size: 33, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0x41, 0xfe, 0x5c, 0xd8, 0xc3, 0x29, 0xfd, 0x31, 0x78, 0x99, 0x7a, 0xeb, 0x17, 0x62, 0x88, 0x41, 0xb3, 0xe7, 0xb5, 0x5, 0x0, 0x90, 0xa1, 0x7, 0x1a, 0x23, 0x88, 0x81, 0xba, 0x56, 0x9d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0x41, 0xfe, 0x5c, 0xd8, 0xc3, 0x29, 0xfd, 0x31, 0x78, 0x99, 0x7a, 0xeb, 0x17, 0x62, 0x88, 0x41, 0xb3, 0xe7, 0xb5, 0x5, 0x0, 0x90, 0xa1, 0x7, 0x1a, 0x23, 0x88, 0x81, 0xba, 0x56, 0x9d}}
return a, nil return a, nil
} }
@ -513,7 +515,7 @@ func _0012_pending_transactionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0012_pending_transactions.up.sql", size: 321, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0012_pending_transactions.up.sql", size: 321, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0x17, 0xff, 0xd7, 0xa7, 0x49, 0x1e, 0x7b, 0x34, 0x63, 0x7c, 0x53, 0xaa, 0x6b, 0x2d, 0xc8, 0xe0, 0x82, 0x21, 0x90, 0x3a, 0x94, 0xf1, 0xa6, 0xe4, 0x70, 0xe5, 0x85, 0x1a, 0x48, 0x25, 0xb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0x17, 0xff, 0xd7, 0xa7, 0x49, 0x1e, 0x7b, 0x34, 0x63, 0x7c, 0x53, 0xaa, 0x6b, 0x2d, 0xc8, 0xe0, 0x82, 0x21, 0x90, 0x3a, 0x94, 0xf1, 0xa6, 0xe4, 0x70, 0xe5, 0x85, 0x1a, 0x48, 0x25, 0xb}}
return a, nil return a, nil
} }
@ -533,7 +535,7 @@ func _0013_favouritesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0013_favourites.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0013_favourites.down.sql", size: 23, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x32, 0xf8, 0x55, 0x13, 0x4f, 0x4a, 0x19, 0x83, 0x9c, 0xda, 0x34, 0xb8, 0x3, 0x54, 0x82, 0x1e, 0x99, 0x36, 0x6b, 0x42, 0x3, 0xf6, 0x43, 0xde, 0xe6, 0x32, 0xb6, 0xdf, 0xe2, 0x59, 0x8c, 0x84}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x32, 0xf8, 0x55, 0x13, 0x4f, 0x4a, 0x19, 0x83, 0x9c, 0xda, 0x34, 0xb8, 0x3, 0x54, 0x82, 0x1e, 0x99, 0x36, 0x6b, 0x42, 0x3, 0xf6, 0x43, 0xde, 0xe6, 0x32, 0xb6, 0xdf, 0xe2, 0x59, 0x8c, 0x84}}
return a, nil return a, nil
} }
@ -553,7 +555,7 @@ func _0013_favouritesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0013_favourites.up.sql", size: 132, mode: os.FileMode(0644), modTime: time.Unix(1600853026, 0)} info := bindataFileInfo{name: "0013_favourites.up.sql", size: 132, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbe, 0x1, 0x27, 0x38, 0x76, 0xf5, 0xcb, 0x61, 0xda, 0x5b, 0xce, 0xd9, 0x8b, 0x18, 0x77, 0x61, 0x84, 0xe7, 0x22, 0xe2, 0x13, 0x99, 0xab, 0x32, 0xbc, 0xbe, 0xed, 0x1f, 0x2f, 0xb0, 0xe4, 0x8d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbe, 0x1, 0x27, 0x38, 0x76, 0xf5, 0xcb, 0x61, 0xda, 0x5b, 0xce, 0xd9, 0x8b, 0x18, 0x77, 0x61, 0x84, 0xe7, 0x22, 0xe2, 0x13, 0x99, 0xab, 0x32, 0xbc, 0xbe, 0xed, 0x1f, 0x2f, 0xb0, 0xe4, 0x8d}}
return a, nil return a, nil
} }
@ -573,7 +575,7 @@ func _0014_add_use_mailserversDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0014_add_use_mailservers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1603868977, 0)} info := bindataFileInfo{name: "0014_add_use_mailservers.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1604087519, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -593,7 +595,7 @@ func _0014_add_use_mailserversUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0014_add_use_mailservers.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1603868977, 0)} info := bindataFileInfo{name: "0014_add_use_mailservers.up.sql", size: 111, mode: os.FileMode(0644), modTime: time.Unix(1604087519, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xba, 0x65, 0xbf, 0x1b, 0xc9, 0x6d, 0x45, 0xf2, 0xf5, 0x30, 0x7c, 0xc1, 0xde, 0xb8, 0xe3, 0x3f, 0xa9, 0x2f, 0x9f, 0xea, 0x1, 0x29, 0x29, 0x65, 0xe7, 0x38, 0xab, 0xa4, 0x62, 0xf, 0xd0}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0xba, 0x65, 0xbf, 0x1b, 0xc9, 0x6d, 0x45, 0xf2, 0xf5, 0x30, 0x7c, 0xc1, 0xde, 0xb8, 0xe3, 0x3f, 0xa9, 0x2f, 0x9f, 0xea, 0x1, 0x29, 0x29, 0x65, 0xe7, 0x38, 0xab, 0xa4, 0x62, 0xf, 0xd0}}
return a, nil return a, nil
} }
@ -613,7 +615,7 @@ func _0015_link_previewsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0015_link_previews.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1603869530, 0)} info := bindataFileInfo{name: "0015_link_previews.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1604087519, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -633,7 +635,7 @@ func _0015_link_previewsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0015_link_previews.up.sql", size: 203, mode: os.FileMode(0644), modTime: time.Unix(1603869530, 0)} info := bindataFileInfo{name: "0015_link_previews.up.sql", size: 203, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0xf7, 0x38, 0x25, 0xa6, 0xfc, 0x6b, 0x9, 0xe4, 0xd9, 0xbf, 0x58, 0x7b, 0x80, 0xd8, 0x48, 0x63, 0xde, 0xa5, 0x5e, 0x30, 0xa3, 0xeb, 0x68, 0x8e, 0x6a, 0x9f, 0xfd, 0xf4, 0x46, 0x41, 0x34}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0xf7, 0x38, 0x25, 0xa6, 0xfc, 0x6b, 0x9, 0xe4, 0xd9, 0xbf, 0x58, 0x7b, 0x80, 0xd8, 0x48, 0x63, 0xde, 0xa5, 0x5e, 0x30, 0xa3, 0xeb, 0x68, 0x8e, 0x6a, 0x9f, 0xfd, 0xf4, 0x46, 0x41, 0x34}}
return a, nil return a, nil
} }
@ -653,12 +655,12 @@ func _0016_local_notifications_preferencesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0016_local_notifications_preferences.down.sql", size: 43, mode: os.FileMode(0644), modTime: time.Unix(1603869534, 0)} info := bindataFileInfo{name: "0016_local_notifications_preferences.down.sql", size: 43, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x50, 0xc7, 0xdd, 0x53, 0x9c, 0x5d, 0x1e, 0xb5, 0x71, 0x25, 0x50, 0x58, 0xcf, 0x6d, 0xbe, 0x5a, 0x8, 0x12, 0xc9, 0x13, 0xd, 0x9a, 0x3d, 0x4b, 0x7a, 0x2f, 0x1b, 0xe5, 0x23, 0x52, 0x78}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe0, 0x50, 0xc7, 0xdd, 0x53, 0x9c, 0x5d, 0x1e, 0xb5, 0x71, 0x25, 0x50, 0x58, 0xcf, 0x6d, 0xbe, 0x5a, 0x8, 0x12, 0xc9, 0x13, 0xd, 0x9a, 0x3d, 0x4b, 0x7a, 0x2f, 0x1b, 0xe5, 0x23, 0x52, 0x78}}
return a, nil return a, nil
} }
var __0016_local_notifications_preferencesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\x8d\xc1\x6a\x84\x30\x14\x45\xf7\x7e\xc5\x5d\x2a\xf8\x07\x5d\x45\x4d\x51\x9a\x9a\x12\x62\xc5\x95\xa4\xf1\x09\x81\x10\x4b\x22\x7e\xff\x30\x3a\xc3\x30\xdb\x7b\x2e\xe7\xd4\x8a\x33\xcd\xa1\x59\x25\x38\xfc\x66\x8d\x9f\xc3\xb6\xbb\xd5\x59\xb3\xbb\x2d\xa4\xf9\x3f\xd2\x4a\x91\x82\xa5\x84\x3c\x03\x80\x44\xf1\x70\x96\xf0\xcb\x54\xdd\x32\x85\x5e\x6a\xf4\x83\x10\xe5\x49\xe9\xa0\xb0\x3f\xd9\x35\xb9\x85\xc2\x5d\x49\xf1\x7d\xa7\x60\xfe\x3c\x2d\xa8\xa4\x14\x9c\xf5\x68\xf8\x27\x1b\x84\xc6\x6a\x7c\xa2\xeb\xf2\xa3\xba\x6f\xa6\x26\x7c\xf1\x29\x7f\x74\xcb\xb3\x50\xbe\xa4\x45\x56\x60\xec\x74\x2b\x07\x0d\x25\xc7\xae\xf9\xb8\x05\x00\x00\xff\xff\xd8\x7b\x68\x58\xd5\x00\x00\x00") var __0016_local_notifications_preferencesUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x5c\x8d\x41\x0b\x82\x30\x1c\x47\xef\x7e\x8a\xdf\x51\xc1\x6f\xd0\x69\xea\x42\xc9\x5a\x8c\x99\x78\x92\x35\xff\xc2\x60\xcc\xd8\xc4\xcf\x1f\x59\x10\x75\x7d\x3c\xde\x2b\x25\x67\x8a\x43\xb1\xa2\xe5\x70\x8b\xd1\x6e\xf4\xcb\x6a\x67\x6b\xf4\x6a\x17\x1f\xc7\x47\xa0\x99\x02\x79\x43\x11\x69\x02\x00\x91\xc2\x66\x0d\xe1\xc6\x64\x59\x33\x99\xef\x90\x36\xf2\xeb\x2f\xb2\x13\xf9\x57\x89\xc2\x9f\xea\xf5\xdd\xd1\x84\x42\x88\x96\xb3\x0b\x2a\x7e\x64\x5d\xab\x30\x6b\x17\xe9\xad\x5c\x65\x73\x66\x72\xc0\x89\x0f\xe9\x67\x97\xef\x87\xfc\x1b\xcd\x92\x0c\x7d\xa3\x6a\xd1\x29\x48\xd1\x37\xd5\xe1\x19\x00\x00\xff\xff\x02\x6d\x5e\xec\xcc\x00\x00\x00")
func _0016_local_notifications_preferencesUpSqlBytes() ([]byte, error) { func _0016_local_notifications_preferencesUpSqlBytes() ([]byte, error) {
return bindataRead( return bindataRead(
@ -673,8 +675,48 @@ func _0016_local_notifications_preferencesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0016_local_notifications_preferences.up.sql", size: 213, mode: os.FileMode(0644), modTime: time.Unix(1603869534, 0)} info := bindataFileInfo{name: "0016_local_notifications_preferences.up.sql", size: 204, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8, 0xde, 0x2, 0xbe, 0xf2, 0x84, 0x7, 0x3e, 0x44, 0xe5, 0xc6, 0xc9, 0xb, 0x70, 0x9f, 0x68, 0x2c, 0x6, 0x99, 0xe, 0xdd, 0xbb, 0x20, 0x0, 0x6a, 0x48, 0x15, 0xee, 0xb, 0x76, 0x9f, 0xd8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3f, 0x3a, 0x16, 0x25, 0xdf, 0xba, 0x62, 0xd3, 0x81, 0x73, 0xc, 0x10, 0x85, 0xbc, 0x8d, 0xe, 0x1d, 0x62, 0xcb, 0xb, 0x6d, 0x8c, 0x4f, 0x63, 0x5f, 0xe2, 0xd, 0xc5, 0x46, 0xa8, 0x35, 0x5b}}
return a, nil
}
var __0017_bookmarksDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\x48\xca\xcf\xcf\xce\x4d\x2c\xca\x2e\xb6\xe6\x02\x04\x00\x00\xff\xff\xfb\x69\xe4\xcd\x16\x00\x00\x00")
func _0017_bookmarksDownSqlBytes() ([]byte, error) {
return bindataRead(
__0017_bookmarksDownSql,
"0017_bookmarks.down.sql",
)
}
func _0017_bookmarksDownSql() (*asset, error) {
bytes, err := _0017_bookmarksDownSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "0017_bookmarks.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1604421608, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9a, 0x13, 0x2a, 0x44, 0xb0, 0x3, 0x18, 0x63, 0xb8, 0x33, 0xda, 0x3a, 0xeb, 0xb8, 0xcb, 0xd1, 0x98, 0x29, 0xa7, 0xf0, 0x6, 0x9d, 0xc9, 0x62, 0xe7, 0x89, 0x7f, 0x77, 0xaf, 0xec, 0x6b, 0x8f}}
return a, nil
}
var __0017_bookmarksUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x8b\x3d\xaa\xc2\x40\x10\xc7\xfb\x3d\xc5\xbf\x4c\xe0\xdd\xe0\x55\x63\x1c\xc9\xe0\x9a\x95\xc9\xc4\x98\x4a\x56\x08\x22\xc9\x1a\x58\xc9\xfd\x45\x2b\x0b\xdb\xdf\x47\xa5\x4c\xc6\x30\xda\x78\x86\xec\xd0\x04\x03\x9f\xa5\xb5\x16\xd7\x65\x99\x52\xcc\xd3\x13\x85\x03\xd6\x3c\xe3\x44\x5a\xd5\xa4\x9f\xa8\xe9\xbc\xff\x73\xc0\x23\xa6\xf1\xa7\xb8\xa7\x78\x1b\x2f\x5f\xdb\x1b\x1e\x55\x0e\xa4\x03\xf6\x3c\xa0\x58\xf3\x5c\xba\x12\xbd\x58\x1d\x3a\x83\x86\x5e\xb6\xff\xce\xbd\x02\x00\x00\xff\xff\x91\xa6\x3e\xcb\x93\x00\x00\x00")
func _0017_bookmarksUpSqlBytes() ([]byte, error) {
return bindataRead(
__0017_bookmarksUpSql,
"0017_bookmarks.up.sql",
)
}
func _0017_bookmarksUpSql() (*asset, error) {
bytes, err := _0017_bookmarksUpSqlBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "0017_bookmarks.up.sql", size: 147, mode: os.FileMode(0644), modTime: time.Unix(1604424317, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbc, 0x47, 0xe1, 0xe3, 0xd8, 0xc6, 0x4, 0x6d, 0x5f, 0x2f, 0xa, 0x51, 0xa6, 0x8c, 0x6a, 0xe0, 0x3d, 0x8c, 0x91, 0x47, 0xbc, 0x1, 0x75, 0x46, 0x92, 0x2, 0x18, 0x6e, 0xe3, 0x4f, 0x18, 0x57}}
return a, nil return a, nil
} }
@ -693,7 +735,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1593853962, 0)} info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil return a, nil
} }
@@ -847,6 +889,10 @@ var _bindata = map[string]func() (*asset, error){
 	"0016_local_notifications_preferences.up.sql": _0016_local_notifications_preferencesUpSql,
+	"0017_bookmarks.down.sql": _0017_bookmarksDownSql,
+	"0017_bookmarks.up.sql": _0017_bookmarksUpSql,
 	"doc.go": docGo,
 }
@@ -920,7 +966,9 @@ var _bintree = &bintree{nil, map[string]*bintree{
 	"0015_link_previews.up.sql": &bintree{_0015_link_previewsUpSql, map[string]*bintree{}},
 	"0016_local_notifications_preferences.down.sql": &bintree{_0016_local_notifications_preferencesDownSql, map[string]*bintree{}},
 	"0016_local_notifications_preferences.up.sql": &bintree{_0016_local_notifications_preferencesUpSql, map[string]*bintree{}},
+	"0017_bookmarks.down.sql": &bintree{_0017_bookmarksDownSql, map[string]*bintree{}},
+	"0017_bookmarks.up.sql": &bintree{_0017_bookmarksUpSql, map[string]*bintree{}},
 	"doc.go": &bintree{docGo, map[string]*bintree{}},
 }}

 // RestoreAsset restores an asset under the given directory.
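The hunks above are go-bindata output: each migration's SQL is embedded as a gzip-compressed byte slice and registered by name in the `_bindata` map and `_bintree` tree, which back the generated `Asset`/`AssetNames` helpers. A minimal sketch of pulling the new bookmark migration back out of the generated package (the import path is an assumption for illustration; the package name `migrations` comes from the file itself):

```go
package main

import (
	"fmt"
	"log"

	// Assumed import path for illustration; the generated file above declares
	// `package migrations` inside status-go's app-database migrations directory.
	migrations "github.com/status-im/status-go/appdatabase/migrations"
)

func main() {
	// Asset gunzips the embedded bytes registered under this name in _bindata.
	sqlText, err := migrations.Asset("0017_bookmarks.up.sql")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(sqlText)) // the CREATE TABLE statement shown further below

	// AssetNames lists every embedded migration, now including 0017_bookmarks.*.
	for _, name := range migrations.AssetNames() {
		fmt.Println(name)
	}
}
```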

@ -0,0 +1 @@
DROP TABLE bookmarks;

@ -0,0 +1,7 @@
CREATE TABLE IF NOT EXISTS bookmarks (
url VARCHAR NOT NULL,
name VARCHAR NOT NULL,
image_url VARCHAR,
PRIMARY KEY (url)
) WITHOUT ROWID;
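Because `url` is the primary key, storing the same URL again updates the existing row rather than adding a duplicate. A minimal sketch of reading and writing this table with `database/sql` (illustrative only; the struct and helper names are assumptions, not the service code added by this commit):

```go
package browser

import "database/sql"

// Bookmark mirrors the columns of the bookmarks table
// (field names here are illustrative assumptions).
type Bookmark struct {
	URL      string
	Name     string
	ImageURL string
}

// storeBookmark inserts or updates a bookmark; with url as the PRIMARY KEY,
// INSERT OR REPLACE keeps a single row per URL in SQLite.
func storeBookmark(db *sql.DB, b Bookmark) error {
	_, err := db.Exec(
		`INSERT OR REPLACE INTO bookmarks (url, name, image_url) VALUES (?, ?, ?)`,
		b.URL, b.Name, b.ImageURL,
	)
	return err
}

// getBookmarks reads all stored bookmarks back.
func getBookmarks(db *sql.DB) ([]Bookmark, error) {
	rows, err := db.Query(`SELECT url, name, image_url FROM bookmarks`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var result []Bookmark
	for rows.Next() {
		var (
			b   Bookmark
			img sql.NullString // image_url is nullable in the schema
		)
		if err := rows.Scan(&b.URL, &b.Name, &img); err != nil {
			return nil, err
		}
		b.ImageURL = img.String
		result = append(result, b)
	}
	return result, rows.Err()
}
```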

go.mod

@@ -9,6 +9,7 @@ replace github.com/Sirupsen/logrus v1.4.2 => github.com/sirupsen/logrus v1.4.2
 replace github.com/docker/docker => github.com/docker/engine v1.4.2-0.20190717161051-705d9623b7c1

 require (
+	github.com/PuerkitoBio/goquery v1.6.0 // indirect
 	github.com/beevik/ntp v0.2.0
 	github.com/btcsuite/btcd v0.20.1-beta
 	github.com/cenkalti/backoff/v3 v3.2.2
@@ -30,6 +31,7 @@ require (
 	github.com/libp2p/go-libp2p v0.4.2 // indirect
 	github.com/libp2p/go-libp2p-core v0.2.4
 	github.com/lucasb-eyer/go-colorful v1.0.3
+	github.com/mat/besticon v3.12.0+incompatible
 	github.com/mattn/go-colorable v0.1.4 // indirect
 	github.com/mattn/go-isatty v0.0.10 // indirect
 	github.com/mattn/go-pointer v0.0.0-20190911064623-a0a44394634f
@@ -46,6 +48,7 @@ require (
 	github.com/pborman/uuid v1.2.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.5.0
+	github.com/prometheus/common v0.9.1
 	github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect
 	github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect
 	github.com/russolsen/transit v0.0.0-20180705123435-0794b4c4505a
@@ -65,7 +68,7 @@ require (
 	go.uber.org/multierr v1.4.0 // indirect
 	go.uber.org/zap v1.13.0
 	golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c
-	golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914 // indirect
+	golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect
 	golang.org/x/tools v0.0.0-20200211045251-2de505fc5306 // indirect
 	google.golang.org/genproto v0.0.0-20191115221424-83cc0476cb11 // indirect
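`github.com/mat/besticon` is the icon library referred to in the commit message; `goquery` and `golang.org/x/image` arrive as its indirect dependencies. A rough sketch of fetching a site icon with it, assuming besticon's `IconFinder.FetchIcons` API at v3.12.0 (this is not the code added by the commit):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mat/besticon/besticon"
)

func main() {
	// FetchIcons downloads the page, parses its link/meta tags, and probes
	// common icon locations, returning the icons it finds.
	finder := besticon.IconFinder{}
	icons, err := finder.FetchIcons("https://status.im")
	if err != nil {
		log.Fatal(err)
	}
	if len(icons) == 0 {
		log.Fatal("no icon found")
	}

	// Pick the largest icon by width; its URL is the kind of value that would
	// end up in the bookmarks table's image_url column.
	best := icons[0]
	for _, ic := range icons {
		if ic.Width > best.Width {
			best = ic
		}
	}
	fmt.Println(best.URL, best.Width, best.Height)
}
```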

go.sum

@ -17,18 +17,24 @@ github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/goquery v1.6.0 h1:j7taAbelrdcsOlGeMenZxc2AWXD5fieT1/znArdnx94=
github.com/PuerkitoBio/goquery v1.6.0/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v0.0.0-20190218064605-e24eb225f156 h1:hh7BAWFHv41r0gce0KRYtDJpL4erKfmB1/mpgoSADeI= github.com/allegro/bigcache v0.0.0-20190218064605-e24eb225f156 h1:hh7BAWFHv41r0gce0KRYtDJpL4erKfmB1/mpgoSADeI=
github.com/allegro/bigcache v0.0.0-20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v0.0.0-20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/allegro/bigcache v1.2.0 h1:qDaE0QoF29wKBb3+pXFrJFy1ihe5OT9OiXhg1t85SxM= github.com/allegro/bigcache v1.2.0 h1:qDaE0QoF29wKBb3+pXFrJFy1ihe5OT9OiXhg1t85SxM=
github.com/allegro/bigcache v1.2.0/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.0/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU= github.com/apilayer/freegeoip v3.5.0+incompatible/go.mod h1:CUfFqErhFhXneJendyQ/rRcuA8kH8JxHvYnbOozmlCU=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
@ -434,6 +440,8 @@ github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI= github.com/maruel/panicparse v0.0.0-20160720141634-ad661195ed0e/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI=
github.com/mat/besticon v3.12.0+incompatible h1:1KTD6wisfjfnX+fk9Kx/6VEZL+MAW1LhCkL9Q47H9Bg=
github.com/mat/besticon v3.12.0+incompatible/go.mod h1:mA1auQYHt6CW5e7L9HJLmqVQC8SzNk2gVwouO0AbiEU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
@ -756,6 +764,8 @@ golang.org/x/crypto v0.0.0-20191119213627-4f8c1d86b1ba/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4=
golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM=
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -765,6 +775,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -784,8 +795,8 @@ golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914 h1:MlY3mEfbnWGmUi4rtHOtNnnnN4UJRGSyLPx+DXA5Sq4= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -880,6 +891,7 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@ -86,7 +86,7 @@ func _1557732988_initialize_dbDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1557732988_initialize_db.down.sql", size: 72, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "1557732988_initialize_db.down.sql", size: 72, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x40, 0x78, 0xb7, 0x71, 0x3c, 0x20, 0x3b, 0xc9, 0xb, 0x2f, 0x49, 0xe4, 0xff, 0x1c, 0x84, 0x54, 0xa1, 0x30, 0xe3, 0x90, 0xf8, 0x73, 0xda, 0xb0, 0x2a, 0xea, 0x8e, 0xf1, 0x82, 0xe7, 0xd2}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x40, 0x78, 0xb7, 0x71, 0x3c, 0x20, 0x3b, 0xc9, 0xb, 0x2f, 0x49, 0xe4, 0xff, 0x1c, 0x84, 0x54, 0xa1, 0x30, 0xe3, 0x90, 0xf8, 0x73, 0xda, 0xb0, 0x2a, 0xea, 0x8e, 0xf1, 0x82, 0xe7, 0xd2}}
return a, nil return a, nil
} }
@ -106,7 +106,7 @@ func _1557732988_initialize_dbUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1557732988_initialize_db.up.sql", size: 234, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "1557732988_initialize_db.up.sql", size: 234, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xa, 0x31, 0xf, 0x94, 0xe, 0xd7, 0xd6, 0xaa, 0x22, 0xd6, 0x6c, 0x7a, 0xbc, 0xad, 0x6a, 0xed, 0x2e, 0x7a, 0xf0, 0x24, 0x81, 0x87, 0x14, 0xe, 0x1c, 0x8a, 0xf1, 0x45, 0xaf, 0x9e, 0x85}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xa, 0x31, 0xf, 0x94, 0xe, 0xd7, 0xd6, 0xaa, 0x22, 0xd6, 0x6c, 0x7a, 0xbc, 0xad, 0x6a, 0xed, 0x2e, 0x7a, 0xf0, 0x24, 0x81, 0x87, 0x14, 0xe, 0x1c, 0x8a, 0xf1, 0x45, 0xaf, 0x9e, 0x85}}
return a, nil return a, nil
} }
@ -126,7 +126,7 @@ func staticGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "static.go", size: 178, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "static.go", size: 178, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x8a, 0xf4, 0x27, 0x24, 0x9d, 0x2a, 0x1, 0x7b, 0x54, 0xea, 0xae, 0x4a, 0x35, 0x40, 0x92, 0xb5, 0xf9, 0xb3, 0x54, 0x3e, 0x3a, 0x1a, 0x2b, 0xae, 0xfb, 0x9e, 0x82, 0xeb, 0x4c, 0xf, 0x6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x8a, 0xf4, 0x27, 0x24, 0x9d, 0x2a, 0x1, 0x7b, 0x54, 0xea, 0xae, 0x4a, 0x35, 0x40, 0x92, 0xb5, 0xf9, 0xb3, 0x54, 0x3e, 0x3a, 0x1a, 0x2b, 0xae, 0xfb, 0x9e, 0x82, 0xeb, 0x4c, 0xf, 0x6}}
return a, nil return a, nil
} }

@ -86,7 +86,7 @@ func _0001_accountsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_accounts.down.sql", size: 21, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "0001_accounts.down.sql", size: 21, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0x61, 0x4c, 0x18, 0xfc, 0xc, 0xdf, 0x5c, 0x1f, 0x5e, 0xd3, 0xbd, 0xfa, 0x12, 0x5e, 0x8d, 0x8d, 0x8b, 0xb9, 0x5f, 0x99, 0x46, 0x63, 0xa5, 0xe3, 0xa6, 0x8a, 0x4, 0xf1, 0x73, 0x8a, 0xe9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0x61, 0x4c, 0x18, 0xfc, 0xc, 0xdf, 0x5c, 0x1f, 0x5e, 0xd3, 0xbd, 0xfa, 0x12, 0x5e, 0x8d, 0x8d, 0x8b, 0xb9, 0x5f, 0x99, 0x46, 0x63, 0xa5, 0xe3, 0xa6, 0x8a, 0x4, 0xf1, 0x73, 0x8a, 0xe9}}
return a, nil return a, nil
} }
@ -106,7 +106,7 @@ func _0001_accountsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "0001_accounts.up.sql", size: 163, mode: os.FileMode(0644), modTime: time.Unix(1575903446, 0)} info := bindataFileInfo{name: "0001_accounts.up.sql", size: 163, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf2, 0xfa, 0x99, 0x8e, 0x96, 0xb3, 0x13, 0x6c, 0x1f, 0x6, 0x27, 0xc5, 0xd2, 0xd4, 0xe0, 0xa5, 0x26, 0x82, 0xa7, 0x26, 0xf2, 0x68, 0x9d, 0xed, 0x9c, 0x3d, 0xbb, 0xdc, 0x37, 0x28, 0xbc, 0x1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf2, 0xfa, 0x99, 0x8e, 0x96, 0xb3, 0x13, 0x6c, 0x1f, 0x6, 0x27, 0xc5, 0xd2, 0xd4, 0xe0, 0xa5, 0x26, 0x82, 0xa7, 0x26, 0xf2, 0x68, 0x9d, 0xed, 0x9c, 0x3d, 0xbb, 0xdc, 0x37, 0x28, 0xbc, 0x1}}
return a, nil return a, nil
} }
@ -126,7 +126,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
return a, nil return a, nil
} }

@ -100,7 +100,7 @@ func _1536754952_initial_schemaDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1536754952_initial_schema.down.sql", size: 83, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1536754952_initial_schema.down.sql", size: 83, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x44, 0xcf, 0x76, 0x71, 0x1f, 0x5e, 0x9a, 0x43, 0xd8, 0xcd, 0xb8, 0xc3, 0x70, 0xc3, 0x7f, 0xfc, 0x90, 0xb4, 0x25, 0x1e, 0xf4, 0x66, 0x20, 0xb8, 0x33, 0x7e, 0xb0, 0x76, 0x1f, 0xc, 0xc0, 0x75}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x44, 0xcf, 0x76, 0x71, 0x1f, 0x5e, 0x9a, 0x43, 0xd8, 0xcd, 0xb8, 0xc3, 0x70, 0xc3, 0x7f, 0xfc, 0x90, 0xb4, 0x25, 0x1e, 0xf4, 0x66, 0x20, 0xb8, 0x33, 0x7e, 0xb0, 0x76, 0x1f, 0xc, 0xc0, 0x75}}
return a, nil return a, nil
} }
@ -120,7 +120,7 @@ func _1536754952_initial_schemaUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1536754952_initial_schema.up.sql", size: 962, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1536754952_initial_schema.up.sql", size: 962, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x90, 0x5a, 0x59, 0x3e, 0x3, 0xe2, 0x3c, 0x81, 0x42, 0xcd, 0x4c, 0x9a, 0xe8, 0xda, 0x93, 0x2b, 0x70, 0xa4, 0xd5, 0x29, 0x3e, 0xd5, 0xc9, 0x27, 0xb6, 0xb7, 0x65, 0xff, 0x0, 0xcb, 0xde}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x90, 0x5a, 0x59, 0x3e, 0x3, 0xe2, 0x3c, 0x81, 0x42, 0xcd, 0x4c, 0x9a, 0xe8, 0xda, 0x93, 0x2b, 0x70, 0xa4, 0xd5, 0x29, 0x3e, 0xd5, 0xc9, 0x27, 0xb6, 0xb7, 0x65, 0xff, 0x0, 0xcb, 0xde}}
return a, nil return a, nil
} }
@ -140,7 +140,7 @@ func _1539249977_update_ratchet_infoDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1539249977_update_ratchet_info.down.sql", size: 311, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1539249977_update_ratchet_info.down.sql", size: 311, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0xa4, 0xeb, 0xa0, 0xe6, 0xa0, 0xd4, 0x48, 0xbb, 0xad, 0x6f, 0x7d, 0x67, 0x8c, 0xbd, 0x25, 0xde, 0x1f, 0x73, 0x9a, 0xbb, 0xa8, 0xc9, 0x30, 0xb7, 0xa9, 0x7c, 0xaf, 0xb5, 0x1, 0x61, 0xdd}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0xa4, 0xeb, 0xa0, 0xe6, 0xa0, 0xd4, 0x48, 0xbb, 0xad, 0x6f, 0x7d, 0x67, 0x8c, 0xbd, 0x25, 0xde, 0x1f, 0x73, 0x9a, 0xbb, 0xa8, 0xc9, 0x30, 0xb7, 0xa9, 0x7c, 0xaf, 0xb5, 0x1, 0x61, 0xdd}}
return a, nil return a, nil
} }
@ -160,7 +160,7 @@ func _1539249977_update_ratchet_infoUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1539249977_update_ratchet_info.up.sql", size: 368, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1539249977_update_ratchet_info.up.sql", size: 368, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x8e, 0xbf, 0x6f, 0xa, 0xc0, 0xe1, 0x3c, 0x42, 0x28, 0x88, 0x1d, 0xdb, 0xba, 0x1c, 0x83, 0xec, 0xba, 0xd3, 0x5f, 0x5c, 0x77, 0x5e, 0xa7, 0x46, 0x36, 0xec, 0x69, 0xa, 0x4b, 0x17, 0x79}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x8e, 0xbf, 0x6f, 0xa, 0xc0, 0xe1, 0x3c, 0x42, 0x28, 0x88, 0x1d, 0xdb, 0xba, 0x1c, 0x83, 0xec, 0xba, 0xd3, 0x5f, 0x5c, 0x77, 0x5e, 0xa7, 0x46, 0x36, 0xec, 0x69, 0xa, 0x4b, 0x17, 0x79}}
return a, nil return a, nil
} }
@ -180,7 +180,7 @@ func _1540715431_add_versionDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1540715431_add_version.down.sql", size: 127, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1540715431_add_version.down.sql", size: 127, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x9, 0x4, 0xe3, 0x76, 0x2e, 0xb8, 0x9, 0x23, 0xf0, 0x70, 0x93, 0xc4, 0x50, 0xe, 0x9d, 0x84, 0x22, 0x8c, 0x94, 0xd3, 0x24, 0x9, 0x9a, 0xc1, 0xa1, 0x48, 0x45, 0xfd, 0x40, 0x6e, 0xe6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x9, 0x4, 0xe3, 0x76, 0x2e, 0xb8, 0x9, 0x23, 0xf0, 0x70, 0x93, 0xc4, 0x50, 0xe, 0x9d, 0x84, 0x22, 0x8c, 0x94, 0xd3, 0x24, 0x9, 0x9a, 0xc1, 0xa1, 0x48, 0x45, 0xfd, 0x40, 0x6e, 0xe6}}
return a, nil return a, nil
} }
@ -200,7 +200,7 @@ func _1540715431_add_versionUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1540715431_add_version.up.sql", size: 265, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1540715431_add_version.up.sql", size: 265, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc7, 0x4c, 0x36, 0x96, 0xdf, 0x16, 0x10, 0xa6, 0x27, 0x1a, 0x79, 0x8b, 0x42, 0x83, 0x23, 0xc, 0x7e, 0xb6, 0x3d, 0x2, 0xda, 0xa4, 0xb4, 0xd, 0x27, 0x55, 0xba, 0xdc, 0xb2, 0x88, 0x8f, 0xa6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc7, 0x4c, 0x36, 0x96, 0xdf, 0x16, 0x10, 0xa6, 0x27, 0x1a, 0x79, 0x8b, 0x42, 0x83, 0x23, 0xc, 0x7e, 0xb6, 0x3d, 0x2, 0xda, 0xa4, 0xb4, 0xd, 0x27, 0x55, 0xba, 0xdc, 0xb2, 0x88, 0x8f, 0xa6}}
return a, nil return a, nil
} }
@ -220,7 +220,7 @@ func _1541164797_add_installationsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1541164797_add_installations.down.sql", size: 26, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1541164797_add_installations.down.sql", size: 26, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xfd, 0xe6, 0xd8, 0xca, 0x3b, 0x38, 0x18, 0xee, 0x0, 0x5f, 0x36, 0x9e, 0x1e, 0xd, 0x19, 0x3e, 0xb4, 0x73, 0x53, 0xe9, 0xa5, 0xac, 0xdd, 0xa1, 0x2f, 0xc7, 0x6c, 0xa8, 0xd9, 0xa, 0x88}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xfd, 0xe6, 0xd8, 0xca, 0x3b, 0x38, 0x18, 0xee, 0x0, 0x5f, 0x36, 0x9e, 0x1e, 0xd, 0x19, 0x3e, 0xb4, 0x73, 0x53, 0xe9, 0xa5, 0xac, 0xdd, 0xa1, 0x2f, 0xc7, 0x6c, 0xa8, 0xd9, 0xa, 0x88}}
return a, nil return a, nil
} }
@ -240,7 +240,7 @@ func _1541164797_add_installationsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1541164797_add_installations.up.sql", size: 216, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1541164797_add_installations.up.sql", size: 216, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x18, 0x26, 0xb8, 0x88, 0x47, 0xdb, 0x83, 0xcc, 0xb6, 0x9d, 0x1c, 0x1, 0xae, 0x2f, 0xde, 0x97, 0x82, 0x3, 0x30, 0xa8, 0x63, 0xa1, 0x78, 0x4b, 0xa5, 0x9, 0x8, 0x75, 0xa2, 0x57, 0x81}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x18, 0x26, 0xb8, 0x88, 0x47, 0xdb, 0x83, 0xcc, 0xb6, 0x9d, 0x1c, 0x1, 0xae, 0x2f, 0xde, 0x97, 0x82, 0x3, 0x30, 0xa8, 0x63, 0xa1, 0x78, 0x4b, 0xa5, 0x9, 0x8, 0x75, 0xa2, 0x57, 0x81}}
return a, nil return a, nil
} }
@ -260,7 +260,7 @@ func _1558084410_add_secretDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558084410_add_secret.down.sql", size: 56, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1558084410_add_secret.down.sql", size: 56, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x49, 0xb, 0x65, 0xdf, 0x59, 0xbf, 0xe9, 0x5, 0x5b, 0x6f, 0xd5, 0x3a, 0xb7, 0x57, 0xe8, 0x78, 0x38, 0x73, 0x53, 0x57, 0xf7, 0x24, 0x4, 0xe4, 0xa2, 0x49, 0x22, 0xa2, 0xc6, 0xfd, 0x80, 0xa4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x49, 0xb, 0x65, 0xdf, 0x59, 0xbf, 0xe9, 0x5, 0x5b, 0x6f, 0xd5, 0x3a, 0xb7, 0x57, 0xe8, 0x78, 0x38, 0x73, 0x53, 0x57, 0xf7, 0x24, 0x4, 0xe4, 0xa2, 0x49, 0x22, 0xa2, 0xc6, 0xfd, 0x80, 0xa4}}
return a, nil return a, nil
} }
@ -280,7 +280,7 @@ func _1558084410_add_secretUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558084410_add_secret.up.sql", size: 301, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1558084410_add_secret.up.sql", size: 301, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x32, 0x36, 0x8e, 0x47, 0xb0, 0x8f, 0xc1, 0xc6, 0xf7, 0xc6, 0x9f, 0x2d, 0x44, 0x75, 0x2b, 0x26, 0xec, 0x6, 0xa0, 0x7b, 0xa5, 0xbd, 0xc8, 0x76, 0x8a, 0x82, 0x68, 0x2, 0x42, 0xb5, 0xf4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0x32, 0x36, 0x8e, 0x47, 0xb0, 0x8f, 0xc1, 0xc6, 0xf7, 0xc6, 0x9f, 0x2d, 0x44, 0x75, 0x2b, 0x26, 0xec, 0x6, 0xa0, 0x7b, 0xa5, 0xbd, 0xc8, 0x76, 0x8a, 0x82, 0x68, 0x2, 0x42, 0xb5, 0xf4}}
return a, nil return a, nil
} }
@ -300,7 +300,7 @@ func _1558588866_add_versionDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558588866_add_version.down.sql", size: 47, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1558588866_add_version.down.sql", size: 47, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x52, 0x34, 0x3c, 0x46, 0x4a, 0xf0, 0x72, 0x47, 0x6f, 0x49, 0x5c, 0xc7, 0xf9, 0x32, 0xce, 0xc4, 0x3d, 0xfd, 0x61, 0xa1, 0x8b, 0x8f, 0xf2, 0x31, 0x34, 0xde, 0x15, 0x49, 0xa6, 0xde, 0xb9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x52, 0x34, 0x3c, 0x46, 0x4a, 0xf0, 0x72, 0x47, 0x6f, 0x49, 0x5c, 0xc7, 0xf9, 0x32, 0xce, 0xc4, 0x3d, 0xfd, 0x61, 0xa1, 0x8b, 0x8f, 0xf2, 0x31, 0x34, 0xde, 0x15, 0x49, 0xa6, 0xde, 0xb9}}
return a, nil return a, nil
} }
@ -320,7 +320,7 @@ func _1558588866_add_versionUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1558588866_add_version.up.sql", size: 57, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1558588866_add_version.up.sql", size: 57, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2a, 0xea, 0x64, 0x39, 0x61, 0x20, 0x83, 0x83, 0xb, 0x2e, 0x79, 0x64, 0xb, 0x53, 0xfa, 0xfe, 0xc6, 0xf7, 0x67, 0x42, 0xd3, 0x4f, 0xdc, 0x7e, 0x30, 0x32, 0xe8, 0x14, 0x41, 0xe9, 0xe7, 0x3b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2a, 0xea, 0x64, 0x39, 0x61, 0x20, 0x83, 0x83, 0xb, 0x2e, 0x79, 0x64, 0xb, 0x53, 0xfa, 0xfe, 0xc6, 0xf7, 0x67, 0x42, 0xd3, 0x4f, 0xdc, 0x7e, 0x30, 0x32, 0xe8, 0x14, 0x41, 0xe9, 0xe7, 0x3b}}
return a, nil return a, nil
} }
@ -340,7 +340,7 @@ func _1559627659_add_contact_codeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1559627659_add_contact_code.down.sql", size: 32, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1559627659_add_contact_code.down.sql", size: 32, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x64, 0x6d, 0xce, 0x24, 0x42, 0x20, 0x8d, 0x4f, 0x37, 0xaa, 0x9d, 0xc, 0x57, 0x98, 0xc1, 0xd1, 0x1a, 0x34, 0xcd, 0x9f, 0x8f, 0x34, 0x86, 0xb3, 0xd3, 0xdc, 0xf1, 0x7d, 0xe5, 0x1b, 0x6e}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x64, 0x6d, 0xce, 0x24, 0x42, 0x20, 0x8d, 0x4f, 0x37, 0xaa, 0x9d, 0xc, 0x57, 0x98, 0xc1, 0xd1, 0x1a, 0x34, 0xcd, 0x9f, 0x8f, 0x34, 0x86, 0xb3, 0xd3, 0xdc, 0xf1, 0x7d, 0xe5, 0x1b, 0x6e}}
return a, nil return a, nil
} }
@ -360,7 +360,7 @@ func _1559627659_add_contact_codeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1559627659_add_contact_code.up.sql", size: 198, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1559627659_add_contact_code.up.sql", size: 198, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x16, 0xf6, 0xc2, 0x62, 0x9c, 0xd2, 0xc9, 0x1e, 0xd8, 0xea, 0xaa, 0xea, 0x95, 0x8f, 0x89, 0x6a, 0x85, 0x5d, 0x9d, 0x99, 0x78, 0x3c, 0x90, 0x66, 0x99, 0x3e, 0x4b, 0x19, 0x62, 0xfb, 0x31, 0x4d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x16, 0xf6, 0xc2, 0x62, 0x9c, 0xd2, 0xc9, 0x1e, 0xd8, 0xea, 0xaa, 0xea, 0x95, 0x8f, 0x89, 0x6a, 0x85, 0x5d, 0x9d, 0x99, 0x78, 0x3c, 0x90, 0x66, 0x99, 0x3e, 0x4b, 0x19, 0x62, 0xfb, 0x31, 0x4d}}
return a, nil return a, nil
} }
@ -380,7 +380,7 @@ func _1561368210_add_installation_metadataDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561368210_add_installation_metadata.down.sql", size: 35, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1561368210_add_installation_metadata.down.sql", size: 35, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa8, 0xde, 0x3f, 0xd2, 0x4a, 0x50, 0x98, 0x56, 0xe3, 0xc0, 0xcd, 0x9d, 0xb0, 0x34, 0x3b, 0xe5, 0x62, 0x18, 0xb5, 0x20, 0xc9, 0x3e, 0xdc, 0x6a, 0x40, 0x36, 0x66, 0xea, 0x51, 0x8c, 0x71, 0xf5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa8, 0xde, 0x3f, 0xd2, 0x4a, 0x50, 0x98, 0x56, 0xe3, 0xc0, 0xcd, 0x9d, 0xb0, 0x34, 0x3b, 0xe5, 0x62, 0x18, 0xb5, 0x20, 0xc9, 0x3e, 0xdc, 0x6a, 0x40, 0x36, 0x66, 0xea, 0x51, 0x8c, 0x71, 0xf5}}
return a, nil return a, nil
} }
@ -400,7 +400,7 @@ func _1561368210_add_installation_metadataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561368210_add_installation_metadata.up.sql", size: 267, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1561368210_add_installation_metadata.up.sql", size: 267, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0x71, 0x8f, 0x29, 0xb1, 0xaa, 0xd6, 0xd1, 0x8c, 0x17, 0xef, 0x6c, 0xd5, 0x80, 0xb8, 0x2c, 0xc3, 0xfe, 0xec, 0x24, 0x4d, 0xc8, 0x25, 0xd3, 0xb4, 0xcd, 0xa9, 0xac, 0x63, 0x61, 0xb2, 0x9c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0x71, 0x8f, 0x29, 0xb1, 0xaa, 0xd6, 0xd1, 0x8c, 0x17, 0xef, 0x6c, 0xd5, 0x80, 0xb8, 0x2c, 0xc3, 0xfe, 0xec, 0x24, 0x4d, 0xc8, 0x25, 0xd3, 0xb4, 0xcd, 0xa9, 0xac, 0x63, 0x61, 0xb2, 0x9c}}
return a, nil return a, nil
} }
@ -420,7 +420,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
return a, nil return a, nil
} }
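Each asset function above ends by pairing the decompressed bytes with a bindataFileInfo built from four values: name, size, mode and modTime, which is why the hunks above touch only the mode and modTime arguments. As orientation, a minimal os.FileInfo implementation of that shape could look like the sketch below; the type and field names are invented for illustration and the sample values are copied from one of the assets above, so this is not the generated code itself.

package main

import (
	"fmt"
	"os"
	"time"
)

// illustrativeFileInfo mirrors the four values recorded per asset and
// satisfies os.FileInfo, which lets callers treat embedded assets like
// ordinary files when listing or restoring them.
type illustrativeFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi illustrativeFileInfo) Name() string       { return fi.name }
func (fi illustrativeFileInfo) Size() int64        { return fi.size }
func (fi illustrativeFileInfo) Mode() os.FileMode  { return fi.mode }
func (fi illustrativeFileInfo) ModTime() time.Time { return fi.modTime }
func (fi illustrativeFileInfo) IsDir() bool        { return false }
func (fi illustrativeFileInfo) Sys() interface{}   { return nil }

func main() {
	fi := illustrativeFileInfo{
		name:    "1561368210_add_installation_metadata.up.sql",
		size:    267,
		mode:    os.FileMode(0664),
		modTime: time.Unix(1604073469, 0),
	}
	fmt.Println(fi.Name(), fi.Size(), fi.Mode(), fi.ModTime().UTC())
}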


@ -494,7 +494,9 @@ func init() {
proto.RegisterMapType((map[string]*DirectMessageProtocol)(nil), "encryption.ProtocolMessage.DirectMessageEntry") proto.RegisterMapType((map[string]*DirectMessageProtocol)(nil), "encryption.ProtocolMessage.DirectMessageEntry")
} }
func init() { proto.RegisterFile("protocol_message.proto", fileDescriptor_4e37b52004a72e16) } func init() {
proto.RegisterFile("protocol_message.proto", fileDescriptor_4e37b52004a72e16)
}
var fileDescriptor_4e37b52004a72e16 = []byte{ var fileDescriptor_4e37b52004a72e16 = []byte{
// 563 bytes of a gzipped FileDescriptorProto // 563 bytes of a gzipped FileDescriptorProto


@ -51,7 +51,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) { func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data)) gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil { if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err) return nil, fmt.Errorf("read %q: %v", name, err)
} }
var buf bytes.Buffer var buf bytes.Buffer
@ -59,7 +59,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close() clErr := gz.Close()
if err != nil { if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err) return nil, fmt.Errorf("read %q: %v", name, err)
} }
if clErr != nil { if clErr != nil {
return nil, err return nil, err
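The two bindataRead hunks above change only the fmt.Errorf verb: %w wraps the underlying error so callers can still match it with errors.Is or errors.As, whereas %v only records its text and drops the chain. A small self-contained sketch of that difference follows; the asset name and sentinel error are placeholders, not taken from this code.

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// %w keeps the original error reachable through the chain.
	wrapped := fmt.Errorf("read %q: %w", "000001_init.up.db.sql", io.ErrUnexpectedEOF)
	// %v flattens the cause into plain text.
	flattened := fmt.Errorf("read %q: %v", "000001_init.up.db.sql", io.ErrUnexpectedEOF)

	fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF))   // true
	fmt.Println(errors.Is(flattened, io.ErrUnexpectedEOF)) // false
}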
@ -115,7 +115,7 @@ func _000001_initDownDbSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000001_init.down.db.sql", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "000001_init.down.db.sql", size: 65, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5e, 0xbb, 0x3f, 0x1, 0x75, 0x19, 0x70, 0x86, 0xa7, 0x34, 0x40, 0x17, 0x34, 0x3e, 0x18, 0x51, 0x79, 0xd4, 0x22, 0xad, 0x8f, 0x80, 0xcc, 0xa6, 0xcc, 0x6, 0x2b, 0x62, 0x2, 0x47, 0xba, 0xf9}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5e, 0xbb, 0x3f, 0x1, 0x75, 0x19, 0x70, 0x86, 0xa7, 0x34, 0x40, 0x17, 0x34, 0x3e, 0x18, 0x51, 0x79, 0xd4, 0x22, 0xad, 0x8f, 0x80, 0xcc, 0xa6, 0xcc, 0x6, 0x2b, 0x62, 0x2, 0x47, 0xba, 0xf9}}
return a, nil return a, nil
} }
@ -135,7 +135,7 @@ func _000001_initUpDbSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000001_init.up.db.sql", size: 2719, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "000001_init.up.db.sql", size: 2719, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x60, 0xdc, 0xeb, 0xe, 0xc2, 0x4f, 0x75, 0xa, 0xf6, 0x3e, 0xc7, 0xc4, 0x4, 0xe2, 0xe1, 0xa4, 0x73, 0x2f, 0x4a, 0xad, 0x1a, 0x0, 0xc3, 0x93, 0x9d, 0x77, 0x3e, 0x31, 0x91, 0x77, 0x2e, 0xc8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x60, 0xdc, 0xeb, 0xe, 0xc2, 0x4f, 0x75, 0xa, 0xf6, 0x3e, 0xc7, 0xc4, 0x4, 0xe2, 0xe1, 0xa4, 0x73, 0x2f, 0x4a, 0xad, 0x1a, 0x0, 0xc3, 0x93, 0x9d, 0x77, 0x3e, 0x31, 0x91, 0x77, 0x2e, 0xc8}}
return a, nil return a, nil
} }
@ -155,7 +155,7 @@ func _000002_add_last_ens_clock_valueDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000002_add_last_ens_clock_value.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "000002_add_last_ens_clock_value.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -175,7 +175,7 @@ func _000002_add_last_ens_clock_valueUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "000002_add_last_ens_clock_value.up.sql", size: 77, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "000002_add_last_ens_clock_value.up.sql", size: 77, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x3, 0x8f, 0xd5, 0x85, 0x83, 0x47, 0xbe, 0xf9, 0x82, 0x7e, 0x81, 0xa4, 0xbd, 0xaa, 0xd5, 0x98, 0x18, 0x5, 0x2d, 0x82, 0x42, 0x3b, 0x3, 0x50, 0xc3, 0x1e, 0x84, 0x35, 0xf, 0xb6, 0x2b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0x3, 0x8f, 0xd5, 0x85, 0x83, 0x47, 0xbe, 0xf9, 0x82, 0x7e, 0x81, 0xa4, 0xbd, 0xaa, 0xd5, 0x98, 0x18, 0x5, 0x2d, 0x82, 0x42, 0x3b, 0x3, 0x50, 0xc3, 0x1e, 0x84, 0x35, 0xf, 0xb6, 0x2b}}
return a, nil return a, nil
} }
@ -195,7 +195,7 @@ func _1586358095_add_replaceDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1586358095_add_replace.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1586358095_add_replace.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -215,7 +215,7 @@ func _1586358095_add_replaceUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1586358095_add_replace.up.sql", size: 224, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1586358095_add_replace.up.sql", size: 224, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xb3, 0xa9, 0xc7, 0x7f, 0x9d, 0x8f, 0x43, 0x8c, 0x9e, 0x58, 0x8d, 0x44, 0xbc, 0xfa, 0x6b, 0x5f, 0x3f, 0x5a, 0xbe, 0xe8, 0xb1, 0x16, 0xf, 0x91, 0x2a, 0xa0, 0x71, 0xbb, 0x8d, 0x6b, 0xcb}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd2, 0xb3, 0xa9, 0xc7, 0x7f, 0x9d, 0x8f, 0x43, 0x8c, 0x9e, 0x58, 0x8d, 0x44, 0xbc, 0xfa, 0x6b, 0x5f, 0x3f, 0x5a, 0xbe, 0xe8, 0xb1, 0x16, 0xf, 0x91, 0x2a, 0xa0, 0x71, 0xbb, 0x8d, 0x6b, 0xcb}}
return a, nil return a, nil
} }
@ -235,7 +235,7 @@ func _1588665364_add_image_dataDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1588665364_add_image_data.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1588665364_add_image_data.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -255,7 +255,7 @@ func _1588665364_add_image_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1588665364_add_image_data.up.sql", size: 186, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1588665364_add_image_data.up.sql", size: 186, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd6, 0xc6, 0x35, 0xb4, 0x4c, 0x39, 0x96, 0x29, 0x30, 0xda, 0xf4, 0x8f, 0xcb, 0xf1, 0x9f, 0x84, 0xdc, 0x88, 0xd4, 0xd5, 0xbc, 0xb6, 0x5b, 0x46, 0x78, 0x67, 0x76, 0x1a, 0x5, 0x36, 0xdc, 0xe5}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd6, 0xc6, 0x35, 0xb4, 0x4c, 0x39, 0x96, 0x29, 0x30, 0xda, 0xf4, 0x8f, 0xcb, 0xf1, 0x9f, 0x84, 0xdc, 0x88, 0xd4, 0xd5, 0xbc, 0xb6, 0x5b, 0x46, 0x78, 0x67, 0x76, 0x1a, 0x5, 0x36, 0xdc, 0xe5}}
return a, nil return a, nil
} }
@ -275,7 +275,7 @@ func _1589365189_add_pow_targetDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1589365189_add_pow_target.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1589365189_add_pow_target.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -295,7 +295,7 @@ func _1589365189_add_pow_targetUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1589365189_add_pow_target.up.sql", size: 66, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1589365189_add_pow_target.up.sql", size: 66, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x3a, 0xe2, 0x2e, 0x7d, 0xaf, 0xbb, 0xcc, 0x21, 0xa1, 0x7a, 0x41, 0x9a, 0xd0, 0xbb, 0xa9, 0xc8, 0x35, 0xf9, 0x32, 0x34, 0x46, 0x44, 0x9a, 0x86, 0x40, 0x7c, 0xb9, 0x23, 0xc7, 0x3, 0x3f}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x3a, 0xe2, 0x2e, 0x7d, 0xaf, 0xbb, 0xcc, 0x21, 0xa1, 0x7a, 0x41, 0x9a, 0xd0, 0xbb, 0xa9, 0xc8, 0x35, 0xf9, 0x32, 0x34, 0x46, 0x44, 0x9a, 0x86, 0x40, 0x7c, 0xb9, 0x23, 0xc7, 0x3, 0x3f}}
return a, nil return a, nil
} }
@ -315,7 +315,7 @@ func _1591277220_add_index_messagesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1591277220_add_index_messages.down.sql", size: 237, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1591277220_add_index_messages.down.sql", size: 237, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x79, 0xe5, 0x42, 0x56, 0x64, 0x1d, 0xb7, 0x8a, 0x1b, 0x0, 0x99, 0xf0, 0x18, 0x8c, 0x69, 0xe3, 0x14, 0x3a, 0x7f, 0x78, 0xfe, 0xe3, 0x2e, 0xcb, 0x6e, 0x5c, 0x8c, 0x1f, 0x7b, 0xfc, 0x21, 0xc7}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x79, 0xe5, 0x42, 0x56, 0x64, 0x1d, 0xb7, 0x8a, 0x1b, 0x0, 0x99, 0xf0, 0x18, 0x8c, 0x69, 0xe3, 0x14, 0x3a, 0x7f, 0x78, 0xfe, 0xe3, 0x2e, 0xcb, 0x6e, 0x5c, 0x8c, 0x1f, 0x7b, 0xfc, 0x21, 0xc7}}
return a, nil return a, nil
} }
@ -335,7 +335,7 @@ func _1591277220_add_index_messagesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1591277220_add_index_messages.up.sql", size: 240, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1591277220_add_index_messages.up.sql", size: 240, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0xfe, 0xbe, 0xd5, 0xb8, 0x8f, 0xdd, 0xef, 0xbb, 0xa8, 0xad, 0x7f, 0xed, 0x5b, 0x5b, 0x2f, 0xe6, 0x82, 0x27, 0x78, 0x1f, 0xb9, 0x57, 0xdc, 0x8, 0xc2, 0xb2, 0xa9, 0x9a, 0x4, 0xe1, 0x7a}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0xfe, 0xbe, 0xd5, 0xb8, 0x8f, 0xdd, 0xef, 0xbb, 0xa8, 0xad, 0x7f, 0xed, 0x5b, 0x5b, 0x2f, 0xe6, 0x82, 0x27, 0x78, 0x1f, 0xb9, 0x57, 0xdc, 0x8, 0xc2, 0xb2, 0xa9, 0x9a, 0x4, 0xe1, 0x7a}}
return a, nil return a, nil
} }
@ -355,7 +355,7 @@ func _1593087212_add_mute_chat_and_raw_message_fieldsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -375,7 +375,7 @@ func _1593087212_add_mute_chat_and_raw_message_fieldsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.up.sql", size: 215, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1593087212_add_mute_chat_and_raw_message_fields.up.sql", size: 215, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x99, 0x61, 0xd1, 0xaa, 0xb4, 0xbf, 0xaf, 0xd7, 0x20, 0x17, 0x40, 0xf9, 0x2, 0xfb, 0xcc, 0x40, 0x2a, 0xd, 0x86, 0x36, 0x30, 0x88, 0x89, 0x25, 0x80, 0x42, 0xb0, 0x5b, 0xe9, 0x73, 0x78}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x73, 0x99, 0x61, 0xd1, 0xaa, 0xb4, 0xbf, 0xaf, 0xd7, 0x20, 0x17, 0x40, 0xf9, 0x2, 0xfb, 0xcc, 0x40, 0x2a, 0xd, 0x86, 0x36, 0x30, 0x88, 0x89, 0x25, 0x80, 0x42, 0xb0, 0x5b, 0xe9, 0x73, 0x78}}
return a, nil return a, nil
} }
@ -395,7 +395,7 @@ func _1595862781_add_audio_dataDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595862781_add_audio_data.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1595862781_add_audio_data.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -415,7 +415,7 @@ func _1595862781_add_audio_dataUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595862781_add_audio_data.up.sql", size: 246, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1595862781_add_audio_data.up.sql", size: 246, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0xd2, 0xee, 0x55, 0xfb, 0x36, 0xa4, 0x92, 0x66, 0xe, 0x81, 0x62, 0x1e, 0x7a, 0x69, 0xa, 0xd5, 0x4b, 0xa5, 0x6a, 0x8d, 0x1d, 0xce, 0xf3, 0x3e, 0xc0, 0x5f, 0x9c, 0x66, 0x1b, 0xb4, 0xed}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0xd2, 0xee, 0x55, 0xfb, 0x36, 0xa4, 0x92, 0x66, 0xe, 0x81, 0x62, 0x1e, 0x7a, 0x69, 0xa, 0xd5, 0x4b, 0xa5, 0x6a, 0x8d, 0x1d, 0xce, 0xf3, 0x3e, 0xc0, 0x5f, 0x9c, 0x66, 0x1b, 0xb4, 0xed}}
return a, nil return a, nil
} }
@ -435,7 +435,7 @@ func _1595865249_create_emoji_reactions_tableDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.down.sql", size: 27, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.down.sql", size: 27, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0xbb, 0xdb, 0x8c, 0xd1, 0x17, 0x1b, 0x19, 0x2a, 0x80, 0xc6, 0xb1, 0xc5, 0x47, 0x74, 0x97, 0x32, 0x30, 0x5, 0xa9, 0x9c, 0xa7, 0x60, 0xa, 0xfe, 0xfb, 0x41, 0x6b, 0x25, 0xad, 0x84, 0x20}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0xbb, 0xdb, 0x8c, 0xd1, 0x17, 0x1b, 0x19, 0x2a, 0x80, 0xc6, 0xb1, 0xc5, 0x47, 0x74, 0x97, 0x32, 0x30, 0x5, 0xa9, 0x9c, 0xa7, 0x60, 0xa, 0xfe, 0xfb, 0x41, 0x6b, 0x25, 0xad, 0x84, 0x20}}
return a, nil return a, nil
} }
@ -455,7 +455,7 @@ func _1595865249_create_emoji_reactions_tableUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.up.sql", size: 300, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1595865249_create_emoji_reactions_table.up.sql", size: 300, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xc5, 0x43, 0x5c, 0x3d, 0x53, 0x43, 0x2c, 0x1a, 0xa5, 0xb6, 0xbf, 0x7, 0x4, 0x5a, 0x3e, 0x40, 0x8b, 0xa4, 0x57, 0x12, 0x58, 0xbc, 0x42, 0xe2, 0xc3, 0xde, 0x76, 0x98, 0x80, 0xe2, 0xbe}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xc5, 0x43, 0x5c, 0x3d, 0x53, 0x43, 0x2c, 0x1a, 0xa5, 0xb6, 0xbf, 0x7, 0x4, 0x5a, 0x3e, 0x40, 0x8b, 0xa4, 0x57, 0x12, 0x58, 0xbc, 0x42, 0xe2, 0xc3, 0xde, 0x76, 0x98, 0x80, 0xe2, 0xbe}}
return a, nil return a, nil
} }
@ -475,7 +475,7 @@ func _1596805115_create_group_chat_invitations_tableDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.down.sql", size: 34, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.down.sql", size: 34, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x5a, 0x17, 0xd8, 0x8d, 0xb3, 0xfe, 0xcb, 0xb6, 0xc0, 0xcb, 0x14, 0x68, 0x8c, 0x5b, 0x18, 0xf8, 0x7d, 0xc9, 0x2c, 0xa6, 0x41, 0xc9, 0x71, 0xeb, 0x3f, 0xc6, 0xa, 0x45, 0xee, 0x5d, 0x2a}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x5a, 0x17, 0xd8, 0x8d, 0xb3, 0xfe, 0xcb, 0xb6, 0xc0, 0xcb, 0x14, 0x68, 0x8c, 0x5b, 0x18, 0xf8, 0x7d, 0xc9, 0x2c, 0xa6, 0x41, 0xc9, 0x71, 0xeb, 0x3f, 0xc6, 0xa, 0x45, 0xee, 0x5d, 0x2a}}
return a, nil return a, nil
} }
@ -495,7 +495,7 @@ func _1596805115_create_group_chat_invitations_tableUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.up.sql", size: 231, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1596805115_create_group_chat_invitations_table.up.sql", size: 231, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0xb1, 0x14, 0x6d, 0x54, 0x28, 0x67, 0xc3, 0x23, 0x6a, 0xfc, 0x80, 0xdf, 0x9e, 0x4c, 0x35, 0x36, 0xf, 0xf8, 0xf3, 0x5f, 0xae, 0xad, 0xb, 0xc1, 0x51, 0x8e, 0x17, 0x7, 0xe5, 0x7f, 0x91}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0xb1, 0x14, 0x6d, 0x54, 0x28, 0x67, 0xc3, 0x23, 0x6a, 0xfc, 0x80, 0xdf, 0x9e, 0x4c, 0x35, 0x36, 0xf, 0xf8, 0xf3, 0x5f, 0xae, 0xad, 0xb, 0xc1, 0x51, 0x8e, 0x17, 0x7, 0xe5, 0x7f, 0x91}}
return a, nil return a, nil
} }
@ -515,7 +515,7 @@ func _1597322655_add_invitation_admin_chat_fieldUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597322655_add_invitation_admin_chat_field.up.sql", size: 54, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1597322655_add_invitation_admin_chat_field.up.sql", size: 54, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x7a, 0xa0, 0xf2, 0xdb, 0x13, 0x91, 0x91, 0xa8, 0x34, 0x1a, 0xa1, 0x49, 0x68, 0xd5, 0xae, 0x2c, 0xd8, 0xd5, 0xea, 0x8f, 0x8c, 0xc7, 0x2, 0x4e, 0x58, 0x2c, 0x3a, 0x14, 0xd4, 0x4f, 0x2c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x7a, 0xa0, 0xf2, 0xdb, 0x13, 0x91, 0x91, 0xa8, 0x34, 0x1a, 0xa1, 0x49, 0x68, 0xd5, 0xae, 0x2c, 0xd8, 0xd5, 0xea, 0x8f, 0x8c, 0xc7, 0x2, 0x4e, 0x58, 0x2c, 0x3a, 0x14, 0xd4, 0x4f, 0x2c}}
return a, nil return a, nil
} }
@ -535,7 +535,7 @@ func _1597757544_add_nicknameUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597757544_add_nickname.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1597757544_add_nickname.up.sql", size: 52, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xa2, 0x64, 0x50, 0xc5, 0x4, 0xb9, 0x8b, 0xd1, 0x18, 0x9b, 0xc3, 0x91, 0x36, 0x2a, 0x1f, 0xc3, 0x6c, 0x2d, 0x92, 0xf8, 0x5e, 0xff, 0xb1, 0x59, 0x61, 0x2, 0x1c, 0xe1, 0x85, 0x90, 0xa4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf4, 0xa2, 0x64, 0x50, 0xc5, 0x4, 0xb9, 0x8b, 0xd1, 0x18, 0x9b, 0xc3, 0x91, 0x36, 0x2a, 0x1f, 0xc3, 0x6c, 0x2d, 0x92, 0xf8, 0x5e, 0xff, 0xb1, 0x59, 0x61, 0x2, 0x1c, 0xe1, 0x85, 0x90, 0xa4}}
return a, nil return a, nil
} }
@ -555,7 +555,7 @@ func _1598955122_add_mentionsDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1598955122_add_mentions.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1598955122_add_mentions.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -575,7 +575,7 @@ func _1598955122_add_mentionsUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1598955122_add_mentions.up.sql", size: 52, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1598955122_add_mentions.up.sql", size: 52, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8d, 0x22, 0x17, 0x92, 0xd2, 0x11, 0x4e, 0x7, 0x93, 0x9a, 0x55, 0xfd, 0xb, 0x97, 0xc4, 0x63, 0x6a, 0x81, 0x97, 0xcd, 0xb2, 0xf8, 0x4b, 0x5f, 0x3c, 0xfa, 0x3a, 0x38, 0x53, 0x10, 0xed, 0x9d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8d, 0x22, 0x17, 0x92, 0xd2, 0x11, 0x4e, 0x7, 0x93, 0x9a, 0x55, 0xfd, 0xb, 0x97, 0xc4, 0x63, 0x6a, 0x81, 0x97, 0xcd, 0xb2, 0xf8, 0x4b, 0x5f, 0x3c, 0xfa, 0x3a, 0x38, 0x53, 0x10, 0xed, 0x9d}}
return a, nil return a, nil
} }
@ -595,7 +595,7 @@ func _1599641390_add_emoji_reactions_indexDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.down.sql", size: 67, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.down.sql", size: 67, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x39, 0x0, 0x51, 0x5b, 0x48, 0xc3, 0xf3, 0x6a, 0x96, 0xf1, 0xd2, 0xa6, 0x60, 0xa8, 0x68, 0x21, 0xb5, 0xa0, 0x11, 0x11, 0x99, 0xde, 0xad, 0xa6, 0xa7, 0x56, 0xc1, 0xb2, 0xa6, 0x63, 0xe4}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x89, 0x39, 0x0, 0x51, 0x5b, 0x48, 0xc3, 0xf3, 0x6a, 0x96, 0xf1, 0xd2, 0xa6, 0x60, 0xa8, 0x68, 0x21, 0xb5, 0xa0, 0x11, 0x11, 0x99, 0xde, 0xad, 0xa6, 0xa7, 0x56, 0xc1, 0xb2, 0xa6, 0x63, 0xe4}}
return a, nil return a, nil
} }
@ -615,7 +615,7 @@ func _1599641390_add_emoji_reactions_indexUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.up.sql", size: 126, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1599641390_add_emoji_reactions_index.up.sql", size: 126, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0xd8, 0xdc, 0xa7, 0xb, 0x92, 0x7a, 0x61, 0x37, 0x24, 0x1c, 0x77, 0x5e, 0xe, 0x7e, 0xfc, 0x9f, 0x98, 0x7b, 0x65, 0xe7, 0xf9, 0x71, 0x57, 0x89, 0x2d, 0x90, 0x1b, 0xf6, 0x5e, 0x37, 0xe8}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0xd8, 0xdc, 0xa7, 0xb, 0x92, 0x7a, 0x61, 0x37, 0x24, 0x1c, 0x77, 0x5e, 0xe, 0x7e, 0xfc, 0x9f, 0x98, 0x7b, 0x65, 0xe7, 0xf9, 0x71, 0x57, 0x89, 0x2d, 0x90, 0x1b, 0xf6, 0x5e, 0x37, 0xe8}}
return a, nil return a, nil
} }
@ -635,7 +635,7 @@ func _1599720851_add_seen_index_remove_long_messagesDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.down.sql", size: 96, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.down.sql", size: 96, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0x80, 0x18, 0xaf, 0xf9, 0x83, 0xd6, 0xcb, 0xa1, 0xc1, 0xf1, 0xf6, 0x32, 0x11, 0xd2, 0x72, 0xef, 0x74, 0x83, 0x53, 0x3a, 0xc4, 0x77, 0x6, 0x66, 0xa0, 0xe3, 0x5a, 0x4d, 0x1b, 0x30, 0x6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6f, 0x80, 0x18, 0xaf, 0xf9, 0x83, 0xd6, 0xcb, 0xa1, 0xc1, 0xf1, 0xf6, 0x32, 0x11, 0xd2, 0x72, 0xef, 0x74, 0x83, 0x53, 0x3a, 0xc4, 0x77, 0x6, 0x66, 0xa0, 0xe3, 0x5a, 0x4d, 0x1b, 0x30, 0x6}}
return a, nil return a, nil
} }
@ -655,7 +655,7 @@ func _1599720851_add_seen_index_remove_long_messagesUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.up.sql", size: 150, mode: os.FileMode(0644), modTime: time.Unix(1603194874, 0)} info := bindataFileInfo{name: "1599720851_add_seen_index_remove_long_messages.up.sql", size: 150, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x24, 0x1c, 0xc4, 0x78, 0x91, 0xc7, 0xeb, 0xfe, 0xc8, 0xa0, 0xd8, 0x13, 0x27, 0x97, 0xc8, 0x96, 0x56, 0x97, 0x33, 0x2c, 0x1e, 0x16, 0x8a, 0xd3, 0x49, 0x99, 0x3, 0xe9, 0xbb, 0xc4, 0x5, 0x3c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x24, 0x1c, 0xc4, 0x78, 0x91, 0xc7, 0xeb, 0xfe, 0xc8, 0xa0, 0xd8, 0x13, 0x27, 0x97, 0xc8, 0x96, 0x56, 0x97, 0x33, 0x2c, 0x1e, 0x16, 0x8a, 0xd3, 0x49, 0x99, 0x3, 0xe9, 0xbb, 0xc4, 0x5, 0x3c}}
return a, nil return a, nil
} }
@ -675,7 +675,7 @@ func _1603198582_add_profile_chat_fieldUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603198582_add_profile_chat_field.up.sql", size: 45, mode: os.FileMode(0644), modTime: time.Unix(1603816327, 0)} info := bindataFileInfo{name: "1603198582_add_profile_chat_field.up.sql", size: 45, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaa, 0xca, 0xe, 0x46, 0xa0, 0x9, 0x9d, 0x47, 0x57, 0xe9, 0xfb, 0x17, 0xeb, 0x9c, 0xf6, 0xb8, 0x1d, 0xe9, 0xd, 0x0, 0xd5, 0xe5, 0xd8, 0x9e, 0x60, 0xa, 0xbf, 0x32, 0x2c, 0x52, 0x7f, 0x6a}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaa, 0xca, 0xe, 0x46, 0xa0, 0x9, 0x9d, 0x47, 0x57, 0xe9, 0xfb, 0x17, 0xeb, 0x9c, 0xf6, 0xb8, 0x1d, 0xe9, 0xd, 0x0, 0xd5, 0xe5, 0xd8, 0x9e, 0x60, 0xa, 0xbf, 0x32, 0x2c, 0x52, 0x7f, 0x6a}}
return a, nil return a, nil
} }
@ -695,7 +695,7 @@ func _1603816533_add_linksDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603816533_add_links.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1603816327, 0)} info := bindataFileInfo{name: "1603816533_add_links.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -715,7 +715,7 @@ func _1603816533_add_linksUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1603816533_add_links.up.sql", size: 48, mode: os.FileMode(0644), modTime: time.Unix(1603816327, 0)} info := bindataFileInfo{name: "1603816533_add_links.up.sql", size: 48, mode: os.FileMode(0644), modTime: time.Unix(1604087530, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0x24, 0xd6, 0x1d, 0xa, 0x83, 0x1e, 0x4d, 0xf, 0xae, 0x4d, 0x8c, 0x51, 0x32, 0xa8, 0x37, 0xb0, 0x14, 0xfb, 0x32, 0x34, 0xc8, 0xc, 0x4e, 0x5b, 0xc5, 0x15, 0x65, 0x73, 0x0, 0x0, 0x1d}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc9, 0x24, 0xd6, 0x1d, 0xa, 0x83, 0x1e, 0x4d, 0xf, 0xae, 0x4d, 0x8c, 0x51, 0x32, 0xa8, 0x37, 0xb0, 0x14, 0xfb, 0x32, 0x34, 0xc8, 0xc, 0x4e, 0x5b, 0xc5, 0x15, 0x65, 0x73, 0x0, 0x0, 0x1d}}
return a, nil return a, nil
} }
@ -735,7 +735,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 850, mode: os.FileMode(0644), modTime: time.Unix(1601293035, 0)} info := bindataFileInfo{name: "doc.go", size: 850, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa0, 0xcc, 0x41, 0xe1, 0x61, 0x12, 0x97, 0xe, 0x36, 0x8c, 0xa7, 0x9e, 0xe0, 0x6e, 0x59, 0x9e, 0xee, 0xd5, 0x4a, 0xcf, 0x1e, 0x60, 0xd6, 0xc3, 0x3a, 0xc9, 0x6c, 0xf2, 0x86, 0x5a, 0xb4, 0x1e}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa0, 0xcc, 0x41, 0xe1, 0x61, 0x12, 0x97, 0xe, 0x36, 0x8c, 0xa7, 0x9e, 0xe0, 0x6e, 0x59, 0x9e, 0xee, 0xd5, 0x4a, 0xcf, 0x1e, 0x60, 0xd6, 0xc3, 0x3a, 0xc9, 0x6c, 0xf2, 0x86, 0x5a, 0xb4, 0x1e}}
return a, nil return a, nil
} }
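Alongside the bytes, every asset above records a digest of 32 bytes, the same shape as a SHA-256 sum. Assuming that is how the generator computes it, a consumer could recompute a checksum of the same form as in the sketch below; the SQL literal is a stand-in, not one of the real migrations.

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// A real check would hash the decompressed asset bytes instead.
	data := []byte("CREATE TABLE example (id INTEGER PRIMARY KEY);")
	sum := sha256.Sum256(data) // [32]byte, matching the digest fields above
	fmt.Printf("%x\n", sum)
}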
@ -831,42 +831,70 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name. // _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){ var _bindata = map[string]func() (*asset, error){
"000001_init.down.db.sql": _000001_initDownDbSql, "000001_init.down.db.sql": _000001_initDownDbSql,
"000001_init.up.db.sql": _000001_initUpDbSql,
"000002_add_last_ens_clock_value.down.sql": _000002_add_last_ens_clock_valueDownSql,
"000002_add_last_ens_clock_value.up.sql": _000002_add_last_ens_clock_valueUpSql,
"1586358095_add_replace.down.sql": _1586358095_add_replaceDownSql,
"1586358095_add_replace.up.sql": _1586358095_add_replaceUpSql,
"1588665364_add_image_data.down.sql": _1588665364_add_image_dataDownSql,
"1588665364_add_image_data.up.sql": _1588665364_add_image_dataUpSql,
"1589365189_add_pow_target.down.sql": _1589365189_add_pow_targetDownSql,
"1589365189_add_pow_target.up.sql": _1589365189_add_pow_targetUpSql,
"1591277220_add_index_messages.down.sql": _1591277220_add_index_messagesDownSql,
"1591277220_add_index_messages.up.sql": _1591277220_add_index_messagesUpSql,
"1593087212_add_mute_chat_and_raw_message_fields.down.sql": _1593087212_add_mute_chat_and_raw_message_fieldsDownSql,
"1593087212_add_mute_chat_and_raw_message_fields.up.sql": _1593087212_add_mute_chat_and_raw_message_fieldsUpSql,
"1595862781_add_audio_data.down.sql": _1595862781_add_audio_dataDownSql,
"1595862781_add_audio_data.up.sql": _1595862781_add_audio_dataUpSql,
"1595865249_create_emoji_reactions_table.down.sql": _1595865249_create_emoji_reactions_tableDownSql,
"1595865249_create_emoji_reactions_table.up.sql": _1595865249_create_emoji_reactions_tableUpSql,
"1596805115_create_group_chat_invitations_table.down.sql": _1596805115_create_group_chat_invitations_tableDownSql,
"1596805115_create_group_chat_invitations_table.up.sql": _1596805115_create_group_chat_invitations_tableUpSql,
"1597322655_add_invitation_admin_chat_field.up.sql": _1597322655_add_invitation_admin_chat_fieldUpSql,
"1597757544_add_nickname.up.sql": _1597757544_add_nicknameUpSql,
"1598955122_add_mentions.down.sql": _1598955122_add_mentionsDownSql,
"1598955122_add_mentions.up.sql": _1598955122_add_mentionsUpSql,
"1599641390_add_emoji_reactions_index.down.sql": _1599641390_add_emoji_reactions_indexDownSql,
"1599641390_add_emoji_reactions_index.up.sql": _1599641390_add_emoji_reactions_indexUpSql,
"1599720851_add_seen_index_remove_long_messages.down.sql": _1599720851_add_seen_index_remove_long_messagesDownSql,
"1599720851_add_seen_index_remove_long_messages.up.sql": _1599720851_add_seen_index_remove_long_messagesUpSql,
"1603198582_add_profile_chat_field.up.sql": _1603198582_add_profile_chat_fieldUpSql,
"1603816533_add_links.down.sql": _1603816533_add_linksDownSql,
"1603816533_add_links.up.sql": _1603816533_add_linksUpSql,
"doc.go": docGo,
}
// AssetDebug is true if the assets were built with the debug flag enabled. "000001_init.up.db.sql": _000001_initUpDbSql,
const AssetDebug = false
"000002_add_last_ens_clock_value.down.sql": _000002_add_last_ens_clock_valueDownSql,
"000002_add_last_ens_clock_value.up.sql": _000002_add_last_ens_clock_valueUpSql,
"1586358095_add_replace.down.sql": _1586358095_add_replaceDownSql,
"1586358095_add_replace.up.sql": _1586358095_add_replaceUpSql,
"1588665364_add_image_data.down.sql": _1588665364_add_image_dataDownSql,
"1588665364_add_image_data.up.sql": _1588665364_add_image_dataUpSql,
"1589365189_add_pow_target.down.sql": _1589365189_add_pow_targetDownSql,
"1589365189_add_pow_target.up.sql": _1589365189_add_pow_targetUpSql,
"1591277220_add_index_messages.down.sql": _1591277220_add_index_messagesDownSql,
"1591277220_add_index_messages.up.sql": _1591277220_add_index_messagesUpSql,
"1593087212_add_mute_chat_and_raw_message_fields.down.sql": _1593087212_add_mute_chat_and_raw_message_fieldsDownSql,
"1593087212_add_mute_chat_and_raw_message_fields.up.sql": _1593087212_add_mute_chat_and_raw_message_fieldsUpSql,
"1595862781_add_audio_data.down.sql": _1595862781_add_audio_dataDownSql,
"1595862781_add_audio_data.up.sql": _1595862781_add_audio_dataUpSql,
"1595865249_create_emoji_reactions_table.down.sql": _1595865249_create_emoji_reactions_tableDownSql,
"1595865249_create_emoji_reactions_table.up.sql": _1595865249_create_emoji_reactions_tableUpSql,
"1596805115_create_group_chat_invitations_table.down.sql": _1596805115_create_group_chat_invitations_tableDownSql,
"1596805115_create_group_chat_invitations_table.up.sql": _1596805115_create_group_chat_invitations_tableUpSql,
"1597322655_add_invitation_admin_chat_field.up.sql": _1597322655_add_invitation_admin_chat_fieldUpSql,
"1597757544_add_nickname.up.sql": _1597757544_add_nicknameUpSql,
"1598955122_add_mentions.down.sql": _1598955122_add_mentionsDownSql,
"1598955122_add_mentions.up.sql": _1598955122_add_mentionsUpSql,
"1599641390_add_emoji_reactions_index.down.sql": _1599641390_add_emoji_reactions_indexDownSql,
"1599641390_add_emoji_reactions_index.up.sql": _1599641390_add_emoji_reactions_indexUpSql,
"1599720851_add_seen_index_remove_long_messages.down.sql": _1599720851_add_seen_index_remove_long_messagesDownSql,
"1599720851_add_seen_index_remove_long_messages.up.sql": _1599720851_add_seen_index_remove_long_messagesUpSql,
"1603198582_add_profile_chat_field.up.sql": _1603198582_add_profile_chat_fieldUpSql,
"1603816533_add_links.down.sql": _1603816533_add_linksDownSql,
"1603816533_add_links.up.sql": _1603816533_add_linksUpSql,
"doc.go": docGo,
}
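The _bindata map above is the lookup table the rest of the generated file is driven by: each migration filename maps to a generator that inflates the asset on demand. Assuming the surrounding generated package (the asset type and the fmt import), a hypothetical helper that consumes it could read:

// loadAsset is a hypothetical helper, not part of the generated file,
// showing the usual consumption pattern: resolve the generator by name,
// run it, and return the decompressed bytes.
func loadAsset(name string) ([]byte, error) {
	gen, ok := _bindata[name]
	if !ok {
		return nil, fmt.Errorf("asset %q not found", name)
	}
	a, err := gen()
	if err != nil {
		return nil, fmt.Errorf("asset %q: %v", name, err)
	}
	return a.bytes, nil
}

// Example call: data, err := loadAsset("1597757544_add_nickname.up.sql")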
// AssetDir returns the file names below a certain // AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata. // directory embedded in the file by go-bindata.


@ -174,7 +174,9 @@ func init() {
proto.RegisterType((*ApplicationMetadataMessage)(nil), "protobuf.ApplicationMetadataMessage") proto.RegisterType((*ApplicationMetadataMessage)(nil), "protobuf.ApplicationMetadataMessage")
} }
func init() { proto.RegisterFile("application_metadata_message.proto", fileDescriptor_ad09a6406fcf24c7) } func init() {
proto.RegisterFile("application_metadata_message.proto", fileDescriptor_ad09a6406fcf24c7)
}
var fileDescriptor_ad09a6406fcf24c7 = []byte{ var fileDescriptor_ad09a6406fcf24c7 = []byte{
// 493 bytes of a gzipped FileDescriptorProto // 493 bytes of a gzipped FileDescriptorProto


@ -459,7 +459,9 @@ func init() {
proto.RegisterType((*ChatMessage)(nil), "protobuf.ChatMessage") proto.RegisterType((*ChatMessage)(nil), "protobuf.ChatMessage")
} }
func init() { proto.RegisterFile("chat_message.proto", fileDescriptor_263952f55fd35689) } func init() {
proto.RegisterFile("chat_message.proto", fileDescriptor_263952f55fd35689)
}
var fileDescriptor_263952f55fd35689 = []byte{ var fileDescriptor_263952f55fd35689 = []byte{
// 618 bytes of a gzipped FileDescriptorProto // 618 bytes of a gzipped FileDescriptorProto


@ -359,7 +359,9 @@ func init() {
proto.RegisterType((*SendTransaction)(nil), "protobuf.SendTransaction") proto.RegisterType((*SendTransaction)(nil), "protobuf.SendTransaction")
} }
func init() { proto.RegisterFile("command.proto", fileDescriptor_213c0bb044472049) } func init() {
proto.RegisterFile("command.proto", fileDescriptor_213c0bb044472049)
}
var fileDescriptor_213c0bb044472049 = []byte{ var fileDescriptor_213c0bb044472049 = []byte{
// 257 bytes of a gzipped FileDescriptorProto // 257 bytes of a gzipped FileDescriptorProto


@ -79,7 +79,9 @@ func init() {
proto.RegisterType((*ContactUpdate)(nil), "protobuf.ContactUpdate") proto.RegisterType((*ContactUpdate)(nil), "protobuf.ContactUpdate")
} }
func init() { proto.RegisterFile("contact.proto", fileDescriptor_a5036fff2565fb15) } func init() {
proto.RegisterFile("contact.proto", fileDescriptor_a5036fff2565fb15)
}
var fileDescriptor_a5036fff2565fb15 = []byte{ var fileDescriptor_a5036fff2565fb15 = []byte{
// 135 bytes of a gzipped FileDescriptorProto // 135 bytes of a gzipped FileDescriptorProto


@ -151,7 +151,9 @@ func init() {
proto.RegisterType((*EmojiReaction)(nil), "protobuf.EmojiReaction") proto.RegisterType((*EmojiReaction)(nil), "protobuf.EmojiReaction")
} }
func init() { proto.RegisterFile("emoji_reaction.proto", fileDescriptor_0a088c907bbc7ed6) } func init() {
proto.RegisterFile("emoji_reaction.proto", fileDescriptor_0a088c907bbc7ed6)
}
var fileDescriptor_0a088c907bbc7ed6 = []byte{ var fileDescriptor_0a088c907bbc7ed6 = []byte{
// 305 bytes of a gzipped FileDescriptorProto // 305 bytes of a gzipped FileDescriptorProto


@ -59,7 +59,9 @@ func init() {
proto.RegisterEnum("protobuf.MessageType", MessageType_name, MessageType_value) proto.RegisterEnum("protobuf.MessageType", MessageType_name, MessageType_value)
} }
func init() { proto.RegisterFile("enums.proto", fileDescriptor_888b6bd9597961ff) } func init() {
proto.RegisterFile("enums.proto", fileDescriptor_888b6bd9597961ff)
}
var fileDescriptor_888b6bd9597961ff = []byte{ var fileDescriptor_888b6bd9597961ff = []byte{
// 153 bytes of a gzipped FileDescriptorProto // 153 bytes of a gzipped FileDescriptorProto


@ -123,7 +123,9 @@ func init() {
proto.RegisterType((*GroupChatInvitation)(nil), "protobuf.GroupChatInvitation") proto.RegisterType((*GroupChatInvitation)(nil), "protobuf.GroupChatInvitation")
} }
func init() { proto.RegisterFile("group_chat_invitation.proto", fileDescriptor_a6a73333de6a8ebe) } func init() {
proto.RegisterFile("group_chat_invitation.proto", fileDescriptor_a6a73333de6a8ebe)
}
var fileDescriptor_a6a73333de6a8ebe = []byte{ var fileDescriptor_a6a73333de6a8ebe = []byte{
// 234 bytes of a gzipped FileDescriptorProto // 234 bytes of a gzipped FileDescriptorProto


@ -240,7 +240,9 @@ func init() {
proto.RegisterType((*MembershipUpdateMessage)(nil), "protobuf.MembershipUpdateMessage") proto.RegisterType((*MembershipUpdateMessage)(nil), "protobuf.MembershipUpdateMessage")
} }
func init() { proto.RegisterFile("membership_update_message.proto", fileDescriptor_8d37dd0dc857a6be) } func init() {
proto.RegisterFile("membership_update_message.proto", fileDescriptor_8d37dd0dc857a6be)
}
var fileDescriptor_8d37dd0dc857a6be = []byte{ var fileDescriptor_8d37dd0dc857a6be = []byte{
// 393 bytes of a gzipped FileDescriptorProto // 393 bytes of a gzipped FileDescriptorProto


@ -335,7 +335,9 @@ func init() {
proto.RegisterType((*SyncInstallation)(nil), "protobuf.SyncInstallation") proto.RegisterType((*SyncInstallation)(nil), "protobuf.SyncInstallation")
} }
func init() { proto.RegisterFile("pairing.proto", fileDescriptor_d61ab7221f0b5518) } func init() {
proto.RegisterFile("pairing.proto", fileDescriptor_d61ab7221f0b5518)
}
var fileDescriptor_d61ab7221f0b5518 = []byte{ var fileDescriptor_d61ab7221f0b5518 = []byte{
// 397 bytes of a gzipped FileDescriptorProto // 397 bytes of a gzipped FileDescriptorProto


@ -820,7 +820,9 @@ func init() {
proto.RegisterType((*PushNotificationResponse)(nil), "protobuf.PushNotificationResponse") proto.RegisterType((*PushNotificationResponse)(nil), "protobuf.PushNotificationResponse")
} }
func init() { proto.RegisterFile("push_notifications.proto", fileDescriptor_200acd86044eaa5d) } func init() {
proto.RegisterFile("push_notifications.proto", fileDescriptor_200acd86044eaa5d)
}
var fileDescriptor_200acd86044eaa5d = []byte{ var fileDescriptor_200acd86044eaa5d = []byte{
// 1002 bytes of a gzipped FileDescriptorProto // 1002 bytes of a gzipped FileDescriptorProto


@ -90,7 +90,7 @@ func _1593601729_initial_schemaDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593601729_initial_schema.down.sql", size: 144, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "1593601729_initial_schema.down.sql", size: 144, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x95, 0x55, 0x64, 0x38, 0x40, 0x16, 0xbf, 0x8b, 0x1c, 0x18, 0xb4, 0xc5, 0x7f, 0xd0, 0xb8, 0xf0, 0x3c, 0xa2, 0x82, 0xf8, 0x8d, 0x5a, 0xd3, 0xb6, 0x6e, 0xa3, 0xb4, 0xc, 0x9, 0x33, 0x0}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x95, 0x55, 0x64, 0x38, 0x40, 0x16, 0xbf, 0x8b, 0x1c, 0x18, 0xb4, 0xc5, 0x7f, 0xd0, 0xb8, 0xf0, 0x3c, 0xa2, 0x82, 0xf8, 0x8d, 0x5a, 0xd3, 0xb6, 0x6e, 0xa3, 0xb4, 0xc, 0x9, 0x33, 0x0}}
return a, nil return a, nil
} }
@ -110,7 +110,7 @@ func _1593601729_initial_schemaUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593601729_initial_schema.up.sql", size: 1773, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "1593601729_initial_schema.up.sql", size: 1773, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x1e, 0x5, 0x35, 0x9, 0xb2, 0x2d, 0x6f, 0x33, 0x63, 0xa2, 0x7a, 0x5b, 0xd2, 0x2d, 0xcb, 0x79, 0x7e, 0x6, 0xb4, 0x9d, 0x35, 0xd8, 0x9b, 0x55, 0xe5, 0xf8, 0x44, 0xca, 0xa6, 0xf3, 0xd3}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x1e, 0x5, 0x35, 0x9, 0xb2, 0x2d, 0x6f, 0x33, 0x63, 0xa2, 0x7a, 0x5b, 0xd2, 0x2d, 0xcb, 0x79, 0x7e, 0x6, 0xb4, 0x9d, 0x35, 0xd8, 0x9b, 0x55, 0xe5, 0xf8, 0x44, 0xca, 0xa6, 0xf3, 0xd3}}
return a, nil return a, nil
} }
@ -130,7 +130,7 @@ func _1597909626_add_server_typeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597909626_add_server_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1598949727, 0)} info := bindataFileInfo{name: "1597909626_add_server_type.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -150,7 +150,7 @@ func _1597909626_add_server_typeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1597909626_add_server_type.up.sql", size: 145, mode: os.FileMode(0644), modTime: time.Unix(1598949727, 0)} info := bindataFileInfo{name: "1597909626_add_server_type.up.sql", size: 145, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc8, 0x3f, 0xe0, 0xe7, 0x57, 0x0, 0x5d, 0x60, 0xf3, 0x55, 0x64, 0x71, 0x80, 0x3c, 0xca, 0x8, 0x61, 0xb5, 0x3c, 0xe, 0xa1, 0xe4, 0x61, 0xd1, 0x4e, 0xd8, 0xb2, 0x55, 0xdd, 0x87, 0x62, 0x9b}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc8, 0x3f, 0xe0, 0xe7, 0x57, 0x0, 0x5d, 0x60, 0xf3, 0x55, 0x64, 0x71, 0x80, 0x3c, 0xca, 0x8, 0x61, 0xb5, 0x3c, 0xe, 0xa1, 0xe4, 0x61, 0xd1, 0x4e, 0xd8, 0xb2, 0x55, 0xdd, 0x87, 0x62, 0x9b}}
return a, nil return a, nil
} }
@ -170,7 +170,7 @@ func _1599053776_add_chat_id_and_typeDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.down.sql", size: 0, mode: os.FileMode(0644), modTime: time.Unix(1599053859, 0)} info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.down.sql", size: 0, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}}
return a, nil return a, nil
} }
@ -190,7 +190,7 @@ func _1599053776_add_chat_id_and_typeUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.up.sql", size: 264, mode: os.FileMode(0644), modTime: time.Unix(1599053853, 0)} info := bindataFileInfo{name: "1599053776_add_chat_id_and_type.up.sql", size: 264, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x7a, 0xf9, 0xc4, 0xa2, 0x96, 0x2e, 0xf9, 0x8f, 0x7, 0xf1, 0x1e, 0x73, 0x8a, 0xa6, 0x3a, 0x13, 0x4, 0x73, 0x82, 0x83, 0xb, 0xe3, 0xb5, 0x3b, 0x7e, 0xd, 0x23, 0xce, 0x98, 0xd4, 0xdc}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xea, 0x7a, 0xf9, 0xc4, 0xa2, 0x96, 0x2e, 0xf9, 0x8f, 0x7, 0xf1, 0x1e, 0x73, 0x8a, 0xa6, 0x3a, 0x13, 0x4, 0x73, 0x82, 0x83, 0xb, 0xe3, 0xb5, 0x3b, 0x7e, 0xd, 0x23, 0xce, 0x98, 0xd4, 0xdc}}
return a, nil return a, nil
} }
@ -210,7 +210,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}}
return a, nil return a, nil
} }

View File

@ -88,7 +88,7 @@ func _1593601728_initial_schemaDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593601728_initial_schema.down.sql", size: 200, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "1593601728_initial_schema.down.sql", size: 200, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x88, 0x8a, 0x61, 0x81, 0x57, 0x45, 0x9b, 0x97, 0x9b, 0x1f, 0xf6, 0x94, 0x8a, 0x20, 0xb3, 0x2b, 0xff, 0x69, 0x49, 0xf4, 0x58, 0xcc, 0xd0, 0x55, 0xcc, 0x9a, 0x8b, 0xb6, 0x7f, 0x29, 0x53, 0xc1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x88, 0x8a, 0x61, 0x81, 0x57, 0x45, 0x9b, 0x97, 0x9b, 0x1f, 0xf6, 0x94, 0x8a, 0x20, 0xb3, 0x2b, 0xff, 0x69, 0x49, 0xf4, 0x58, 0xcc, 0xd0, 0x55, 0xcc, 0x9a, 0x8b, 0xb6, 0x7f, 0x29, 0x53, 0xc1}}
return a, nil return a, nil
} }
@ -108,7 +108,7 @@ func _1593601728_initial_schemaUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1593601728_initial_schema.up.sql", size: 675, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "1593601728_initial_schema.up.sql", size: 675, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x61, 0x90, 0x79, 0xd9, 0x14, 0x65, 0xe9, 0x96, 0x53, 0x17, 0x33, 0x54, 0xeb, 0x8b, 0x5d, 0x95, 0x99, 0x10, 0x36, 0x58, 0xdd, 0xb2, 0xbf, 0x45, 0xd9, 0xbb, 0xc4, 0x92, 0xe, 0xce, 0x2}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x61, 0x90, 0x79, 0xd9, 0x14, 0x65, 0xe9, 0x96, 0x53, 0x17, 0x33, 0x54, 0xeb, 0x8b, 0x5d, 0x95, 0x99, 0x10, 0x36, 0x58, 0xdd, 0xb2, 0xbf, 0x45, 0xd9, 0xbb, 0xc4, 0x92, 0xe, 0xce, 0x2}}
return a, nil return a, nil
} }
@ -128,7 +128,7 @@ func _1598419937_add_push_notifications_tableDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1598419937_add_push_notifications_table.down.sql", size: 51, mode: os.FileMode(0644), modTime: time.Unix(1598420015, 0)} info := bindataFileInfo{name: "1598419937_add_push_notifications_table.down.sql", size: 51, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x98, 0xc8, 0x30, 0x45, 0x5b, 0xc5, 0x7d, 0x13, 0x5d, 0xe7, 0xc8, 0x23, 0x43, 0xf7, 0xdc, 0x9c, 0xe2, 0xdd, 0x63, 0xf0, 0xb7, 0x16, 0x40, 0xc, 0xda, 0xb9, 0x16, 0x70, 0x2b, 0x5a, 0x7e}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0x98, 0xc8, 0x30, 0x45, 0x5b, 0xc5, 0x7d, 0x13, 0x5d, 0xe7, 0xc8, 0x23, 0x43, 0xf7, 0xdc, 0x9c, 0xe2, 0xdd, 0x63, 0xf0, 0xb7, 0x16, 0x40, 0xc, 0xda, 0xb9, 0x16, 0x70, 0x2b, 0x5a, 0x7e}}
return a, nil return a, nil
} }
@ -148,7 +148,7 @@ func _1598419937_add_push_notifications_tableUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1598419937_add_push_notifications_table.up.sql", size: 104, mode: os.FileMode(0644), modTime: time.Unix(1598419975, 0)} info := bindataFileInfo{name: "1598419937_add_push_notifications_table.up.sql", size: 104, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2, 0x3e, 0xef, 0xf, 0xc2, 0xdf, 0xbc, 0x99, 0x7a, 0xc2, 0xd3, 0x64, 0x4f, 0x4c, 0x7e, 0xfc, 0x2e, 0x8c, 0xa7, 0x54, 0xd3, 0x4d, 0x25, 0x98, 0x41, 0xbc, 0xea, 0xd7, 0x2, 0xc1, 0xd0, 0x52}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2, 0x3e, 0xef, 0xf, 0xc2, 0xdf, 0xbc, 0x99, 0x7a, 0xc2, 0xd3, 0x64, 0x4f, 0x4c, 0x7e, 0xfc, 0x2e, 0x8c, 0xa7, 0x54, 0xd3, 0x4d, 0x25, 0x98, 0x41, 0xbc, 0xea, 0xd7, 0x2, 0xc1, 0xd0, 0x52}}
return a, nil return a, nil
} }
@ -168,7 +168,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0644), modTime: time.Unix(1596198373, 0)} info := bindataFileInfo{name: "doc.go", size: 382, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc0, 0x2f, 0x1e, 0x64, 0x9, 0x93, 0xe4, 0x8b, 0xf2, 0x98, 0x5a, 0x45, 0xe2, 0x80, 0x88, 0x67, 0x7a, 0x2d, 0xd7, 0x4b, 0xd1, 0x73, 0xb6, 0x6d, 0x15, 0xc2, 0x0, 0x34, 0xcd, 0xa0, 0xdb, 0x20}}
return a, nil return a, nil
} }

View File

@ -86,7 +86,7 @@ func _1561059284_add_waku_keysDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561059284_add_waku_keys.down.sql", size: 22, mode: os.FileMode(0644), modTime: time.Unix(1579069853, 0)} info := bindataFileInfo{name: "1561059284_add_waku_keys.down.sql", size: 22, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0x2a, 0x7e, 0x9, 0xa3, 0xdd, 0xc6, 0x3, 0xfa, 0xaa, 0x98, 0xa0, 0x26, 0x5e, 0x67, 0x43, 0xe6, 0x20, 0xfd, 0x10, 0xfd, 0x60, 0x89, 0x17, 0x13, 0x87, 0x1b, 0x44, 0x36, 0x79, 0xb6, 0x60}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0x2a, 0x7e, 0x9, 0xa3, 0xdd, 0xc6, 0x3, 0xfa, 0xaa, 0x98, 0xa0, 0x26, 0x5e, 0x67, 0x43, 0xe6, 0x20, 0xfd, 0x10, 0xfd, 0x60, 0x89, 0x17, 0x13, 0x87, 0x1b, 0x44, 0x36, 0x79, 0xb6, 0x60}}
return a, nil return a, nil
} }
@ -106,7 +106,7 @@ func _1561059284_add_waku_keysUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561059284_add_waku_keys.up.sql", size: 109, mode: os.FileMode(0644), modTime: time.Unix(1579069853, 0)} info := bindataFileInfo{name: "1561059284_add_waku_keys.up.sql", size: 109, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x5c, 0x8, 0x32, 0xef, 0x12, 0x88, 0x21, 0xd, 0x7a, 0x42, 0x4d, 0xe7, 0x2d, 0x6c, 0x99, 0xb6, 0x1, 0xf1, 0xba, 0x2c, 0x40, 0x8d, 0xa9, 0x4b, 0xe6, 0xc4, 0x21, 0xec, 0x47, 0x6b, 0xf7}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa9, 0x5c, 0x8, 0x32, 0xef, 0x12, 0x88, 0x21, 0xd, 0x7a, 0x42, 0x4d, 0xe7, 0x2d, 0x6c, 0x99, 0xb6, 0x1, 0xf1, 0xba, 0x2c, 0x40, 0x8d, 0xa9, 0x4b, 0xe6, 0xc4, 0x21, 0xec, 0x47, 0x6b, 0xf7}}
return a, nil return a, nil
} }
@ -126,7 +126,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1579069853, 0)} info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}}
return a, nil return a, nil
} }

View File

@ -86,7 +86,7 @@ func _1561059285_add_whisper_keysDownSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561059285_add_whisper_keys.down.sql", size: 25, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1561059285_add_whisper_keys.down.sql", size: 25, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb9, 0x31, 0x3f, 0xce, 0xfa, 0x44, 0x36, 0x1b, 0xb0, 0xec, 0x5d, 0xb, 0x90, 0xb, 0x21, 0x4f, 0xd5, 0xe5, 0x50, 0xed, 0xc7, 0x43, 0xdf, 0x83, 0xb4, 0x3a, 0xc1, 0x55, 0x2e, 0x53, 0x7c, 0x67}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb9, 0x31, 0x3f, 0xce, 0xfa, 0x44, 0x36, 0x1b, 0xb0, 0xec, 0x5d, 0xb, 0x90, 0xb, 0x21, 0x4f, 0xd5, 0xe5, 0x50, 0xed, 0xc7, 0x43, 0xdf, 0x83, 0xb4, 0x3a, 0xc1, 0x55, 0x2e, 0x53, 0x7c, 0x67}}
return a, nil return a, nil
} }
@ -106,7 +106,7 @@ func _1561059285_add_whisper_keysUpSql() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "1561059285_add_whisper_keys.up.sql", size: 112, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "1561059285_add_whisper_keys.up.sql", size: 112, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x25, 0x41, 0xc, 0x92, 0xdd, 0x9e, 0xff, 0x5d, 0xd0, 0x93, 0xe4, 0x24, 0x50, 0x29, 0xcf, 0xc6, 0xf7, 0x49, 0x3c, 0x73, 0xd9, 0x8c, 0xfa, 0xf2, 0xcf, 0xf6, 0x6f, 0xbc, 0x31, 0xe6, 0xf7, 0xe2}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x25, 0x41, 0xc, 0x92, 0xdd, 0x9e, 0xff, 0x5d, 0xd0, 0x93, 0xe4, 0x24, 0x50, 0x29, 0xcf, 0xc6, 0xf7, 0x49, 0x3c, 0x73, 0xd9, 0x8c, 0xfa, 0xf2, 0xcf, 0xf6, 0x6f, 0xbc, 0x31, 0xe6, 0xf7, 0xe2}}
return a, nil return a, nil
} }
@ -126,7 +126,7 @@ func docGo() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0644), modTime: time.Unix(1574354941, 0)} info := bindataFileInfo{name: "doc.go", size: 373, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x23, 0x6a, 0xc1, 0xce, 0x94, 0xf6, 0xef, 0xf1, 0x97, 0x95, 0xb, 0x35, 0xaf, 0x5f, 0xe7, 0x5f, 0xac, 0x6e, 0xb8, 0xab, 0xba, 0xb5, 0x35, 0x97, 0x22, 0x36, 0x11, 0xce, 0x44, 0xfc, 0xfa, 0xac}}
return a, nil return a, nil
} }

View File

@ -2,6 +2,8 @@ package browsers
import (
	"context"

	"github.com/ethereum/go-ethereum/log"
)

func NewAPI(db *Database) *API {
@ -24,3 +26,31 @@ func (api *API) GetBrowsers(ctx context.Context) ([]*Browser, error) {
func (api *API) DeleteBrowser(ctx context.Context, id string) error {
	return api.db.DeleteBrowser(id)
}

func (api *API) GetBookmarks(ctx context.Context) ([]*Bookmark, error) {
	log.Debug("call to get bookmarks")
	rst, err := api.db.GetBookmarks()
	log.Debug("result from database for bookmarks", "len", len(rst))
	return rst, err
}

func (api *API) StoreBookmark(ctx context.Context, bookmark Bookmark) (Bookmark, error) {
	log.Debug("call to create a bookmark")
	bookmarkResult, err := api.db.StoreBookmark(bookmark)
	log.Debug("result from database for creating a bookmark", "err", err)
	return bookmarkResult, err
}

func (api *API) UpdateBookmark(ctx context.Context, originalURL string, bookmark Bookmark) error {
	log.Debug("call to update a bookmark")
	err := api.db.UpdateBookmark(originalURL, bookmark)
	log.Debug("result from database for updating a bookmark", "err", err)
	return err
}

func (api *API) DeleteBookmark(ctx context.Context, url string) error {
	log.Debug("call to remove a bookmark")
	err := api.db.DeleteBookmark(url)
	log.Debug("result from database for removing a bookmark", "err", err)
	return err
}
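As a quick illustration of how the new endpoints compose, here is a hedged sketch (editorial, not part of this commit) of a rename flow; the helper name `renameBookmark` is hypothetical, but it only uses the `API` methods added above:

```go
package browsers

import (
	"context"
	"fmt"
)

// renameBookmark is a hypothetical helper: it looks up a stored bookmark by
// URL and renames it through the new UpdateBookmark endpoint.
func renameBookmark(ctx context.Context, api *API, url, newName string) error {
	bookmarks, err := api.GetBookmarks(ctx)
	if err != nil {
		return err
	}
	for _, b := range bookmarks {
		if b.URL == url {
			updated := *b
			updated.Name = newName
			return api.UpdateBookmark(ctx, url, updated)
		}
	}
	return fmt.Errorf("no bookmark stored for %s", url)
}
```

Because `UpdateBookmark` is keyed by the bookmark's original URL, a name-only change updates the existing row in place.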

View File

@ -130,3 +130,26 @@ func TestDeleteBrowser(t *testing.T) {
	require.NoError(t, err)
	require.Len(t, rst, 0)
}

func TestBookmarks(t *testing.T) {
	api, cancel := setupTestAPI(t)
	defer cancel()

	bookmark := &Bookmark{
		Name:     "MyBookmark",
		URL:      "https://status.im",
		ImageURL: "",
	}

	_, err := api.StoreBookmark(context.TODO(), *bookmark)
	require.NoError(t, err)

	rst, err := api.GetBookmarks(context.TODO())
	require.NoError(t, err)
	require.Len(t, rst, 1)

	require.NoError(t, api.DeleteBookmark(context.TODO(), bookmark.URL))
	rst, err = api.GetBookmarks(context.TODO())
	require.NoError(t, err)
	require.Len(t, rst, 0)
}

View File

@ -2,6 +2,10 @@ package browsers
import (
	"database/sql"

	"github.com/mat/besticon/besticon"

	"github.com/ethereum/go-ethereum/log"
)

// Database sql wrapper for operations with browser objects.
@ -122,3 +126,72 @@ func (db *Database) DeleteBrowser(id string) error {
_, err := db.db.Exec("DELETE from browsers WHERE id = ?", id) _, err := db.db.Exec("DELETE from browsers WHERE id = ?", id)
return err return err
} }
type BookmarksType string
type Bookmark struct {
URL string `json:"url"`
Name string `json:"name"`
ImageURL string `json:"imageUrl"`
}
func (db *Database) GetBookmarks() ([]*Bookmark, error) {
rows, err := db.db.Query(`SELECT url, name, image_url FROM bookmarks`)
if err != nil {
return nil, err
}
defer rows.Close()
var rst []*Bookmark
for rows.Next() {
bookmark := &Bookmark{}
err := rows.Scan(&bookmark.URL, &bookmark.Name, &bookmark.ImageURL)
if err != nil {
return nil, err
}
rst = append(rst, bookmark)
}
return rst, nil
}
func (db *Database) StoreBookmark(bookmark Bookmark) (Bookmark, error) {
insert, err := db.db.Prepare("INSERT OR REPLACE INTO bookmarks (url, name, image_url) VALUES (?, ?, ?)")
if err != nil {
return bookmark, err
}
// Get the right icon
finder := besticon.IconFinder{}
icons, iconError := finder.FetchIcons(bookmark.URL)
if iconError == nil && len(icons) > 0 {
icon := finder.IconInSizeRange(besticon.SizeRange{48, 48, 100})
if icon != nil {
bookmark.ImageURL = icon.URL
} else {
bookmark.ImageURL = icons[0].URL
}
} else {
log.Error("error getting the bookmark icon", "iconError", iconError)
}
_, err = insert.Exec(bookmark.URL, bookmark.Name, bookmark.ImageURL)
return bookmark, err
}
func (db *Database) UpdateBookmark(originalURL string, bookmark Bookmark) error {
insert, err := db.db.Prepare("UPDATE bookmarks SET url = ?, name = ?, image_url = ? WHERE url = ?")
if err != nil {
return err
}
_, err = insert.Exec(bookmark.URL, bookmark.Name, bookmark.ImageURL, originalURL)
return err
}
func (db *Database) DeleteBookmark(url string) error {
_, err := db.db.Exec(`DELETE FROM bookmarks WHERE url = ?`, url)
return err
}
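The icon lookup inside `StoreBookmark` can also be tried on its own. Below is a minimal, self-contained sketch of the same besticon calls; the `bestIconURL` helper and the sample URL are editorial additions, while the 48/48/100 size range mirrors the commit:

```go
package main

import (
	"fmt"

	"github.com/mat/besticon/besticon"
)

// bestIconURL mirrors the selection logic used by StoreBookmark: prefer an
// icon in the 48-100px range, otherwise fall back to the first icon found.
func bestIconURL(pageURL string) string {
	finder := besticon.IconFinder{}
	icons, err := finder.FetchIcons(pageURL)
	if err != nil || len(icons) == 0 {
		return "" // no icon available; the bookmark keeps an empty ImageURL
	}
	if icon := finder.IconInSizeRange(besticon.SizeRange{48, 48, 100}); icon != nil {
		return icon.URL
	}
	return icons[0].URL
}

func main() {
	fmt.Println(bestIconURL("https://status.im"))
}
```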

View File

@ -97,7 +97,7 @@ func ConfigReadmeMd() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/README.md", size: 3153, mode: os.FileMode(0644), modTime: time.Unix(1597914349, 0)} info := bindataFileInfo{name: "../config/README.md", size: 3153, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x84, 0xf1, 0x44, 0x5b, 0x33, 0xe5, 0xfa, 0xe6, 0x38, 0x77, 0x19, 0x91, 0x6e, 0xd8, 0x8e, 0x8d, 0xaa, 0x32, 0xb2, 0x8b, 0xf9, 0x4f, 0x3, 0xe, 0xc0, 0xca, 0x5e, 0x5d, 0xec, 0x37, 0x1d, 0xc3}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x84, 0xf1, 0x44, 0x5b, 0x33, 0xe5, 0xfa, 0xe6, 0x38, 0x77, 0x19, 0x91, 0x6e, 0xd8, 0x8e, 0x8d, 0xaa, 0x32, 0xb2, 0x8b, 0xf9, 0x4f, 0x3, 0xe, 0xc0, 0xca, 0x5e, 0x5d, 0xec, 0x37, 0x1d, 0xc3}}
return a, nil return a, nil
} }
@ -117,7 +117,7 @@ func ConfigCliFleetEthProdJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/cli/fleet-eth.prod.json", size: 4925, mode: os.FileMode(0644), modTime: time.Unix(1598266915, 0)} info := bindataFileInfo{name: "../config/cli/fleet-eth.prod.json", size: 4925, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x97, 0x5a, 0x72, 0x64, 0x6b, 0xee, 0xe6, 0x97, 0xda, 0xe8, 0x53, 0x7c, 0x33, 0x25, 0x27, 0x55, 0xa8, 0xe0, 0x9a, 0xc2, 0x16, 0xcf, 0xc0, 0x91, 0x8a, 0xbc, 0x98, 0x5, 0xe5, 0x63, 0x83, 0x77}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x97, 0x5a, 0x72, 0x64, 0x6b, 0xee, 0xe6, 0x97, 0xda, 0xe8, 0x53, 0x7c, 0x33, 0x25, 0x27, 0x55, 0xa8, 0xe0, 0x9a, 0xc2, 0x16, 0xcf, 0xc0, 0x91, 0x8a, 0xbc, 0x98, 0x5, 0xe5, 0x63, 0x83, 0x77}}
return a, nil return a, nil
} }
@ -137,7 +137,7 @@ func ConfigCliFleetEthStagingJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/cli/fleet-eth.staging.json", size: 2307, mode: os.FileMode(0644), modTime: time.Unix(1598266852, 0)} info := bindataFileInfo{name: "../config/cli/fleet-eth.staging.json", size: 2307, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x55, 0x35, 0x5e, 0xf5, 0x90, 0x37, 0x0, 0xe4, 0x45, 0x8c, 0x19, 0x77, 0x14, 0xa0, 0x94, 0x1c, 0x9b, 0x78, 0xa7, 0x2e, 0x58, 0x45, 0x1b, 0xba, 0xf3, 0xfb, 0x62, 0x87, 0x97, 0xf8, 0x96}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa, 0x55, 0x35, 0x5e, 0xf5, 0x90, 0x37, 0x0, 0xe4, 0x45, 0x8c, 0x19, 0x77, 0x14, 0xa0, 0x94, 0x1c, 0x9b, 0x78, 0xa7, 0x2e, 0x58, 0x45, 0x1b, 0xba, 0xf3, 0xfb, 0x62, 0x87, 0x97, 0xf8, 0x96}}
return a, nil return a, nil
} }
@ -157,7 +157,7 @@ func ConfigCliFleetEthTestJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/cli/fleet-eth.test.json", size: 2315, mode: os.FileMode(0644), modTime: time.Unix(1597914380, 0)} info := bindataFileInfo{name: "../config/cli/fleet-eth.test.json", size: 2315, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7, 0x92, 0x14, 0x26, 0xa6, 0x12, 0xb1, 0x0, 0x2d, 0xbe, 0x9a, 0xb7, 0x55, 0xdb, 0xfc, 0x1f, 0x75, 0xe, 0xc2, 0x2b, 0xad, 0xfe, 0x44, 0x78, 0x2e, 0x7f, 0x8, 0x7f, 0xd1, 0x9e, 0x11, 0x2c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7, 0x92, 0x14, 0x26, 0xa6, 0x12, 0xb1, 0x0, 0x2d, 0xbe, 0x9a, 0xb7, 0x55, 0xdb, 0xfc, 0x1f, 0x75, 0xe, 0xc2, 0x2b, 0xad, 0xfe, 0x44, 0x78, 0x2e, 0x7f, 0x8, 0x7f, 0xd1, 0x9e, 0x11, 0x2c}}
return a, nil return a, nil
} }
@ -177,7 +177,7 @@ func ConfigCliLesEnabledJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/cli/les-enabled.json", size: 58, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "../config/cli/les-enabled.json", size: 58, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0xee, 0x27, 0xa7, 0x74, 0xa0, 0x46, 0xa1, 0x41, 0xed, 0x4d, 0x16, 0x5b, 0xf3, 0xf0, 0x7c, 0xc8, 0x2f, 0x6f, 0x47, 0xa4, 0xbb, 0x5f, 0x43, 0x33, 0xd, 0x9, 0x9d, 0xea, 0x9e, 0x15, 0xee}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7e, 0xee, 0x27, 0xa7, 0x74, 0xa0, 0x46, 0xa1, 0x41, 0xed, 0x4d, 0x16, 0x5b, 0xf3, 0xf0, 0x7c, 0xc8, 0x2f, 0x6f, 0x47, 0xa4, 0xbb, 0x5f, 0x43, 0x33, 0xd, 0x9, 0x9d, 0xea, 0x9e, 0x15, 0xee}}
return a, nil return a, nil
} }
@ -197,7 +197,7 @@ func ConfigCliMailserverEnabledJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/cli/mailserver-enabled.json", size: 176, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "../config/cli/mailserver-enabled.json", size: 176, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0xec, 0x81, 0x8b, 0x99, 0xb6, 0xdb, 0xc0, 0x8b, 0x46, 0x97, 0x96, 0xc7, 0x58, 0x30, 0x33, 0xef, 0x54, 0x25, 0x87, 0x7b, 0xb9, 0x94, 0x6b, 0x18, 0xa4, 0x5b, 0x58, 0x67, 0x7c, 0x44, 0xa6}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x34, 0xec, 0x81, 0x8b, 0x99, 0xb6, 0xdb, 0xc0, 0x8b, 0x46, 0x97, 0x96, 0xc7, 0x58, 0x30, 0x33, 0xef, 0x54, 0x25, 0x87, 0x7b, 0xb9, 0x94, 0x6b, 0x18, 0xa4, 0x5b, 0x58, 0x67, 0x7c, 0x44, 0xa6}}
return a, nil return a, nil
} }
@ -217,7 +217,7 @@ func ConfigStatusChainGenesisJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "../config/status-chain-genesis.json", size: 612, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "../config/status-chain-genesis.json", size: 612, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb, 0xf0, 0xc, 0x1, 0x95, 0x65, 0x6, 0x55, 0x48, 0x8f, 0x83, 0xa0, 0xb4, 0x81, 0xda, 0xad, 0x30, 0x6d, 0xb2, 0x78, 0x1b, 0x26, 0x4, 0x13, 0x12, 0x9, 0x6, 0xae, 0x3a, 0x2c, 0x1, 0x71}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb, 0xf0, 0xc, 0x1, 0x95, 0x65, 0x6, 0x55, 0x48, 0x8f, 0x83, 0xa0, 0xb4, 0x81, 0xda, 0xad, 0x30, 0x6d, 0xb2, 0x78, 0x1b, 0x26, 0x4, 0x13, 0x12, 0x9, 0x6, 0xae, 0x3a, 0x2c, 0x1, 0x71}}
return a, nil return a, nil
} }
@ -237,7 +237,7 @@ func keysBootnodeKey() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/bootnode.key", size: 65, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/bootnode.key", size: 65, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x31, 0xcf, 0x27, 0xd4, 0x96, 0x2e, 0x32, 0xcd, 0x58, 0x96, 0x2a, 0xe5, 0x8c, 0xa0, 0xf1, 0x73, 0x1f, 0xd6, 0xd6, 0x8b, 0xb, 0x73, 0xd3, 0x2c, 0x84, 0x1a, 0x56, 0xa4, 0x74, 0xb6, 0x95, 0x20}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x31, 0xcf, 0x27, 0xd4, 0x96, 0x2e, 0x32, 0xcd, 0x58, 0x96, 0x2a, 0xe5, 0x8c, 0xa0, 0xf1, 0x73, 0x1f, 0xd6, 0xd6, 0x8b, 0xb, 0x73, 0xd3, 0x2c, 0x84, 0x1a, 0x56, 0xa4, 0x74, 0xb6, 0x95, 0x20}}
return a, nil return a, nil
} }
@ -257,7 +257,7 @@ func keysFirebaseauthkey() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/firebaseauthkey", size: 153, mode: os.FileMode(0644), modTime: time.Unix(1536843582, 0)} info := bindataFileInfo{name: "keys/firebaseauthkey", size: 153, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe, 0x69, 0x23, 0x64, 0x7d, 0xf9, 0x14, 0x37, 0x6f, 0x2b, 0x1, 0xf0, 0xb0, 0xa4, 0xb2, 0xd0, 0x18, 0xcd, 0xf9, 0xeb, 0x57, 0xa3, 0xfd, 0x79, 0x25, 0xa7, 0x9c, 0x3, 0xce, 0x26, 0xec, 0xe1}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe, 0x69, 0x23, 0x64, 0x7d, 0xf9, 0x14, 0x37, 0x6f, 0x2b, 0x1, 0xf0, 0xb0, 0xa4, 0xb2, 0xd0, 0x18, 0xcd, 0xf9, 0xeb, 0x57, 0xa3, 0xfd, 0x79, 0x25, 0xa7, 0x9c, 0x3, 0xce, 0x26, 0xec, 0xe1}}
return a, nil return a, nil
} }
@ -277,7 +277,7 @@ func keysTestAccount1StatusChainPk() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/test-account1-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/test-account1-status-chain.pk", size: 489, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xba, 0x35, 0x1, 0x2b, 0x9d, 0xad, 0xf0, 0x2d, 0x3c, 0x4d, 0x6, 0xb5, 0x22, 0x2, 0x47, 0xd4, 0x1c, 0xf4, 0x31, 0x2f, 0xb, 0x5b, 0x27, 0x5d, 0x43, 0x97, 0x58, 0x2d, 0xf0, 0xe1, 0xbe}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xba, 0x35, 0x1, 0x2b, 0x9d, 0xad, 0xf0, 0x2d, 0x3c, 0x4d, 0x6, 0xb5, 0x22, 0x2, 0x47, 0xd4, 0x1c, 0xf4, 0x31, 0x2f, 0xb, 0x5b, 0x27, 0x5d, 0x43, 0x97, 0x58, 0x2d, 0xf0, 0xe1, 0xbe}}
return a, nil return a, nil
} }
@ -297,7 +297,7 @@ func keysTestAccount1Pk() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/test-account1.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/test-account1.pk", size: 491, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0x43, 0xc2, 0xf4, 0x8c, 0xc6, 0x64, 0x25, 0x8c, 0x7, 0x8c, 0xa8, 0x89, 0x2b, 0x7b, 0x9b, 0x4f, 0x81, 0xcb, 0xce, 0x3d, 0xef, 0x82, 0x9c, 0x27, 0x27, 0xa9, 0xc5, 0x46, 0x70, 0x30, 0x38}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0x43, 0xc2, 0xf4, 0x8c, 0xc6, 0x64, 0x25, 0x8c, 0x7, 0x8c, 0xa8, 0x89, 0x2b, 0x7b, 0x9b, 0x4f, 0x81, 0xcb, 0xce, 0x3d, 0xef, 0x82, 0x9c, 0x27, 0x27, 0xa9, 0xc5, 0x46, 0x70, 0x30, 0x38}}
return a, nil return a, nil
} }
@ -317,7 +317,7 @@ func keysTestAccount2StatusChainPk() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/test-account2-status-chain.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/test-account2-status-chain.pk", size: 489, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0xf8, 0x5c, 0xe9, 0x92, 0x96, 0x2d, 0x88, 0x2b, 0x8e, 0x42, 0x3f, 0xa4, 0x93, 0x6c, 0xad, 0xe9, 0xc0, 0x1b, 0x8a, 0x8, 0x8c, 0x5e, 0x7a, 0x84, 0xa2, 0xf, 0x9f, 0x77, 0x58, 0x2c, 0x2c}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9, 0xf8, 0x5c, 0xe9, 0x92, 0x96, 0x2d, 0x88, 0x2b, 0x8e, 0x42, 0x3f, 0xa4, 0x93, 0x6c, 0xad, 0xe9, 0xc0, 0x1b, 0x8a, 0x8, 0x8c, 0x5e, 0x7a, 0x84, 0xa2, 0xf, 0x9f, 0x77, 0x58, 0x2c, 0x2c}}
return a, nil return a, nil
} }
@ -337,7 +337,7 @@ func keysTestAccount2Pk() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/test-account2.pk", size: 491, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/test-account2.pk", size: 491, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9f, 0x72, 0xd5, 0x95, 0x5c, 0x5a, 0x99, 0x9d, 0x2f, 0x21, 0x83, 0xd7, 0x10, 0x17, 0x4a, 0x3d, 0x65, 0xc9, 0x26, 0x1a, 0x2c, 0x9d, 0x65, 0x63, 0xd2, 0xa0, 0xfc, 0x7c, 0x0, 0x87, 0x38, 0x9f}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9f, 0x72, 0xd5, 0x95, 0x5c, 0x5a, 0x99, 0x9d, 0x2f, 0x21, 0x83, 0xd7, 0x10, 0x17, 0x4a, 0x3d, 0x65, 0xc9, 0x26, 0x1a, 0x2c, 0x9d, 0x65, 0x63, 0xd2, 0xa0, 0xfc, 0x7c, 0x0, 0x87, 0x38, 0x9f}}
return a, nil return a, nil
} }
@ -357,7 +357,7 @@ func keysTestAccount3BeforeEip55Pk() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "keys/test-account3-before-eip55.pk", size: 489, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "keys/test-account3-before-eip55.pk", size: 489, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0x40, 0x56, 0xc1, 0x5e, 0x10, 0x6e, 0x28, 0x15, 0x3, 0x4e, 0xc4, 0xc4, 0x71, 0x4d, 0x16, 0x99, 0xcc, 0x1b, 0x63, 0xee, 0x10, 0x20, 0xe4, 0x59, 0x52, 0x3f, 0xc0, 0xad, 0x15, 0x13, 0x72}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x81, 0x40, 0x56, 0xc1, 0x5e, 0x10, 0x6e, 0x28, 0x15, 0x3, 0x4e, 0xc4, 0xc4, 0x71, 0x4d, 0x16, 0x99, 0xcc, 0x1b, 0x63, 0xee, 0x10, 0x20, 0xe4, 0x59, 0x52, 0x3f, 0xc0, 0xad, 0x15, 0x13, 0x72}}
return a, nil return a, nil
} }

View File

@ -86,7 +86,7 @@ func configPublicChainAccountsJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "config/public-chain-accounts.json", size: 307, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "config/public-chain-accounts.json", size: 307, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0x5d, 0xc0, 0xfe, 0x57, 0x50, 0x18, 0xec, 0x2d, 0x61, 0x1b, 0xa9, 0x81, 0x11, 0x5f, 0x77, 0xf7, 0xb6, 0x67, 0x82, 0x1, 0x40, 0x68, 0x9d, 0xc5, 0x41, 0xaf, 0xce, 0x43, 0x81, 0x92, 0x96}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0x5d, 0xc0, 0xfe, 0x57, 0x50, 0x18, 0xec, 0x2d, 0x61, 0x1b, 0xa9, 0x81, 0x11, 0x5f, 0x77, 0xf7, 0xb6, 0x67, 0x82, 0x1, 0x40, 0x68, 0x9d, 0xc5, 0x41, 0xaf, 0xce, 0x43, 0x81, 0x92, 0x96}}
return a, nil return a, nil
} }
@ -106,7 +106,7 @@ func configStatusChainAccountsJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "config/status-chain-accounts.json", size: 543, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "config/status-chain-accounts.json", size: 543, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8e, 0xb3, 0x61, 0x51, 0x70, 0x3c, 0x12, 0x3e, 0xf1, 0x1c, 0x81, 0xfb, 0x9a, 0x7c, 0xe3, 0x63, 0xd0, 0x8f, 0x12, 0xc5, 0x2d, 0xf4, 0xea, 0x27, 0x33, 0xef, 0xca, 0xf9, 0x3f, 0x72, 0x44, 0xbf}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8e, 0xb3, 0x61, 0x51, 0x70, 0x3c, 0x12, 0x3e, 0xf1, 0x1c, 0x81, 0xfb, 0x9a, 0x7c, 0xe3, 0x63, 0xd0, 0x8f, 0x12, 0xc5, 0x2d, 0xf4, 0xea, 0x27, 0x33, 0xef, 0xca, 0xf9, 0x3f, 0x72, 0x44, 0xbf}}
return a, nil return a, nil
} }
@ -126,7 +126,7 @@ func configTestDataJson() (*asset, error) {
return nil, err return nil, err
} }
info := bindataFileInfo{name: "config/test-data.json", size: 84, mode: os.FileMode(0644), modTime: time.Unix(1573806410, 0)} info := bindataFileInfo{name: "config/test-data.json", size: 84, mode: os.FileMode(0664), modTime: time.Unix(1604073469, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xce, 0x9d, 0x80, 0xf5, 0x87, 0xfa, 0x57, 0x1d, 0xa1, 0xd5, 0x7a, 0x10, 0x3, 0xac, 0xd7, 0xf4, 0x64, 0x32, 0x96, 0x2b, 0xb7, 0x21, 0xb7, 0xa6, 0x80, 0x40, 0xe9, 0x65, 0xe3, 0xd6, 0xbd, 0x40}} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xce, 0x9d, 0x80, 0xf5, 0x87, 0xfa, 0x57, 0x1d, 0xa1, 0xd5, 0x7a, 0x10, 0x3, 0xac, 0xd7, 0xf4, 0x64, 0x32, 0x96, 0x2b, 0xb7, 0x21, 0xb7, 0xa6, 0x80, 0x40, 0xe9, 0x65, 0xe3, 0xd6, 0xbd, 0x40}}
return a, nil return a, nil
} }

1
vendor/github.com/PuerkitoBio/goquery/.gitattributes generated vendored Normal file
View File

@ -0,0 +1 @@
testdata/* linguist-vendored

16
vendor/github.com/PuerkitoBio/goquery/.gitignore generated vendored Normal file
View File

@ -0,0 +1,16 @@
# editor temporary files
*.sublime-*
.DS_Store
*.swp
#*.*#
tags
# direnv config
.env*
# test binaries
*.test
# coverage and profile outputs
*.out

17
vendor/github.com/PuerkitoBio/goquery/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,17 @@
language: go
go:
- 1.2.x
- 1.3.x
- 1.4.x
- 1.5.x
- 1.6.x
- 1.7.x
- 1.8.x
- 1.9.x
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- tip

12
vendor/github.com/PuerkitoBio/goquery/LICENSE generated vendored Normal file
View File

@ -0,0 +1,12 @@
Copyright (c) 2012-2016, Martin Angers & Contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

187
vendor/github.com/PuerkitoBio/goquery/README.md generated vendored Normal file
View File

@ -0,0 +1,187 @@
# goquery - a little like that j-thing, only in Go
[![build status](https://secure.travis-ci.org/PuerkitoBio/goquery.svg?branch=master)](http://travis-ci.org/PuerkitoBio/goquery) [![GoDoc](https://godoc.org/github.com/PuerkitoBio/goquery?status.png)](http://godoc.org/github.com/PuerkitoBio/goquery) [![Sourcegraph Badge](https://sourcegraph.com/github.com/PuerkitoBio/goquery/-/badge.svg)](https://sourcegraph.com/github.com/PuerkitoBio/goquery?badge)
goquery brings a syntax and a set of features similar to [jQuery][] to the [Go language][go]. It is based on Go's [net/html package][html] and the CSS Selector library [cascadia][]. Since the net/html parser returns nodes, and not a full-featured DOM tree, jQuery's stateful manipulation functions (like height(), css(), detach()) have been left off.
Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML. See the [wiki][] for various options to do this.
Syntax-wise, it is as close as possible to jQuery, with the same function names when possible, and that warm and fuzzy chainable interface. jQuery being the ultra-popular library that it is, I felt that writing a similar HTML-manipulating library was better to follow its API than to start anew (in the same spirit as Go's `fmt` package), even though some of its methods are less than intuitive (looking at you, [index()][index]...).
## Table of Contents
* [Installation](#installation)
* [Changelog](#changelog)
* [API](#api)
* [Examples](#examples)
* [Related Projects](#related-projects)
* [Support](#support)
* [License](#license)
## Installation
Please note that because of the net/html dependency, goquery requires Go1.1+.
$ go get github.com/PuerkitoBio/goquery
(optional) To run unit tests:
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
$ go test
(optional) To run benchmarks (warning: it runs for a few minutes):
$ cd $GOPATH/src/github.com/PuerkitoBio/goquery
$ go test -bench=".*"
## Changelog
**Note that goquery's API is now stable, and will not break.**
* **2020-10-08 (v1.6.0)** : Parse html in context of the container node for all functions that deal with html strings (`AfterHtml`, `AppendHtml`, etc.). Thanks to [@thiemok][thiemok] and [@davidjwilkins][djw] for their work on this.
* **2020-02-04 (v1.5.1)** : Update module dependencies.
* **2018-11-15 (v1.5.0)** : Go module support (thanks @Zaba505).
* **2018-06-07 (v1.4.1)** : Add `NewDocumentFromReader` examples.
* **2018-03-24 (v1.4.0)** : Deprecate `NewDocument(url)` and `NewDocumentFromResponse(response)`.
* **2018-01-28 (v1.3.0)** : Add `ToEnd` constant to `Slice` until the end of the selection (thanks to @davidjwilkins for raising the issue).
* **2018-01-11 (v1.2.0)** : Add `AddBack*` and deprecate `AndSelf` (thanks to @davidjwilkins).
* **2017-02-12 (v1.1.0)** : Add `SetHtml` and `SetText` (thanks to @glebtv).
* **2016-12-29 (v1.0.2)** : Optimize allocations for `Selection.Text` (thanks to @radovskyb).
* **2016-08-28 (v1.0.1)** : Optimize performance for large documents.
* **2016-07-27 (v1.0.0)** : Tag version 1.0.0.
* **2016-06-15** : Invalid selector strings internally compile to a `Matcher` implementation that never matches any node (instead of a panic). So for example, `doc.Find("~")` returns an empty `*Selection` object.
* **2016-02-02** : Add `NodeName` utility function similar to the DOM's `nodeName` property. It returns the tag name of the first element in a selection, and other relevant values of non-element nodes (see godoc for details). Add `OuterHtml` utility function similar to the DOM's `outerHTML` property (named `OuterHtml` in small caps for consistency with the existing `Html` method on the `Selection`).
* **2015-04-20** : Add `AttrOr` helper method to return the attribute's value or a default value if absent. Thanks to [piotrkowalczuk][piotr].
* **2015-02-04** : Add more manipulation functions - Prepend* - thanks again to [Andrew Stone][thatguystone].
* **2014-11-28** : Add more manipulation functions - ReplaceWith*, Wrap* and Unwrap - thanks again to [Andrew Stone][thatguystone].
* **2014-11-07** : Add manipulation functions (thanks to [Andrew Stone][thatguystone]) and `*Matcher` functions, that receive compiled cascadia selectors instead of selector strings, thus avoiding potential panics thrown by goquery via `cascadia.MustCompile` calls. This results in better performance (selectors can be compiled once and reused) and more idiomatic error handling (you can handle cascadia's compilation errors, instead of recovering from panics, which had been bugging me for a long time). Note that the actual type expected is a `Matcher` interface, that `cascadia.Selector` implements. Other matcher implementations could be used.
* **2014-11-06** : Change import paths of net/html to golang.org/x/net/html (see https://groups.google.com/forum/#!topic/golang-nuts/eD8dh3T9yyA). Make sure to update your code to use the new import path too when you call goquery with `html.Node`s.
* **v0.3.2** : Add `NewDocumentFromReader()` (thanks jweir) which allows creating a goquery document from an io.Reader.
* **v0.3.1** : Add `NewDocumentFromResponse()` (thanks assassingj) which allows creating a goquery document from an http response.
* **v0.3.0** : Add `EachWithBreak()` which allows to break out of an `Each()` loop by returning false. This function was added instead of changing the existing `Each()` to avoid breaking compatibility.
* **v0.2.1** : Make go-getable, now that [go.net/html is Go1.0-compatible][gonet] (thanks to @matrixik for pointing this out).
* **v0.2.0** : Add support for negative indices in Slice(). **BREAKING CHANGE** `Document.Root` is removed, `Document` is now a `Selection` itself (a selection of one, the root element, just like `Document.Root` was before). Add jQuery's Closest() method.
* **v0.1.1** : Add benchmarks to use as baseline for refactorings, refactor Next...() and Prev...() methods to use the new html package's linked list features (Next/PrevSibling, FirstChild). Good performance boost (40+% in some cases).
* **v0.1.0** : Initial release.
## API
goquery exposes two structs, `Document` and `Selection`, and the `Matcher` interface. Unlike jQuery, which is loaded as part of a DOM document, and thus acts on its containing document, goquery doesn't know which HTML document to act upon. So it needs to be told, and that's what the `Document` type is for. It holds the root document node as the initial Selection value to manipulate.
jQuery often has many variants for the same function (no argument, a selector string argument, a jQuery object argument, a DOM element argument, ...). Instead of exposing the same features in goquery as a single method with variadic empty interface arguments, statically-typed signatures are used following this naming convention:
* When the jQuery equivalent can be called with no argument, it has the same name as jQuery for the no argument signature (e.g.: `Prev()`), and the version with a selector string argument is called `XxxFiltered()` (e.g.: `PrevFiltered()`)
* When the jQuery equivalent **requires** one argument, the same name as jQuery is used for the selector string version (e.g.: `Is()`)
* The signatures accepting a jQuery object as argument are defined in goquery as `XxxSelection()` and take a `*Selection` object as argument (e.g.: `FilterSelection()`)
* The signatures accepting a DOM element as argument in jQuery are defined in goquery as `XxxNodes()` and take a variadic argument of type `*html.Node` (e.g.: `FilterNodes()`)
* The signatures accepting a function as argument in jQuery are defined in goquery as `XxxFunction()` and take a function as argument (e.g.: `FilterFunction()`)
* The goquery methods that can be called with a selector string have a corresponding version that take a `Matcher` interface and are defined as `XxxMatcher()` (e.g.: `IsMatcher()`)
Utility functions that are not in jQuery but are useful in Go are implemented as functions (that take a `*Selection` as parameter), to avoid a potential naming clash on the `*Selection`'s methods (reserved for jQuery-equivalent behaviour).
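As an illustration of these conventions (an editorial example, not part of the upstream README; the HTML snippet is made up), the same document can be queried with the selector-string, filtered, and function-based variants:
```Go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	html := `<ul><li class="a">one</li><li>two</li><li class="a">three</li></ul>`
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
	if err != nil {
		log.Fatal(err)
	}

	// Selector-string variant: Find takes a required selector argument.
	doc.Find("li.a").Each(func(i int, s *goquery.Selection) {
		fmt.Println("class a:", s.Text())
	})

	// XxxFiltered variant of a no-argument method (NextAll -> NextAllFiltered).
	fmt.Println(doc.Find("li").First().NextAllFiltered(".a").Text()) // "three"

	// XxxFunction variant: a Go function instead of a selector string.
	short := doc.Find("li").FilterFunction(func(i int, s *goquery.Selection) bool {
		return len(s.Text()) <= 3
	})
	fmt.Println(short.Length()) // 2 ("one" and "two")
}
```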
The complete [godoc reference documentation can be found here][doc].
Please note that Cascadia's selectors do not necessarily match all supported selectors of jQuery (Sizzle). See the [cascadia project][cascadia] for details. Invalid selector strings compile to a `Matcher` that fails to match any node. Behaviour of the various functions that take a selector string as argument follows from that fact, e.g. (where `~` is an invalid selector string):
* `Find("~")` returns an empty selection because the selector string doesn't match anything.
* `Add("~")` returns a new selection that holds the same nodes as the original selection, because it didn't add any node (selector string didn't match anything).
* `ParentsFiltered("~")` returns an empty selection because the selector string doesn't match anything.
* `ParentsUntil("~")` returns all parents of the selection because the selector string didn't match any element to stop before the top element.
## Examples
See some tips and tricks in the [wiki][].
Adapted from example_test.go:
```Go
package main
import (
"fmt"
"log"
"net/http"
"github.com/PuerkitoBio/goquery"
)
func ExampleScrape() {
// Request the HTML page.
res, err := http.Get("http://metalsucks.net")
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
if res.StatusCode != 200 {
log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
}
// Load the HTML document
doc, err := goquery.NewDocumentFromReader(res.Body)
if err != nil {
log.Fatal(err)
}
// Find the review items
doc.Find(".sidebar-reviews article .content-block").Each(func(i int, s *goquery.Selection) {
// For each item found, get the band and title
band := s.Find("a").Text()
title := s.Find("i").Text()
fmt.Printf("Review %d: %s - %s\n", i, band, title)
})
}
func main() {
ExampleScrape()
}
```
## Related Projects
- [Goq][goq], an HTML deserialization and scraping library based on goquery and struct tags.
- [andybalholm/cascadia][cascadia], the CSS selector library used by goquery.
- [suntong/cascadia][cascadiacli], a command-line interface to the cascadia CSS selector library, useful to test selectors.
- [gocolly/colly](https://github.com/gocolly/colly), a lightning fast and elegant Scraping Framework
- [gnulnx/goperf](https://github.com/gnulnx/goperf), a website performance test tool that also fetches static assets.
- [MontFerret/ferret](https://github.com/MontFerret/ferret), declarative web scraping.
- [tacusci/berrycms](https://github.com/tacusci/berrycms), a modern simple to use CMS with easy to write plugins
- [Dataflow kit](https://github.com/slotix/dataflowkit), Web Scraping framework for Gophers.
- [Geziyor](https://github.com/geziyor/geziyor), a fast web crawling & scraping framework for Go. Supports JS rendering.
- [Pagser](https://github.com/foolin/pagser), a simple, easy, extensible, configurable HTML parser to struct based on goquery and struct tags.
## Support
There are a number of ways you can support the project:
* Use it, star it, build something with it, spread the word!
- If you do build something open-source or otherwise publicly-visible, let me know so I can add it to the [Related Projects](#related-projects) section!
* Raise issues to improve the project (note: doc typos and clarifications are issues too!)
- Please search existing issues before opening a new one - it may have already been addressed.
* Pull requests: please discuss new code in an issue first, unless the fix is really trivial.
- Make sure new code is tested.
- Be mindful of existing code - PRs that break existing code have a high probability of being declined, unless it fixes a serious issue.
If you desperately want to send money my way, I have a BuyMeACoffee.com page:
<a href="https://www.buymeacoffee.com/mna" target="_blank"><img src="https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png" alt="Buy Me A Coffee" style="height: 41px !important;width: 174px !important;box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;-webkit-box-shadow: 0px 3px 2px 0px rgba(190, 190, 190, 0.5) !important;" ></a>
## License
The [BSD 3-Clause license][bsd], the same as the [Go language][golic]. Cascadia's license is [here][caslic].
[jquery]: http://jquery.com/
[go]: http://golang.org/
[cascadia]: https://github.com/andybalholm/cascadia
[cascadiacli]: https://github.com/suntong/cascadia
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[golic]: http://golang.org/LICENSE
[caslic]: https://github.com/andybalholm/cascadia/blob/master/LICENSE
[doc]: http://godoc.org/github.com/PuerkitoBio/goquery
[index]: http://api.jquery.com/index/
[gonet]: https://github.com/golang/net/
[html]: http://godoc.org/golang.org/x/net/html
[wiki]: https://github.com/PuerkitoBio/goquery/wiki/Tips-and-tricks
[thatguystone]: https://github.com/thatguystone
[piotr]: https://github.com/piotrkowalczuk
[goq]: https://github.com/andrewstuart/goq
[thiemok]: https://github.com/thiemok
[djw]: https://github.com/davidjwilkins

124
vendor/github.com/PuerkitoBio/goquery/array.go generated vendored Normal file
View File

@ -0,0 +1,124 @@
package goquery
import (
"golang.org/x/net/html"
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
// ToEnd is a special index value that can be used as end index in a call
// to Slice so that all elements are selected until the end of the Selection.
// It is equivalent to passing (*Selection).Length().
ToEnd = maxInt
)
// First reduces the set of matched elements to the first in the set.
// It returns a new Selection object, and an empty Selection object if
// the selection is empty.
func (s *Selection) First() *Selection {
return s.Eq(0)
}
// Last reduces the set of matched elements to the last in the set.
// It returns a new Selection object, and an empty Selection object if
// the selection is empty.
func (s *Selection) Last() *Selection {
return s.Eq(-1)
}
// Eq reduces the set of matched elements to the one at the specified index.
// If a negative index is given, it counts backwards starting at the end of the
// set. It returns a new Selection object, and an empty Selection object if the
// index is invalid.
func (s *Selection) Eq(index int) *Selection {
if index < 0 {
index += len(s.Nodes)
}
if index >= len(s.Nodes) || index < 0 {
return newEmptySelection(s.document)
}
return s.Slice(index, index+1)
}
// Slice reduces the set of matched elements to a subset specified by a range
// of indices. The start index is 0-based and indicates the index of the first
// element to select. The end index is 0-based and indicates the index at which
// the elements stop being selected (the end index is not selected).
//
// The indices may be negative, in which case they represent an offset from the
// end of the selection.
//
// The special value ToEnd may be specified as end index, in which case all elements
// until the end are selected. This works both for a positive and negative start
// index.
func (s *Selection) Slice(start, end int) *Selection {
if start < 0 {
start += len(s.Nodes)
}
if end == ToEnd {
end = len(s.Nodes)
} else if end < 0 {
end += len(s.Nodes)
}
return pushStack(s, s.Nodes[start:end])
}
// Get retrieves the underlying node at the specified index.
// Get without parameter is not implemented, since the node array is available
// on the Selection object.
func (s *Selection) Get(index int) *html.Node {
if index < 0 {
index += len(s.Nodes) // Negative index gets from the end
}
return s.Nodes[index]
}
// Index returns the position of the first element within the Selection object
// relative to its sibling elements.
func (s *Selection) Index() int {
if len(s.Nodes) > 0 {
return newSingleSelection(s.Nodes[0], s.document).PrevAll().Length()
}
return -1
}
// IndexSelector returns the position of the first element within the
// Selection object relative to the elements matched by the selector, or -1 if
// not found.
func (s *Selection) IndexSelector(selector string) int {
if len(s.Nodes) > 0 {
sel := s.document.Find(selector)
return indexInSlice(sel.Nodes, s.Nodes[0])
}
return -1
}
// IndexMatcher returns the position of the first element within the
// Selection object relative to the elements matched by the matcher, or -1 if
// not found.
func (s *Selection) IndexMatcher(m Matcher) int {
if len(s.Nodes) > 0 {
sel := s.document.FindMatcher(m)
return indexInSlice(sel.Nodes, s.Nodes[0])
}
return -1
}
// IndexOfNode returns the position of the specified node within the Selection
// object, or -1 if not found.
func (s *Selection) IndexOfNode(node *html.Node) int {
return indexInSlice(s.Nodes, node)
}
// IndexOfSelection returns the position of the first node in the specified
// Selection object within this Selection object, or -1 if not found.
func (s *Selection) IndexOfSelection(sel *Selection) int {
if sel != nil && len(sel.Nodes) > 0 {
return indexInSlice(s.Nodes, sel.Nodes[0])
}
return -1
}

123
vendor/github.com/PuerkitoBio/goquery/doc.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// Copyright (c) 2012-2016, Martin Angers & Contributors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
// * Neither the name of the author nor the names of its contributors may be used to
// endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package goquery implements features similar to jQuery, including the chainable
syntax, to manipulate and query an HTML document.
It brings a syntax and a set of features similar to jQuery to the Go language.
It is based on Go's net/html package and the CSS Selector library cascadia.
Since the net/html parser returns nodes, and not a full-featured DOM
tree, jQuery's stateful manipulation functions (like height(), css(), detach())
have been left off.
Also, because the net/html parser requires UTF-8 encoding, so does goquery: it is
the caller's responsibility to ensure that the source document provides UTF-8 encoded HTML.
See the repository's wiki for various options on how to do this.
Syntax-wise, it is as close as possible to jQuery, with the same method names when
possible, and that warm and fuzzy chainable interface. jQuery being the
ultra-popular library that it is, writing a similar HTML-manipulating
library was better to follow its API than to start anew (in the same spirit as
Go's fmt package), even though some of its methods are less than intuitive (looking
at you, index()...).
It is hosted on GitHub, along with additional documentation in the README.md
file: https://github.com/puerkitobio/goquery
Please note that because of the net/html dependency, goquery requires Go1.1+.
The various methods are split into files based on the category of behavior.
The three dots (...) indicate that various "overloads" are available.
* array.go : array-like positional manipulation of the selection.
- Eq()
- First()
- Get()
- Index...()
- Last()
- Slice()
* expand.go : methods that expand or augment the selection's set.
- Add...()
- AndSelf()
- Union(), which is an alias for AddSelection()
* filter.go : filtering methods, that reduce the selection's set.
- End()
- Filter...()
- Has...()
- Intersection(), which is an alias of FilterSelection()
- Not...()
* iteration.go : methods to loop over the selection's nodes.
- Each()
- EachWithBreak()
- Map()
* manipulation.go : methods for modifying the document
- After...()
- Append...()
- Before...()
- Clone()
- Empty()
- Prepend...()
- Remove...()
- ReplaceWith...()
- Unwrap()
- Wrap...()
- WrapAll...()
- WrapInner...()
* property.go : methods that inspect and get the node's properties values.
- Attr*(), RemoveAttr(), SetAttr()
- AddClass(), HasClass(), RemoveClass(), ToggleClass()
- Html()
- Length()
- Size(), which is an alias for Length()
- Text()
* query.go : methods that query, or reflect, a node's identity.
- Contains()
- Is...()
* traversal.go : methods to traverse the HTML document tree.
- Children...()
- Contents()
- Find...()
- Next...()
- Parent[s]...()
- Prev...()
- Siblings...()
* type.go : definition of the types exposed by goquery.
- Document
- Selection
- Matcher
* utilities.go : definition of helper functions (and not methods on a *Selection)
that are not part of jQuery, but are useful to goquery.
- NodeName
- OuterHtml
*/
package goquery
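
For orientation, a minimal usage sketch of the chainable API described above. It assumes the NewDocumentFromReader constructor from type.go (listed above but not reproduced in this hunk) and UTF-8 input, as the package requires; the HTML and selectors are placeholders.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

func main() {
	const page = `<ul><li class="a">one</li><li class="b">two</li></ul>`

	// The caller is responsible for providing UTF-8 encoded HTML.
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(page))
	if err != nil {
		log.Fatal(err)
	}

	// Chainable, jQuery-like traversal and iteration.
	doc.Find("li").Each(func(i int, s *goquery.Selection) {
		class, _ := s.Attr("class")
		fmt.Printf("item %d: class=%q text=%q\n", i, class, s.Text())
	})
}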

70
vendor/github.com/PuerkitoBio/goquery/expand.go generated vendored Normal file
View File

@@ -0,0 +1,70 @@
package goquery
import "golang.org/x/net/html"
// Add adds the selector string's matching nodes to those in the current
// selection and returns a new Selection object.
// The selector string is run in the context of the document of the current
// Selection object.
func (s *Selection) Add(selector string) *Selection {
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, compileMatcher(selector))...)
}
// AddMatcher adds the matcher's matching nodes to those in the current
// selection and returns a new Selection object.
// The matcher is run in the context of the document of the current
// Selection object.
func (s *Selection) AddMatcher(m Matcher) *Selection {
return s.AddNodes(findWithMatcher([]*html.Node{s.document.rootNode}, m)...)
}
// AddSelection adds the specified Selection object's nodes to those in the
// current selection and returns a new Selection object.
func (s *Selection) AddSelection(sel *Selection) *Selection {
if sel == nil {
return s.AddNodes()
}
return s.AddNodes(sel.Nodes...)
}
// Union is an alias for AddSelection.
func (s *Selection) Union(sel *Selection) *Selection {
return s.AddSelection(sel)
}
// AddNodes adds the specified nodes to those in the
// current selection and returns a new Selection object.
func (s *Selection) AddNodes(nodes ...*html.Node) *Selection {
return pushStack(s, appendWithoutDuplicates(s.Nodes, nodes, nil))
}
// AndSelf adds the previous set of elements on the stack to the current set.
// It returns a new Selection object containing the current Selection combined
// with the previous one.
// Deprecated: This function has been deprecated and is now an alias for AddBack().
func (s *Selection) AndSelf() *Selection {
return s.AddBack()
}
// AddBack adds the previous set of elements on the stack to the current set.
// It returns a new Selection object containing the current Selection combined
// with the previous one.
func (s *Selection) AddBack() *Selection {
return s.AddSelection(s.prevSel)
}
// AddBackFiltered reduces the previous set of elements on the stack to those that
// match the selector string, and adds them to the current set.
// It returns a new Selection object containing the current Selection combined
// with the filtered previous one.
func (s *Selection) AddBackFiltered(selector string) *Selection {
return s.AddSelection(s.prevSel.Filter(selector))
}
// AddBackMatcher reduces the previous set of elements on the stack to those that match
// the matcher, and adds them to the current set.
// It returns a new Selection object containing the current Selection combined
// with the filtered previous one.
func (s *Selection) AddBackMatcher(m Matcher) *Selection {
return s.AddSelection(s.prevSel.FilterMatcher(m))
}
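
A minimal sketch of how the expansion methods above compose, assuming doc is a *goquery.Document parsed elsewhere (e.g. with NewDocumentFromReader) and placeholder selectors.

package example

import "github.com/PuerkitoBio/goquery"

// expandExample unions several selections and then restores the pre-filter
// set with AddBack. The selectors used here are illustrative only.
func expandExample(doc *goquery.Document) *goquery.Selection {
	headings := doc.Find("h1").
		Add("h2").            // the selector is run against the whole document
		Union(doc.Find("h3")) // Union is an alias for AddSelection

	// Filter pushes a new set on the stack; AddBack merges the previous
	// (unfiltered) set back into the result.
	return headings.Filter(".keep").AddBack()
}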

163
vendor/github.com/PuerkitoBio/goquery/filter.go generated vendored Normal file
View File

@@ -0,0 +1,163 @@
package goquery
import "golang.org/x/net/html"
// Filter reduces the set of matched elements to those that match the selector string.
// It returns a new Selection object for this subset of matching elements.
func (s *Selection) Filter(selector string) *Selection {
return s.FilterMatcher(compileMatcher(selector))
}
// FilterMatcher reduces the set of matched elements to those that match
// the given matcher. It returns a new Selection object for this subset
// of matching elements.
func (s *Selection) FilterMatcher(m Matcher) *Selection {
return pushStack(s, winnow(s, m, true))
}
// Not removes elements from the Selection that match the selector string.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) Not(selector string) *Selection {
return s.NotMatcher(compileMatcher(selector))
}
// NotMatcher removes elements from the Selection that match the given matcher.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotMatcher(m Matcher) *Selection {
return pushStack(s, winnow(s, m, false))
}
// FilterFunction reduces the set of matched elements to those that pass the function's test.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterFunction(f func(int, *Selection) bool) *Selection {
return pushStack(s, winnowFunction(s, f, true))
}
// NotFunction removes elements from the Selection that pass the function's test.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotFunction(f func(int, *Selection) bool) *Selection {
return pushStack(s, winnowFunction(s, f, false))
}
// FilterNodes reduces the set of matched elements to those that match the specified nodes.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterNodes(nodes ...*html.Node) *Selection {
return pushStack(s, winnowNodes(s, nodes, true))
}
// NotNodes removes elements from the Selection that match the specified nodes.
// It returns a new Selection object with the matching elements removed.
func (s *Selection) NotNodes(nodes ...*html.Node) *Selection {
return pushStack(s, winnowNodes(s, nodes, false))
}
// FilterSelection reduces the set of matched elements to those that match a
// node in the specified Selection object.
// It returns a new Selection object for this subset of elements.
func (s *Selection) FilterSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, winnowNodes(s, nil, true))
}
return pushStack(s, winnowNodes(s, sel.Nodes, true))
}
// NotSelection removes elements from the Selection that match a node in the specified
// Selection object. It returns a new Selection object with the matching elements removed.
func (s *Selection) NotSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, winnowNodes(s, nil, false))
}
return pushStack(s, winnowNodes(s, sel.Nodes, false))
}
// Intersection is an alias for FilterSelection.
func (s *Selection) Intersection(sel *Selection) *Selection {
return s.FilterSelection(sel)
}
// Has reduces the set of matched elements to those that have a descendant
// that matches the selector.
// It returns a new Selection object with the matching elements.
func (s *Selection) Has(selector string) *Selection {
return s.HasSelection(s.document.Find(selector))
}
// HasMatcher reduces the set of matched elements to those that have a descendant
// that matches the matcher.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasMatcher(m Matcher) *Selection {
return s.HasSelection(s.document.FindMatcher(m))
}
// HasNodes reduces the set of matched elements to those that have a
// descendant that matches one of the nodes.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasNodes(nodes ...*html.Node) *Selection {
return s.FilterFunction(func(_ int, sel *Selection) bool {
// Add all nodes that contain one of the specified nodes
for _, n := range nodes {
if sel.Contains(n) {
return true
}
}
return false
})
}
// HasSelection reduces the set of matched elements to those that have a
// descendant that matches one of the nodes of the specified Selection object.
// It returns a new Selection object with the matching elements.
func (s *Selection) HasSelection(sel *Selection) *Selection {
if sel == nil {
return s.HasNodes()
}
return s.HasNodes(sel.Nodes...)
}
// End ends the most recent filtering operation in the current chain and
// returns the set of matched elements to its previous state.
func (s *Selection) End() *Selection {
if s.prevSel != nil {
return s.prevSel
}
return newEmptySelection(s.document)
}
// Filter based on the matcher, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnow(sel *Selection, m Matcher, keep bool) []*html.Node {
// Optimize if keep is requested
if keep {
return m.Filter(sel.Nodes)
}
// Use grep
return grep(sel, func(i int, s *Selection) bool {
return !m.Match(s.Get(0))
})
}
// Filter based on an array of nodes, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnowNodes(sel *Selection, nodes []*html.Node, keep bool) []*html.Node {
if len(nodes)+len(sel.Nodes) < minNodesForSet {
return grep(sel, func(i int, s *Selection) bool {
return isInSlice(nodes, s.Get(0)) == keep
})
}
set := make(map[*html.Node]bool)
for _, n := range nodes {
set[n] = true
}
return grep(sel, func(i int, s *Selection) bool {
return set[s.Get(0)] == keep
})
}
// Filter based on a function test, and the indicator to keep (Filter) or
// to get rid of (Not) the matching elements.
func winnowFunction(sel *Selection, f func(int, *Selection) bool, keep bool) []*html.Node {
return grep(sel, func(i int, s *Selection) bool {
return f(i, s) == keep
})
}
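
A minimal sketch combining the filtering methods above, under the same assumption of an already parsed doc and placeholder selectors.

package example

import (
	"strings"

	"github.com/PuerkitoBio/goquery"
)

// filterExample narrows a selection three different ways and then uses End
// to step back to the state before the last filtering operation.
func filterExample(doc *goquery.Document) *goquery.Selection {
	rows := doc.Find("tr").
		Not(".header"). // drop rows matching a selector
		Has("a").       // keep only rows with a descendant link
		FilterFunction(func(_ int, s *goquery.Selection) bool {
			return strings.Contains(s.Text(), "Go") // keep by predicate
		})

	// End undoes the FilterFunction step, returning the Has("a") result.
	return rows.End()
}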

8
vendor/github.com/PuerkitoBio/goquery/go.mod generated vendored Normal file
View File

@@ -0,0 +1,8 @@
module github.com/PuerkitoBio/goquery
require (
github.com/andybalholm/cascadia v1.1.0
golang.org/x/net v0.0.0-20200202094626-16171245cfb2
)
go 1.13

8
vendor/github.com/PuerkitoBio/goquery/go.sum generated vendored Normal file
View File

@@ -0,0 +1,8 @@
github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

39
vendor/github.com/PuerkitoBio/goquery/iteration.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
package goquery
// Each iterates over a Selection object, executing a function for each
// matched element. It returns the current Selection object. The function
// f is called for each element in the selection with the index of the
// element in that selection starting at 0, and a *Selection that contains
// only that element.
func (s *Selection) Each(f func(int, *Selection)) *Selection {
for i, n := range s.Nodes {
f(i, newSingleSelection(n, s.document))
}
return s
}
// EachWithBreak iterates over a Selection object, executing a function for each
// matched element. It is identical to Each except that it is possible to break
// out of the loop by returning false in the callback function. It returns the
// current Selection object.
func (s *Selection) EachWithBreak(f func(int, *Selection) bool) *Selection {
for i, n := range s.Nodes {
if !f(i, newSingleSelection(n, s.document)) {
return s
}
}
return s
}
// Map passes each element in the current matched set through a function,
// producing a slice of string holding the returned values. The function
// f is called for each element in the selection with the index of the
// element in that selection starting at 0, and a *Selection that contains
// only that element.
func (s *Selection) Map(f func(int, *Selection) string) (result []string) {
for i, n := range s.Nodes {
result = append(result, f(i, newSingleSelection(n, s.document)))
}
return result
}
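
A minimal sketch of the three iteration helpers, again assuming a parsed doc; the selectors and class names are placeholders.

package example

import (
	"fmt"

	"github.com/PuerkitoBio/goquery"
)

// iterationExample prints every link, stops early with EachWithBreak once a
// stop marker is seen, and collects href values with Map.
func iterationExample(doc *goquery.Document) []string {
	links := doc.Find("a")

	links.Each(func(i int, s *goquery.Selection) {
		fmt.Println(i, s.Text())
	})

	links.EachWithBreak(func(_ int, s *goquery.Selection) bool {
		return !s.HasClass("stop") // returning false breaks the loop
	})

	// Map produces a slice of strings, one entry per matched element.
	return links.Map(func(_ int, s *goquery.Selection) string {
		return s.AttrOr("href", "")
	})
}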

676
vendor/github.com/PuerkitoBio/goquery/manipulation.go generated vendored Normal file
View File

@@ -0,0 +1,676 @@
package goquery
import (
"strings"
"golang.org/x/net/html"
)
// After applies the selector from the root document and inserts the matched elements
// after the elements in the set of matched elements.
//
// If one of the matched elements in the selection is not currently in the
// document, it's impossible to insert nodes after it, so it will be ignored.
//
// This follows the same rules as Selection.Append.
func (s *Selection) After(selector string) *Selection {
return s.AfterMatcher(compileMatcher(selector))
}
// AfterMatcher applies the matcher from the root document and inserts the matched elements
// after the elements in the set of matched elements.
//
// If one of the matched elements in the selection is not currently in the
// document, it's impossible to insert nodes after it, so it will be ignored.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterMatcher(m Matcher) *Selection {
return s.AfterNodes(m.MatchAll(s.document.rootNode)...)
}
// AfterSelection inserts the elements in the selection after each element in the set of matched
// elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterSelection(sel *Selection) *Selection {
return s.AfterNodes(sel.Nodes...)
}
// AfterHtml parses the html and inserts it after the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
nextSibling := node.NextSibling
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, nextSibling)
}
}
})
}
// AfterNodes inserts the nodes after each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AfterNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
if sn.Parent != nil {
sn.Parent.InsertBefore(n, sn.NextSibling)
}
})
}
// Append appends the elements specified by the selector to the end of each element
// in the set of matched elements, following those rules:
//
// 1) The selector is applied to the root document.
//
// 2) Elements that are part of the document will be moved to the new location.
//
// 3) If there are multiple locations to append to, cloned nodes will be
// appended to all target locations except the last one, which will be moved
// as noted in (2).
func (s *Selection) Append(selector string) *Selection {
return s.AppendMatcher(compileMatcher(selector))
}
// AppendMatcher appends the elements specified by the matcher to the end of each element
// in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendMatcher(m Matcher) *Selection {
return s.AppendNodes(m.MatchAll(s.document.rootNode)...)
}
// AppendSelection appends the elements in the selection to the end of each element
// in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendSelection(sel *Selection) *Selection {
return s.AppendNodes(sel.Nodes...)
}
// AppendHtml parses the html and appends it to the set of matched elements.
func (s *Selection) AppendHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
node.AppendChild(n)
}
})
}
// AppendNodes appends the specified nodes to each node in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) AppendNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
sn.AppendChild(n)
})
}
// Before inserts the matched elements before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) Before(selector string) *Selection {
return s.BeforeMatcher(compileMatcher(selector))
}
// BeforeMatcher inserts the matched elements before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeMatcher(m Matcher) *Selection {
return s.BeforeNodes(m.MatchAll(s.document.rootNode)...)
}
// BeforeSelection inserts the elements in the selection before each element in the set of matched
// elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeSelection(sel *Selection) *Selection {
return s.BeforeNodes(sel.Nodes...)
}
// BeforeHtml parses the html and inserts it before the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, node)
}
}
})
}
// BeforeNodes inserts the nodes before each element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) BeforeNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, false, func(sn *html.Node, n *html.Node) {
if sn.Parent != nil {
sn.Parent.InsertBefore(n, sn)
}
})
}
// Clone creates a deep copy of the set of matched nodes. The new nodes will not be
// attached to the document.
func (s *Selection) Clone() *Selection {
ns := newEmptySelection(s.document)
ns.Nodes = cloneNodes(s.Nodes)
return ns
}
// Empty removes all children nodes from the set of matched elements.
// It returns the children nodes in a new Selection.
func (s *Selection) Empty() *Selection {
var nodes []*html.Node
for _, n := range s.Nodes {
for c := n.FirstChild; c != nil; c = n.FirstChild {
n.RemoveChild(c)
nodes = append(nodes, c)
}
}
return pushStack(s, nodes)
}
// Prepend prepends the elements specified by the selector to each element in
// the set of matched elements, following the same rules as Append.
func (s *Selection) Prepend(selector string) *Selection {
return s.PrependMatcher(compileMatcher(selector))
}
// PrependMatcher prepends the elements specified by the matcher to each
// element in the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependMatcher(m Matcher) *Selection {
return s.PrependNodes(m.MatchAll(s.document.rootNode)...)
}
// PrependSelection prepends the elements in the selection to each element in
// the set of matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependSelection(sel *Selection) *Selection {
return s.PrependNodes(sel.Nodes...)
}
// PrependHtml parses the html and prepends it to the set of matched elements.
func (s *Selection) PrependHtml(htmlStr string) *Selection {
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
firstChild := node.FirstChild
for _, n := range nodes {
node.InsertBefore(n, firstChild)
}
})
}
// PrependNodes prepends the specified nodes to each node in the set of
// matched elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) PrependNodes(ns ...*html.Node) *Selection {
return s.manipulateNodes(ns, true, func(sn *html.Node, n *html.Node) {
// sn.FirstChild may be nil, in which case this functions like
// sn.AppendChild()
sn.InsertBefore(n, sn.FirstChild)
})
}
// Remove removes the set of matched elements from the document.
// It returns the same selection, now consisting of nodes not in the document.
func (s *Selection) Remove() *Selection {
for _, n := range s.Nodes {
if n.Parent != nil {
n.Parent.RemoveChild(n)
}
}
return s
}
// RemoveFiltered removes from the current set of matched elements those that
// match the selector filter. It returns the Selection of removed nodes.
//
// For example if the selection s contains "<h1>", "<h2>" and "<h3>"
// and s.RemoveFiltered("h2") is called, only the "<h2>" node is removed
// (and returned), while "<h1>" and "<h3>" are kept in the document.
func (s *Selection) RemoveFiltered(selector string) *Selection {
return s.RemoveMatcher(compileMatcher(selector))
}
// RemoveMatcher removes from the current set of matched elements those that
// match the Matcher filter. It returns the Selection of removed nodes.
// See RemoveFiltered for additional information.
func (s *Selection) RemoveMatcher(m Matcher) *Selection {
return s.FilterMatcher(m).Remove()
}
// ReplaceWith replaces each element in the set of matched elements with the
// nodes matched by the given selector.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWith(selector string) *Selection {
return s.ReplaceWithMatcher(compileMatcher(selector))
}
// ReplaceWithMatcher replaces each element in the set of matched elements with
// the nodes matched by the given Matcher.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithMatcher(m Matcher) *Selection {
return s.ReplaceWithNodes(m.MatchAll(s.document.rootNode)...)
}
// ReplaceWithSelection replaces each element in the set of matched elements with
// the nodes from the given Selection.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithSelection(sel *Selection) *Selection {
return s.ReplaceWithNodes(sel.Nodes...)
}
// ReplaceWithHtml replaces each element in the set of matched elements with
// the parsed HTML.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithHtml(htmlStr string) *Selection {
s.eachNodeHtml(htmlStr, true, func(node *html.Node, nodes []*html.Node) {
nextSibling := node.NextSibling
for _, n := range nodes {
if node.Parent != nil {
node.Parent.InsertBefore(n, nextSibling)
}
}
})
return s.Remove()
}
// ReplaceWithNodes replaces each element in the set of matched elements with
// the given nodes.
// It returns the removed elements.
//
// This follows the same rules as Selection.Append.
func (s *Selection) ReplaceWithNodes(ns ...*html.Node) *Selection {
s.AfterNodes(ns...)
return s.Remove()
}
// SetHtml sets the html content of each element in the selection to the
// specified html string.
func (s *Selection) SetHtml(htmlStr string) *Selection {
for _, context := range s.Nodes {
for c := context.FirstChild; c != nil; c = context.FirstChild {
context.RemoveChild(c)
}
}
return s.eachNodeHtml(htmlStr, false, func(node *html.Node, nodes []*html.Node) {
for _, n := range nodes {
node.AppendChild(n)
}
})
}
// SetText sets the content of each element in the selection to the specified text.
// The provided text string is escaped.
func (s *Selection) SetText(text string) *Selection {
return s.SetHtml(html.EscapeString(text))
}
// Unwrap removes the parents of the set of matched elements, leaving the matched
// elements (and their siblings, if any) in their place.
// It returns the original selection.
func (s *Selection) Unwrap() *Selection {
s.Parent().Each(func(i int, ss *Selection) {
// For some reason, jquery allows unwrap to remove the <head> element, so
// allowing it here too. Same for <html>. Why it allows those elements to
// be unwrapped while not allowing body is a mystery to me.
if ss.Nodes[0].Data != "body" {
ss.ReplaceWithSelection(ss.Contents())
}
})
return s
}
// Wrap wraps each element in the set of matched elements inside the first
// element matched by the given selector. The matched child is cloned before
// being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) Wrap(selector string) *Selection {
return s.WrapMatcher(compileMatcher(selector))
}
// WrapMatcher wraps each element in the set of matched elements inside the
// first element matched by the given matcher. The matched child is cloned
// before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapMatcher(m Matcher) *Selection {
return s.wrapNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapSelection wraps each element in the set of matched elements inside the
// first element in the given Selection. The element is cloned before being
// inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapSelection(sel *Selection) *Selection {
return s.wrapNodes(sel.Nodes...)
}
// WrapHtml wraps each element in the set of matched elements inside the inner-
// most child of the given HTML.
//
// It returns the original set of elements.
func (s *Selection) WrapHtml(htmlStr string) *Selection {
nodesMap := make(map[string][]*html.Node)
for _, context := range s.Nodes {
var parent *html.Node
if context.Parent != nil {
parent = context.Parent
} else {
parent = &html.Node{Type: html.ElementNode}
}
nodes, found := nodesMap[nodeName(parent)]
if !found {
nodes = parseHtmlWithContext(htmlStr, parent)
nodesMap[nodeName(parent)] = nodes
}
newSingleSelection(context, s.document).wrapAllNodes(cloneNodes(nodes)...)
}
return s
}
// WrapNode wraps each element in the set of matched elements inside the inner-
// most child of the given node. The given node is copied before being inserted
// into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapNode(n *html.Node) *Selection {
return s.wrapNodes(n)
}
func (s *Selection) wrapNodes(ns ...*html.Node) *Selection {
s.Each(func(i int, ss *Selection) {
ss.wrapAllNodes(ns...)
})
return s
}
// WrapAll wraps a single HTML structure, matched by the given selector, around
// all elements in the set of matched elements. The matched child is cloned
// before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAll(selector string) *Selection {
return s.WrapAllMatcher(compileMatcher(selector))
}
// WrapAllMatcher wraps a single HTML structure, matched by the given Matcher,
// around all elements in the set of matched elements. The matched child is
// cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllMatcher(m Matcher) *Selection {
return s.wrapAllNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapAllSelection wraps a single HTML structure, the first node of the given
// Selection, around all elements in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllSelection(sel *Selection) *Selection {
return s.wrapAllNodes(sel.Nodes...)
}
// WrapAllHtml wraps the given HTML structure around all elements in the set of
// matched elements. The matched child is cloned before being inserted into the
// document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllHtml(htmlStr string) *Selection {
var context *html.Node
var nodes []*html.Node
if len(s.Nodes) > 0 {
context = s.Nodes[0]
if context.Parent != nil {
nodes = parseHtmlWithContext(htmlStr, context)
} else {
nodes = parseHtml(htmlStr)
}
}
return s.wrapAllNodes(nodes...)
}
func (s *Selection) wrapAllNodes(ns ...*html.Node) *Selection {
if len(ns) > 0 {
return s.WrapAllNode(ns[0])
}
return s
}
// WrapAllNode wraps the given node around the first element in the Selection,
// making all other nodes in the Selection children of the given node. The node
// is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapAllNode(n *html.Node) *Selection {
if s.Size() == 0 {
return s
}
wrap := cloneNode(n)
first := s.Nodes[0]
if first.Parent != nil {
first.Parent.InsertBefore(wrap, first)
first.Parent.RemoveChild(first)
}
for c := getFirstChildEl(wrap); c != nil; c = getFirstChildEl(wrap) {
wrap = c
}
newSingleSelection(wrap, s.document).AppendSelection(s)
return s
}
// WrapInner wraps an HTML structure, matched by the given selector, around the
// content of each element in the set of matched elements. The matched child is
// cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInner(selector string) *Selection {
return s.WrapInnerMatcher(compileMatcher(selector))
}
// WrapInnerMatcher wraps an HTML structure, matched by the given matcher,
// around the content of each element in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerMatcher(m Matcher) *Selection {
return s.wrapInnerNodes(m.MatchAll(s.document.rootNode)...)
}
// WrapInnerSelection wraps an HTML structure, taken from the given Selection,
// around the content of each element in the set of matched elements. The matched
// child is cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerSelection(sel *Selection) *Selection {
return s.wrapInnerNodes(sel.Nodes...)
}
// WrapInnerHtml parses the given HTML and wraps the resulting structure around
// the content of each element in the set of matched elements. The parsed nodes
// are cloned before being inserted into the document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerHtml(htmlStr string) *Selection {
nodesMap := make(map[string][]*html.Node)
for _, context := range s.Nodes {
nodes, found := nodesMap[nodeName(context)]
if !found {
nodes = parseHtmlWithContext(htmlStr, context)
nodesMap[nodeName(context)] = nodes
}
newSingleSelection(context, s.document).wrapInnerNodes(cloneNodes(nodes)...)
}
return s
}
// WrapInnerNode wraps the given node around the content of each element in the
// set of matched elements. The node is cloned before being inserted into the
// document.
//
// It returns the original set of elements.
func (s *Selection) WrapInnerNode(n *html.Node) *Selection {
return s.wrapInnerNodes(n)
}
func (s *Selection) wrapInnerNodes(ns ...*html.Node) *Selection {
if len(ns) == 0 {
return s
}
s.Each(func(i int, s *Selection) {
contents := s.Contents()
if contents.Size() > 0 {
contents.wrapAllNodes(ns...)
} else {
s.AppendNodes(cloneNode(ns[0]))
}
})
return s
}
func parseHtml(h string) []*html.Node {
// Errors are only returned when the io.Reader returns any error besides
// EOF, but strings.Reader never will
nodes, err := html.ParseFragment(strings.NewReader(h), &html.Node{Type: html.ElementNode})
if err != nil {
panic("goquery: failed to parse HTML: " + err.Error())
}
return nodes
}
func parseHtmlWithContext(h string, context *html.Node) []*html.Node {
// Errors are only returned when the io.Reader returns any error besides
// EOF, but strings.Reader never will
nodes, err := html.ParseFragment(strings.NewReader(h), context)
if err != nil {
panic("goquery: failed to parse HTML: " + err.Error())
}
return nodes
}
// Get the first child that is an ElementNode
func getFirstChildEl(n *html.Node) *html.Node {
c := n.FirstChild
for c != nil && c.Type != html.ElementNode {
c = c.NextSibling
}
return c
}
// Deep copy a slice of nodes.
func cloneNodes(ns []*html.Node) []*html.Node {
cns := make([]*html.Node, 0, len(ns))
for _, n := range ns {
cns = append(cns, cloneNode(n))
}
return cns
}
// Deep copy a node. The new node has clones of all the original node's
// children but none of its parents or siblings.
func cloneNode(n *html.Node) *html.Node {
nn := &html.Node{
Type: n.Type,
DataAtom: n.DataAtom,
Data: n.Data,
Attr: make([]html.Attribute, len(n.Attr)),
}
copy(nn.Attr, n.Attr)
for c := n.FirstChild; c != nil; c = c.NextSibling {
nn.AppendChild(cloneNode(c))
}
return nn
}
func (s *Selection) manipulateNodes(ns []*html.Node, reverse bool,
f func(sn *html.Node, n *html.Node)) *Selection {
lasti := s.Size() - 1
// net/html doesn't provide document fragments for insertion, so to get
// things in the correct order with After() and Prepend(), the callback
// needs to be called on the reverse of the nodes.
if reverse {
for i, j := 0, len(ns)-1; i < j; i, j = i+1, j-1 {
ns[i], ns[j] = ns[j], ns[i]
}
}
for i, sn := range s.Nodes {
for _, n := range ns {
if i != lasti {
f(sn, cloneNode(n))
} else {
if n.Parent != nil {
n.Parent.RemoveChild(n)
}
f(sn, n)
}
}
}
return s
}
// eachNodeHtml parses the given html string and inserts the resulting nodes in the dom with the mergeFn.
// The parsed nodes are inserted for each element of the selection.
// isParent can be used to indicate that the elements of the selection should be treated as the parent for the parsed html.
// A cache is used to avoid parsing the html multiple times should the elements of the selection result in the same context.
func (s *Selection) eachNodeHtml(htmlStr string, isParent bool, mergeFn func(n *html.Node, nodes []*html.Node)) *Selection {
// cache to avoid parsing the html for the same context multiple times
nodeCache := make(map[string][]*html.Node)
var context *html.Node
for _, n := range s.Nodes {
if isParent {
context = n.Parent
} else {
context = n
}
if context != nil {
nodes, found := nodeCache[nodeName(context)]
if !found {
nodes = parseHtmlWithContext(htmlStr, context)
nodeCache[nodeName(context)] = nodes
}
mergeFn(n, cloneNodes(nodes))
}
}
return s
}
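
A minimal sketch of the manipulation API above, including Append's clone-versus-move rule (clones go into every target except the last, which is moved); doc and the selectors are illustrative assumptions.

package example

import "github.com/PuerkitoBio/goquery"

// manipulationExample rewrites parts of the document in place.
func manipulationExample(doc *goquery.Document) {
	// Append moves the matched <span class="badge"> into the last <li>
	// and appends clones of it to every other <li>.
	doc.Find("li").Append("span.badge")

	// Parsed-HTML variants: the fragment is parsed once per context and
	// cloned for each element of the selection.
	doc.Find("p.note").BeforeHtml("<hr/>")
	doc.Find("div.content").SetHtml("<em>replaced</em>")

	// Wrap clones the first element matched by the selector around each
	// element; Remove detaches the matched nodes from the document.
	doc.Find("img").Wrap("figure.frame")
	doc.Find(".obsolete").Remove()
}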

275
vendor/github.com/PuerkitoBio/goquery/property.go generated vendored Normal file
View File

@@ -0,0 +1,275 @@
package goquery
import (
"bytes"
"regexp"
"strings"
"golang.org/x/net/html"
)
var rxClassTrim = regexp.MustCompile("[\t\r\n]")
// Attr gets the specified attribute's value for the first element in the
// Selection. To get the value for each element individually, use a looping
// construct such as the Each or Map methods.
func (s *Selection) Attr(attrName string) (val string, exists bool) {
if len(s.Nodes) == 0 {
return
}
return getAttributeValue(attrName, s.Nodes[0])
}
// AttrOr works like Attr but returns the default value if the attribute is not present.
func (s *Selection) AttrOr(attrName, defaultValue string) string {
if len(s.Nodes) == 0 {
return defaultValue
}
val, exists := getAttributeValue(attrName, s.Nodes[0])
if !exists {
return defaultValue
}
return val
}
// RemoveAttr removes the named attribute from each element in the set of matched elements.
func (s *Selection) RemoveAttr(attrName string) *Selection {
for _, n := range s.Nodes {
removeAttr(n, attrName)
}
return s
}
// SetAttr sets the given attribute on each element in the set of matched elements.
func (s *Selection) SetAttr(attrName, val string) *Selection {
for _, n := range s.Nodes {
attr := getAttributePtr(attrName, n)
if attr == nil {
n.Attr = append(n.Attr, html.Attribute{Key: attrName, Val: val})
} else {
attr.Val = val
}
}
return s
}
// Text gets the combined text contents of each element in the set of matched
// elements, including their descendants.
func (s *Selection) Text() string {
var buf bytes.Buffer
// Slightly optimized vs calling Each: no single selection object created
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.TextNode {
// Keep newlines and spaces, like jQuery
buf.WriteString(n.Data)
}
if n.FirstChild != nil {
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
}
}
for _, n := range s.Nodes {
f(n)
}
return buf.String()
}
// Size is an alias for Length.
func (s *Selection) Size() int {
return s.Length()
}
// Length returns the number of elements in the Selection object.
func (s *Selection) Length() int {
return len(s.Nodes)
}
// Html gets the HTML contents of the first element in the set of matched
// elements. It includes text and comment nodes.
func (s *Selection) Html() (ret string, e error) {
// Since there is no .innerHtml, the HTML content must be re-created from
// the nodes using html.Render.
var buf bytes.Buffer
if len(s.Nodes) > 0 {
for c := s.Nodes[0].FirstChild; c != nil; c = c.NextSibling {
e = html.Render(&buf, c)
if e != nil {
return
}
}
ret = buf.String()
}
return
}
// AddClass adds the given class(es) to each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) AddClass(class ...string) *Selection {
classStr := strings.TrimSpace(strings.Join(class, " "))
if classStr == "" {
return s
}
tcls := getClassesSlice(classStr)
for _, n := range s.Nodes {
curClasses, attr := getClassesAndAttr(n, true)
for _, newClass := range tcls {
if !strings.Contains(curClasses, " "+newClass+" ") {
curClasses += newClass + " "
}
}
setClasses(n, attr, curClasses)
}
return s
}
// HasClass determines whether any of the matched elements are assigned the
// given class.
func (s *Selection) HasClass(class string) bool {
class = " " + class + " "
for _, n := range s.Nodes {
classes, _ := getClassesAndAttr(n, false)
if strings.Contains(classes, class) {
return true
}
}
return false
}
// RemoveClass removes the given class(es) from each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
// If no class name is provided, all classes are removed.
func (s *Selection) RemoveClass(class ...string) *Selection {
var rclasses []string
classStr := strings.TrimSpace(strings.Join(class, " "))
remove := classStr == ""
if !remove {
rclasses = getClassesSlice(classStr)
}
for _, n := range s.Nodes {
if remove {
removeAttr(n, "class")
} else {
classes, attr := getClassesAndAttr(n, true)
for _, rcl := range rclasses {
classes = strings.Replace(classes, " "+rcl+" ", " ", -1)
}
setClasses(n, attr, classes)
}
}
return s
}
// ToggleClass adds or removes the given class(es) for each element in the set of matched elements.
// Multiple class names can be specified, separated by a space or via multiple arguments.
func (s *Selection) ToggleClass(class ...string) *Selection {
classStr := strings.TrimSpace(strings.Join(class, " "))
if classStr == "" {
return s
}
tcls := getClassesSlice(classStr)
for _, n := range s.Nodes {
classes, attr := getClassesAndAttr(n, true)
for _, tcl := range tcls {
if strings.Contains(classes, " "+tcl+" ") {
classes = strings.Replace(classes, " "+tcl+" ", " ", -1)
} else {
classes += tcl + " "
}
}
setClasses(n, attr, classes)
}
return s
}
func getAttributePtr(attrName string, n *html.Node) *html.Attribute {
if n == nil {
return nil
}
for i, a := range n.Attr {
if a.Key == attrName {
return &n.Attr[i]
}
}
return nil
}
// Private function to get the specified attribute's value from a node.
func getAttributeValue(attrName string, n *html.Node) (val string, exists bool) {
if a := getAttributePtr(attrName, n); a != nil {
val = a.Val
exists = true
}
return
}
// Get and normalize the "class" attribute from the node.
func getClassesAndAttr(n *html.Node, create bool) (classes string, attr *html.Attribute) {
// Applies only to element nodes
if n.Type == html.ElementNode {
attr = getAttributePtr("class", n)
if attr == nil && create {
n.Attr = append(n.Attr, html.Attribute{
Key: "class",
Val: "",
})
attr = &n.Attr[len(n.Attr)-1]
}
}
if attr == nil {
classes = " "
} else {
classes = rxClassTrim.ReplaceAllString(" "+attr.Val+" ", " ")
}
return
}
func getClassesSlice(classes string) []string {
return strings.Split(rxClassTrim.ReplaceAllString(" "+classes+" ", " "), " ")
}
func removeAttr(n *html.Node, attrName string) {
for i, a := range n.Attr {
if a.Key == attrName {
n.Attr[i], n.Attr[len(n.Attr)-1], n.Attr =
n.Attr[len(n.Attr)-1], html.Attribute{}, n.Attr[:len(n.Attr)-1]
return
}
}
}
func setClasses(n *html.Node, attr *html.Attribute, classes string) {
classes = strings.TrimSpace(classes)
if classes == "" {
removeAttr(n, "class")
return
}
attr.Val = classes
}
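
A minimal sketch of the attribute, class and content accessors above, assuming a parsed doc.

package example

import (
	"fmt"

	"github.com/PuerkitoBio/goquery"
)

// propertyExample reads and writes node properties on a selection.
func propertyExample(doc *goquery.Document) {
	links := doc.Find("a")

	// Attr reports the value for the first matched element only.
	href, ok := links.Attr("href")
	fmt.Println(href, ok)

	// Class helpers operate on every element in the selection.
	links.AddClass("external", "tracked").
		RemoveClass("tracked").
		ToggleClass("visited")
	fmt.Println(links.HasClass("external")) // true if any element has the class

	// Text concatenates the text of all elements and their descendants;
	// Html renders the inner HTML of the first element only.
	fmt.Println(links.Text())
	if inner, err := links.Html(); err == nil {
		fmt.Println(inner)
	}
}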

49
vendor/github.com/PuerkitoBio/goquery/query.go generated vendored Normal file
View File

@@ -0,0 +1,49 @@
package goquery
import "golang.org/x/net/html"
// Is checks the current matched set of elements against a selector and
// returns true if at least one of these elements matches.
func (s *Selection) Is(selector string) bool {
return s.IsMatcher(compileMatcher(selector))
}
// IsMatcher checks the current matched set of elements against a matcher and
// returns true if at least one of these elements matches.
func (s *Selection) IsMatcher(m Matcher) bool {
if len(s.Nodes) > 0 {
if len(s.Nodes) == 1 {
return m.Match(s.Nodes[0])
}
return len(m.Filter(s.Nodes)) > 0
}
return false
}
// IsFunction checks the current matched set of elements against a predicate and
// returns true if at least one of these elements matches.
func (s *Selection) IsFunction(f func(int, *Selection) bool) bool {
return s.FilterFunction(f).Length() > 0
}
// IsSelection checks the current matched set of elements against a Selection object
// and returns true if at least one of these elements matches.
func (s *Selection) IsSelection(sel *Selection) bool {
return s.FilterSelection(sel).Length() > 0
}
// IsNodes checks the current matched set of elements against the specified nodes
// and returns true if at least one of these elements matches.
func (s *Selection) IsNodes(nodes ...*html.Node) bool {
return s.FilterNodes(nodes...).Length() > 0
}
// Contains returns true if the specified Node is within,
// at any depth, one of the nodes in the Selection object.
// It is NOT inclusive, to behave like jQuery's implementation, and
// unlike Javascript's .contains, so if the contained
// node is itself in the selection, it returns false.
func (s *Selection) Contains(n *html.Node) bool {
return sliceContains(s.Nodes, n)
}
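
A minimal sketch of the identity checks above, assuming a parsed doc with a ul/li structure.

package example

import (
	"fmt"

	"github.com/PuerkitoBio/goquery"
)

// queryExample shows Is and Contains on a selection.
func queryExample(doc *goquery.Document) {
	items := doc.Find("li")

	// Is reports whether at least one matched element satisfies the selector.
	fmt.Println(items.Is(".active"))

	// Contains is not inclusive: it reports true only for descendants of the
	// selection's nodes, never for the nodes themselves.
	first := items.First()
	if len(first.Nodes) > 0 {
		fmt.Println(doc.Find("ul").Contains(first.Nodes[0])) // true if the <li> sits inside a <ul>
		fmt.Println(first.Contains(first.Nodes[0]))          // false
	}
}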

698
vendor/github.com/PuerkitoBio/goquery/traversal.go generated vendored Normal file
View File

@@ -0,0 +1,698 @@
package goquery
import "golang.org/x/net/html"
type siblingType int
// Sibling type, used internally when iterating over children at the same
// level (siblings) to specify which nodes are requested.
const (
siblingPrevUntil siblingType = iota - 3
siblingPrevAll
siblingPrev
siblingAll
siblingNext
siblingNextAll
siblingNextUntil
siblingAllIncludingNonElements
)
// Find gets the descendants of each element in the current set of matched
// elements, filtered by a selector. It returns a new Selection object
// containing these matched elements.
func (s *Selection) Find(selector string) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, compileMatcher(selector)))
}
// FindMatcher gets the descendants of each element in the current set of matched
// elements, filtered by the matcher. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindMatcher(m Matcher) *Selection {
return pushStack(s, findWithMatcher(s.Nodes, m))
}
// FindSelection gets the descendants of each element in the current
// Selection, filtered by a Selection. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.FindNodes(sel.Nodes...)
}
// FindNodes gets the descendants of each element in the current
// Selection, filtered by some nodes. It returns a new Selection object
// containing these matched elements.
func (s *Selection) FindNodes(nodes ...*html.Node) *Selection {
return pushStack(s, mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
if sliceContains(s.Nodes, n) {
return []*html.Node{n}
}
return nil
}))
}
// Contents gets the children of each element in the Selection,
// including text and comment nodes. It returns a new Selection object
// containing these elements.
func (s *Selection) Contents() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAllIncludingNonElements))
}
// ContentsFiltered gets the children of each element in the Selection,
// filtered by the specified selector. It returns a new Selection
// object containing these elements. Since selectors only act on Element nodes,
// this function is an alias to ChildrenFiltered unless the selector is empty,
// in which case it is an alias to Contents.
func (s *Selection) ContentsFiltered(selector string) *Selection {
if selector != "" {
return s.ChildrenFiltered(selector)
}
return s.Contents()
}
// ContentsMatcher gets the children of each element in the Selection,
// filtered by the specified matcher. It returns a new Selection
// object containing these elements. Since matchers only act on Element nodes,
// this function is an alias to ChildrenMatcher.
func (s *Selection) ContentsMatcher(m Matcher) *Selection {
return s.ChildrenMatcher(m)
}
// Children gets the child elements of each element in the Selection.
// It returns a new Selection object containing these elements.
func (s *Selection) Children() *Selection {
return pushStack(s, getChildrenNodes(s.Nodes, siblingAll))
}
// ChildrenFiltered gets the child elements of each element in the Selection,
// filtered by the specified selector. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenFiltered(selector string) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), compileMatcher(selector))
}
// ChildrenMatcher gets the child elements of each element in the Selection,
// filtered by the specified matcher. It returns a new
// Selection object containing these elements.
func (s *Selection) ChildrenMatcher(m Matcher) *Selection {
return filterAndPush(s, getChildrenNodes(s.Nodes, siblingAll), m)
}
// Parent gets the parent of each element in the Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) Parent() *Selection {
return pushStack(s, getParentNodes(s.Nodes))
}
// ParentFiltered gets the parent of each element in the Selection filtered by a
// selector. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentFiltered(selector string) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), compileMatcher(selector))
}
// ParentMatcher gets the parent of each element in the Selection filtered by a
// matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentNodes(s.Nodes), m)
}
// Closest gets the first element that matches the selector by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) Closest(selector string) *Selection {
cs := compileMatcher(selector)
return s.ClosestMatcher(cs)
}
// ClosestMatcher gets the first element that matches the matcher by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) ClosestMatcher(m Matcher) *Selection {
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
// For each node in the selection, test the node itself, then each parent
// until a match is found.
for ; n != nil; n = n.Parent {
if m.Match(n) {
return []*html.Node{n}
}
}
return nil
}))
}
// ClosestNodes gets the first element that matches one of the nodes by testing the
// element itself and traversing up through its ancestors in the DOM tree.
func (s *Selection) ClosestNodes(nodes ...*html.Node) *Selection {
set := make(map[*html.Node]bool)
for _, n := range nodes {
set[n] = true
}
return pushStack(s, mapNodes(s.Nodes, func(i int, n *html.Node) []*html.Node {
// For each node in the selection, test the node itself, then each parent
// until a match is found.
for ; n != nil; n = n.Parent {
if set[n] {
return []*html.Node{n}
}
}
return nil
}))
}
// ClosestSelection gets the first element that matches one of the nodes in the
// Selection by testing the element itself and traversing up through its ancestors
// in the DOM tree.
func (s *Selection) ClosestSelection(sel *Selection) *Selection {
if sel == nil {
return pushStack(s, nil)
}
return s.ClosestNodes(sel.Nodes...)
}
// Parents gets the ancestors of each element in the current Selection. It
// returns a new Selection object with the matched elements.
func (s *Selection) Parents() *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nil))
}
// ParentsFiltered gets the ancestors of each element in the current
// Selection. It returns a new Selection object with the matched elements.
func (s *Selection) ParentsFiltered(selector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), compileMatcher(selector))
}
// ParentsMatcher gets the ancestors of each element in the current
// Selection. It returns a new Selection object with the matched elements.
func (s *Selection) ParentsMatcher(m Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nil), m)
}
// ParentsUntil gets the ancestors of each element in the Selection, up to but
// not including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntil(selector string) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, compileMatcher(selector), nil))
}
// ParentsUntilMatcher gets the ancestors of each element in the Selection, up to but
// not including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsUntilMatcher(m Matcher) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, m, nil))
}
// ParentsUntilSelection gets the ancestors of each element in the Selection,
// up to but not including the elements in the specified Selection. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.Parents()
}
return s.ParentsUntilNodes(sel.Nodes...)
}
// ParentsUntilNodes gets the ancestors of each element in the Selection,
// up to but not including the specified nodes. It returns a
// new Selection object containing the matched elements.
func (s *Selection) ParentsUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getParentsNodes(s.Nodes, nil, nodes))
}
// ParentsFilteredUntil is like ParentsUntil, with the option to filter the
// results based on a selector string. It returns a new Selection
// object containing the matched elements.
func (s *Selection) ParentsFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// ParentsFilteredUntilMatcher is like ParentsUntilMatcher, with the option to filter the
// results based on a matcher. It returns a new Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, until, nil), filter)
}
// ParentsFilteredUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.ParentsMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// ParentsMatcherUntilSelection is like ParentsUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.ParentsMatcher(filter)
}
return s.ParentsMatcherUntilNodes(filter, sel.Nodes...)
}
// ParentsFilteredUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), compileMatcher(filterSelector))
}
// ParentsMatcherUntilNodes is like ParentsUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) ParentsMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getParentsNodes(s.Nodes, nil, nodes), filter)
}
// Siblings gets the siblings of each element in the Selection. It returns
// a new Selection object containing the matched elements.
func (s *Selection) Siblings() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil))
}
// SiblingsFiltered gets the siblings of each element in the Selection
// filtered by a selector. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), compileMatcher(selector))
}
// SiblingsMatcher gets the siblings of each element in the Selection
// filtered by a matcher. It returns a new Selection object containing the
// matched elements.
func (s *Selection) SiblingsMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingAll, nil, nil), m)
}
// Next gets the immediately following sibling of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) Next() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil))
}
// NextFiltered gets the immediately following sibling of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), compileMatcher(selector))
}
// NextMatcher gets the immediately following sibling of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNext, nil, nil), m)
}
// NextAll gets all the following siblings of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) NextAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil))
}
// NextAllFiltered gets all the following siblings of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), compileMatcher(selector))
}
// NextAllMatcher gets all the following siblings of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) NextAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextAll, nil, nil), m)
}
// Prev gets the immediately preceding sibling of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) Prev() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil))
}
// PrevFiltered gets the immediately preceding sibling of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), compileMatcher(selector))
}
// PrevMatcher gets the immediately preceding sibling of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrev, nil, nil), m)
}
// PrevAll gets all the preceding siblings of each element in the
// Selection. It returns a new Selection object containing the matched elements.
func (s *Selection) PrevAll() *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil))
}
// PrevAllFiltered gets all the preceding siblings of each element in the
// Selection filtered by a selector. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevAllFiltered(selector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), compileMatcher(selector))
}
// PrevAllMatcher gets all the preceding siblings of each element in the
// Selection filtered by a matcher. It returns a new Selection object
// containing the matched elements.
func (s *Selection) PrevAllMatcher(m Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevAll, nil, nil), m)
}
// NextUntil gets all following siblings of each element up to but not
// including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntil(selector string) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
compileMatcher(selector), nil))
}
// NextUntilMatcher gets all following siblings of each element up to but not
// including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilMatcher(m Matcher) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
m, nil))
}
// NextUntilSelection gets all following siblings of each element up to but not
// including the element matched by the Selection. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.NextAll()
}
return s.NextUntilNodes(sel.Nodes...)
}
// NextUntilNodes gets all following siblings of each element up to but not
// including the element matched by the nodes. It returns a new Selection
// object containing the matched elements.
func (s *Selection) NextUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes))
}
// PrevUntil gets all preceding siblings of each element up to but not
// including the element matched by the selector. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntil(selector string) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
compileMatcher(selector), nil))
}
// PrevUntilMatcher gets all preceding siblings of each element up to but not
// including the element matched by the matcher. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilMatcher(m Matcher) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
m, nil))
}
// PrevUntilSelection gets all preceding siblings of each element up to but not
// including the element matched by the Selection. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilSelection(sel *Selection) *Selection {
if sel == nil {
return s.PrevAll()
}
return s.PrevUntilNodes(sel.Nodes...)
}
// PrevUntilNodes gets all preceding siblings of each element up to but not
// including the element matched by the nodes. It returns a new Selection
// object containing the matched elements.
func (s *Selection) PrevUntilNodes(nodes ...*html.Node) *Selection {
return pushStack(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes))
}
// NextFilteredUntil is like NextUntil, with the option to filter
// the results based on a selector string.
// It returns a new Selection object containing the matched elements.
func (s *Selection) NextFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// NextFilteredUntilMatcher is like NextUntilMatcher, with the option to filter
// the results based on a matcher.
// It returns a new Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
until, nil), filter)
}
// NextFilteredUntilSelection is like NextUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.NextMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// NextMatcherUntilSelection is like NextUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.NextMatcher(filter)
}
return s.NextMatcherUntilNodes(filter, sel.Nodes...)
}
// NextFilteredUntilNodes is like NextUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes), compileMatcher(filterSelector))
}
// NextMatcherUntilNodes is like NextUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) NextMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingNextUntil,
nil, nodes), filter)
}
// PrevFilteredUntil is like PrevUntil, with the option to filter
// the results based on a selector string.
// It returns a new Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntil(filterSelector, untilSelector string) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
compileMatcher(untilSelector), nil), compileMatcher(filterSelector))
}
// PrevFilteredUntilMatcher is like PrevUntilMatcher, with the option to filter
// the results based on a matcher.
// It returns a new Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilMatcher(filter, until Matcher) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
until, nil), filter)
}
// PrevFilteredUntilSelection is like PrevUntilSelection, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilSelection(filterSelector string, sel *Selection) *Selection {
return s.PrevMatcherUntilSelection(compileMatcher(filterSelector), sel)
}
// PrevMatcherUntilSelection is like PrevUntilSelection, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevMatcherUntilSelection(filter Matcher, sel *Selection) *Selection {
if sel == nil {
return s.PrevMatcher(filter)
}
return s.PrevMatcherUntilNodes(filter, sel.Nodes...)
}
// PrevFilteredUntilNodes is like PrevUntilNodes, with the
// option to filter the results based on a selector string. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevFilteredUntilNodes(filterSelector string, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes), compileMatcher(filterSelector))
}
// PrevMatcherUntilNodes is like PrevUntilNodes, with the
// option to filter the results based on a matcher. It returns a new
// Selection object containing the matched elements.
func (s *Selection) PrevMatcherUntilNodes(filter Matcher, nodes ...*html.Node) *Selection {
return filterAndPush(s, getSiblingNodes(s.Nodes, siblingPrevUntil,
nil, nodes), filter)
}
// Filter and push filters the nodes based on a matcher, and pushes the results
// on the stack, with the srcSel as previous selection.
func filterAndPush(srcSel *Selection, nodes []*html.Node, m Matcher) *Selection {
// Create a temporary Selection with the specified nodes to filter using winnow
sel := &Selection{nodes, srcSel.document, nil}
// Filter based on matcher and push on stack
return pushStack(srcSel, winnow(sel, m, true))
}
// Internal implementation of Find that returns raw nodes.
func findWithMatcher(nodes []*html.Node, m Matcher) []*html.Node {
// Map nodes to find the matches within the children of each node
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
// Go down one level, because jQuery's Find selects only within descendants
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == html.ElementNode {
result = append(result, m.MatchAll(c)...)
}
}
return
})
}
// Internal implementation to get all parent nodes, stopping at the specified
// node (or nil if no stop).
func getParentsNodes(nodes []*html.Node, stopm Matcher, stopNodes []*html.Node) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) (result []*html.Node) {
for p := n.Parent; p != nil; p = p.Parent {
sel := newSingleSelection(p, nil)
if stopm != nil {
if sel.IsMatcher(stopm) {
break
}
} else if len(stopNodes) > 0 {
if sel.IsNodes(stopNodes...) {
break
}
}
if p.Type == html.ElementNode {
result = append(result, p)
}
}
return
})
}
// Internal implementation of sibling nodes that returns a raw slice of matches.
func getSiblingNodes(nodes []*html.Node, st siblingType, untilm Matcher, untilNodes []*html.Node) []*html.Node {
var f func(*html.Node) bool
// If the requested siblings are ...Until, create the test function to
// determine if the until condition is reached (returns true if it is)
if st == siblingNextUntil || st == siblingPrevUntil {
f = func(n *html.Node) bool {
if untilm != nil {
// Matcher-based condition
sel := newSingleSelection(n, nil)
return sel.IsMatcher(untilm)
} else if len(untilNodes) > 0 {
// Nodes-based condition
sel := newSingleSelection(n, nil)
return sel.IsNodes(untilNodes...)
}
return false
}
}
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
return getChildrenWithSiblingType(n.Parent, st, n, f)
})
}
// Gets the children nodes of each node in the specified slice of nodes,
// based on the sibling type request.
func getChildrenNodes(nodes []*html.Node, st siblingType) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
return getChildrenWithSiblingType(n, st, nil, nil)
})
}
// Gets the children of the specified parent, based on the requested sibling
// type, skipping a specified node if required.
func getChildrenWithSiblingType(parent *html.Node, st siblingType, skipNode *html.Node,
untilFunc func(*html.Node) bool) (result []*html.Node) {
// Create the iterator function
var iter = func(cur *html.Node) (ret *html.Node) {
// Based on the sibling type requested, iterate the right way
for {
switch st {
case siblingAll, siblingAllIncludingNonElements:
if cur == nil {
// First iteration, start with first child of parent
// Skip node if required
if ret = parent.FirstChild; ret == skipNode && skipNode != nil {
ret = skipNode.NextSibling
}
} else {
// Skip node if required
if ret = cur.NextSibling; ret == skipNode && skipNode != nil {
ret = skipNode.NextSibling
}
}
case siblingPrev, siblingPrevAll, siblingPrevUntil:
if cur == nil {
// Start with previous sibling of the skip node
ret = skipNode.PrevSibling
} else {
ret = cur.PrevSibling
}
case siblingNext, siblingNextAll, siblingNextUntil:
if cur == nil {
// Start with next sibling of the skip node
ret = skipNode.NextSibling
} else {
ret = cur.NextSibling
}
default:
panic("Invalid sibling type.")
}
if ret == nil || ret.Type == html.ElementNode || st == siblingAllIncludingNonElements {
return
}
// Not a valid node, try again from this one
cur = ret
}
}
for c := iter(nil); c != nil; c = iter(c) {
// If this is an ...Until case, test before append (returns true
// if the until condition is reached)
if st == siblingNextUntil || st == siblingPrevUntil {
if untilFunc(c) {
return
}
}
result = append(result, c)
if st == siblingNext || st == siblingPrev {
// Only one node was requested (immediate next or previous), so exit
return
}
}
return
}
// Internal implementation of parent nodes that returns a raw slice of Nodes.
func getParentNodes(nodes []*html.Node) []*html.Node {
return mapNodes(nodes, func(i int, n *html.Node) []*html.Node {
if n.Parent != nil && n.Parent.Type == html.ElementNode {
return []*html.Node{n.Parent}
}
return nil
})
}
// Internal map function used by many traversing methods. Takes the source nodes
// to iterate on and the mapping function that returns an array of nodes.
// Returns an array of nodes mapped by calling the callback function once for
// each node in the source nodes.
func mapNodes(nodes []*html.Node, f func(int, *html.Node) []*html.Node) (result []*html.Node) {
set := make(map[*html.Node]bool)
for i, n := range nodes {
if vals := f(i, n); len(vals) > 0 {
result = appendWithoutDuplicates(result, vals, set)
}
}
return result
}
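A minimal usage sketch of the sibling-traversal methods above (an editor's illustration, not part of the vendored file; it assumes goquery's Find and Length helpers, which are defined elsewhere in the package):
package main

import (
    "fmt"
    "strings"

    "github.com/PuerkitoBio/goquery"
)

func main() {
    const page = `<ul><li id="a">a</li><li id="b">b</li><li id="c">c</li><li id="d">d</li></ul>`
    doc, err := goquery.NewDocumentFromReader(strings.NewReader(page))
    if err != nil {
        panic(err)
    }
    start := doc.Find("#a")
    fmt.Println(start.NextAll().Length())       // 3: #b, #c and #d follow #a
    fmt.Println(start.NextUntil("#d").Length()) // 2: following siblings up to, but not including, #d
    fmt.Println(doc.Find("#c").PrevFiltered("li").Length()) // 1: the immediately preceding <li>, #b
}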

141
vendor/github.com/PuerkitoBio/goquery/type.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
package goquery
import (
"errors"
"io"
"net/http"
"net/url"
"github.com/andybalholm/cascadia"
"golang.org/x/net/html"
)
// Document represents an HTML document to be manipulated. Unlike jQuery, which
// is loaded as part of a DOM document, and thus acts upon its containing
// document, GoQuery doesn't know which HTML document to act upon. So it needs
// to be told, and that's what the Document class is for. It holds the root
// document node to manipulate, and can make selections on this document.
type Document struct {
*Selection
Url *url.URL
rootNode *html.Node
}
// NewDocumentFromNode is a Document constructor that takes a root html Node
// as argument.
func NewDocumentFromNode(root *html.Node) *Document {
return newDocument(root, nil)
}
// NewDocument is a Document constructor that takes a string URL as argument.
// It loads the specified document, parses it, and stores the root Document
// node, ready to be manipulated.
//
// Deprecated: Use the net/http standard library package to make the request
// and validate the response before calling goquery.NewDocumentFromReader
// with the response's body.
func NewDocument(url string) (*Document, error) {
// Load the URL
res, e := http.Get(url)
if e != nil {
return nil, e
}
return NewDocumentFromResponse(res)
}
// NewDocumentFromReader returns a Document from an io.Reader.
// It returns an error as second value if the reader's data cannot be parsed
// as html. It does not check if the reader is also an io.Closer, the
// provided reader is never closed by this call. It is the responsibility
// of the caller to close it if required.
func NewDocumentFromReader(r io.Reader) (*Document, error) {
root, e := html.Parse(r)
if e != nil {
return nil, e
}
return newDocument(root, nil), nil
}
// NewDocumentFromResponse is another Document constructor that takes an http response as argument.
// It loads the specified response's document, parses it, and stores the root Document
// node, ready to be manipulated. The response's body is closed on return.
//
// Deprecated: Use goquery.NewDocumentFromReader with the response's body.
func NewDocumentFromResponse(res *http.Response) (*Document, error) {
if res == nil {
return nil, errors.New("Response is nil")
}
defer res.Body.Close()
if res.Request == nil {
return nil, errors.New("Response.Request is nil")
}
// Parse the HTML into nodes
root, e := html.Parse(res.Body)
if e != nil {
return nil, e
}
// Create and fill the document
return newDocument(root, res.Request.URL), nil
}
// CloneDocument creates a deep-clone of a document.
func CloneDocument(doc *Document) *Document {
return newDocument(cloneNode(doc.rootNode), doc.Url)
}
// Private constructor, make sure all fields are correctly filled.
func newDocument(root *html.Node, url *url.URL) *Document {
// Create and fill the document
d := &Document{nil, url, root}
d.Selection = newSingleSelection(root, d)
return d
}
// Selection represents a collection of nodes matching some criteria. The
// initial Selection can be created by using Document.Find, and then
// manipulated using the jQuery-like chainable syntax and methods.
type Selection struct {
Nodes []*html.Node
document *Document
prevSel *Selection
}
// Helper constructor to create an empty selection
func newEmptySelection(doc *Document) *Selection {
return &Selection{nil, doc, nil}
}
// Helper constructor to create a selection of only one node
func newSingleSelection(node *html.Node, doc *Document) *Selection {
return &Selection{[]*html.Node{node}, doc, nil}
}
// Matcher is an interface that defines the methods to match
// HTML nodes against a compiled selector string. Cascadia's
// Selector implements this interface.
type Matcher interface {
Match(*html.Node) bool
MatchAll(*html.Node) []*html.Node
Filter([]*html.Node) []*html.Node
}
// compileMatcher compiles the selector string s and returns
// the corresponding Matcher. If s is an invalid selector string,
// it returns a Matcher that fails all matches.
func compileMatcher(s string) Matcher {
cs, err := cascadia.Compile(s)
if err != nil {
return invalidMatcher{}
}
return cs
}
// invalidMatcher is a Matcher that always fails to match.
type invalidMatcher struct{}
func (invalidMatcher) Match(n *html.Node) bool { return false }
func (invalidMatcher) MatchAll(n *html.Node) []*html.Node { return nil }
func (invalidMatcher) Filter(ns []*html.Node) []*html.Node { return nil }
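A brief sketch of how a Document and Selection are typically created and used (illustrative only, not part of the vendored file; Find, Each and Text are assumed from the rest of the goquery API):
package main

import (
    "fmt"
    "strings"

    "github.com/PuerkitoBio/goquery"
)

func main() {
    const page = `<html><body><p class="intro">Hello</p><p>World</p></body></html>`
    // NewDocumentFromReader never closes the reader; a strings.Reader needs no closing anyway.
    doc, err := goquery.NewDocumentFromReader(strings.NewReader(page))
    if err != nil {
        panic(err)
    }
    // Selections are created from the Document and chained jQuery-style.
    doc.Find("p").Each(func(i int, s *goquery.Selection) {
        fmt.Println(i, s.Text())
    })
}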

171
vendor/github.com/PuerkitoBio/goquery/utilities.go generated vendored Normal file
View File

@ -0,0 +1,171 @@
package goquery
import (
"bytes"
"golang.org/x/net/html"
)
// used to determine if a set (map[*html.Node]bool) should be used
// instead of iterating over a slice. The set uses more memory and
// is slower than slice iteration for small N.
const minNodesForSet = 1000
var nodeNames = []string{
html.ErrorNode: "#error",
html.TextNode: "#text",
html.DocumentNode: "#document",
html.CommentNode: "#comment",
}
// NodeName returns the node name of the first element in the selection.
// It tries to behave in a similar way as the DOM's nodeName property
// (https://developer.mozilla.org/en-US/docs/Web/API/Node/nodeName).
//
// Go's net/html package defines the following node types, listed with
// the corresponding returned value from this function:
//
// ErrorNode : #error
// TextNode : #text
// DocumentNode : #document
// ElementNode : the element's tag name
// CommentNode : #comment
// DoctypeNode : the name of the document type
//
func NodeName(s *Selection) string {
if s.Length() == 0 {
return ""
}
return nodeName(s.Get(0))
}
// nodeName returns the node name of the given html node.
// See NodeName for additional details on behaviour.
func nodeName(node *html.Node) string {
if node == nil {
return ""
}
switch node.Type {
case html.ElementNode, html.DoctypeNode:
return node.Data
default:
if node.Type >= 0 && int(node.Type) < len(nodeNames) {
return nodeNames[node.Type]
}
return ""
}
}
// OuterHtml returns the outer HTML rendering of the first item in
// the selection - that is, the HTML including the first element's
// tag and attributes.
//
// Unlike InnerHtml, this is a function and not a method on the Selection,
// because this is not a jQuery method (in javascript-land, this is
// a property provided by the DOM).
func OuterHtml(s *Selection) (string, error) {
var buf bytes.Buffer
if s.Length() == 0 {
return "", nil
}
n := s.Get(0)
if err := html.Render(&buf, n); err != nil {
return "", err
}
return buf.String(), nil
}
// Loop through all container nodes to search for the target node.
func sliceContains(container []*html.Node, contained *html.Node) bool {
for _, n := range container {
if nodeContains(n, contained) {
return true
}
}
return false
}
// Checks if the contained node is within the container node.
func nodeContains(container *html.Node, contained *html.Node) bool {
// Check if the parent of the contained node is the container node, traversing
// upward until the top is reached, or the container is found.
for contained = contained.Parent; contained != nil; contained = contained.Parent {
if container == contained {
return true
}
}
return false
}
// Checks if the target node is in the slice of nodes.
func isInSlice(slice []*html.Node, node *html.Node) bool {
return indexInSlice(slice, node) > -1
}
// Returns the index of the target node in the slice, or -1.
func indexInSlice(slice []*html.Node, node *html.Node) int {
if node != nil {
for i, n := range slice {
if n == node {
return i
}
}
}
return -1
}
// Appends the new nodes to the target slice, making sure no duplicate is added.
// There is no check to the original state of the target slice, so it may still
// contain duplicates. The target slice is returned because append() may create
// a new underlying array. If targetSet is nil, a local set is created with the
// target if len(target) + len(nodes) is greater than minNodesForSet.
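// For example, appending the nodes {b, a, c} to a target of {a, b} yields
// {a, b, c}: a and b are already present, so only c is added.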
func appendWithoutDuplicates(target []*html.Node, nodes []*html.Node, targetSet map[*html.Node]bool) []*html.Node {
// if there are not that many nodes, don't use the map, faster to just use nested loops
// (unless a non-nil targetSet is passed, in which case the caller knows better).
if targetSet == nil && len(target)+len(nodes) < minNodesForSet {
for _, n := range nodes {
if !isInSlice(target, n) {
target = append(target, n)
}
}
return target
}
// if a targetSet is passed, then assume it is reliable, otherwise create one
// and initialize it with the current target contents.
if targetSet == nil {
targetSet = make(map[*html.Node]bool, len(target))
for _, n := range target {
targetSet[n] = true
}
}
for _, n := range nodes {
if !targetSet[n] {
target = append(target, n)
targetSet[n] = true
}
}
return target
}
// Loop through a selection, returning only those nodes that pass the predicate
// function.
func grep(sel *Selection, predicate func(i int, s *Selection) bool) (result []*html.Node) {
for i, n := range sel.Nodes {
if predicate(i, newSingleSelection(n, sel.document)) {
result = append(result, n)
}
}
return result
}
// Creates a new Selection object based on the specified nodes, and keeps the
// source Selection object on the stack (linked list).
func pushStack(fromSel *Selection, nodes []*html.Node) *Selection {
result := &Selection{nodes, fromSel.document, fromSel}
return result
}
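An illustrative sketch of NodeName and OuterHtml in use (not part of the vendored file; Find is assumed from the rest of the goquery API):
package main

import (
    "fmt"
    "strings"

    "github.com/PuerkitoBio/goquery"
)

func main() {
    doc, err := goquery.NewDocumentFromReader(strings.NewReader(`<div id="box"><span>hi</span></div>`))
    if err != nil {
        panic(err)
    }
    box := doc.Find("#box")
    // NodeName reports the tag name of the first matched element: "div".
    fmt.Println(goquery.NodeName(box))
    // OuterHtml renders the element including its own tag and attributes.
    out, err := goquery.OuterHtml(box)
    if err != nil {
        panic(err)
    }
    fmt.Println(out)
}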

14
vendor/github.com/andybalholm/cascadia/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,14 @@
language: go
go:
- 1.3
- 1.4
install:
- go get github.com/andybalholm/cascadia
script:
- go test -v
notifications:
email: false

24
vendor/github.com/andybalholm/cascadia/LICENSE generated vendored Normal file
View File

@ -0,0 +1,24 @@
Copyright (c) 2011 Andy Balholm. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

9
vendor/github.com/andybalholm/cascadia/README.md generated vendored Normal file
View File

@ -0,0 +1,9 @@
# cascadia
[![](https://travis-ci.org/andybalholm/cascadia.svg)](https://travis-ci.org/andybalholm/cascadia)
The Cascadia package implements CSS selectors for use with the parse trees produced by the html package.
To test CSS selectors without writing Go code, check out [cascadia](https://github.com/suntong/cascadia) the command line tool, a thin wrapper around this package.
[Refer to godoc here](https://godoc.org/github.com/andybalholm/cascadia).

3
vendor/github.com/andybalholm/cascadia/go.mod generated vendored Normal file
View File

@ -0,0 +1,3 @@
module "github.com/andybalholm/cascadia"
require "golang.org/x/net" v0.0.0-20180218175443-cbe0f9307d01

801
vendor/github.com/andybalholm/cascadia/parser.go generated vendored Normal file
View File

@ -0,0 +1,801 @@
// Package cascadia is an implementation of CSS selectors.
package cascadia
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// a parser for CSS selectors
type parser struct {
s string // the source text
i int // the current position
}
// parseEscape parses a backslash escape.
func (p *parser) parseEscape() (result string, err error) {
if len(p.s) < p.i+2 || p.s[p.i] != '\\' {
return "", errors.New("invalid escape sequence")
}
start := p.i + 1
c := p.s[start]
switch {
case c == '\r' || c == '\n' || c == '\f':
return "", errors.New("escaped line ending outside string")
case hexDigit(c):
// unicode escape (hex)
var i int
for i = start; i < p.i+6 && i < len(p.s) && hexDigit(p.s[i]); i++ {
// empty
}
v, _ := strconv.ParseUint(p.s[start:i], 16, 21)
if len(p.s) > i {
switch p.s[i] {
case '\r':
i++
if len(p.s) > i && p.s[i] == '\n' {
i++
}
case ' ', '\t', '\n', '\f':
i++
}
}
p.i = i
return string(rune(v)), nil
}
// Return the literal character after the backslash.
result = p.s[start : start+1]
p.i += 2
return result, nil
}
// toLowerASCII returns s with all ASCII capital letters lowercased.
func toLowerASCII(s string) string {
var b []byte
for i := 0; i < len(s); i++ {
if c := s[i]; 'A' <= c && c <= 'Z' {
if b == nil {
b = make([]byte, len(s))
copy(b, s)
}
b[i] = s[i] + ('a' - 'A')
}
}
if b == nil {
return s
}
return string(b)
}
func hexDigit(c byte) bool {
return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
}
// nameStart returns whether c can be the first character of an identifier
// (not counting an initial hyphen, or an escape sequence).
func nameStart(c byte) bool {
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127
}
// nameChar returns whether c can be a character within an identifier
// (not counting an escape sequence).
func nameChar(c byte) bool {
return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c > 127 ||
c == '-' || '0' <= c && c <= '9'
}
// parseIdentifier parses an identifier.
func (p *parser) parseIdentifier() (result string, err error) {
startingDash := false
if len(p.s) > p.i && p.s[p.i] == '-' {
startingDash = true
p.i++
}
if len(p.s) <= p.i {
return "", errors.New("expected identifier, found EOF instead")
}
if c := p.s[p.i]; !(nameStart(c) || c == '\\') {
return "", fmt.Errorf("expected identifier, found %c instead", c)
}
result, err = p.parseName()
if startingDash && err == nil {
result = "-" + result
}
return
}
// parseName parses a name (which is like an identifier, but doesn't have
// extra restrictions on the first character).
func (p *parser) parseName() (result string, err error) {
i := p.i
loop:
for i < len(p.s) {
c := p.s[i]
switch {
case nameChar(c):
start := i
for i < len(p.s) && nameChar(p.s[i]) {
i++
}
result += p.s[start:i]
case c == '\\':
p.i = i
val, err := p.parseEscape()
if err != nil {
return "", err
}
i = p.i
result += val
default:
break loop
}
}
if result == "" {
return "", errors.New("expected name, found EOF instead")
}
p.i = i
return result, nil
}
// parseString parses a single- or double-quoted string.
func (p *parser) parseString() (result string, err error) {
i := p.i
if len(p.s) < i+2 {
return "", errors.New("expected string, found EOF instead")
}
quote := p.s[i]
i++
loop:
for i < len(p.s) {
switch p.s[i] {
case '\\':
if len(p.s) > i+1 {
switch c := p.s[i+1]; c {
case '\r':
if len(p.s) > i+2 && p.s[i+2] == '\n' {
i += 3
continue loop
}
fallthrough
case '\n', '\f':
i += 2
continue loop
}
}
p.i = i
val, err := p.parseEscape()
if err != nil {
return "", err
}
i = p.i
result += val
case quote:
break loop
case '\r', '\n', '\f':
return "", errors.New("unexpected end of line in string")
default:
start := i
for i < len(p.s) {
if c := p.s[i]; c == quote || c == '\\' || c == '\r' || c == '\n' || c == '\f' {
break
}
i++
}
result += p.s[start:i]
}
}
if i >= len(p.s) {
return "", errors.New("EOF in string")
}
// Consume the final quote.
i++
p.i = i
return result, nil
}
// parseRegex parses a regular expression; the end is defined by encountering an
// unmatched closing ')' or ']' which is not consumed
func (p *parser) parseRegex() (rx *regexp.Regexp, err error) {
i := p.i
if len(p.s) < i+2 {
return nil, errors.New("expected regular expression, found EOF instead")
}
// number of open parens or brackets;
// when it becomes negative, finished parsing regex
open := 0
loop:
for i < len(p.s) {
switch p.s[i] {
case '(', '[':
open++
case ')', ']':
open--
if open < 0 {
break loop
}
}
i++
}
if i >= len(p.s) {
return nil, errors.New("EOF in regular expression")
}
rx, err = regexp.Compile(p.s[p.i:i])
p.i = i
return rx, err
}
// skipWhitespace consumes whitespace characters and comments.
// It returns true if there was actually anything to skip.
func (p *parser) skipWhitespace() bool {
i := p.i
for i < len(p.s) {
switch p.s[i] {
case ' ', '\t', '\r', '\n', '\f':
i++
continue
case '/':
if strings.HasPrefix(p.s[i:], "/*") {
end := strings.Index(p.s[i+len("/*"):], "*/")
if end != -1 {
i += end + len("/**/")
continue
}
}
}
break
}
if i > p.i {
p.i = i
return true
}
return false
}
// consumeParenthesis consumes an opening parenthesis and any following
// whitespace. It returns true if there was actually a parenthesis to skip.
func (p *parser) consumeParenthesis() bool {
if p.i < len(p.s) && p.s[p.i] == '(' {
p.i++
p.skipWhitespace()
return true
}
return false
}
// consumeClosingParenthesis consumes a closing parenthesis and any preceding
// whitespace. It returns true if there was actually a parenthesis to skip.
func (p *parser) consumeClosingParenthesis() bool {
i := p.i
p.skipWhitespace()
if p.i < len(p.s) && p.s[p.i] == ')' {
p.i++
return true
}
p.i = i
return false
}
// parseTypeSelector parses a type selector (one that matches by tag name).
func (p *parser) parseTypeSelector() (result tagSelector, err error) {
tag, err := p.parseIdentifier()
if err != nil {
return
}
return tagSelector{tag: toLowerASCII(tag)}, nil
}
// parseIDSelector parses a selector that matches by id attribute.
func (p *parser) parseIDSelector() (idSelector, error) {
if p.i >= len(p.s) {
return idSelector{}, fmt.Errorf("expected id selector (#id), found EOF instead")
}
if p.s[p.i] != '#' {
return idSelector{}, fmt.Errorf("expected id selector (#id), found '%c' instead", p.s[p.i])
}
p.i++
id, err := p.parseName()
if err != nil {
return idSelector{}, err
}
return idSelector{id: id}, nil
}
// parseClassSelector parses a selector that matches by class attribute.
func (p *parser) parseClassSelector() (classSelector, error) {
if p.i >= len(p.s) {
return classSelector{}, fmt.Errorf("expected class selector (.class), found EOF instead")
}
if p.s[p.i] != '.' {
return classSelector{}, fmt.Errorf("expected class selector (.class), found '%c' instead", p.s[p.i])
}
p.i++
class, err := p.parseIdentifier()
if err != nil {
return classSelector{}, err
}
return classSelector{class: class}, nil
}
// parseAttributeSelector parses a selector that matches by attribute value.
func (p *parser) parseAttributeSelector() (attrSelector, error) {
if p.i >= len(p.s) {
return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found EOF instead")
}
if p.s[p.i] != '[' {
return attrSelector{}, fmt.Errorf("expected attribute selector ([attribute]), found '%c' instead", p.s[p.i])
}
p.i++
p.skipWhitespace()
key, err := p.parseIdentifier()
if err != nil {
return attrSelector{}, err
}
key = toLowerASCII(key)
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] == ']' {
p.i++
return attrSelector{key: key, operation: ""}, nil
}
if p.i+2 >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
op := p.s[p.i : p.i+2]
if op[0] == '=' {
op = "="
} else if op[1] != '=' {
return attrSelector{}, fmt.Errorf(`expected equality operator, found "%s" instead`, op)
}
p.i += len(op)
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
var val string
var rx *regexp.Regexp
if op == "#=" {
rx, err = p.parseRegex()
} else {
switch p.s[p.i] {
case '\'', '"':
val, err = p.parseString()
default:
val, err = p.parseIdentifier()
}
}
if err != nil {
return attrSelector{}, err
}
p.skipWhitespace()
if p.i >= len(p.s) {
return attrSelector{}, errors.New("unexpected EOF in attribute selector")
}
if p.s[p.i] != ']' {
return attrSelector{}, fmt.Errorf("expected ']', found '%c' instead", p.s[p.i])
}
p.i++
switch op {
case "=", "!=", "~=", "|=", "^=", "$=", "*=", "#=":
return attrSelector{key: key, val: val, operation: op, regexp: rx}, nil
default:
return attrSelector{}, fmt.Errorf("attribute operator %q is not supported", op)
}
}
var errExpectedParenthesis = errors.New("expected '(' but didn't find it")
var errExpectedClosingParenthesis = errors.New("expected ')' but didn't find it")
var errUnmatchedParenthesis = errors.New("unmatched '('")
// parsePseudoclassSelector parses a pseudoclass selector like :not(p)
func (p *parser) parsePseudoclassSelector() (out Sel, err error) {
if p.i >= len(p.s) {
return nil, fmt.Errorf("expected pseudoclass selector (:pseudoclass), found EOF instead")
}
if p.s[p.i] != ':' {
return nil, fmt.Errorf("expected attribute selector (:pseudoclass), found '%c' instead", p.s[p.i])
}
p.i++
if p.s[p.i] == ':' { // we found a pseudo-element
p.i++
}
name, err := p.parseIdentifier()
if err != nil {
return
}
name = toLowerASCII(name)
switch name {
case "not", "has", "haschild":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
}
sel, parseErr := p.parseSelectorGroup()
if parseErr != nil {
return out, parseErr
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
}
out = relativePseudoClassSelector{name: name, match: sel}
case "contains", "containsown":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
}
if p.i == len(p.s) {
return out, errUnmatchedParenthesis
}
var val string
switch p.s[p.i] {
case '\'', '"':
val, err = p.parseString()
default:
val, err = p.parseIdentifier()
}
if err != nil {
return out, err
}
val = strings.ToLower(val)
p.skipWhitespace()
if p.i >= len(p.s) {
return out, errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
}
out = containsPseudoClassSelector{own: name == "containsown", value: val}
case "matches", "matchesown":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
}
rx, err := p.parseRegex()
if err != nil {
return out, err
}
if p.i >= len(p.s) {
return out, errors.New("unexpected EOF in pseudo selector")
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
}
out = regexpPseudoClassSelector{own: name == "matchesown", regexp: rx}
case "nth-child", "nth-last-child", "nth-of-type", "nth-last-of-type":
if !p.consumeParenthesis() {
return out, errExpectedParenthesis
}
a, b, err := p.parseNth()
if err != nil {
return out, err
}
if !p.consumeClosingParenthesis() {
return out, errExpectedClosingParenthesis
}
last := name == "nth-last-child" || name == "nth-last-of-type"
ofType := name == "nth-of-type" || name == "nth-last-of-type"
out = nthPseudoClassSelector{a: a, b: b, last: last, ofType: ofType}
case "first-child":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: false}
case "last-child":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: false, last: true}
case "first-of-type":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: false}
case "last-of-type":
out = nthPseudoClassSelector{a: 0, b: 1, ofType: true, last: true}
case "only-child":
out = onlyChildPseudoClassSelector{ofType: false}
case "only-of-type":
out = onlyChildPseudoClassSelector{ofType: true}
case "input":
out = inputPseudoClassSelector{}
case "empty":
out = emptyElementPseudoClassSelector{}
case "root":
out = rootPseudoClassSelector{}
case "after", "backdrop", "before", "cue", "first-letter", "first-line", "grammar-error", "marker", "placeholder", "selection", "spelling-error":
return out, errors.New("pseudo-elements are not yet supported")
default:
return out, fmt.Errorf("unknown pseudoclass or pseudoelement :%s", name)
}
return
}
// parseInteger parses a decimal integer.
func (p *parser) parseInteger() (int, error) {
i := p.i
start := i
for i < len(p.s) && '0' <= p.s[i] && p.s[i] <= '9' {
i++
}
if i == start {
return 0, errors.New("expected integer, but didn't find it")
}
p.i = i
val, err := strconv.Atoi(p.s[start:i])
if err != nil {
return 0, err
}
return val, nil
}
// parseNth parses the argument for :nth-child (normally of the form an+b).
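// For example, in ":nth-child(2n+1)" this yields a=2 and b=1; "odd" is
// shorthand for 2n+1 and "even" for 2n; ":nth-child(3)" yields a=0, b=3;
// ":nth-child(-n+2)" yields a=-1, b=2.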
func (p *parser) parseNth() (a, b int, err error) {
// initial state
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '-':
p.i++
goto negativeA
case '+':
p.i++
goto positiveA
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
goto positiveA
case 'n', 'N':
a = 1
p.i++
goto readN
case 'o', 'O', 'e', 'E':
id, nameErr := p.parseName()
if nameErr != nil {
return 0, 0, nameErr
}
id = toLowerASCII(id)
if id == "odd" {
return 2, 1, nil
}
if id == "even" {
return 2, 0, nil
}
return 0, 0, fmt.Errorf("expected 'odd' or 'even', but found '%s' instead", id)
default:
goto invalid
}
positiveA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
a, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
goto readA
case 'n', 'N':
a = 1
p.i++
goto readN
default:
goto invalid
}
negativeA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
a, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
a = -a
goto readA
case 'n', 'N':
a = -1
p.i++
goto readN
default:
goto invalid
}
readA:
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case 'n', 'N':
p.i++
goto readN
default:
// The number we read as a is actually b.
return 0, a, nil
}
readN:
p.skipWhitespace()
if p.i >= len(p.s) {
goto eof
}
switch p.s[p.i] {
case '+':
p.i++
p.skipWhitespace()
b, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
return a, b, nil
case '-':
p.i++
p.skipWhitespace()
b, err = p.parseInteger()
if err != nil {
return 0, 0, err
}
return a, -b, nil
default:
return a, 0, nil
}
eof:
return 0, 0, errors.New("unexpected EOF while attempting to parse expression of form an+b")
invalid:
return 0, 0, errors.New("unexpected character while attempting to parse expression of form an+b")
}
// parseSimpleSelectorSequence parses a selector sequence that applies to
// a single element.
func (p *parser) parseSimpleSelectorSequence() (Sel, error) {
var selectors []Sel
if p.i >= len(p.s) {
return nil, errors.New("expected selector, found EOF instead")
}
switch p.s[p.i] {
case '*':
// It's the universal selector. Just skip over it, since it doesn't affect the meaning.
p.i++
case '#', '.', '[', ':':
// There's no type selector. Wait to process the others in the main loop.
default:
r, err := p.parseTypeSelector()
if err != nil {
return nil, err
}
selectors = append(selectors, r)
}
loop:
for p.i < len(p.s) {
var (
ns Sel
err error
)
switch p.s[p.i] {
case '#':
ns, err = p.parseIDSelector()
case '.':
ns, err = p.parseClassSelector()
case '[':
ns, err = p.parseAttributeSelector()
case ':':
ns, err = p.parsePseudoclassSelector()
default:
break loop
}
if err != nil {
return nil, err
}
selectors = append(selectors, ns)
}
if len(selectors) == 1 { // no need to wrap the selectors in compoundSelector
return selectors[0], nil
}
return compoundSelector{selectors: selectors}, nil
}
// parseSelector parses a selector that may include combinators.
func (p *parser) parseSelector() (Sel, error) {
p.skipWhitespace()
result, err := p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
for {
var (
combinator byte
c Sel
)
if p.skipWhitespace() {
combinator = ' '
}
if p.i >= len(p.s) {
return result, nil
}
switch p.s[p.i] {
case '+', '>', '~':
combinator = p.s[p.i]
p.i++
p.skipWhitespace()
case ',', ')':
// These characters can't begin a selector, but they can legally occur after one.
return result, nil
}
if combinator == 0 {
return result, nil
}
c, err = p.parseSimpleSelectorSequence()
if err != nil {
return nil, err
}
result = combinedSelector{first: result, combinator: combinator, second: c}
}
}
// parseSelectorGroup parses a group of selectors, separated by commas.
func (p *parser) parseSelectorGroup() (SelectorGroup, error) {
current, err := p.parseSelector()
if err != nil {
return nil, err
}
result := SelectorGroup{current}
for p.i < len(p.s) {
if p.s[p.i] != ',' {
break
}
p.i++
c, err := p.parseSelector()
if err != nil {
return nil, err
}
result = append(result, c)
}
return result, nil
}
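An illustrative sketch of the parser's entry point in use (not part of the vendored file); it exercises a few of the selector forms handled above through cascadia.Parse from selector.go:
package main

import (
    "fmt"

    "github.com/andybalholm/cascadia"
)

func main() {
    // Each string exercises a different branch of the parser above: type, id,
    // class, attribute and pseudo-class selectors, plus combinators.
    selectors := []string{"div", "#main", ".item", `a[href^="https"]`, "ul > li:nth-child(2n+1)"}
    for _, s := range selectors {
        if _, err := cascadia.Parse(s); err != nil {
            fmt.Println(s, "=> parse error:", err)
            continue
        }
        fmt.Println(s, "=> ok")
    }
    // Invalid input is reported as an error rather than a panic.
    if _, err := cascadia.Parse("li::"); err != nil {
        fmt.Println("invalid selector:", err)
    }
}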

833
vendor/github.com/andybalholm/cascadia/selector.go generated vendored Normal file
View File

@ -0,0 +1,833 @@
package cascadia
import (
"bytes"
"fmt"
"regexp"
"strings"
"golang.org/x/net/html"
)
// Matcher is the interface for basic selector functionality.
// Match returns whether a selector matches n.
type Matcher interface {
Match(n *html.Node) bool
}
// Sel is the interface for all the functionality provided by selectors.
// It is currently the same as Matcher, but other methods may be added in the
// future.
type Sel interface {
Matcher
Specificity() Specificity
}
// Parse parses a selector.
func Parse(sel string) (Sel, error) {
p := &parser{s: sel}
compiled, err := p.parseSelector()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// ParseGroup parses a selector, or a group of selectors separated by commas.
func ParseGroup(sel string) (SelectorGroup, error) {
p := &parser{s: sel}
compiled, err := p.parseSelectorGroup()
if err != nil {
return nil, err
}
if p.i < len(sel) {
return nil, fmt.Errorf("parsing %q: %d bytes left over", sel, len(sel)-p.i)
}
return compiled, nil
}
// A Selector is a function which tells whether a node matches or not.
//
// This type is maintained for compatibility; I recommend using the newer and
// more idiomatic interfaces Sel and Matcher.
type Selector func(*html.Node) bool
// Compile parses a selector and returns, if successful, a Selector object
// that can be used to match against html.Node objects.
func Compile(sel string) (Selector, error) {
compiled, err := ParseGroup(sel)
if err != nil {
return nil, err
}
return Selector(compiled.Match), nil
}
// MustCompile is like Compile, but panics instead of returning an error.
func MustCompile(sel string) Selector {
compiled, err := Compile(sel)
if err != nil {
panic(err)
}
return compiled
}
// MatchAll returns a slice of the nodes that match the selector,
// from n and its children.
func (s Selector) MatchAll(n *html.Node) []*html.Node {
return s.matchAllInto(n, nil)
}
func (s Selector) matchAllInto(n *html.Node, storage []*html.Node) []*html.Node {
if s(n) {
storage = append(storage, n)
}
for child := n.FirstChild; child != nil; child = child.NextSibling {
storage = s.matchAllInto(child, storage)
}
return storage
}
func queryInto(n *html.Node, m Matcher, storage []*html.Node) []*html.Node {
for child := n.FirstChild; child != nil; child = child.NextSibling {
if m.Match(child) {
storage = append(storage, child)
}
storage = queryInto(child, m, storage)
}
return storage
}
// QueryAll returns a slice of all the nodes that match m, from the descendants
// of n.
func QueryAll(n *html.Node, m Matcher) []*html.Node {
return queryInto(n, m, nil)
}
// Match returns true if the node matches the selector.
func (s Selector) Match(n *html.Node) bool {
return s(n)
}
// MatchFirst returns the first node that matches s, from n and its children.
func (s Selector) MatchFirst(n *html.Node) *html.Node {
if s.Match(n) {
return n
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
m := s.MatchFirst(c)
if m != nil {
return m
}
}
return nil
}
// Query returns the first node that matches m, from the descendants of n.
// If none matches, it returns nil.
func Query(n *html.Node, m Matcher) *html.Node {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if m.Match(c) {
return c
}
if matched := Query(c, m); matched != nil {
return matched
}
}
return nil
}
// Filter returns the nodes in nodes that match the selector.
func (s Selector) Filter(nodes []*html.Node) (result []*html.Node) {
for _, n := range nodes {
if s(n) {
result = append(result, n)
}
}
return result
}
// Filter returns the nodes that match m.
func Filter(nodes []*html.Node, m Matcher) (result []*html.Node) {
for _, n := range nodes {
if m.Match(n) {
result = append(result, n)
}
}
return result
}
type tagSelector struct {
tag string
}
// Matches elements with a given tag name.
func (t tagSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && n.Data == t.tag
}
func (c tagSelector) Specificity() Specificity {
return Specificity{0, 0, 1}
}
type classSelector struct {
class string
}
// Matches elements by class attribute.
func (t classSelector) Match(n *html.Node) bool {
return matchAttribute(n, "class", func(s string) bool {
return matchInclude(t.class, s)
})
}
func (c classSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type idSelector struct {
id string
}
// Matches elements by id attribute.
func (t idSelector) Match(n *html.Node) bool {
return matchAttribute(n, "id", func(s string) bool {
return s == t.id
})
}
func (c idSelector) Specificity() Specificity {
return Specificity{1, 0, 0}
}
type attrSelector struct {
key, val, operation string
regexp *regexp.Regexp
}
// Matches elements by attribute value.
func (t attrSelector) Match(n *html.Node) bool {
switch t.operation {
case "":
return matchAttribute(n, t.key, func(string) bool { return true })
case "=":
return matchAttribute(n, t.key, func(s string) bool { return s == t.val })
case "!=":
return attributeNotEqualMatch(t.key, t.val, n)
case "~=":
// matches elements where the attribute named key is a whitespace-separated list that includes val.
return matchAttribute(n, t.key, func(s string) bool { return matchInclude(t.val, s) })
case "|=":
return attributeDashMatch(t.key, t.val, n)
case "^=":
return attributePrefixMatch(t.key, t.val, n)
case "$=":
return attributeSuffixMatch(t.key, t.val, n)
case "*=":
return attributeSubstringMatch(t.key, t.val, n)
case "#=":
return attributeRegexMatch(t.key, t.regexp, n)
default:
panic(fmt.Sprintf("unsuported operation : %s", t.operation))
}
}
// matches elements where the attribute named key satisfies the function f.
func matchAttribute(n *html.Node, key string, f func(string) bool) bool {
if n.Type != html.ElementNode {
return false
}
for _, a := range n.Attr {
if a.Key == key && f(a.Val) {
return true
}
}
return false
}
// attributeNotEqualMatch matches elements where
// the attribute named key does not have the value val.
func attributeNotEqualMatch(key, val string, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
for _, a := range n.Attr {
if a.Key == key && a.Val == val {
return false
}
}
return true
}
// returns true if s is a whitespace-separated list that includes val.
func matchInclude(val, s string) bool {
for s != "" {
i := strings.IndexAny(s, " \t\r\n\f")
if i == -1 {
return s == val
}
if s[:i] == val {
return true
}
s = s[i+1:]
}
return false
}
// matches elements where the attribute named key equals val or starts with val plus a hyphen.
func attributeDashMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if s == val {
return true
}
if len(s) <= len(val) {
return false
}
if s[:len(val)] == val && s[len(val)] == '-' {
return true
}
return false
})
}
// attributePrefixMatch returns a Selector that matches elements where
// the attribute named key starts with val.
func attributePrefixMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.HasPrefix(s, val)
})
}
// attributeSuffixMatch matches elements where
// the attribute named key ends with val.
func attributeSuffixMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.HasSuffix(s, val)
})
}
// attributeSubstringMatch matches nodes where
// the attribute named key contains val.
func attributeSubstringMatch(key, val string, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
if strings.TrimSpace(s) == "" {
return false
}
return strings.Contains(s, val)
})
}
// attributeRegexMatch matches nodes where
// the attribute named key matches the regular expression rx
func attributeRegexMatch(key string, rx *regexp.Regexp, n *html.Node) bool {
return matchAttribute(n, key,
func(s string) bool {
return rx.MatchString(s)
})
}
func (c attrSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
// ---------------- Pseudo class selectors ----------------
// we use several concrete types of pseudo-class selectors
type relativePseudoClassSelector struct {
name string // one of "not", "has", "haschild"
match SelectorGroup
}
func (s relativePseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
switch s.name {
case "not":
// matches elements that do not match a.
return !s.match.Match(n)
case "has":
// matches elements with any descendant that matches a.
return hasDescendantMatch(n, s.match)
case "haschild":
// matches elements with a child that matches a.
return hasChildMatch(n, s.match)
default:
panic(fmt.Sprintf("unsupported relative pseudo class selector : %s", s.name))
}
}
// hasChildMatch returns whether n has any child that matches a.
func hasChildMatch(n *html.Node, a Matcher) bool {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if a.Match(c) {
return true
}
}
return false
}
// hasDescendantMatch performs a depth-first search of n's descendants,
// testing whether any of them match a. It returns true as soon as a match is
// found, or false if no match is found.
func hasDescendantMatch(n *html.Node, a Matcher) bool {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if a.Match(c) || (c.Type == html.ElementNode && hasDescendantMatch(c, a)) {
return true
}
}
return false
}
// Specificity returns the specificity of the most specific selectors
// in the pseudo-class arguments.
// See https://www.w3.org/TR/selectors/#specificity-rules
func (s relativePseudoClassSelector) Specificity() Specificity {
var max Specificity
for _, sel := range s.match {
newSpe := sel.Specificity()
if max.Less(newSpe) {
max = newSpe
}
}
return max
}
type containsPseudoClassSelector struct {
own bool
value string
}
func (s containsPseudoClassSelector) Match(n *html.Node) bool {
var text string
if s.own {
// matches nodes that directly contain the given text
text = strings.ToLower(nodeOwnText(n))
} else {
// matches nodes that contain the given text.
text = strings.ToLower(nodeText(n))
}
return strings.Contains(text, s.value)
}
func (s containsPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type regexpPseudoClassSelector struct {
own bool
regexp *regexp.Regexp
}
func (s regexpPseudoClassSelector) Match(n *html.Node) bool {
var text string
if s.own {
// matches nodes whose text directly matches the specified regular expression
text = nodeOwnText(n)
} else {
// matches nodes whose text matches the specified regular expression
text = nodeText(n)
}
return s.regexp.MatchString(text)
}
// writeNodeText writes the text contained in n and its descendants to b.
func writeNodeText(n *html.Node, b *bytes.Buffer) {
switch n.Type {
case html.TextNode:
b.WriteString(n.Data)
case html.ElementNode:
for c := n.FirstChild; c != nil; c = c.NextSibling {
writeNodeText(c, b)
}
}
}
// nodeText returns the text contained in n and its descendants.
func nodeText(n *html.Node) string {
var b bytes.Buffer
writeNodeText(n, &b)
return b.String()
}
// nodeOwnText returns the contents of the text nodes that are direct
// children of n.
func nodeOwnText(n *html.Node) string {
var b bytes.Buffer
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == html.TextNode {
b.WriteString(c.Data)
}
}
return b.String()
}
func (s regexpPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type nthPseudoClassSelector struct {
a, b int
last, ofType bool
}
func (s nthPseudoClassSelector) Match(n *html.Node) bool {
if s.a == 0 {
if s.last {
return simpleNthLastChildMatch(s.b, s.ofType, n)
} else {
return simpleNthChildMatch(s.b, s.ofType, n)
}
}
return nthChildMatch(s.a, s.b, s.last, s.ofType, n)
}
// nthChildMatch implements :nth-child(an+b).
// If last is true, implements :nth-last-child instead.
// If ofType is true, implements :nth-of-type instead.
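// For example, with a=2 and b=1 (":nth-child(2n+1)") the 1st, 3rd, 5th, ...
// element children of the parent match; with last=true the position is
// counted from the last child instead.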
func nthChildMatch(a, b int, last, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
i := -1
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if (c.Type != html.ElementNode) || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
i = count
if !last {
break
}
}
}
if i == -1 {
// This shouldn't happen, since n should always be one of its parent's children.
return false
}
if last {
i = count - i + 1
}
i -= b
if a == 0 {
return i == 0
}
return i%a == 0 && i/a >= 0
}
// simpleNthChildMatch implements :nth-child(b).
// If ofType is true, implements :nth-of-type instead.
func simpleNthChildMatch(b int, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
return count == b
}
if count >= b {
return false
}
}
return false
}
// simpleNthLastChildMatch implements :nth-last-child(b).
// If ofType is true, implements :nth-last-of-type instead.
func simpleNthLastChildMatch(b int, ofType bool, n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.LastChild; c != nil; c = c.PrevSibling {
if c.Type != html.ElementNode || (ofType && c.Data != n.Data) {
continue
}
count++
if c == n {
return count == b
}
if count >= b {
return false
}
}
return false
}
// Specificity for nth-child pseudo-class.
// Does not support a list of selectors
func (s nthPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type onlyChildPseudoClassSelector struct {
ofType bool
}
// Match implements :only-child.
// If `ofType` is true, it implements :only-of-type instead.
func (s onlyChildPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
parent := n.Parent
if parent == nil {
return false
}
if parent.Type == html.DocumentNode {
return false
}
count := 0
for c := parent.FirstChild; c != nil; c = c.NextSibling {
if (c.Type != html.ElementNode) || (s.ofType && c.Data != n.Data) {
continue
}
count++
if count > 1 {
return false
}
}
return count == 1
}
func (s onlyChildPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type inputPseudoClassSelector struct{}
// Matches input, select, textarea and button elements.
func (s inputPseudoClassSelector) Match(n *html.Node) bool {
return n.Type == html.ElementNode && (n.Data == "input" || n.Data == "select" || n.Data == "textarea" || n.Data == "button")
}
func (s inputPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type emptyElementPseudoClassSelector struct{}
// Matches empty elements.
func (s emptyElementPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
switch c.Type {
case html.ElementNode, html.TextNode:
return false
}
}
return true
}
func (s emptyElementPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type rootPseudoClassSelector struct{}
// Match implements :root
func (s rootPseudoClassSelector) Match(n *html.Node) bool {
if n.Type != html.ElementNode {
return false
}
if n.Parent == nil {
return false
}
return n.Parent.Type == html.DocumentNode
}
func (s rootPseudoClassSelector) Specificity() Specificity {
return Specificity{0, 1, 0}
}
type compoundSelector struct {
selectors []Sel
}
// Matches elements if each sub-selector matches.
func (t compoundSelector) Match(n *html.Node) bool {
if len(t.selectors) == 0 {
return n.Type == html.ElementNode
}
for _, sel := range t.selectors {
if !sel.Match(n) {
return false
}
}
return true
}
func (s compoundSelector) Specificity() Specificity {
var out Specificity
for _, sel := range s.selectors {
out = out.Add(sel.Specificity())
}
return out
}
type combinedSelector struct {
first Sel
combinator byte
second Sel
}
func (t combinedSelector) Match(n *html.Node) bool {
if t.first == nil {
return false // maybe we should panic
}
switch t.combinator {
case 0:
return t.first.Match(n)
case ' ':
return descendantMatch(t.first, t.second, n)
case '>':
return childMatch(t.first, t.second, n)
case '+':
return siblingMatch(t.first, t.second, true, n)
case '~':
return siblingMatch(t.first, t.second, false, n)
default:
panic("unknown combinator")
}
}
// matches an element if it matches d and has an ancestor that matches a.
func descendantMatch(a, d Matcher, n *html.Node) bool {
if !d.Match(n) {
return false
}
for p := n.Parent; p != nil; p = p.Parent {
if a.Match(p) {
return true
}
}
return false
}
// matches an element if it matches d and its parent matches a.
func childMatch(a, d Matcher, n *html.Node) bool {
return d.Match(n) && n.Parent != nil && a.Match(n.Parent)
}
// matches an element if it matches s2 and is preceded by an element that matches s1.
// If adjacent is true, the sibling must be immediately before the element.
func siblingMatch(s1, s2 Matcher, adjacent bool, n *html.Node) bool {
if !s2.Match(n) {
return false
}
if adjacent {
for n = n.PrevSibling; n != nil; n = n.PrevSibling {
if n.Type == html.TextNode || n.Type == html.CommentNode {
continue
}
return s1.Match(n)
}
return false
}
// Walk backwards looking for an element that matches s1.
for c := n.PrevSibling; c != nil; c = c.PrevSibling {
if s1.Match(c) {
return true
}
}
return false
}
func (s combinedSelector) Specificity() Specificity {
spec := s.first.Specificity()
if s.second != nil {
spec = spec.Add(s.second.Specificity())
}
return spec
}
// A SelectorGroup is a list of selectors, which matches if any of the
// individual selectors matches.
type SelectorGroup []Sel
// Match returns true if the node matches one of the single selectors.
func (s SelectorGroup) Match(n *html.Node) bool {
for _, sel := range s {
if sel.Match(n) {
return true
}
}
return false
}
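
The combinators and pseudo-class matchers above are assembled by the parser into `Sel` values. A minimal, hedged sketch of exercising this matching logic through cascadia's public helpers (`MustCompile`/`MatchAll` live elsewhere in the package and are assumed here, not shown in this hunk):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/andybalholm/cascadia"
	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(
		`<ul><li>a</li><li class="x">b</li><li>c</li></ul>`))
	if err != nil {
		panic(err)
	}
	// "li.x + li" compiles to a combinedSelector with the '+' combinator,
	// which is evaluated by siblingMatch above.
	sel := cascadia.MustCompile("li.x + li")
	for _, n := range sel.MatchAll(doc) {
		fmt.Println(n.FirstChild.Data) // prints "c"
	}
}
```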

26
vendor/github.com/andybalholm/cascadia/specificity.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package cascadia
// Specificity is the CSS specificity as defined in
// https://www.w3.org/TR/selectors/#specificity-rules
// with the convention Specificity = [A,B,C].
type Specificity [3]int
// Less returns true if s < other (strictly), false otherwise.
func (s Specificity) Less(other Specificity) bool {
for i := range s {
if s[i] < other[i] {
return true
}
if s[i] > other[i] {
return false
}
}
return false
}
func (s Specificity) Add(other Specificity) Specificity {
for i, sp := range other {
s[i] += sp
}
return s
}
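
A small illustration of how these two methods behave; the literal specificity values below are hypothetical, not taken from the vendored code:

```go
package main

import (
	"fmt"

	"github.com/andybalholm/cascadia"
)

func main() {
	// Roughly: "#nav" -> [1,0,0], ".item a" -> [0,1,1] (illustrative values).
	id := cascadia.Specificity{1, 0, 0}
	classAndTag := cascadia.Specificity{0, 1, 1}

	fmt.Println(classAndTag.Less(id))                  // true: comparison is lexicographic, so IDs win
	fmt.Println(id.Add(cascadia.Specificity{0, 0, 2})) // [1 0 2]: component-wise sum
}
```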

1
vendor/github.com/golang/groupcache/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
*~

19
vendor/github.com/golang/groupcache/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,19 @@
language: go
go_import_path: github.com/golang/groupcache
os: linux
dist: trusty
sudo: false
script:
- go test ./...
go:
- 1.9.x
- 1.10.x
- 1.11.x
- master
cache:
directories:
- $GOPATH/pkg

191
vendor/github.com/golang/groupcache/LICENSE generated vendored Normal file
View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

74
vendor/github.com/golang/groupcache/README.md generated vendored Normal file
View File

@ -0,0 +1,74 @@
# groupcache
## Summary
groupcache is a distributed caching and cache-filling library, intended as a
replacement for a pool of memcached nodes in many cases.
For API docs and examples, see http://godoc.org/github.com/golang/groupcache
## Comparison to memcached
### **Like memcached**, groupcache:
* shards by key to select which peer is responsible for that key
### **Unlike memcached**, groupcache:
* does not require running a separate set of servers, thus massively
reducing deployment/configuration pain. groupcache is a client
library as well as a server. It connects to its own peers, forming
a distributed cache.
* comes with a cache filling mechanism. Whereas memcached just says
"Sorry, cache miss", often resulting in a thundering herd of
database (or whatever) loads from an unbounded number of clients
(which has resulted in several fun outages), groupcache coordinates
cache fills such that only one load in one process of an entire
replicated set of processes populates the cache, then multiplexes
the loaded value to all callers.
* does not support versioned values. If key "foo" is value "bar",
key "foo" must always be "bar". There are neither cache expiration
times, nor explicit cache evictions. Thus there is also no CAS,
nor Increment/Decrement. This also means that groupcache....
* ... supports automatic mirroring of super-hot items to multiple
processes. This prevents memcached hot spotting where a machine's
CPU and/or NIC are overloaded by very popular keys/values.
* is currently only available for Go. It's very unlikely that I
(bradfitz@) will port the code to any other language.
## Loading process
In a nutshell, a groupcache lookup of **Get("foo")** looks like:
(On machine #5 of a set of N machines running the same code)
1. Is the value of "foo" in local memory because it's super hot? If so, use it.
2. Is the value of "foo" in local memory because peer #5 (the current
peer) is the owner of it? If so, use it.
3. Amongst all the peers in my set of N, am I the owner of the key
"foo"? (e.g. does it consistent hash to 5?) If so, load it. If
other callers come in, via the same process or via RPC requests
from peers, they block waiting for the load to finish and get the
same answer. If not, RPC to the peer that's the owner and get
the answer. If the RPC fails, just load it locally (still with
local dup suppression).
## Users
groupcache is in production use by dl.google.com (its original user),
parts of Blogger, parts of Google Code, parts of Google Fiber, parts
of Google production monitoring systems, etc.
## Presentations
See http://talks.golang.org/2013/oscon-dl.slide
## Help
Use the golang-nuts mailing list for any discussion or questions.
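
To make the loading process above concrete, here is a minimal, hedged usage sketch against the API vendored in this commit; the group name, cache size, and the stand-in data source are made up for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/groupcache"
)

func main() {
	// The getter is only invoked on a cache miss for keys this process owns.
	group := groupcache.NewGroup("thumbnails", 64<<20, groupcache.GetterFunc(
		func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
			// Stand-in for the real data source.
			return dest.SetString(strings.ToUpper(key))
		}))

	var value string
	if err := group.Get(nil, "foo", groupcache.StringSink(&value)); err != nil {
		panic(err)
	}
	fmt.Println(value) // "FOO": loaded once by the getter, then served from cache
}
```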

175
vendor/github.com/golang/groupcache/byteview.go generated vendored Normal file
View File

@ -0,0 +1,175 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"bytes"
"errors"
"io"
"strings"
)
// A ByteView holds an immutable view of bytes.
// Internally it wraps either a []byte or a string,
// but that detail is invisible to callers.
//
// A ByteView is meant to be used as a value type, not
// a pointer (like a time.Time).
type ByteView struct {
// If b is non-nil, b is used, else s is used.
b []byte
s string
}
// Len returns the view's length.
func (v ByteView) Len() int {
if v.b != nil {
return len(v.b)
}
return len(v.s)
}
// ByteSlice returns a copy of the data as a byte slice.
func (v ByteView) ByteSlice() []byte {
if v.b != nil {
return cloneBytes(v.b)
}
return []byte(v.s)
}
// String returns the data as a string, making a copy if necessary.
func (v ByteView) String() string {
if v.b != nil {
return string(v.b)
}
return v.s
}
// At returns the byte at index i.
func (v ByteView) At(i int) byte {
if v.b != nil {
return v.b[i]
}
return v.s[i]
}
// Slice slices the view between the provided from and to indices.
func (v ByteView) Slice(from, to int) ByteView {
if v.b != nil {
return ByteView{b: v.b[from:to]}
}
return ByteView{s: v.s[from:to]}
}
// SliceFrom slices the view from the provided index until the end.
func (v ByteView) SliceFrom(from int) ByteView {
if v.b != nil {
return ByteView{b: v.b[from:]}
}
return ByteView{s: v.s[from:]}
}
// Copy copies b into dest and returns the number of bytes copied.
func (v ByteView) Copy(dest []byte) int {
if v.b != nil {
return copy(dest, v.b)
}
return copy(dest, v.s)
}
// Equal returns whether the bytes in b are the same as the bytes in
// b2.
func (v ByteView) Equal(b2 ByteView) bool {
if b2.b == nil {
return v.EqualString(b2.s)
}
return v.EqualBytes(b2.b)
}
// EqualString returns whether the bytes in b are the same as the bytes
// in s.
func (v ByteView) EqualString(s string) bool {
if v.b == nil {
return v.s == s
}
l := v.Len()
if len(s) != l {
return false
}
for i, bi := range v.b {
if bi != s[i] {
return false
}
}
return true
}
// EqualBytes returns whether the bytes in b are the same as the bytes
// in b2.
func (v ByteView) EqualBytes(b2 []byte) bool {
if v.b != nil {
return bytes.Equal(v.b, b2)
}
l := v.Len()
if len(b2) != l {
return false
}
for i, bi := range b2 {
if bi != v.s[i] {
return false
}
}
return true
}
// Reader returns an io.ReadSeeker for the bytes in v.
func (v ByteView) Reader() io.ReadSeeker {
if v.b != nil {
return bytes.NewReader(v.b)
}
return strings.NewReader(v.s)
}
// ReadAt implements io.ReaderAt on the bytes in v.
func (v ByteView) ReadAt(p []byte, off int64) (n int, err error) {
if off < 0 {
return 0, errors.New("view: invalid offset")
}
if off >= int64(v.Len()) {
return 0, io.EOF
}
n = v.SliceFrom(int(off)).Copy(p)
if n < len(p) {
err = io.EOF
}
return
}
// WriteTo implements io.WriterTo on the bytes in v.
func (v ByteView) WriteTo(w io.Writer) (n int64, err error) {
var m int
if v.b != nil {
m, err = w.Write(v.b)
} else {
m, err = io.WriteString(w, v.s)
}
if err == nil && m < v.Len() {
err = io.ErrShortWrite
}
n = int64(m)
return
}

81
vendor/github.com/golang/groupcache/consistenthash/consistenthash.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package consistenthash provides an implementation of a ring hash.
package consistenthash
import (
"hash/crc32"
"sort"
"strconv"
)
type Hash func(data []byte) uint32
type Map struct {
hash Hash
replicas int
keys []int // Sorted
hashMap map[int]string
}
func New(replicas int, fn Hash) *Map {
m := &Map{
replicas: replicas,
hash: fn,
hashMap: make(map[int]string),
}
if m.hash == nil {
m.hash = crc32.ChecksumIEEE
}
return m
}
// IsEmpty returns true if there are no items available.
func (m *Map) IsEmpty() bool {
return len(m.keys) == 0
}
// Add adds some keys to the hash.
func (m *Map) Add(keys ...string) {
for _, key := range keys {
for i := 0; i < m.replicas; i++ {
hash := int(m.hash([]byte(strconv.Itoa(i) + key)))
m.keys = append(m.keys, hash)
m.hashMap[hash] = key
}
}
sort.Ints(m.keys)
}
// Get gets the closest item in the hash to the provided key.
func (m *Map) Get(key string) string {
if m.IsEmpty() {
return ""
}
hash := int(m.hash([]byte(key)))
// Binary search for appropriate replica.
idx := sort.Search(len(m.keys), func(i int) bool { return m.keys[i] >= hash })
// Means we have cycled back to the first replica.
if idx == len(m.keys) {
idx = 0
}
return m.hashMap[m.keys[idx]]
}
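
A short, hedged sketch of how this ring is used to pick a peer (HTTPPool below does the same internally); the peer URLs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/consistenthash"
)

func main() {
	// 50 virtual replicas per peer; nil selects the default crc32.ChecksumIEEE hash.
	ring := consistenthash.New(50, nil)
	ring.Add("http://10.0.0.1:8000", "http://10.0.0.2:8000", "http://10.0.0.3:8000")

	// The same key always maps to the same peer while the peer set is unchanged.
	fmt.Println(ring.Get("bookmark:https://status.im"))
}
```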

491
vendor/github.com/golang/groupcache/groupcache.go generated vendored Normal file
View File

@ -0,0 +1,491 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package groupcache provides a data loading mechanism with caching
// and de-duplication that works across a set of peer processes.
//
// Each data Get first consults its local cache, otherwise delegates
// to the requested key's canonical owner, which then checks its cache
// or finally gets the data. In the common case, many concurrent
// cache misses across a set of peers for the same key result in just
// one cache fill.
package groupcache
import (
"errors"
"math/rand"
"strconv"
"sync"
"sync/atomic"
pb "github.com/golang/groupcache/groupcachepb"
"github.com/golang/groupcache/lru"
"github.com/golang/groupcache/singleflight"
)
// A Getter loads data for a key.
type Getter interface {
// Get returns the value identified by key, populating dest.
//
// The returned data must be unversioned. That is, key must
// uniquely describe the loaded data, without an implicit
// current time, and without relying on cache expiration
// mechanisms.
Get(ctx Context, key string, dest Sink) error
}
// A GetterFunc implements Getter with a function.
type GetterFunc func(ctx Context, key string, dest Sink) error
func (f GetterFunc) Get(ctx Context, key string, dest Sink) error {
return f(ctx, key, dest)
}
var (
mu sync.RWMutex
groups = make(map[string]*Group)
initPeerServerOnce sync.Once
initPeerServer func()
)
// GetGroup returns the named group previously created with NewGroup, or
// nil if there's no such group.
func GetGroup(name string) *Group {
mu.RLock()
g := groups[name]
mu.RUnlock()
return g
}
// NewGroup creates a coordinated group-aware Getter from a Getter.
//
// The returned Getter tries (but does not guarantee) to run only one
// Get call at once for a given key across an entire set of peer
// processes. Concurrent callers both in the local process and in
// other processes receive copies of the answer once the original Get
// completes.
//
// The group name must be unique for each getter.
func NewGroup(name string, cacheBytes int64, getter Getter) *Group {
return newGroup(name, cacheBytes, getter, nil)
}
// If peers is nil, the peerPicker is called via a sync.Once to initialize it.
func newGroup(name string, cacheBytes int64, getter Getter, peers PeerPicker) *Group {
if getter == nil {
panic("nil Getter")
}
mu.Lock()
defer mu.Unlock()
initPeerServerOnce.Do(callInitPeerServer)
if _, dup := groups[name]; dup {
panic("duplicate registration of group " + name)
}
g := &Group{
name: name,
getter: getter,
peers: peers,
cacheBytes: cacheBytes,
loadGroup: &singleflight.Group{},
}
if fn := newGroupHook; fn != nil {
fn(g)
}
groups[name] = g
return g
}
// newGroupHook, if non-nil, is called right after a new group is created.
var newGroupHook func(*Group)
// RegisterNewGroupHook registers a hook that is run each time
// a group is created.
func RegisterNewGroupHook(fn func(*Group)) {
if newGroupHook != nil {
panic("RegisterNewGroupHook called more than once")
}
newGroupHook = fn
}
// RegisterServerStart registers a hook that is run when the first
// group is created.
func RegisterServerStart(fn func()) {
if initPeerServer != nil {
panic("RegisterServerStart called more than once")
}
initPeerServer = fn
}
func callInitPeerServer() {
if initPeerServer != nil {
initPeerServer()
}
}
// A Group is a cache namespace and its associated data, loaded and spread
// over a group of 1 or more machines.
type Group struct {
name string
getter Getter
peersOnce sync.Once
peers PeerPicker
cacheBytes int64 // limit for sum of mainCache and hotCache size
// mainCache is a cache of the keys for which this process
// (amongst its peers) is authoritative. That is, this cache
// contains keys which consistent hash on to this process's
// peer number.
mainCache cache
// hotCache contains keys/values for which this peer is not
// authoritative (otherwise they would be in mainCache), but
// are popular enough to warrant mirroring in this process to
// avoid going over the network to fetch from a peer. Having
// a hotCache avoids network hotspotting, where a peer's
// network card could become the bottleneck on a popular key.
// This cache is used sparingly to maximize the total number
// of key/value pairs that can be stored globally.
hotCache cache
// loadGroup ensures that each key is only fetched once
// (either locally or remotely), regardless of the number of
// concurrent callers.
loadGroup flightGroup
_ int32 // force Stats to be 8-byte aligned on 32-bit platforms
// Stats are statistics on the group.
Stats Stats
}
// flightGroup is defined as an interface which flightgroup.Group
// satisfies. We define this so that we may test with an alternate
// implementation.
type flightGroup interface {
// Done is called when Do is done.
Do(key string, fn func() (interface{}, error)) (interface{}, error)
}
// Stats are per-group statistics.
type Stats struct {
Gets AtomicInt // any Get request, including from peers
CacheHits AtomicInt // either cache was good
PeerLoads AtomicInt // either remote load or remote cache hit (not an error)
PeerErrors AtomicInt
Loads AtomicInt // (gets - cacheHits)
LoadsDeduped AtomicInt // after singleflight
LocalLoads AtomicInt // total good local loads
LocalLoadErrs AtomicInt // total bad local loads
ServerRequests AtomicInt // gets that came over the network from peers
}
// Name returns the name of the group.
func (g *Group) Name() string {
return g.name
}
func (g *Group) initPeers() {
if g.peers == nil {
g.peers = getPeers(g.name)
}
}
func (g *Group) Get(ctx Context, key string, dest Sink) error {
g.peersOnce.Do(g.initPeers)
g.Stats.Gets.Add(1)
if dest == nil {
return errors.New("groupcache: nil dest Sink")
}
value, cacheHit := g.lookupCache(key)
if cacheHit {
g.Stats.CacheHits.Add(1)
return setSinkView(dest, value)
}
// Optimization to avoid double unmarshalling or copying: keep
// track of whether the dest was already populated. One caller
// (if local) will set this; the losers will not. The common
// case will likely be one caller.
destPopulated := false
value, destPopulated, err := g.load(ctx, key, dest)
if err != nil {
return err
}
if destPopulated {
return nil
}
return setSinkView(dest, value)
}
// load loads key either by invoking the getter locally or by sending it to another machine.
func (g *Group) load(ctx Context, key string, dest Sink) (value ByteView, destPopulated bool, err error) {
g.Stats.Loads.Add(1)
viewi, err := g.loadGroup.Do(key, func() (interface{}, error) {
// Check the cache again because singleflight can only dedup calls
// that overlap concurrently. It's possible for 2 concurrent
// requests to miss the cache, resulting in 2 load() calls. An
// unfortunate goroutine scheduling would result in this callback
// being run twice, serially. If we don't check the cache again,
// cache.nbytes would be incremented below even though there will
// be only one entry for this key.
//
// Consider the following serialized event ordering for two
// goroutines in which this callback gets called twice for the
// same key:
// 1: Get("key")
// 2: Get("key")
// 1: lookupCache("key")
// 2: lookupCache("key")
// 1: load("key")
// 2: load("key")
// 1: loadGroup.Do("key", fn)
// 1: fn()
// 2: loadGroup.Do("key", fn)
// 2: fn()
if value, cacheHit := g.lookupCache(key); cacheHit {
g.Stats.CacheHits.Add(1)
return value, nil
}
g.Stats.LoadsDeduped.Add(1)
var value ByteView
var err error
if peer, ok := g.peers.PickPeer(key); ok {
value, err = g.getFromPeer(ctx, peer, key)
if err == nil {
g.Stats.PeerLoads.Add(1)
return value, nil
}
g.Stats.PeerErrors.Add(1)
// TODO(bradfitz): log the peer's error? keep
// log of the past few for /groupcachez? It's
// probably boring (normal task movement), so not
// worth logging I imagine.
}
value, err = g.getLocally(ctx, key, dest)
if err != nil {
g.Stats.LocalLoadErrs.Add(1)
return nil, err
}
g.Stats.LocalLoads.Add(1)
destPopulated = true // only one caller of load gets this return value
g.populateCache(key, value, &g.mainCache)
return value, nil
})
if err == nil {
value = viewi.(ByteView)
}
return
}
func (g *Group) getLocally(ctx Context, key string, dest Sink) (ByteView, error) {
err := g.getter.Get(ctx, key, dest)
if err != nil {
return ByteView{}, err
}
return dest.view()
}
func (g *Group) getFromPeer(ctx Context, peer ProtoGetter, key string) (ByteView, error) {
req := &pb.GetRequest{
Group: &g.name,
Key: &key,
}
res := &pb.GetResponse{}
err := peer.Get(ctx, req, res)
if err != nil {
return ByteView{}, err
}
value := ByteView{b: res.Value}
// TODO(bradfitz): use res.MinuteQps or something smart to
// conditionally populate hotCache. For now just do it some
// percentage of the time.
if rand.Intn(10) == 0 {
g.populateCache(key, value, &g.hotCache)
}
return value, nil
}
func (g *Group) lookupCache(key string) (value ByteView, ok bool) {
if g.cacheBytes <= 0 {
return
}
value, ok = g.mainCache.get(key)
if ok {
return
}
value, ok = g.hotCache.get(key)
return
}
func (g *Group) populateCache(key string, value ByteView, cache *cache) {
if g.cacheBytes <= 0 {
return
}
cache.add(key, value)
// Evict items from cache(s) if necessary.
for {
mainBytes := g.mainCache.bytes()
hotBytes := g.hotCache.bytes()
if mainBytes+hotBytes <= g.cacheBytes {
return
}
// TODO(bradfitz): this is good-enough-for-now logic.
// It should be something based on measurements and/or
// respecting the costs of different resources.
victim := &g.mainCache
if hotBytes > mainBytes/8 {
victim = &g.hotCache
}
victim.removeOldest()
}
}
// CacheType represents a type of cache.
type CacheType int
const (
// The MainCache is the cache for items that this peer is the
// owner for.
MainCache CacheType = iota + 1
// The HotCache is the cache for items that seem popular
// enough to replicate to this node, even though it's not the
// owner.
HotCache
)
// CacheStats returns stats about the provided cache within the group.
func (g *Group) CacheStats(which CacheType) CacheStats {
switch which {
case MainCache:
return g.mainCache.stats()
case HotCache:
return g.hotCache.stats()
default:
return CacheStats{}
}
}
// cache is a wrapper around an *lru.Cache that adds synchronization,
// makes values always be ByteView, and counts the size of all keys and
// values.
type cache struct {
mu sync.RWMutex
nbytes int64 // of all keys and values
lru *lru.Cache
nhit, nget int64
nevict int64 // number of evictions
}
func (c *cache) stats() CacheStats {
c.mu.RLock()
defer c.mu.RUnlock()
return CacheStats{
Bytes: c.nbytes,
Items: c.itemsLocked(),
Gets: c.nget,
Hits: c.nhit,
Evictions: c.nevict,
}
}
func (c *cache) add(key string, value ByteView) {
c.mu.Lock()
defer c.mu.Unlock()
if c.lru == nil {
c.lru = &lru.Cache{
OnEvicted: func(key lru.Key, value interface{}) {
val := value.(ByteView)
c.nbytes -= int64(len(key.(string))) + int64(val.Len())
c.nevict++
},
}
}
c.lru.Add(key, value)
c.nbytes += int64(len(key)) + int64(value.Len())
}
func (c *cache) get(key string) (value ByteView, ok bool) {
c.mu.Lock()
defer c.mu.Unlock()
c.nget++
if c.lru == nil {
return
}
vi, ok := c.lru.Get(key)
if !ok {
return
}
c.nhit++
return vi.(ByteView), true
}
func (c *cache) removeOldest() {
c.mu.Lock()
defer c.mu.Unlock()
if c.lru != nil {
c.lru.RemoveOldest()
}
}
func (c *cache) bytes() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
return c.nbytes
}
func (c *cache) items() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
return c.itemsLocked()
}
func (c *cache) itemsLocked() int64 {
if c.lru == nil {
return 0
}
return int64(c.lru.Len())
}
// An AtomicInt is an int64 to be accessed atomically.
type AtomicInt int64
// Add atomically adds n to i.
func (i *AtomicInt) Add(n int64) {
atomic.AddInt64((*int64)(i), n)
}
// Get atomically gets the value of i.
func (i *AtomicInt) Get() int64 {
return atomic.LoadInt64((*int64)(i))
}
func (i *AtomicInt) String() string {
return strconv.FormatInt(i.Get(), 10)
}
// CacheStats are returned by stats accessors on Group.
type CacheStats struct {
Bytes int64
Items int64
Gets int64
Hits int64
Evictions int64
}
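
A hedged sketch of reading the instrumentation exposed above; `reportStats` is a hypothetical helper, and `g` is assumed to be a *Group created with NewGroup as in the README example:

```go
package cachestats

import (
	"fmt"

	"github.com/golang/groupcache"
)

// reportStats prints a few counters from a group; not part of the vendored code.
func reportStats(g *groupcache.Group) {
	fmt.Printf("gets=%d hits=%d peer-loads=%d\n",
		g.Stats.Gets.Get(), g.Stats.CacheHits.Get(), g.Stats.PeerLoads.Get())

	mc := g.CacheStats(groupcache.MainCache)
	fmt.Printf("main cache: %d items, %d bytes, %d evictions\n",
		mc.Items, mc.Bytes, mc.Evictions)
}
```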

65
vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
// Code generated by protoc-gen-go.
// source: groupcache.proto
// DO NOT EDIT!
package groupcachepb
import proto "github.com/golang/protobuf/proto"
import json "encoding/json"
import math "math"
// Reference proto, json, and math imports to suppress error if they are not otherwise used.
var _ = proto.Marshal
var _ = &json.SyntaxError{}
var _ = math.Inf
type GetRequest struct {
Group *string `protobuf:"bytes,1,req,name=group" json:"group,omitempty"`
Key *string `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (m *GetRequest) GetGroup() string {
if m != nil && m.Group != nil {
return *m.Group
}
return ""
}
func (m *GetRequest) GetKey() string {
if m != nil && m.Key != nil {
return *m.Key
}
return ""
}
type GetResponse struct {
Value []byte `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
MinuteQps *float64 `protobuf:"fixed64,2,opt,name=minute_qps" json:"minute_qps,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *GetResponse) Reset() { *m = GetResponse{} }
func (m *GetResponse) String() string { return proto.CompactTextString(m) }
func (*GetResponse) ProtoMessage() {}
func (m *GetResponse) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func (m *GetResponse) GetMinuteQps() float64 {
if m != nil && m.MinuteQps != nil {
return *m.MinuteQps
}
return 0
}
func init() {
}

34
vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto generated vendored Normal file
View File

@ -0,0 +1,34 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto2";
package groupcachepb;
message GetRequest {
required string group = 1;
required string key = 2; // not actually required/guaranteed to be UTF-8
}
message GetResponse {
optional bytes value = 1;
optional double minute_qps = 2;
}
service GroupCache {
rpc Get(GetRequest) returns (GetResponse) {
};
}

227
vendor/github.com/golang/groupcache/http.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"bytes"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"sync"
"github.com/golang/groupcache/consistenthash"
pb "github.com/golang/groupcache/groupcachepb"
"github.com/golang/protobuf/proto"
)
const defaultBasePath = "/_groupcache/"
const defaultReplicas = 50
// HTTPPool implements PeerPicker for a pool of HTTP peers.
type HTTPPool struct {
// Context optionally specifies a context for the server to use when it
// receives a request.
// If nil, the server uses a nil Context.
Context func(*http.Request) Context
// Transport optionally specifies an http.RoundTripper for the client
// to use when it makes a request.
// If nil, the client uses http.DefaultTransport.
Transport func(Context) http.RoundTripper
// this peer's base URL, e.g. "https://example.net:8000"
self string
// opts specifies the options.
opts HTTPPoolOptions
mu sync.Mutex // guards peers and httpGetters
peers *consistenthash.Map
httpGetters map[string]*httpGetter // keyed by e.g. "http://10.0.0.2:8008"
}
// HTTPPoolOptions are the configurations of a HTTPPool.
type HTTPPoolOptions struct {
// BasePath specifies the HTTP path that will serve groupcache requests.
// If blank, it defaults to "/_groupcache/".
BasePath string
// Replicas specifies the number of key replicas on the consistent hash.
// If blank, it defaults to 50.
Replicas int
// HashFn specifies the hash function of the consistent hash.
// If blank, it defaults to crc32.ChecksumIEEE.
HashFn consistenthash.Hash
}
// NewHTTPPool initializes an HTTP pool of peers, and registers itself as a PeerPicker.
// For convenience, it also registers itself as an http.Handler with http.DefaultServeMux.
// The self argument should be a valid base URL that points to the current server,
// for example "http://example.net:8000".
func NewHTTPPool(self string) *HTTPPool {
p := NewHTTPPoolOpts(self, nil)
http.Handle(p.opts.BasePath, p)
return p
}
var httpPoolMade bool
// NewHTTPPoolOpts initializes an HTTP pool of peers with the given options.
// Unlike NewHTTPPool, this function does not register the created pool as an HTTP handler.
// The returned *HTTPPool implements http.Handler and must be registered using http.Handle.
func NewHTTPPoolOpts(self string, o *HTTPPoolOptions) *HTTPPool {
if httpPoolMade {
panic("groupcache: NewHTTPPool must be called only once")
}
httpPoolMade = true
p := &HTTPPool{
self: self,
httpGetters: make(map[string]*httpGetter),
}
if o != nil {
p.opts = *o
}
if p.opts.BasePath == "" {
p.opts.BasePath = defaultBasePath
}
if p.opts.Replicas == 0 {
p.opts.Replicas = defaultReplicas
}
p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
RegisterPeerPicker(func() PeerPicker { return p })
return p
}
// Set updates the pool's list of peers.
// Each peer value should be a valid base URL,
// for example "http://example.net:8000".
func (p *HTTPPool) Set(peers ...string) {
p.mu.Lock()
defer p.mu.Unlock()
p.peers = consistenthash.New(p.opts.Replicas, p.opts.HashFn)
p.peers.Add(peers...)
p.httpGetters = make(map[string]*httpGetter, len(peers))
for _, peer := range peers {
p.httpGetters[peer] = &httpGetter{transport: p.Transport, baseURL: peer + p.opts.BasePath}
}
}
func (p *HTTPPool) PickPeer(key string) (ProtoGetter, bool) {
p.mu.Lock()
defer p.mu.Unlock()
if p.peers.IsEmpty() {
return nil, false
}
if peer := p.peers.Get(key); peer != p.self {
return p.httpGetters[peer], true
}
return nil, false
}
func (p *HTTPPool) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Parse request.
if !strings.HasPrefix(r.URL.Path, p.opts.BasePath) {
panic("HTTPPool serving unexpected path: " + r.URL.Path)
}
parts := strings.SplitN(r.URL.Path[len(p.opts.BasePath):], "/", 2)
if len(parts) != 2 {
http.Error(w, "bad request", http.StatusBadRequest)
return
}
groupName := parts[0]
key := parts[1]
// Fetch the value for this group/key.
group := GetGroup(groupName)
if group == nil {
http.Error(w, "no such group: "+groupName, http.StatusNotFound)
return
}
var ctx Context
if p.Context != nil {
ctx = p.Context(r)
}
group.Stats.ServerRequests.Add(1)
var value []byte
err := group.Get(ctx, key, AllocatingByteSliceSink(&value))
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Write the value to the response body as a proto message.
body, err := proto.Marshal(&pb.GetResponse{Value: value})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/x-protobuf")
w.Write(body)
}
type httpGetter struct {
transport func(Context) http.RoundTripper
baseURL string
}
var bufferPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
func (h *httpGetter) Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error {
u := fmt.Sprintf(
"%v%v/%v",
h.baseURL,
url.QueryEscape(in.GetGroup()),
url.QueryEscape(in.GetKey()),
)
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return err
}
tr := http.DefaultTransport
if h.transport != nil {
tr = h.transport(context)
}
res, err := tr.RoundTrip(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return fmt.Errorf("server returned: %v", res.Status)
}
b := bufferPool.Get().(*bytes.Buffer)
b.Reset()
defer bufferPool.Put(b)
_, err = io.Copy(b, res.Body)
if err != nil {
return fmt.Errorf("reading response body: %v", err)
}
err = proto.Unmarshal(b.Bytes(), out)
if err != nil {
return fmt.Errorf("decoding response body: %v", err)
}
return nil
}
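
A hedged sketch of wiring peers together over HTTP with the pool above; the addresses are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"github.com/golang/groupcache"
)

func main() {
	self := "http://10.0.0.1:8000"

	// NewHTTPPool registers itself on http.DefaultServeMux under /_groupcache/
	// and as the process-wide PeerPicker.
	pool := groupcache.NewHTTPPool(self)
	pool.Set("http://10.0.0.1:8000", "http://10.0.0.2:8000", "http://10.0.0.3:8000")

	// Serve peer requests; group.Get calls in this process will now consult peers.
	log.Fatal(http.ListenAndServe(":8000", nil))
}
```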

133
vendor/github.com/golang/groupcache/lru/lru.go generated vendored Normal file
View File

@ -0,0 +1,133 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package lru implements an LRU cache.
package lru
import "container/list"
// Cache is an LRU cache. It is not safe for concurrent access.
type Cache struct {
// MaxEntries is the maximum number of cache entries before
// an item is evicted. Zero means no limit.
MaxEntries int
// OnEvicted optionally specifies a callback function to be
// executed when an entry is purged from the cache.
OnEvicted func(key Key, value interface{})
ll *list.List
cache map[interface{}]*list.Element
}
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}
type entry struct {
key Key
value interface{}
}
// New creates a new Cache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func New(maxEntries int) *Cache {
return &Cache{
MaxEntries: maxEntries,
ll: list.New(),
cache: make(map[interface{}]*list.Element),
}
}
// Add adds a value to the cache.
func (c *Cache) Add(key Key, value interface{}) {
if c.cache == nil {
c.cache = make(map[interface{}]*list.Element)
c.ll = list.New()
}
if ee, ok := c.cache[key]; ok {
c.ll.MoveToFront(ee)
ee.Value.(*entry).value = value
return
}
ele := c.ll.PushFront(&entry{key, value})
c.cache[key] = ele
if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
c.RemoveOldest()
}
}
// Get looks up a key's value from the cache.
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
if c.cache == nil {
return
}
if ele, hit := c.cache[key]; hit {
c.ll.MoveToFront(ele)
return ele.Value.(*entry).value, true
}
return
}
// Remove removes the provided key from the cache.
func (c *Cache) Remove(key Key) {
if c.cache == nil {
return
}
if ele, hit := c.cache[key]; hit {
c.removeElement(ele)
}
}
// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
if c.cache == nil {
return
}
ele := c.ll.Back()
if ele != nil {
c.removeElement(ele)
}
}
func (c *Cache) removeElement(e *list.Element) {
c.ll.Remove(e)
kv := e.Value.(*entry)
delete(c.cache, kv.key)
if c.OnEvicted != nil {
c.OnEvicted(kv.key, kv.value)
}
}
// Len returns the number of items in the cache.
func (c *Cache) Len() int {
if c.cache == nil {
return 0
}
return c.ll.Len()
}
// Clear purges all stored items from the cache.
func (c *Cache) Clear() {
if c.OnEvicted != nil {
for _, e := range c.cache {
kv := e.Value.(*entry)
c.OnEvicted(kv.key, kv.value)
}
}
c.ll = nil
c.cache = nil
}
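
A minimal illustration of the eviction behaviour of this cache; the keys and values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // at most two entries
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Println("evicted:", key)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Get("a")    // touch "a" so it becomes most recently used
	c.Add("c", 3) // evicts "b", the least recently used entry

	_, ok := c.Get("b")
	fmt.Println(ok) // false
}
```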

85
vendor/github.com/golang/groupcache/peers.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// peers.go defines how processes find and communicate with their peers.
package groupcache
import (
pb "github.com/golang/groupcache/groupcachepb"
)
// Context is an opaque value passed through calls to the
// ProtoGetter. It may be nil if your ProtoGetter implementation does
// not require a context.
type Context interface{}
// ProtoGetter is the interface that must be implemented by a peer.
type ProtoGetter interface {
Get(context Context, in *pb.GetRequest, out *pb.GetResponse) error
}
// PeerPicker is the interface that must be implemented to locate
// the peer that owns a specific key.
type PeerPicker interface {
// PickPeer returns the peer that owns the specific key
// and true to indicate that a remote peer was nominated.
// It returns nil, false if the key owner is the current peer.
PickPeer(key string) (peer ProtoGetter, ok bool)
}
// NoPeers is an implementation of PeerPicker that never finds a peer.
type NoPeers struct{}
func (NoPeers) PickPeer(key string) (peer ProtoGetter, ok bool) { return }
var (
portPicker func(groupName string) PeerPicker
)
// RegisterPeerPicker registers the peer initialization function.
// It is called once, when the first group is created.
// Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be
// called exactly once, but not both.
func RegisterPeerPicker(fn func() PeerPicker) {
if portPicker != nil {
panic("RegisterPeerPicker called more than once")
}
portPicker = func(_ string) PeerPicker { return fn() }
}
// RegisterPerGroupPeerPicker registers the peer initialization function,
// which takes the groupName, to be used in choosing a PeerPicker.
// It is called once, when the first group is created.
// Either RegisterPeerPicker or RegisterPerGroupPeerPicker should be
// called exactly once, but not both.
func RegisterPerGroupPeerPicker(fn func(groupName string) PeerPicker) {
if portPicker != nil {
panic("RegisterPeerPicker called more than once")
}
portPicker = fn
}
func getPeers(groupName string) PeerPicker {
if portPicker == nil {
return NoPeers{}
}
pk := portPicker(groupName)
if pk == nil {
pk = NoPeers{}
}
return pk
}

64
vendor/github.com/golang/groupcache/singleflight/singleflight.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package singleflight provides a duplicate function call suppression
// mechanism.
package singleflight
import "sync"
// call is an in-flight or completed Do call
type call struct {
wg sync.WaitGroup
val interface{}
err error
}
// Group represents a class of work and forms a namespace in which
// units of work can be executed with duplicate suppression.
type Group struct {
mu sync.Mutex // protects m
m map[string]*call // lazily initialized
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
g.mu.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
g.mu.Unlock()
c.wg.Wait()
return c.val, c.err
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.mu.Unlock()
c.val, c.err = fn()
c.wg.Done()
g.mu.Lock()
delete(g.m, key)
g.mu.Unlock()
return c.val, c.err
}
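
A hedged sketch of how duplicate suppression looks from the caller's side; the slow lookup is simulated:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/groupcache/singleflight"
)

func main() {
	var g singleflight.Group
	var calls int32

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			v, _ := g.Do("config", func() (interface{}, error) {
				atomic.AddInt32(&calls, 1)
				time.Sleep(50 * time.Millisecond) // simulate a slow load
				return "loaded", nil
			})
			fmt.Println(v)
		}()
	}
	wg.Wait()
	fmt.Println("loads:", atomic.LoadInt32(&calls)) // typically 1: duplicates waited
}
```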

322
vendor/github.com/golang/groupcache/sinks.go generated vendored Normal file
View File

@ -0,0 +1,322 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import (
"errors"
"github.com/golang/protobuf/proto"
)
// A Sink receives data from a Get call.
//
// Implementations of Getter must call exactly one of the Set methods
// on success.
type Sink interface {
// SetString sets the value to s.
SetString(s string) error
// SetBytes sets the value to the contents of v.
// The caller retains ownership of v.
SetBytes(v []byte) error
// SetProto sets the value to the encoded version of m.
// The caller retains ownership of m.
SetProto(m proto.Message) error
// view returns a frozen view of the bytes for caching.
view() (ByteView, error)
}
func cloneBytes(b []byte) []byte {
c := make([]byte, len(b))
copy(c, b)
return c
}
func setSinkView(s Sink, v ByteView) error {
// A viewSetter is a Sink that can also receive its value from
// a ByteView. This is a fast path to minimize copies when the
// item was already cached locally in memory (where it's
// cached as a ByteView)
type viewSetter interface {
setView(v ByteView) error
}
if vs, ok := s.(viewSetter); ok {
return vs.setView(v)
}
if v.b != nil {
return s.SetBytes(v.b)
}
return s.SetString(v.s)
}
// StringSink returns a Sink that populates the provided string pointer.
func StringSink(sp *string) Sink {
return &stringSink{sp: sp}
}
type stringSink struct {
sp *string
v ByteView
// TODO(bradfitz): track whether any Sets were called.
}
func (s *stringSink) view() (ByteView, error) {
// TODO(bradfitz): return an error if no Set was called
return s.v, nil
}
func (s *stringSink) SetString(v string) error {
s.v.b = nil
s.v.s = v
*s.sp = v
return nil
}
func (s *stringSink) SetBytes(v []byte) error {
return s.SetString(string(v))
}
func (s *stringSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
s.v.b = b
*s.sp = string(b)
return nil
}
// ByteViewSink returns a Sink that populates a ByteView.
func ByteViewSink(dst *ByteView) Sink {
if dst == nil {
panic("nil dst")
}
return &byteViewSink{dst: dst}
}
type byteViewSink struct {
dst *ByteView
// if this code ever ends up tracking that at least one set*
// method was called, don't make it an error to call set
// methods multiple times. Lorry's payload.go does that, and
// it makes sense. The comment at the top of this file about
// "exactly one of the Set methods" is overly strict. We
// really care about at least once (in a handler), but if
// multiple handlers fail (or multiple functions in a program
// using a Sink), it's okay to re-use the same one.
}
func (s *byteViewSink) setView(v ByteView) error {
*s.dst = v
return nil
}
func (s *byteViewSink) view() (ByteView, error) {
return *s.dst, nil
}
func (s *byteViewSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
*s.dst = ByteView{b: b}
return nil
}
func (s *byteViewSink) SetBytes(b []byte) error {
*s.dst = ByteView{b: cloneBytes(b)}
return nil
}
func (s *byteViewSink) SetString(v string) error {
*s.dst = ByteView{s: v}
return nil
}
// ProtoSink returns a sink that unmarshals binary proto values into m.
func ProtoSink(m proto.Message) Sink {
return &protoSink{
dst: m,
}
}
type protoSink struct {
dst proto.Message // authoritative value
typ string
v ByteView // encoded
}
func (s *protoSink) view() (ByteView, error) {
return s.v, nil
}
func (s *protoSink) SetBytes(b []byte) error {
err := proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = cloneBytes(b)
s.v.s = ""
return nil
}
func (s *protoSink) SetString(v string) error {
b := []byte(v)
err := proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = b
s.v.s = ""
return nil
}
func (s *protoSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
// TODO(bradfitz): optimize for same-task case more and write
// right through? would need to document ownership rules at
// the same time. but then we could just assign *dst = *m
// here. This works for now:
err = proto.Unmarshal(b, s.dst)
if err != nil {
return err
}
s.v.b = b
s.v.s = ""
return nil
}
// AllocatingByteSliceSink returns a Sink that allocates
// a byte slice to hold the received value and assigns
// it to *dst. The memory is not retained by groupcache.
func AllocatingByteSliceSink(dst *[]byte) Sink {
return &allocBytesSink{dst: dst}
}
type allocBytesSink struct {
dst *[]byte
v ByteView
}
func (s *allocBytesSink) view() (ByteView, error) {
return s.v, nil
}
func (s *allocBytesSink) setView(v ByteView) error {
if v.b != nil {
*s.dst = cloneBytes(v.b)
} else {
*s.dst = []byte(v.s)
}
s.v = v
return nil
}
func (s *allocBytesSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
return s.setBytesOwned(b)
}
func (s *allocBytesSink) SetBytes(b []byte) error {
return s.setBytesOwned(cloneBytes(b))
}
func (s *allocBytesSink) setBytesOwned(b []byte) error {
if s.dst == nil {
return errors.New("nil AllocatingByteSliceSink *[]byte dst")
}
*s.dst = cloneBytes(b) // another copy, protecting the read-only s.v.b view
s.v.b = b
s.v.s = ""
return nil
}
func (s *allocBytesSink) SetString(v string) error {
if s.dst == nil {
return errors.New("nil AllocatingByteSliceSink *[]byte dst")
}
*s.dst = []byte(v)
s.v.b = nil
s.v.s = v
return nil
}
// TruncatingByteSliceSink returns a Sink that writes up to len(*dst)
// bytes to *dst. If more bytes are available, they're silently
// truncated. If fewer bytes are available than len(*dst), *dst
// is shrunk to fit the number of bytes available.
func TruncatingByteSliceSink(dst *[]byte) Sink {
return &truncBytesSink{dst: dst}
}
type truncBytesSink struct {
dst *[]byte
v ByteView
}
func (s *truncBytesSink) view() (ByteView, error) {
return s.v, nil
}
func (s *truncBytesSink) SetProto(m proto.Message) error {
b, err := proto.Marshal(m)
if err != nil {
return err
}
return s.setBytesOwned(b)
}
func (s *truncBytesSink) SetBytes(b []byte) error {
return s.setBytesOwned(cloneBytes(b))
}
func (s *truncBytesSink) setBytesOwned(b []byte) error {
if s.dst == nil {
return errors.New("nil TruncatingByteSliceSink *[]byte dst")
}
n := copy(*s.dst, b)
if n < len(*s.dst) {
*s.dst = (*s.dst)[:n]
}
s.v.b = b
s.v.s = ""
return nil
}
func (s *truncBytesSink) SetString(v string) error {
if s.dst == nil {
return errors.New("nil TruncatingByteSliceSink *[]byte dst")
}
n := copy(*s.dst, v)
if n < len(*s.dst) {
*s.dst = (*s.dst)[:n]
}
s.v.b = nil
s.v.s = v
return nil
}
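For context, a minimal sketch of how these sinks are usually driven (not part of this file; it assumes an import of github.com/golang/groupcache, and the group name "thumbnails" and the loadThumbnail helper are hypothetical): the getter fills whatever Sink the caller handed to Get, and AllocatingByteSliceSink then returns a freshly allocated copy to the caller.
var thumbs = groupcache.NewGroup("thumbnails", 64<<20, groupcache.GetterFunc(
	func(ctx groupcache.Context, key string, dest groupcache.Sink) error {
		b, err := loadThumbnail(key) // hypothetical loader
		if err != nil {
			return err
		}
		return dest.SetBytes(b) // these sinks copy b, so ownership stays with the caller
	}))
func fetchThumb(key string) ([]byte, error) {
	var data []byte
	// The allocating sink copies the cached value into data; groupcache
	// keeps no reference to the returned slice.
	err := thumbs.Get(nil, key, groupcache.AllocatingByteSliceSink(&data))
	return data, err
}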

21
vendor/github.com/mat/besticon/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015-2020 Matthias Lüdtke, Hamburg - https://github.com/mat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

5114
vendor/github.com/mat/besticon/NOTICES generated vendored Normal file

File diff suppressed because it is too large.

571
vendor/github.com/mat/besticon/besticon/besticon.go generated vendored Normal file
View File

@ -0,0 +1,571 @@
// Package besticon includes functions
// for finding icons for a given web site.
package besticon
import (
"bytes"
"crypto/sha1"
"errors"
"fmt"
"image"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"sort"
"strings"
"time"
"image/color"
// Load supported image formats.
_ "image/gif"
_ "image/png"
_ "github.com/mat/besticon/ico"
"github.com/mat/besticon/colorfinder"
"github.com/PuerkitoBio/goquery"
"golang.org/x/net/html/charset"
"golang.org/x/net/idna"
"golang.org/x/net/publicsuffix"
)
var defaultFormats []string
const MinIconSize = 0
// TODO: Turn into env var: https://github.com/rendomnet/besticon/commit/c85867cc80c00c898053ce8daf40d51a93b9d39f#diff-37b57e3fdbe4246771791e86deb4d69dL41
const MaxIconSize = 500
// Icon holds icon information.
type Icon struct {
URL string `json:"url"`
Width int `json:"width"`
Height int `json:"height"`
Format string `json:"format"`
Bytes int `json:"bytes"`
Error error `json:"error"`
Sha1sum string `json:"sha1sum"`
ImageData []byte `json:",omitempty"`
}
type IconFinder struct {
FormatsAllowed []string
HostOnlyDomains []string
KeepImageBytes bool
icons []Icon
}
func (f *IconFinder) FetchIcons(url string) ([]Icon, error) {
url = strings.TrimSpace(url)
if !strings.HasPrefix(url, "http:") && !strings.HasPrefix(url, "https:") {
url = "http://" + url
}
url = f.stripIfNecessary(url)
var err error
if CacheEnabled() {
f.icons, err = resultFromCache(url)
} else {
f.icons, err = fetchIcons(url)
}
return f.Icons(), err
}
// stripIfNecessary removes everything from URL but the Scheme and Host
// part if URL.Host is found in HostOnlyDomains.
// This can be used for very popular domains like youtube.com where throttling is
// an issue.
func (f *IconFinder) stripIfNecessary(URL string) string {
u, e := url.Parse(URL)
if e != nil {
return URL
}
for _, h := range f.HostOnlyDomains {
if h == u.Host || h == "*" {
domainOnlyURL := url.URL{Scheme: u.Scheme, Host: u.Host}
return domainOnlyURL.String()
}
}
return URL
}
func (f *IconFinder) IconInSizeRange(r SizeRange) *Icon {
icons := f.Icons()
// Try to return smallest in range perfect..max
sortIcons(icons, false)
for _, ico := range icons {
if (ico.Width >= r.Perfect && ico.Height >= r.Perfect) && (ico.Width <= r.Max && ico.Height <= r.Max) {
return &ico
}
}
// Try to return biggest in range perfect..min
sortIcons(icons, true)
for _, ico := range icons {
if (ico.Width >= r.Min && ico.Height >= r.Min) && (ico.Width <= r.Perfect && ico.Height <= r.Perfect) {
return &ico
}
}
return nil
}
func (f *IconFinder) MainColorForIcons() *color.RGBA {
return MainColorForIcons(f.icons)
}
func (f *IconFinder) Icons() []Icon {
return discardUnwantedFormats(f.icons, f.FormatsAllowed)
}
func (ico *Icon) Image() (*image.Image, error) {
img, _, err := image.Decode(bytes.NewReader(ico.ImageData))
return &img, err
}
func discardUnwantedFormats(icons []Icon, wantedFormats []string) []Icon {
formats := defaultFormats
if len(wantedFormats) > 0 {
formats = wantedFormats
}
return filterIcons(icons, func(ico Icon) bool {
return includesString(formats, ico.Format)
})
}
type iconPredicate func(Icon) bool
func filterIcons(icons []Icon, pred iconPredicate) []Icon {
var result []Icon
for _, ico := range icons {
if pred(ico) {
result = append(result, ico)
}
}
return result
}
func includesString(arr []string, str string) bool {
for _, e := range arr {
if e == str {
return true
}
}
return false
}
func fetchIcons(siteURL string) ([]Icon, error) {
html, urlAfterRedirect, e := fetchHTML(siteURL)
if e != nil {
return nil, e
}
links, e := findIconLinks(urlAfterRedirect, html)
if e != nil {
return nil, e
}
icons := fetchAllIcons(links)
icons = rejectBrokenIcons(icons)
sortIcons(icons, true)
return icons, nil
}
const maxResponseBodySize = 10485760 // 10MB
func fetchHTML(url string) ([]byte, *url.URL, error) {
r, e := Get(url)
if e != nil {
return nil, nil, e
}
if !(r.StatusCode >= 200 && r.StatusCode < 300) {
return nil, nil, errors.New("besticon: not found")
}
b, e := GetBodyBytes(r)
if e != nil {
return nil, nil, e
}
if len(b) == 0 {
return nil, nil, errors.New("besticon: empty response")
}
reader := bytes.NewReader(b)
contentType := r.Header.Get("Content-Type")
utf8reader, e := charset.NewReader(reader, contentType)
if e != nil {
return nil, nil, e
}
utf8bytes, e := ioutil.ReadAll(utf8reader)
if e != nil {
return nil, nil, e
}
return utf8bytes, r.Request.URL, nil
}
var iconPaths = []string{
"/favicon.ico",
"/apple-touch-icon.png",
"/apple-touch-icon-precomposed.png",
}
type empty struct{}
func findIconLinks(siteURL *url.URL, html []byte) ([]string, error) {
doc, e := docFromHTML(html)
if e != nil {
return nil, e
}
baseURL := determineBaseURL(siteURL, doc)
links := make(map[string]empty)
// Add common, hard coded icon paths
for _, path := range iconPaths {
links[urlFromBase(baseURL, path)] = empty{}
}
// Add icons found in page
urls := extractIconTags(doc)
for _, u := range urls {
absoluteURL, e := absoluteURL(baseURL, u)
if e == nil {
links[absoluteURL] = empty{}
}
}
// Turn unique keys into array
var result []string
for u := range links {
result = append(result, u)
}
sort.Strings(result)
return result, nil
}
func determineBaseURL(siteURL *url.URL, doc *goquery.Document) *url.URL {
baseTagHref := extractBaseTag(doc)
if baseTagHref != "" {
baseTagURL, e := url.Parse(baseTagHref)
if e != nil {
return siteURL
}
return baseTagURL
}
return siteURL
}
func docFromHTML(html []byte) (*goquery.Document, error) {
doc, e := goquery.NewDocumentFromReader(bytes.NewReader(html))
if e != nil || doc == nil {
return nil, errParseHTML
}
return doc, nil
}
var csspaths = strings.Join([]string{
"link[rel='icon']",
"link[rel='shortcut icon']",
"link[rel='apple-touch-icon']",
"link[rel='apple-touch-icon-precomposed']",
// Capitalized variants, TODO: refactor
"link[rel='ICON']",
"link[rel='SHORTCUT ICON']",
"link[rel='APPLE-TOUCH-ICON']",
"link[rel='APPLE-TOUCH-ICON-PRECOMPOSED']",
}, ", ")
var errParseHTML = errors.New("besticon: could not parse html")
func extractBaseTag(doc *goquery.Document) string {
href := ""
doc.Find("head base[href]").First().Each(func(i int, s *goquery.Selection) {
href, _ = s.Attr("href")
})
return href
}
func extractIconTags(doc *goquery.Document) []string {
var hits []string
doc.Find(csspaths).Each(func(i int, s *goquery.Selection) {
href, ok := s.Attr("href")
if ok && href != "" {
hits = append(hits, href)
}
})
return hits
}
func MainColorForIcons(icons []Icon) *color.RGBA {
if len(icons) == 0 {
return nil
}
var icon *Icon
// Prefer .png and .gif
for _, ico := range icons {
if ico.Format == "png" || ico.Format == "gif" {
icon = &ico
break
}
}
// Try .ico else
if icon == nil {
for _, ico := range icons {
if ico.Format == "ico" {
icon = &ico
break
}
}
}
if icon == nil {
return nil
}
img, err := icon.Image()
if err != nil {
return nil
}
cf := colorfinder.ColorFinder{}
mainColor, err := cf.FindMainColor(*img)
if err != nil {
return nil
}
return &mainColor
}
func fetchAllIcons(urls []string) []Icon {
ch := make(chan Icon)
for _, u := range urls {
go func(u string) { ch <- fetchIconDetails(u) }(u)
}
var icons []Icon
for range urls {
icon := <-ch
icons = append(icons, icon)
}
return icons
}
func fetchIconDetails(url string) Icon {
i := Icon{URL: url}
response, e := Get(url)
if e != nil {
i.Error = e
return i
}
b, e := GetBodyBytes(response)
if e != nil {
i.Error = e
return i
}
cfg, format, e := image.DecodeConfig(bytes.NewReader(b))
if e != nil {
i.Error = fmt.Errorf("besticon: unknown image format: %s", e)
return i
}
i.Width = cfg.Width
i.Height = cfg.Height
i.Format = format
i.Bytes = len(b)
i.Sha1sum = sha1Sum(b)
if keepImageBytes {
i.ImageData = b
}
return i
}
func Get(urlstring string) (*http.Response, error) {
u, e := url.Parse(urlstring)
if e != nil {
return nil, e
}
// Maybe we can get rid of this conversion someday
// https://github.com/golang/go/issues/13835
u.Host, e = idna.ToASCII(u.Host)
if e != nil {
return nil, e
}
req, e := http.NewRequest("GET", u.String(), nil)
if e != nil {
return nil, e
}
setDefaultHeaders(req)
start := time.Now()
resp, err := client.Do(req)
end := time.Now()
duration := end.Sub(start)
if err != nil {
logger.Printf("Error: %s %s %s %.2fms",
req.Method,
req.URL,
err,
float64(duration)/float64(time.Millisecond),
)
} else {
logger.Printf("%s %s %d %.2fms %d",
req.Method,
req.URL,
resp.StatusCode,
float64(duration)/float64(time.Millisecond),
resp.ContentLength,
)
}
return resp, err
}
func GetBodyBytes(r *http.Response) ([]byte, error) {
limitReader := io.LimitReader(r.Body, maxResponseBodySize)
b, e := ioutil.ReadAll(limitReader)
r.Body.Close()
if len(b) >= maxResponseBodySize {
return nil, errors.New("body too large")
}
return b, e
}
func setDefaultHeaders(req *http.Request) {
req.Header.Set("Accept", "*/*")
req.Header.Set("User-Agent", getenvOrFallback("HTTP_USER_AGENT", "Mozilla/5.0 (iPhone; CPU iPhone OS 10_0 like Mac OS X) AppleWebKit/602.1.38 (KHTML, like Gecko) Version/10.0 Mobile/14A5297c Safari/602.1"))
}
func mustInitCookieJar() *cookiejar.Jar {
options := cookiejar.Options{
PublicSuffixList: publicsuffix.List,
}
jar, e := cookiejar.New(&options)
if e != nil {
panic(e)
}
return jar
}
func checkRedirect(req *http.Request, via []*http.Request) error {
setDefaultHeaders(req)
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
return nil
}
func absoluteURL(baseURL *url.URL, path string) (string, error) {
u, e := url.Parse(path)
if e != nil {
return "", e
}
u.Scheme = baseURL.Scheme
if u.Scheme == "" {
u.Scheme = "http"
}
if u.Host == "" {
u.Host = baseURL.Host
}
return baseURL.ResolveReference(u).String(), nil
}
func urlFromBase(baseURL *url.URL, path string) string {
u := *baseURL
u.Path = path
if u.Scheme == "" {
u.Scheme = "http"
}
return u.String()
}
func rejectBrokenIcons(icons []Icon) []Icon {
var result []Icon
for _, img := range icons {
if img.Error == nil && (img.Width > 1 && img.Height > 1) {
result = append(result, img)
}
}
return result
}
func sha1Sum(b []byte) string {
hash := sha1.New()
hash.Write(b)
bs := hash.Sum(nil)
return fmt.Sprintf("%x", bs)
}
var client *http.Client
var keepImageBytes bool
func init() {
duration, e := time.ParseDuration(getenvOrFallback("HTTP_CLIENT_TIMEOUT", "5s"))
if e != nil {
panic(e)
}
setHTTPClient(&http.Client{Timeout: duration})
// Needs to be kept in sync with those image/... imports
defaultFormats = []string{"png", "gif", "ico"}
}
func setHTTPClient(c *http.Client) {
c.Jar = mustInitCookieJar()
c.CheckRedirect = checkRedirect
client = c
}
var logger *log.Logger
// SetLogOutput sets the output for the package's logger.
func SetLogOutput(w io.Writer) {
logger = log.New(w, "http: ", log.LstdFlags|log.Lmicroseconds)
}
func init() {
SetLogOutput(os.Stdout)
keepImageBytes = true
}
func getenvOrFallback(key string, fallbackValue string) string {
value := os.Getenv(key)
if len(strings.TrimSpace(value)) != 0 {
return value
}
return fallbackValue
}
var BuildDate string // set via ldflags by Make
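A minimal usage sketch for the finder above (not part of the vendored file; example.org and the chosen size range are arbitrary): fetch all icons for a site and pick one between 48 and 256 pixels, preferring 96.
package main
import (
	"fmt"
	"log"
	"github.com/mat/besticon/besticon"
)
func main() {
	finder := besticon.IconFinder{}
	icons, err := finder.FetchIcons("https://example.org")
	if err != nil {
		log.Fatal(err)
	}
	if best := finder.IconInSizeRange(besticon.SizeRange{Min: 48, Perfect: 96, Max: 256}); best != nil {
		fmt.Println(best.URL, best.Width, best.Height, best.Format)
	} else if len(icons) > 0 {
		// FetchIcons sorts largest-first, so this is the biggest usable icon.
		fmt.Println("fallback:", icons[0].URL)
	}
}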

83
vendor/github.com/mat/besticon/besticon/caching.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
package besticon
import (
"encoding/json"
"errors"
"fmt"
"time"
"github.com/golang/groupcache"
)
var iconCache *groupcache.Group
type result struct {
Icons []Icon
Error string
}
func resultFromCache(siteURL string) ([]Icon, error) {
if iconCache == nil {
return fetchIcons(siteURL)
}
// Let results expire after a day
now := time.Now()
key := fmt.Sprintf("%d-%02d-%02d-%s", now.Year(), now.Month(), now.Day(), siteURL)
var data []byte
err := iconCache.Get(siteURL, key, groupcache.AllocatingByteSliceSink(&data))
if err != nil {
logger.Println("ERR:", err)
return fetchIcons(siteURL)
}
res := &result{}
err = json.Unmarshal(data, res)
if err != nil {
panic(err)
}
if res.Error != "" {
return res.Icons, errors.New(res.Error)
}
return res.Icons, nil
}
func generatorFunc(ctx groupcache.Context, key string, sink groupcache.Sink) error {
siteURL := ctx.(string)
icons, err := fetchIcons(siteURL)
if err != nil {
// Don't cache errors
return err
}
res := result{Icons: icons}
if err != nil {
res.Error = err.Error()
}
bytes, err := json.Marshal(res)
if err != nil {
panic(err)
}
sink.SetBytes(bytes)
return nil
}
func CacheEnabled() bool {
return iconCache != nil
}
// SetCacheMaxSize enables icon caching if sizeInMB > 0.
func SetCacheMaxSize(sizeInMB int64) {
if sizeInMB > 0 {
iconCache = groupcache.NewGroup("icons", sizeInMB<<20, groupcache.GetterFunc(generatorFunc))
} else {
iconCache = nil
}
}
// GetCacheStats returns cache statistics.
func GetCacheStats() groupcache.CacheStats {
return iconCache.CacheStats(groupcache.MainCache)
}
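A short usage sketch (hypothetical warmIconCache function; assumes the besticon and standard log packages are imported): the cache is opt-in and keyed per site and day, so enabling it once before the first fetch is enough.
func warmIconCache(siteURL string) {
	besticon.SetCacheMaxSize(32) // illustrative size in MB; 0 disables caching
	finder := besticon.IconFinder{}
	if _, err := finder.FetchIcons(siteURL); err != nil {
		log.Println("fetch:", err)
	}
	if besticon.CacheEnabled() {
		stats := besticon.GetCacheStats()
		log.Printf("icon cache: gets=%d hits=%d", stats.Gets, stats.Hits)
	}
}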

View File

@ -0,0 +1,13 @@
package besticon
import (
"os"
"strings"
)
// PopularSites holds sites we might use for examples and testing.
var PopularSites []string
func init() {
PopularSites = strings.Split(os.Getenv("POPULAR_SITES"), ",")
}

50
vendor/github.com/mat/besticon/besticon/size_range.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package besticon
import (
"errors"
"strconv"
"strings"
)
// SizeRange represents the desired icon dimensions
type SizeRange struct {
Min int
Perfect int
Max int
}
var errBadSize = errors.New("besticon: bad size")
// ParseSizeRange parses a string like 60..100..200 into a SizeRange
func ParseSizeRange(s string) (*SizeRange, error) {
parts := strings.SplitN(s, "..", 3)
switch len(parts) {
case 1:
size, ok := parseSize(parts[0])
if !ok {
return nil, errBadSize
}
return &SizeRange{size, size, MaxIconSize}, nil
case 3:
n1, ok1 := parseSize(parts[0])
n2, ok2 := parseSize(parts[1])
n3, ok3 := parseSize(parts[2])
if !ok1 || !ok2 || !ok3 {
return nil, errBadSize
}
if !((n1 <= n2) && (n2 <= n3)) {
return nil, errBadSize
}
return &SizeRange{n1, n2, n3}, nil
}
return nil, errBadSize
}
func parseSize(s string) (int, bool) {
minSize, err := strconv.Atoi(s)
if err != nil || minSize < MinIconSize || minSize > MaxIconSize {
return -1, false
}
return minSize, true
}
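For illustration (assuming the besticon import from the earlier sketch), the accepted inputs: a single value expands to min = perfect = value with max = MaxIconSize (500), and a three-part range must be ascending.
r1, _ := besticon.ParseSizeRange("120")         // &SizeRange{Min: 120, Perfect: 120, Max: 500}
r2, _ := besticon.ParseSizeRange("16..32..64")  // &SizeRange{Min: 16, Perfect: 32, Max: 64}
_, err := besticon.ParseSizeRange("64..32..16") // non-nil error: bounds must be ascending
_, _, _ = r1, r2, err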

35
vendor/github.com/mat/besticon/besticon/sorting.go generated vendored Normal file
View File

@ -0,0 +1,35 @@
package besticon
import "sort"
func sortIcons(icons []Icon, sizeDescending bool) {
// Order after sorting: (width/height, bytes, url)
sort.Stable(byURL(icons))
sort.Stable(byBytes(icons))
if sizeDescending {
sort.Stable(sort.Reverse(byWidthHeight(icons)))
} else {
sort.Stable(byWidthHeight(icons))
}
}
type byWidthHeight []Icon
func (a byWidthHeight) Len() int { return len(a) }
func (a byWidthHeight) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byWidthHeight) Less(i, j int) bool {
return (a[i].Width < a[j].Width) || (a[i].Height < a[j].Height)
}
type byBytes []Icon
func (a byBytes) Len() int { return len(a) }
func (a byBytes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byBytes) Less(i, j int) bool { return (a[i].Bytes < a[j].Bytes) }
type byURL []Icon
func (a byURL) Len() int { return len(a) }
func (a byURL) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byURL) Less(i, j int) bool { return (a[i].URL < a[j].URL) }
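Because sortIcons is unexported, the ordering is easiest to see from a hypothetical in-package snippet (the icon values are made up):
icons := []Icon{
	{URL: "b.png", Width: 32, Height: 32, Bytes: 900},
	{URL: "a.png", Width: 64, Height: 64, Bytes: 500},
	{URL: "c.png", Width: 32, Height: 32, Bytes: 400},
}
sortIcons(icons, true)
// Resulting order: a.png (64x64 wins on size), then c.png (fewer bytes),
// then b.png; the URL sort only breaks remaining ties.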

4
vendor/github.com/mat/besticon/besticon/version.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
package besticon
// Version string, same as VERSION, generated by Make
const VersionString = "v3.12.0"

View File

@ -0,0 +1,199 @@
package colorfinder
// colorfinder takes an image and tries to find its main color.
// It is a liberal port of
// http://pieroxy.net/blog/pages/color-finder/demo.html
import (
"fmt"
"image"
"io"
"log"
"math"
"net/http"
"os"
"strings"
"image/color"
// Load supported image formats
_ "github.com/mat/besticon/ico"
_ "image/gif"
_ "image/png"
)
func main() {
arg := os.Args[1]
var imageReader io.ReadCloser
if strings.HasPrefix(arg, "http") {
var err error
response, err := http.Get(arg)
if err != nil {
log.Fatal(err)
}
imageReader = response.Body
} else {
var err error
fmt.Fprintln(os.Stderr, "Reading "+arg+"...")
imageReader, err = os.Open(arg)
if err != nil {
log.Fatal(err)
}
}
defer imageReader.Close()
img, _, err := image.Decode(imageReader)
if err != nil {
log.Fatal(err)
}
cf := ColorFinder{}
c, err := cf.FindMainColor(img)
if err != nil {
log.Fatal(err)
}
fmt.Println("#" + ColorToHex(c))
}
type ColorFinder struct {
img image.Image
}
// FindMainColor tries to identify the most important color in the given logo.
func (cf *ColorFinder) FindMainColor(img image.Image) (color.RGBA, error) {
cf.img = img
colorMap := cf.buildColorMap()
sRGB := cf.findMainColor(colorMap, 6, nil)
sRGB = cf.findMainColor(colorMap, 4, &sRGB)
sRGB = cf.findMainColor(colorMap, 2, &sRGB)
sRGB = cf.findMainColor(colorMap, 0, &sRGB)
return sRGB.rgb, nil
}
const sampleThreshold = 160 * 160
func (cf *ColorFinder) buildColorMap() *map[color.RGBA]colorStats {
colorMap := make(map[color.RGBA]colorStats)
bounds := cf.img.Bounds()
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r, g, b, a := cf.img.At(x, y).RGBA()
rgb := color.RGBA{}
rgb.R = uint8(r >> shiftRGB)
rgb.G = uint8(g >> shiftRGB)
rgb.B = uint8(b >> shiftRGB)
rgb.A = uint8(a >> shiftRGB)
colrStats, exist := colorMap[rgb]
if exist {
colrStats.count++
} else {
colrStats := colorStats{count: 1, weight: weight(&rgb)}
if colrStats.weight <= 0 {
colrStats.weight = 1e-10
}
colorMap[rgb] = colrStats
}
}
}
return &colorMap
}
// Turns out using this is faster than using
// RGBAModel.Convert(img.At(x, y)).(color.RGBA)
const shiftRGB = uint8(8)
func (cf *ColorFinder) findMainColor(colorMap *map[color.RGBA]colorStats, shift uint, targetColor *shiftedRGBA) shiftedRGBA {
colorWeights := make(map[shiftedRGBA]float64)
bounds := cf.img.Bounds()
stepLength := stepLength(bounds)
for y := bounds.Min.Y; y < bounds.Max.Y; y += stepLength {
for x := bounds.Min.X; x < bounds.Max.X; x += stepLength {
r, g, b, a := cf.img.At(x, y).RGBA()
color := color.RGBA{}
color.R = uint8(r >> shiftRGB)
color.G = uint8(g >> shiftRGB)
color.B = uint8(b >> shiftRGB)
color.A = uint8(a >> shiftRGB)
if rgbMatchesTargetColor(targetColor, &color) {
increaseColorWeight(&colorWeights, colorMap, &color, shift)
}
}
}
maxColor := shiftedRGBA{}
maxWeight := 0.0
for sRGB, weight := range colorWeights {
if weight > maxWeight {
maxColor = sRGB
maxWeight = weight
}
}
return maxColor
}
func increaseColorWeight(weightedColors *map[shiftedRGBA]float64, colorMap *map[color.RGBA]colorStats, rgb *color.RGBA, shift uint) {
shiftedColor := color.RGBA{R: rgb.R >> shift, G: rgb.G >> shift, B: rgb.B >> shift}
pixelGroup := shiftedRGBA{rgb: shiftedColor, shift: shift}
colorStats := (*colorMap)[*rgb]
(*weightedColors)[pixelGroup] += colorStats.weight * float64(colorStats.count)
}
type shiftedRGBA struct {
rgb color.RGBA
shift uint
}
func rgbMatchesTargetColor(targetCol *shiftedRGBA, rgb *color.RGBA) bool {
if targetCol == nil {
return true
}
return targetCol.rgb.R == (rgb.R>>targetCol.shift) &&
targetCol.rgb.G == (rgb.G>>targetCol.shift) &&
targetCol.rgb.B == (rgb.B>>targetCol.shift)
}
type colorStats struct {
weight float64
count int64
}
func stepLength(bounds image.Rectangle) int {
width := bounds.Dx()
height := bounds.Dy()
pixelCount := width * height
var stepLength int
if pixelCount > sampleThreshold {
stepLength = 2
} else {
stepLength = 1
}
return stepLength
}
func weight(rgb *color.RGBA) float64 {
rr := float64(rgb.R)
gg := float64(rgb.G)
bb := float64(rgb.B)
return (abs(rr-gg)*abs(rr-gg)+abs(rr-bb)*abs(rr-bb)+abs(gg-bb)*abs(gg-bb))/65535.0*1000.0 + 1
}
func abs(n float64) float64 {
return math.Abs(float64(n))
}
func ColorToHex(c color.RGBA) string {
return fmt.Sprintf("%02x%02x%02x", c.R, c.G, c.B)
}

BIN
vendor/github.com/mat/besticon/ico/addthis.ico generated vendored Normal file

Binary file not shown. Size: 3.6 KiB

BIN
vendor/github.com/mat/besticon/ico/besticon.ico generated vendored Normal file

Binary file not shown. Size: 97 KiB

BIN
vendor/github.com/mat/besticon/ico/broken.ico generated vendored Normal file

Binary file not shown. Size: 32 B

BIN
vendor/github.com/mat/besticon/ico/codeplex.ico generated vendored Normal file

Binary file not shown. Size: 36 KiB

BIN
vendor/github.com/mat/besticon/ico/favicon.ico generated vendored Normal file

Binary file not shown. Size: 2.7 KiB

BIN
vendor/github.com/mat/besticon/ico/github.ico generated vendored Normal file

Binary file not shown. Size: 6.4 KiB

261
vendor/github.com/mat/besticon/ico/ico.go generated vendored Normal file
View File

@ -0,0 +1,261 @@
// Package ico registers image.Decode and DecodeConfig support
// for the icon (container) format.
package ico
import (
"bytes"
"encoding/binary"
"errors"
"image"
"io"
"io/ioutil"
"image/png"
"golang.org/x/image/bmp"
)
type icondir struct {
Reserved uint16
Type uint16
Count uint16
Entries []icondirEntry
}
type icondirEntry struct {
Width byte
Height byte
PaletteCount byte
Reserved byte
ColorPlanes uint16
BitsPerPixel uint16
Size uint32
Offset uint32
}
func (dir *icondir) FindBestIcon() *icondirEntry {
if len(dir.Entries) == 0 {
return nil
}
best := dir.Entries[0]
for _, e := range dir.Entries {
if (e.width() > best.width()) && (e.height() > best.height()) {
best = e
}
}
return &best
}
// ParseIco parses the icon and returns meta information for the icons as icondir.
func ParseIco(r io.Reader) (*icondir, error) {
dir := icondir{}
var err error
err = binary.Read(r, binary.LittleEndian, &dir.Reserved)
if err != nil {
return nil, err
}
err = binary.Read(r, binary.LittleEndian, &dir.Type)
if err != nil {
return nil, err
}
err = binary.Read(r, binary.LittleEndian, &dir.Count)
if err != nil {
return nil, err
}
for i := uint16(0); i < dir.Count; i++ {
entry := icondirEntry{}
e := parseIcondirEntry(r, &entry)
if e != nil {
return nil, e
}
dir.Entries = append(dir.Entries, entry)
}
return &dir, err
}
func parseIcondirEntry(r io.Reader, e *icondirEntry) error {
err := binary.Read(r, binary.LittleEndian, e)
if err != nil {
return err
}
return nil
}
type dibHeader struct {
dibHeaderSize uint32
width uint32
height uint32
}
func (e *icondirEntry) ColorCount() int {
if e.PaletteCount == 0 {
return 256
}
return int(e.PaletteCount)
}
func (e *icondirEntry) width() int {
if e.Width == 0 {
return 256
}
return int(e.Width)
}
func (e *icondirEntry) height() int {
if e.Height == 0 {
return 256
}
return int(e.Height)
}
// DecodeConfig returns just the dimensions of the largest image
// contained in the icon without decoding the entire icon file.
func DecodeConfig(r io.Reader) (image.Config, error) {
dir, err := ParseIco(r)
if err != nil {
return image.Config{}, err
}
best := dir.FindBestIcon()
if best == nil {
return image.Config{}, errInvalid
}
return image.Config{Width: best.width(), Height: best.height()}, nil
}
// The bitmap header structure we read from an icondirEntry
type bitmapHeaderRead struct {
Size uint32
Width uint32
Height uint32
Planes uint16
BitCount uint16
Compression uint32
ImageSize uint32
XPixelsPerMeter uint32
YPixelsPerMeter uint32
ColorsUsed uint32
ColorsImportant uint32
}
// The bitmap header structure we need to generate for bmp.Decode()
type bitmapHeaderWrite struct {
sigBM [2]byte
fileSize uint32
resverved [2]uint16
pixOffset uint32
Size uint32
Width uint32
Height uint32
Planes uint16
BitCount uint16
Compression uint32
ImageSize uint32
XPixelsPerMeter uint32
YPixelsPerMeter uint32
ColorsUsed uint32
ColorsImportant uint32
}
var errInvalid = errors.New("ico: invalid ICO image")
// Decode returns the largest image contained in the icon
// which might be a bmp or png
func Decode(r io.Reader) (image.Image, error) {
icoBytes, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
r = bytes.NewReader(icoBytes)
dir, err := ParseIco(r)
if err != nil {
return nil, errInvalid
}
best := dir.FindBestIcon()
if best == nil {
return nil, errInvalid
}
return parseImage(best, icoBytes)
}
func parseImage(entry *icondirEntry, icoBytes []byte) (image.Image, error) {
r := bytes.NewReader(icoBytes)
r.Seek(int64(entry.Offset), 0)
// Try PNG first then BMP
img, err := png.Decode(r)
if err != nil {
return parseBMP(entry, icoBytes)
}
return img, nil
}
func parseBMP(entry *icondirEntry, icoBytes []byte) (image.Image, error) {
bmpBytes, err := makeFullBMPBytes(entry, icoBytes)
if err != nil {
return nil, err
}
return bmp.Decode(bmpBytes)
}
func makeFullBMPBytes(entry *icondirEntry, icoBytes []byte) (*bytes.Buffer, error) {
r := bytes.NewReader(icoBytes)
r.Seek(int64(entry.Offset), 0)
var err error
h := bitmapHeaderRead{}
err = binary.Read(r, binary.LittleEndian, &h)
if err != nil {
return nil, err
}
if h.Size != 40 || h.Planes != 1 {
return nil, errInvalid
}
var pixOffset uint32
if h.ColorsUsed == 0 && h.BitCount <= 8 {
pixOffset = 14 + 40 + 4*(1<<h.BitCount)
} else {
pixOffset = 14 + 40 + 4*h.ColorsUsed
}
writeHeader := &bitmapHeaderWrite{
sigBM: [2]byte{'B', 'M'},
fileSize: 14 + 40 + uint32(len(icoBytes)), // correct? important?
pixOffset: pixOffset,
Size: 40,
Width: uint32(h.Width),
Height: uint32(h.Height / 2),
Planes: h.Planes,
BitCount: h.BitCount,
Compression: h.Compression,
ColorsUsed: h.ColorsUsed,
ColorsImportant: h.ColorsImportant,
}
buf := new(bytes.Buffer)
if err = binary.Write(buf, binary.LittleEndian, writeHeader); err != nil {
return nil, err
}
io.CopyN(buf, r, int64(entry.Size))
return buf, nil
}
const icoHeader = "\x00\x00\x01\x00"
func init() {
image.RegisterFormat("ico", icoHeader, Decode, DecodeConfig)
}
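A minimal sketch of using the registration above (icon.ico is a hypothetical local file): once this package is blank-imported, the standard image.DecodeConfig and image.Decode handle .ico data transparently.
package main
import (
	"fmt"
	"image"
	"log"
	"os"
	_ "github.com/mat/besticon/ico" // registers the "ico" format via init()
)
func main() {
	f, err := os.Open("icon.ico")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	cfg, format, err := image.DecodeConfig(f) // parses only the icondir metadata
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(format, cfg.Width, cfg.Height) // e.g. "ico 256 256"
}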

BIN
vendor/github.com/mat/besticon/ico/wowhead.ico generated vendored Normal file

Binary file not shown. Size: 2.5 KiB

3
vendor/golang.org/x/image/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/image/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/image/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/image/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

219
vendor/golang.org/x/image/bmp/reader.go generated vendored Normal file
View File

@ -0,0 +1,219 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bmp implements a BMP image decoder and encoder.
//
// The BMP specification is at http://www.digicamsoft.com/bmp/bmp.html.
package bmp // import "golang.org/x/image/bmp"
import (
"errors"
"image"
"image/color"
"io"
)
// ErrUnsupported means that the input BMP image uses a valid but unsupported
// feature.
var ErrUnsupported = errors.New("bmp: unsupported BMP image")
func readUint16(b []byte) uint16 {
return uint16(b[0]) | uint16(b[1])<<8
}
func readUint32(b []byte) uint32 {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
// decodePaletted reads an 8 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodePaletted(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
paletted := image.NewPaletted(image.Rect(0, 0, c.Width, c.Height), c.ColorModel.(color.Palette))
if c.Width == 0 || c.Height == 0 {
return paletted, nil
}
var tmp [4]byte
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := paletted.Pix[y*paletted.Stride : y*paletted.Stride+c.Width]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
// Each row is 4-byte aligned.
if c.Width%4 != 0 {
_, err := io.ReadFull(r, tmp[:4-c.Width%4])
if err != nil {
return nil, err
}
}
}
return paletted, nil
}
// decodeRGB reads a 24 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeRGB(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
// There are 3 bytes per pixel, and each row is 4-byte aligned.
b := make([]byte, (3*c.Width+3)&^3)
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
if _, err := io.ReadFull(r, b); err != nil {
return nil, err
}
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
for i, j := 0, 0; i < len(p); i, j = i+4, j+3 {
// BMP images are stored in BGR order rather than RGB order.
p[i+0] = b[j+2]
p[i+1] = b[j+1]
p[i+2] = b[j+0]
p[i+3] = 0xFF
}
}
return rgba, nil
}
// decodeNRGBA reads a 32 bit-per-pixel BMP image from r.
// If topDown is false, the image rows will be read bottom-up.
func decodeNRGBA(r io.Reader, c image.Config, topDown bool) (image.Image, error) {
rgba := image.NewNRGBA(image.Rect(0, 0, c.Width, c.Height))
if c.Width == 0 || c.Height == 0 {
return rgba, nil
}
y0, y1, yDelta := c.Height-1, -1, -1
if topDown {
y0, y1, yDelta = 0, c.Height, +1
}
for y := y0; y != y1; y += yDelta {
p := rgba.Pix[y*rgba.Stride : y*rgba.Stride+c.Width*4]
if _, err := io.ReadFull(r, p); err != nil {
return nil, err
}
for i := 0; i < len(p); i += 4 {
// BMP images are stored in BGRA order rather than RGBA order.
p[i+0], p[i+2] = p[i+2], p[i+0]
}
}
return rgba, nil
}
// Decode reads a BMP image from r and returns it as an image.Image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func Decode(r io.Reader) (image.Image, error) {
c, bpp, topDown, err := decodeConfig(r)
if err != nil {
return nil, err
}
switch bpp {
case 8:
return decodePaletted(r, c, topDown)
case 24:
return decodeRGB(r, c, topDown)
case 32:
return decodeNRGBA(r, c, topDown)
}
panic("unreachable")
}
// DecodeConfig returns the color model and dimensions of a BMP image without
// decoding the entire image.
// Limitation: The file must be 8, 24 or 32 bits per pixel.
func DecodeConfig(r io.Reader) (image.Config, error) {
config, _, _, err := decodeConfig(r)
return config, err
}
func decodeConfig(r io.Reader) (config image.Config, bitsPerPixel int, topDown bool, err error) {
// We only support those BMP images that are a BITMAPFILEHEADER
// immediately followed by a BITMAPINFOHEADER.
const (
fileHeaderLen = 14
infoHeaderLen = 40
v4InfoHeaderLen = 108
v5InfoHeaderLen = 124
)
var b [1024]byte
if _, err := io.ReadFull(r, b[:fileHeaderLen+4]); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return image.Config{}, 0, false, err
}
if string(b[:2]) != "BM" {
return image.Config{}, 0, false, errors.New("bmp: invalid format")
}
offset := readUint32(b[10:14])
infoLen := readUint32(b[14:18])
if infoLen != infoHeaderLen && infoLen != v4InfoHeaderLen && infoLen != v5InfoHeaderLen {
return image.Config{}, 0, false, ErrUnsupported
}
if _, err := io.ReadFull(r, b[fileHeaderLen+4:fileHeaderLen+infoLen]); err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return image.Config{}, 0, false, err
}
width := int(int32(readUint32(b[18:22])))
height := int(int32(readUint32(b[22:26])))
if height < 0 {
height, topDown = -height, true
}
if width < 0 || height < 0 {
return image.Config{}, 0, false, ErrUnsupported
}
// We only support 1 plane and 8, 24 or 32 bits per pixel and no
// compression.
planes, bpp, compression := readUint16(b[26:28]), readUint16(b[28:30]), readUint32(b[30:34])
// if compression is set to BITFIELDS, but the bitmask is set to the default bitmask
// that would be used if compression was set to 0, we can continue as if compression was 0
if compression == 3 && infoLen > infoHeaderLen &&
readUint32(b[54:58]) == 0xff0000 && readUint32(b[58:62]) == 0xff00 &&
readUint32(b[62:66]) == 0xff && readUint32(b[66:70]) == 0xff000000 {
compression = 0
}
if planes != 1 || compression != 0 {
return image.Config{}, 0, false, ErrUnsupported
}
switch bpp {
case 8:
if offset != fileHeaderLen+infoLen+256*4 {
return image.Config{}, 0, false, ErrUnsupported
}
_, err = io.ReadFull(r, b[:256*4])
if err != nil {
return image.Config{}, 0, false, err
}
pcm := make(color.Palette, 256)
for i := range pcm {
// BMP images are stored in BGR order rather than RGB order.
// Every 4th byte is padding.
pcm[i] = color.RGBA{b[4*i+2], b[4*i+1], b[4*i+0], 0xFF}
}
return image.Config{ColorModel: pcm, Width: width, Height: height}, 8, topDown, nil
case 24:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 24, topDown, nil
case 32:
if offset != fileHeaderLen+infoLen {
return image.Config{}, 0, false, ErrUnsupported
}
return image.Config{ColorModel: color.RGBAModel, Width: width, Height: height}, 32, topDown, nil
}
return image.Config{}, 0, false, ErrUnsupported
}
func init() {
image.RegisterFormat("bmp", "BM????\x00\x00\x00\x00", Decode, DecodeConfig)
}
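A small sketch of the decoder API (logo.bmp is a hypothetical local file): DecodeConfig reads only the headers, while Decode reads the pixel data; both reject anything other than uncompressed 8-, 24- or 32-bit BMPs.
package main
import (
	"fmt"
	"log"
	"os"
	"golang.org/x/image/bmp"
)
func main() {
	f, err := os.Open("logo.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	cfg, err := bmp.DecodeConfig(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Width, cfg.Height)
}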

262
vendor/golang.org/x/image/bmp/writer.go generated vendored Normal file
View File

@ -0,0 +1,262 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bmp
import (
"encoding/binary"
"errors"
"image"
"io"
)
type header struct {
sigBM [2]byte
fileSize uint32
resverved [2]uint16
pixOffset uint32
dibHeaderSize uint32
width uint32
height uint32
colorPlane uint16
bpp uint16
compression uint32
imageSize uint32
xPixelsPerMeter uint32
yPixelsPerMeter uint32
colorUse uint32
colorImportant uint32
}
func encodePaletted(w io.Writer, pix []uint8, dx, dy, stride, step int) error {
var padding []byte
if dx < step {
padding = make([]byte, step-dx)
}
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx
if _, err := w.Write(pix[min:max]); err != nil {
return err
}
if padding != nil {
if _, err := w.Write(padding); err != nil {
return err
}
}
}
return nil
}
func encodeRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error {
buf := make([]byte, step)
if opaque {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
} else {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
a := uint32(pix[i+3])
if a == 0 {
buf[off+2] = 0
buf[off+1] = 0
buf[off+0] = 0
buf[off+3] = 0
off += 4
continue
} else if a == 0xff {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
buf[off+3] = 0xff
off += 4
continue
}
buf[off+2] = uint8(((uint32(pix[i+0]) * 0xffff) / a) >> 8)
buf[off+1] = uint8(((uint32(pix[i+1]) * 0xffff) / a) >> 8)
buf[off+0] = uint8(((uint32(pix[i+2]) * 0xffff) / a) >> 8)
buf[off+3] = uint8(a)
off += 4
}
if _, err := w.Write(buf); err != nil {
return err
}
}
}
return nil
}
func encodeNRGBA(w io.Writer, pix []uint8, dx, dy, stride, step int, opaque bool) error {
buf := make([]byte, step)
if opaque {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
} else {
for y := dy - 1; y >= 0; y-- {
min := y*stride + 0
max := y*stride + dx*4
off := 0
for i := min; i < max; i += 4 {
buf[off+2] = pix[i+0]
buf[off+1] = pix[i+1]
buf[off+0] = pix[i+2]
buf[off+3] = pix[i+3]
off += 4
}
if _, err := w.Write(buf); err != nil {
return err
}
}
}
return nil
}
func encode(w io.Writer, m image.Image, step int) error {
b := m.Bounds()
buf := make([]byte, step)
for y := b.Max.Y - 1; y >= b.Min.Y; y-- {
off := 0
for x := b.Min.X; x < b.Max.X; x++ {
r, g, b, _ := m.At(x, y).RGBA()
buf[off+2] = byte(r >> 8)
buf[off+1] = byte(g >> 8)
buf[off+0] = byte(b >> 8)
off += 3
}
if _, err := w.Write(buf); err != nil {
return err
}
}
return nil
}
// Encode writes the image m to w in BMP format.
func Encode(w io.Writer, m image.Image) error {
d := m.Bounds().Size()
if d.X < 0 || d.Y < 0 {
return errors.New("bmp: negative bounds")
}
h := &header{
sigBM: [2]byte{'B', 'M'},
fileSize: 14 + 40,
pixOffset: 14 + 40,
dibHeaderSize: 40,
width: uint32(d.X),
height: uint32(d.Y),
colorPlane: 1,
}
var step int
var palette []byte
var opaque bool
switch m := m.(type) {
case *image.Gray:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < 256; i++ {
palette[i*4+0] = uint8(i)
palette[i*4+1] = uint8(i)
palette[i*4+2] = uint8(i)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
case *image.Paletted:
step = (d.X + 3) &^ 3
palette = make([]byte, 1024)
for i := 0; i < len(m.Palette) && i < 256; i++ {
r, g, b, _ := m.Palette[i].RGBA()
palette[i*4+0] = uint8(b >> 8)
palette[i*4+1] = uint8(g >> 8)
palette[i*4+2] = uint8(r >> 8)
palette[i*4+3] = 0xFF
}
h.imageSize = uint32(d.Y * step)
h.fileSize += uint32(len(palette)) + h.imageSize
h.pixOffset += uint32(len(palette))
h.bpp = 8
case *image.RGBA:
opaque = m.Opaque()
if opaque {
step = (3*d.X + 3) &^ 3
h.bpp = 24
} else {
step = 4 * d.X
h.bpp = 32
}
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
case *image.NRGBA:
opaque = m.Opaque()
if opaque {
step = (3*d.X + 3) &^ 3
h.bpp = 24
} else {
step = 4 * d.X
h.bpp = 32
}
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
default:
step = (3*d.X + 3) &^ 3
h.imageSize = uint32(d.Y * step)
h.fileSize += h.imageSize
h.bpp = 24
}
if err := binary.Write(w, binary.LittleEndian, h); err != nil {
return err
}
if palette != nil {
if err := binary.Write(w, binary.LittleEndian, palette); err != nil {
return err
}
}
if d.X == 0 || d.Y == 0 {
return nil
}
switch m := m.(type) {
case *image.Gray:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.Paletted:
return encodePaletted(w, m.Pix, d.X, d.Y, m.Stride, step)
case *image.RGBA:
return encodeRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque)
case *image.NRGBA:
return encodeNRGBA(w, m.Pix, d.X, d.Y, m.Stride, step, opaque)
}
return encode(w, m, step)
}
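A brief sketch of the encoder (out.bmp is a hypothetical output path): since every pixel below is opaque, Encode takes the 24 bpp branch of the switch above; leaving alpha at zero would switch it to 32 bpp.
package main
import (
	"image"
	"image/color"
	"log"
	"os"
	"golang.org/x/image/bmp"
)
func main() {
	img := image.NewRGBA(image.Rect(0, 0, 16, 16))
	for y := 0; y < 16; y++ {
		for x := 0; x < 16; x++ {
			img.Set(x, y, color.RGBA{R: 255, A: 255}) // fully opaque red
		}
	}
	f, err := os.Create("out.bmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if err := bmp.Encode(f, img); err != nil {
		log.Fatal(err)
	}
}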

View File

@ -52,7 +52,6 @@ var isSpecialElementMap = map[string]bool{
"iframe": true, "iframe": true,
"img": true, "img": true,
"input": true, "input": true,
"isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
"keygen": true, "keygen": true,
"li": true, "li": true,
"link": true, "link": true,

View File

@ -172,7 +172,6 @@ var svgAttributeAdjustments = map[string]string{
"diffuseconstant": "diffuseConstant", "diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode", "edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired", "externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits", "filterunits": "filterUnits",
"glyphref": "glyphRef", "glyphref": "glyphRef",
"gradienttransform": "gradientTransform", "gradienttransform": "gradientTransform",

View File

@ -18,6 +18,11 @@ const (
ElementNode ElementNode
CommentNode CommentNode
DoctypeNode DoctypeNode
// RawNode nodes are not returned by the parser, but can be part of the
// Node tree passed to func Render to insert raw HTML (without escaping).
// If so, this package makes no guarantee that the rendered HTML is secure
// (from e.g. Cross Site Scripting attacks) or well-formed.
RawNode
scopeMarkerNode scopeMarkerNode
) )
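A hedged sketch of what the new RawNode type enables (the markup string is illustrative): a hand-built node whose Data is emitted by html.Render without escaping, which is why the doc comment above warns that safety is the caller's responsibility.
package main
import (
	"log"
	"os"
	"golang.org/x/net/html"
)
func main() {
	div := &html.Node{Type: html.ElementNode, Data: "div"}
	// RawNode content is rendered verbatim; the caller must ensure it is
	// safe and well-formed.
	div.AppendChild(&html.Node{Type: html.RawNode, Data: "<b>trusted &amp; pre-escaped</b>"})
	if err := html.Render(os.Stdout, div); err != nil {
		log.Fatal(err)
	}
	// Prints: <div><b>trusted &amp; pre-escaped</b></div>
}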

298
vendor/golang.org/x/net/html/parse.go generated vendored
View File

@ -184,6 +184,17 @@ func (p *parser) clearStackToContext(s scope) {
} }
} }
// parseGenericRawTextElements implements the generic raw text element parsing
// algorithm defined in 12.2.6.2.
// https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text
// TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part
// officially, need to make tokenizer consider both states.
func (p *parser) parseGenericRawTextElement() {
p.addElement()
p.originalIM = p.im
p.im = textIM
}
// generateImpliedEndTags pops nodes off the stack of open elements as long as // generateImpliedEndTags pops nodes off the stack of open elements as long as
// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. // the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc.
// If exceptions are specified, nodes with that name will not be popped off. // If exceptions are specified, nodes with that name will not be popped off.
@ -192,16 +203,17 @@ func (p *parser) generateImpliedEndTags(exceptions ...string) {
loop: loop:
for i = len(p.oe) - 1; i >= 0; i-- { for i = len(p.oe) - 1; i >= 0; i-- {
n := p.oe[i] n := p.oe[i]
if n.Type == ElementNode { if n.Type != ElementNode {
switch n.DataAtom { break
case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: }
for _, except := range exceptions { switch n.DataAtom {
if n.Data == except { case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc:
break loop for _, except := range exceptions {
} if n.Data == except {
break loop
} }
continue
} }
continue
} }
break break
} }
@ -369,8 +381,7 @@ findIdenticalElements:
// Section 12.2.4.3. // Section 12.2.4.3.
func (p *parser) clearActiveFormattingElements() { func (p *parser) clearActiveFormattingElements() {
for { for {
n := p.afe.pop() if n := p.afe.pop(); len(p.afe) == 0 || n.Type == scopeMarkerNode {
if len(p.afe) == 0 || n.Type == scopeMarkerNode {
return return
} }
} }
@ -625,25 +636,29 @@ func inHeadIM(p *parser) bool {
switch p.tok.DataAtom { switch p.tok.DataAtom {
case a.Html: case a.Html:
return inBodyIM(p) return inBodyIM(p)
case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta:
p.addElement() p.addElement()
p.oe.pop() p.oe.pop()
p.acknowledgeSelfClosingTag() p.acknowledgeSelfClosingTag()
return true return true
case a.Noscript: case a.Noscript:
p.addElement()
if p.scripting { if p.scripting {
p.setOriginalIM() p.parseGenericRawTextElement()
p.im = textIM return true
} else {
p.im = inHeadNoscriptIM
} }
p.addElement()
p.im = inHeadNoscriptIM
// Don't let the tokenizer go into raw text mode when scripting is disabled.
p.tokenizer.NextIsNotRawText()
return true return true
case a.Script, a.Title, a.Noframes, a.Style: case a.Script, a.Title:
p.addElement() p.addElement()
p.setOriginalIM() p.setOriginalIM()
p.im = textIM p.im = textIM
return true return true
case a.Noframes, a.Style:
p.parseGenericRawTextElement()
return true
case a.Head: case a.Head:
// Ignore the token. // Ignore the token.
return true return true
@ -855,7 +870,7 @@ func inBodyIM(p *parser) bool {
return true return true
} }
copyAttributes(p.oe[0], p.tok) copyAttributes(p.oe[0], p.tok)
case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
return inHeadIM(p) return inHeadIM(p)
case a.Body: case a.Body:
if p.oe.contains(a.Template) { if p.oe.contains(a.Template) {
@ -881,7 +896,7 @@ func inBodyIM(p *parser) bool {
p.addElement() p.addElement()
p.im = inFramesetIM p.im = inFramesetIM
return true return true
case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
p.popUntil(buttonScope, a.P) p.popUntil(buttonScope, a.P)
p.addElement() p.addElement()
case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
@ -1014,53 +1029,6 @@ func inBodyIM(p *parser) bool {
p.tok.DataAtom = a.Img p.tok.DataAtom = a.Img
p.tok.Data = a.Img.String() p.tok.Data = a.Img.String()
return false return false
case a.Isindex:
if p.form != nil {
// Ignore the token.
return true
}
action := ""
prompt := "This is a searchable index. Enter search keywords: "
attr := []Attribute{{Key: "name", Val: "isindex"}}
for _, t := range p.tok.Attr {
switch t.Key {
case "action":
action = t.Val
case "name":
// Ignore the attribute.
case "prompt":
prompt = t.Val
default:
attr = append(attr, t)
}
}
p.acknowledgeSelfClosingTag()
p.popUntil(buttonScope, a.P)
p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
if p.form == nil {
// NOTE: The 'isindex' element has been removed,
// and the 'template' element has not been designed to be
// collaborative with the index element.
//
// Ignore the token.
return true
}
if action != "" {
p.form.Attr = []Attribute{{Key: "action", Val: action}}
}
p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
p.addText(prompt)
p.addChild(&Node{
Type: ElementNode,
DataAtom: a.Input,
Data: a.Input.String(),
Attr: attr,
})
p.oe.pop()
p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
case a.Textarea: case a.Textarea:
p.addElement() p.addElement()
p.setOriginalIM() p.setOriginalIM()
@ -1070,18 +1038,21 @@ func inBodyIM(p *parser) bool {
p.popUntil(buttonScope, a.P) p.popUntil(buttonScope, a.P)
p.reconstructActiveFormattingElements() p.reconstructActiveFormattingElements()
p.framesetOK = false p.framesetOK = false
p.addElement() p.parseGenericRawTextElement()
p.setOriginalIM()
p.im = textIM
case a.Iframe: case a.Iframe:
p.framesetOK = false p.framesetOK = false
p.parseGenericRawTextElement()
case a.Noembed:
p.parseGenericRawTextElement()
case a.Noscript:
if p.scripting {
p.parseGenericRawTextElement()
return true
}
p.reconstructActiveFormattingElements()
p.addElement() p.addElement()
p.setOriginalIM() // Don't let the tokenizer go into raw text mode when scripting is disabled.
p.im = textIM p.tokenizer.NextIsNotRawText()
case a.Noembed, a.Noscript:
p.addElement()
p.setOriginalIM()
p.im = textIM
case a.Select: case a.Select:
p.reconstructActiveFormattingElements() p.reconstructActiveFormattingElements()
p.addElement() p.addElement()
@ -1137,7 +1108,7 @@ func inBodyIM(p *parser) bool {
return false return false
} }
return true return true
case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
p.popUntil(defaultScope, p.tok.DataAtom) p.popUntil(defaultScope, p.tok.DataAtom)
case a.Form: case a.Form:
if p.oe.contains(a.Template) { if p.oe.contains(a.Template) {
@ -1198,14 +1169,13 @@ func inBodyIM(p *parser) bool {
if len(p.templateStack) > 0 { if len(p.templateStack) > 0 {
p.im = inTemplateIM p.im = inTemplateIM
return false return false
} else { }
for _, e := range p.oe { for _, e := range p.oe {
switch e.DataAtom { switch e.DataAtom {
case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
a.Thead, a.Tr, a.Body, a.Html: a.Thead, a.Tr, a.Body, a.Html:
default: default:
return true return true
}
} }
} }
} }
@ -1221,9 +1191,15 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
// Once the code successfully parses the comprehensive test suite, we should // Once the code successfully parses the comprehensive test suite, we should
// refactor this code to be more idiomatic. // refactor this code to be more idiomatic.
// Steps 1-4. The outer loop. // Steps 1-2
if current := p.oe.top(); current.Data == tagName && p.afe.index(current) == -1 {
p.oe.pop()
return
}
// Steps 3-5. The outer loop.
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
// Step 5. Find the formatting element. // Step 6. Find the formatting element.
var formattingElement *Node var formattingElement *Node
for j := len(p.afe) - 1; j >= 0; j-- { for j := len(p.afe) - 1; j >= 0; j-- {
if p.afe[j].Type == scopeMarkerNode { if p.afe[j].Type == scopeMarkerNode {
@ -1238,17 +1214,22 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
p.inBodyEndTagOther(tagAtom, tagName) p.inBodyEndTagOther(tagAtom, tagName)
return return
} }
// Step 7. Ignore the tag if formatting element is not in the stack of open elements.
feIndex := p.oe.index(formattingElement) feIndex := p.oe.index(formattingElement)
if feIndex == -1 { if feIndex == -1 {
 			p.afe.remove(formattingElement)
 			return
 		}
+		// Step 8. Ignore the tag if formatting element is not in the scope.
 		if !p.elementInScope(defaultScope, tagAtom) {
 			// Ignore the tag.
 			return
 		}
-		// Steps 9-10. Find the furthest block.
+		// Step 9. This step is omitted because it's just a parse error but no need to return.
+		// Steps 10-11. Find the furthest block.
 		var furthestBlock *Node
 		for _, e := range p.oe[feIndex:] {
 			if isSpecialElement(e) {
@@ -1265,47 +1246,65 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 			return
 		}
-		// Steps 11-12. Find the common ancestor and bookmark node.
+		// Steps 12-13. Find the common ancestor and bookmark node.
 		commonAncestor := p.oe[feIndex-1]
 		bookmark := p.afe.index(formattingElement)
-		// Step 13. The inner loop. Find the lastNode to reparent.
+		// Step 14. The inner loop. Find the lastNode to reparent.
 		lastNode := furthestBlock
 		node := furthestBlock
 		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
+		// Step 14.1.
+		j := 0
+		for {
+			// Step 14.2.
+			j++
+			// Step. 14.3.
 			x--
 			node = p.oe[x]
-			// Step 13.4 - 13.5.
+			// Step 14.4. Go to the next step if node is formatting element.
+			if node == formattingElement {
+				break
+			}
+			// Step 14.5. Remove node from the list of active formatting elements if
+			// inner loop counter is greater than three and node is in the list of
+			// active formatting elements.
+			if ni := p.afe.index(node); j > 3 && ni > -1 {
+				p.afe.remove(node)
+				// If any element of the list of active formatting elements is removed,
+				// we need to take care whether bookmark should be decremented or not.
+				// This is because the value of bookmark may exceed the size of the
+				// list by removing elements from the list.
+				if ni <= bookmark {
+					bookmark--
+				}
+				continue
+			}
+			// Step 14.6. Continue the next inner loop if node is not in the list of
+			// active formatting elements.
 			if p.afe.index(node) == -1 {
 				p.oe.remove(node)
 				continue
 			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
+			// Step 14.7.
 			clone := node.clone()
 			p.afe[p.afe.index(node)] = clone
 			p.oe[p.oe.index(node)] = clone
 			node = clone
-			// Step 13.8.
+			// Step 14.8.
 			if lastNode == furthestBlock {
 				bookmark = p.afe.index(node) + 1
 			}
-			// Step 13.9.
+			// Step 14.9.
 			if lastNode.Parent != nil {
 				lastNode.Parent.RemoveChild(lastNode)
 			}
 			node.AppendChild(lastNode)
-			// Step 13.10.
+			// Step 14.10.
 			lastNode = node
 		}
-		// Step 14. Reparent lastNode to the common ancestor,
+		// Step 15. Reparent lastNode to the common ancestor,
 		// or for misnested table nodes, to the foster parent.
 		if lastNode.Parent != nil {
 			lastNode.Parent.RemoveChild(lastNode)
@@ -1317,13 +1316,13 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 			commonAncestor.AppendChild(lastNode)
 		}
-		// Steps 15-17. Reparent nodes from the furthest block's children
+		// Steps 16-18. Reparent nodes from the furthest block's children
 		// to a clone of the formatting element.
 		clone := formattingElement.clone()
 		reparentChildren(clone, furthestBlock)
 		furthestBlock.AppendChild(clone)
-		// Step 18. Fix up the list of active formatting elements.
+		// Step 19. Fix up the list of active formatting elements.
 		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
 			// Move the bookmark with the rest of the list.
 			bookmark--
@@ -1331,7 +1330,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) {
 		p.afe.remove(formattingElement)
 		p.afe.insert(bookmark, clone)
-		// Step 19. Fix up the stack of open elements.
+		// Step 20. Fix up the stack of open elements.
 		p.oe.remove(formattingElement)
 		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
 	}
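
The hunks above renumber and extend the "adoption agency" algorithm that handles misnested formatting end tags: the inner loop now runs until it reaches the formatting element and, once its counter passes three, also evicts stale entries from the list of active formatting elements. A minimal sketch of how this surfaces through the package's public API, assuming the updated golang.org/x/net/html is on the module path; the input markup is an illustrative misnested case, not taken from this diff:

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// </b> arrives while <p> is still open, which drives inBodyEndTagFormatting.
	doc, err := html.Parse(strings.NewReader(`<b>1<p>2</b>3</p>`))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// The parser closes <b> and reparents a clone of it inside <p>, so "2"
	// typically stays bold while "3" does not.
	if err := html.Render(os.Stdout, doc); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}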
@@ -1502,14 +1501,13 @@ func inCaptionIM(p *parser) bool {
 	case StartTagToken:
 		switch p.tok.DataAtom {
 		case a.Caption, a.Col, a.Colgroup, a.Tbody, a.Td, a.Tfoot, a.Thead, a.Tr:
-			if p.popUntil(tableScope, a.Caption) {
-				p.clearActiveFormattingElements()
-				p.im = inTableIM
-				return false
-			} else {
+			if !p.popUntil(tableScope, a.Caption) {
 				// Ignore the token.
 				return true
 			}
+			p.clearActiveFormattingElements()
+			p.im = inTableIM
+			return false
 		case a.Select:
 			p.reconstructActiveFormattingElements()
 			p.addElement()
@@ -1526,14 +1524,13 @@ func inCaptionIM(p *parser) bool {
 			}
 			return true
 		case a.Table:
-			if p.popUntil(tableScope, a.Caption) {
-				p.clearActiveFormattingElements()
-				p.im = inTableIM
-				return false
-			} else {
+			if !p.popUntil(tableScope, a.Caption) {
 				// Ignore the token.
 				return true
 			}
+			p.clearActiveFormattingElements()
+			p.im = inTableIM
+			return false
 		case a.Body, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
 			// Ignore the token.
 			return true
@@ -1777,12 +1774,11 @@ func inSelectIM(p *parser) bool {
 			}
 			p.addElement()
 		case a.Select:
-			if p.popUntil(selectScope, a.Select) {
-				p.resetInsertionMode()
-			} else {
+			if !p.popUntil(selectScope, a.Select) {
 				// Ignore the token.
 				return true
 			}
+			p.resetInsertionMode()
 		case a.Input, a.Keygen, a.Textarea:
 			if p.elementInScope(selectScope, a.Select) {
 				p.parseImpliedToken(EndTagToken, a.Select, a.Select.String())
@@ -1810,12 +1806,11 @@ func inSelectIM(p *parser) bool {
 				p.oe = p.oe[:i]
 			}
 		case a.Select:
-			if p.popUntil(selectScope, a.Select) {
-				p.resetInsertionMode()
-			} else {
+			if !p.popUntil(selectScope, a.Select) {
 				// Ignore the token.
 				return true
 			}
+			p.resetInsertionMode()
 		case a.Template:
 			return inHeadIM(p)
 		}
@@ -2136,28 +2131,31 @@ func parseForeignContent(p *parser) bool {
 			Data: p.tok.Data,
 		})
 	case StartTagToken:
-		b := breakout[p.tok.Data]
-		if p.tok.DataAtom == a.Font {
-		loop:
-			for _, attr := range p.tok.Attr {
-				switch attr.Key {
-				case "color", "face", "size":
-					b = true
-					break loop
-				}
-			}
-		}
-		if b {
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				n := p.oe[i]
-				if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
-					p.oe = p.oe[:i+1]
-					break
-				}
-			}
-			return false
-		}
-		switch p.top().Namespace {
+		if !p.fragment {
+			b := breakout[p.tok.Data]
+			if p.tok.DataAtom == a.Font {
+			loop:
+				for _, attr := range p.tok.Attr {
+					switch attr.Key {
+					case "color", "face", "size":
+						b = true
+						break loop
+					}
+				}
+			}
+			if b {
+				for i := len(p.oe) - 1; i >= 0; i-- {
+					n := p.oe[i]
+					if n.Namespace == "" || htmlIntegrationPoint(n) || mathMLTextIntegrationPoint(n) {
+						p.oe = p.oe[:i+1]
+						break
+					}
+				}
+				return false
+			}
+		}
+		current := p.adjustedCurrentNode()
+		switch current.Namespace {
 		case "math":
 			adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
 		case "svg":
@@ -2172,7 +2170,7 @@ func parseForeignContent(p *parser) bool {
 			panic("html: bad parser state: unexpected namespace")
 		}
 		adjustForeignAttributes(p.tok.Attr)
-		namespace := p.top().Namespace
+		namespace := current.Namespace
 		p.addElement()
 		p.top().Namespace = namespace
 		if namespace != "" {
@@ -2201,12 +2199,20 @@ func parseForeignContent(p *parser) bool {
 	return true
 }
 
+// Section 12.2.4.2.
+func (p *parser) adjustedCurrentNode() *Node {
+	if len(p.oe) == 1 && p.fragment && p.context != nil {
+		return p.context
+	}
+	return p.oe.top()
+}
+
 // Section 12.2.6.
 func (p *parser) inForeignContent() bool {
 	if len(p.oe) == 0 {
 		return false
 	}
-	n := p.oe[len(p.oe)-1]
+	n := p.adjustedCurrentNode()
 	if n.Namespace == "" {
 		return false
 	}
@@ -2341,8 +2347,7 @@ func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) {
 		f(p)
 	}
 
-	err := p.parse()
-	if err != nil {
+	if err := p.parse(); err != nil {
 		return nil, err
 	}
 	return p.doc, nil
@@ -2364,7 +2369,6 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) {
 		contextTag = context.DataAtom.String()
 	}
 	p := &parser{
-		tokenizer: NewTokenizerFragment(r, contextTag),
 		doc: &Node{
 			Type: DocumentNode,
 		},
@@ -2372,6 +2376,11 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) {
 		fragment: true,
 		context: context,
 	}
+	if context != nil && context.Namespace != "" {
+		p.tokenizer = NewTokenizer(r)
+	} else {
+		p.tokenizer = NewTokenizerFragment(r, contextTag)
+	}
 
 	for _, f := range opts {
 		f(p)
@@ -2396,8 +2405,7 @@ func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) {
 		}
 	}
 
-	err := p.parse()
-	if err != nil {
+	if err := p.parse(); err != nil {
 		return nil, err
 	}
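
The ParseFragmentWithOptions change above now selects a plain tokenizer whenever the context element carries a namespace, so fragments whose context sits inside <svg> or <math> are tokenized as foreign content, and parseForeignContent consults adjustedCurrentNode instead of the top of the open-element stack. A small sketch of exercising that path through the exported ParseFragment wrapper, assuming the updated package; the context node and markup below are illustrative only:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

func main() {
	// A context element in the SVG namespace, as a caller might build it for
	// innerHTML-style parsing inside an <svg> subtree.
	ctx := &html.Node{
		Type:      html.ElementNode,
		DataAtom:  atom.Svg,
		Data:      "svg",
		Namespace: "svg",
	}
	nodes, err := html.ParseFragment(strings.NewReader(`<circle r="42"></circle>`), ctx)
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	for _, n := range nodes {
		// With the updated parser the fragment's elements come back in the
		// "svg" namespace rather than being treated as plain HTML.
		fmt.Println(n.Data, n.Namespace)
	}
}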

vendor/golang.org/x/net/html/render.go (vendored)

@@ -134,6 +134,9 @@ func render1(w writer, n *Node) error {
 			}
 		}
 		return w.WriteByte('>')
+	case RawNode:
+		_, err := w.WriteString(n.Data)
+		return err
 	default:
 		return errors.New("html: unknown node type")
 	}
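
The new RawNode case writes a node's Data to the output verbatim, with no escaping, which is useful for splicing pre-rendered markup into a tree. A minimal sketch, assuming the updated package; the node contents are illustrative:

package main

import (
	"os"

	"golang.org/x/net/html"
)

func main() {
	div := &html.Node{Type: html.ElementNode, Data: "div"}
	// A TextNode would escape the angle brackets; a RawNode is emitted as-is.
	div.AppendChild(&html.Node{Type: html.RawNode, Data: "<!-- injected verbatim -->"})
	// Expected to print: <div><!-- injected verbatim --></div>
	_ = html.Render(os.Stdout, div)
}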
@@ -252,20 +255,19 @@ func writeQuoted(w writer, s string) error {
 // Section 12.1.2, "Elements", gives this list of void elements. Void elements
 // are those that can't have any contents.
 var voidElements = map[string]bool{
 	"area": true,
 	"base": true,
 	"br": true,
 	"col": true,
-	"command": true,
 	"embed": true,
 	"hr": true,
 	"img": true,
 	"input": true,
 	"keygen": true,
 	"link": true,
 	"meta": true,
 	"param": true,
 	"source": true,
 	"track": true,
 	"wbr": true,
 }

vendor/golang.org/x/net/html/token.go (vendored)

@@ -296,8 +296,7 @@ func (z *Tokenizer) Buffered() []byte {
 // too many times in succession.
 func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
 	for i := 0; i < 100; i++ {
-		n, err := r.Read(b)
-		if n != 0 || err != nil {
+		if n, err := r.Read(b); n != 0 || err != nil {
 			return n, err
 		}
 	}

vendor/golang.org/x/net/publicsuffix/list.go (new file, 181 lines, generated, vendored)

@ -0,0 +1,181 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package publicsuffix provides a public suffix list based on data from
// https://publicsuffix.org/
//
// A public suffix is one under which Internet users can directly register
// names. It is related to, but different from, a TLD (top level domain).
//
// "com" is a TLD (top level domain). Top level means it has no dots.
//
// "com" is also a public suffix. Amazon and Google have registered different
// siblings under that domain: "amazon.com" and "google.com".
//
// "au" is another TLD, again because it has no dots. But it's not "amazon.au".
// Instead, it's "amazon.com.au".
//
// "com.au" isn't an actual TLD, because it's not at the top level (it has
// dots). But it is an eTLD (effective TLD), because that's the branching point
// for domain name registrars.
//
// Another name for "an eTLD" is "a public suffix". Often, what's more of
// interest is the eTLD+1, or one more label than the public suffix. For
// example, browsers partition read/write access to HTTP cookies according to
// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from
// "google.com.au", but web pages served from "maps.google.com" can share
// cookies from "www.google.com", so you don't have to sign into Google Maps
// separately from signing into Google Web Search. Note that all four of those
// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1,
// the last two are not (but share the same eTLD+1: "google.com").
//
// All of these domains have the same eTLD+1:
// - "www.books.amazon.co.uk"
// - "books.amazon.co.uk"
// - "amazon.co.uk"
// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk".
//
// There is no closed form algorithm to calculate the eTLD of a domain.
// Instead, the calculation is data driven. This package provides a
// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at
// https://publicsuffix.org/
package publicsuffix // import "golang.org/x/net/publicsuffix"
// TODO: specify case sensitivity and leading/trailing dot behavior for
// func PublicSuffix and func EffectiveTLDPlusOne.
import (
"fmt"
"net/http/cookiejar"
"strings"
)
// List implements the cookiejar.PublicSuffixList interface by calling the
// PublicSuffix function.
var List cookiejar.PublicSuffixList = list{}
type list struct{}
func (list) PublicSuffix(domain string) string {
ps, _ := PublicSuffix(domain)
return ps
}
func (list) String() string {
return version
}
// PublicSuffix returns the public suffix of the domain using a copy of the
// publicsuffix.org database compiled into the library.
//
// icann is whether the public suffix is managed by the Internet Corporation
// for Assigned Names and Numbers. If not, the public suffix is either a
// privately managed domain (and in practice, not a top level domain) or an
// unmanaged top level domain (and not explicitly mentioned in the
// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN
// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and
// "cromulent" is an unmanaged top level domain.
//
// Use cases for distinguishing ICANN domains like "foo.com" from private
// domains like "foo.appspot.com" can be found at
// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
func PublicSuffix(domain string) (publicSuffix string, icann bool) {
lo, hi := uint32(0), uint32(numTLD)
s, suffix, icannNode, wildcard := domain, len(domain), false, false
loop:
for {
dot := strings.LastIndex(s, ".")
if wildcard {
icann = icannNode
suffix = 1 + dot
}
if lo == hi {
break
}
f := find(s[1+dot:], lo, hi)
if f == notFound {
break
}
u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
icannNode = u&(1<<nodesBitsICANN-1) != 0
u >>= nodesBitsICANN
u = children[u&(1<<nodesBitsChildren-1)]
lo = u & (1<<childrenBitsLo - 1)
u >>= childrenBitsLo
hi = u & (1<<childrenBitsHi - 1)
u >>= childrenBitsHi
switch u & (1<<childrenBitsNodeType - 1) {
case nodeTypeNormal:
suffix = 1 + dot
case nodeTypeException:
suffix = 1 + len(s)
break loop
}
u >>= childrenBitsNodeType
wildcard = u&(1<<childrenBitsWildcard-1) != 0
if !wildcard {
icann = icannNode
}
if dot == -1 {
break
}
s = s[:dot]
}
if suffix == len(domain) {
// If no rules match, the prevailing rule is "*".
return domain[1+strings.LastIndex(domain, "."):], icann
}
return domain[suffix:], icann
}
const notFound uint32 = 1<<32 - 1
// find returns the index of the node in the range [lo, hi) whose label equals
// label, or notFound if there is no such node. The range is assumed to be in
// strictly increasing node label order.
func find(label string, lo, hi uint32) uint32 {
for lo < hi {
mid := lo + (hi-lo)/2
s := nodeLabel(mid)
if s < label {
lo = mid + 1
} else if s == label {
return mid
} else {
hi = mid
}
}
return notFound
}
// nodeLabel returns the label for the i'th node.
func nodeLabel(i uint32) string {
x := nodes[i]
length := x & (1<<nodesBitsTextLength - 1)
x >>= nodesBitsTextLength
offset := x & (1<<nodesBitsTextOffset - 1)
return text[offset : offset+length]
}
// EffectiveTLDPlusOne returns the effective top level domain plus one more
// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
func EffectiveTLDPlusOne(domain string) (string, error) {
if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") {
return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain)
}
suffix, _ := PublicSuffix(domain)
if len(domain) <= len(suffix) {
return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
}
i := len(domain) - len(suffix) - 1
if domain[i] != '.' {
return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
}
return domain[1+strings.LastIndex(domain[:i], "."):], nil
}
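
For reference, a short sketch of how the two exported lookups are typically used; the domain mirrors the examples in the package comment above:

package main

import (
	"fmt"

	"golang.org/x/net/publicsuffix"
)

func main() {
	// The compiled-in table answers suffix queries without any network access.
	ps, icann := publicsuffix.PublicSuffix("www.books.amazon.co.uk")
	fmt.Println(ps, icann) // co.uk true

	// eTLD+1 is one more label than the public suffix.
	etld1, err := publicsuffix.EffectiveTLDPlusOne("www.books.amazon.co.uk")
	fmt.Println(etld1, err) // amazon.co.uk <nil>
}

In practice the List value is what gets handed to net/http/cookiejar, via cookiejar.Options{PublicSuffixList: publicsuffix.List}, so that cookies are partitioned by eTLD+1.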

vendor/golang.org/x/net/publicsuffix/table.go (new file, 10002 lines, generated, vendored)
File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.