fix(wallet): Add collectibles migration file lost during rebase

Authored by Ivan Belyakov on 2023-08-22 13:15:24 +02:00, committed by IvanBelyakoff
parent d4a8766c27
commit 4beb769e6a
2 changed files with 90 additions and 18 deletions

bindata.go (generated go-bindata bundle, package migrations)

@@ -1,6 +1,7 @@
 // Code generated by go-bindata. DO NOT EDIT.
 // sources:
 // 1691753758_initial.up.sql (5.738kB)
+// 1692701329_add_collectibles_and_collections_data_cache.up.sql (1.808kB)
 // doc.go (74B)
 
 package migrations
@@ -11,6 +12,7 @@ import (
 	"crypto/sha256"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -20,7 +22,7 @@ import (
 func bindataRead(data []byte, name string) ([]byte, error) {
 	gz, err := gzip.NewReader(bytes.NewBuffer(data))
 	if err != nil {
-		return nil, fmt.Errorf("read %q: %w", name, err)
+		return nil, fmt.Errorf("read %q: %v", name, err)
 	}
 
 	var buf bytes.Buffer
@@ -28,7 +30,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 	clErr := gz.Close()
 
 	if err != nil {
-		return nil, fmt.Errorf("read %q: %w", name, err)
+		return nil, fmt.Errorf("read %q: %v", name, err)
 	}
 	if clErr != nil {
 		return nil, err
@@ -84,11 +86,31 @@ func _1691753758_initialUpSql() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0644), modTime: time.Unix(1692687412, 0)}
+	info := bindataFileInfo{name: "1691753758_initial.up.sql", size: 5738, mode: os.FileMode(0664), modTime: time.Unix(1692342414, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6b, 0x25, 0x31, 0xc8, 0x27, 0x3, 0x6b, 0x9f, 0x15, 0x42, 0x2f, 0x85, 0xfb, 0xe3, 0x6, 0xea, 0xf7, 0x97, 0x12, 0x56, 0x3c, 0x9a, 0x5b, 0x1a, 0xca, 0xb1, 0x23, 0xfa, 0xcd, 0x57, 0x25, 0x5c}}
 	return a, nil
 }
 
+var __1692701329_add_collectibles_and_collections_data_cacheUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x94\xc1\x8e\xda\x30\x10\x86\xef\x3c\xc5\x1c\x5b\x89\x37\xe8\x29\x24\x03\x8d\x1a\x99\x36\x24\xd5\xf6\x14\xcd\xda\x2e\x3b\xc2\xb1\x23\xc7\x59\x95\xb7\xaf\x80\x6e\xbb\x85\x75\xc8\xae\x84\xf6\xea\x19\xff\xb6\xe7\xfb\x7f\xa7\x25\x26\x15\x42\x95\x2c\x0a\x84\x7c\x09\x62\x5d\x01\xde\xe5\x9b\x6a\x03\xd2\x19\xa3\x65\xe0\x7b\xa3\x1b\x45\x81\x1a\x49\xf2\x41\xc3\x87\x19\x00\x80\x7c\x20\xb6\x0d\x2b\xa8\xc5\x26\x5f\x09\xcc\x60\x91\xaf\x72\x51\x1d\x05\x44\x5d\x14\xf3\x53\x9b\xb3\xc1\x93\x0c\x0d\x29\xe5\x75\xdf\xc3\xf7\xa4\x4c\x3f\x27\xe5\x59\x5b\x70\x3b\x7d\x54\x5b\x14\xeb\xc5\x59\xad\xf3\xee\x91\x95\xf6\x91\xad\x96\x5a\x1d\x29\x29\xdd\x4b\xcf\x5d\x60\x67\x23\x1d\x9d\xf6\x2d\x19\xb6\xbb\x48\x9d\x5b\xda\xea\x66\xf0\x26\x52\x27\xcb\x2d\x1d\xf4\x27\xf5\xb4\x5a\x31\x35\x61\xdf\xc5\x2e\x7c\x4f\x72\xb7\xf5\x6e\xb0\xaa\x91\xce\xb8\xd8\x93\x4f\xd3\x1a\x3c\x5f\xd4\x67\x1f\x3f\xcd\x66\x7f\x90\xd6\x22\xff\x56\x23\xe4\x22\xc3\xbb\x6b\x64\x59\x69\x1b\xf8\xe7\xbe\xd1\x36\xf8\x3d\xac\x45\x14\xfe\x13\xf7\xf9\x05\xda\xf9\x5f\x8a\xcf\x2e\x71\xcd\x57\xc1\x13\x87\xfe\x9d\x9c\x75\x3c\x7c\x8c\xc7\xa9\xe1\x91\xcc\x10\xb5\x18\xf7\x9d\xa1\xfd\x98\x48\x4b\xbf\x46\x25\x96\xeb\x12\xf3\x95\x80\x2f\xf8\x63\xda\x74\xa1\xc4\x25\x96\x28\x52\x8c\x65\x74\x9a\xce\xf1\x74\x38\xc0\xae\xbf\x66\x07\x58\x69\xb2\x49\x93\x0c\xff\xad\x67\x58\xe0\xb3\xf5\x89\x5c\x9d\xbd\xf5\x77\xf1\xf6\x2f\xa1\x37\xc3\xf6\xd5\x59\x7f\x65\xaa\x9e\xde\x1f\x0f\xd5\xd9\x84\xe2\xb4\xa6\x4f\xfc\xf6\x41\xba\x16\x96\x96\x2d\x94\x98\x14\x97\xf6\x7f\x69\x79\x9a\xeb\x5f\xf4\xfa\x7f\xd3\x1b\xdb\xfc\x16\x83\xff\x0e\x00\x00\xff\xff\xad\x71\x76\xba\x10\x07\x00\x00")
+
+func _1692701329_add_collectibles_and_collections_data_cacheUpSqlBytes() ([]byte, error) {
+	return bindataRead(
+		__1692701329_add_collectibles_and_collections_data_cacheUpSql,
+		"1692701329_add_collectibles_and_collections_data_cache.up.sql",
+	)
+}
+
+func _1692701329_add_collectibles_and_collections_data_cacheUpSql() (*asset, error) {
+	bytes, err := _1692701329_add_collectibles_and_collections_data_cacheUpSqlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "1692701329_add_collectibles_and_collections_data_cache.up.sql", size: 1808, mode: os.FileMode(0664), modTime: time.Unix(1692702851, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1, 0x51, 0xf4, 0x2b, 0x92, 0xde, 0x59, 0x65, 0xd8, 0x9b, 0x57, 0xe0, 0xfd, 0x7b, 0x12, 0xb, 0x29, 0x6e, 0x9d, 0xb5, 0x90, 0xe, 0xfa, 0x12, 0x97, 0xd, 0x61, 0x60, 0x7f, 0x32, 0x1d, 0xc3}}
+	return a, nil
+}
+
 var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x2c\xc9\xb1\x0d\xc4\x20\x0c\x05\xd0\x9e\x29\xfe\x02\xd8\xfd\x6d\xe3\x4b\xac\x2f\x44\x82\x09\x78\x7f\xa5\x49\xfd\xa6\x1d\xdd\xe8\xd8\xcf\x55\x8a\x2a\xe3\x47\x1f\xbe\x2c\x1d\x8c\xfa\x6f\xe3\xb4\x34\xd4\xd9\x89\xbb\x71\x59\xb6\x18\x1b\x35\x20\xa2\x9f\x0a\x03\xa2\xe5\x0d\x00\x00\xff\xff\x60\xcd\x06\xbe\x4a\x00\x00\x00")
 
 func docGoBytes() ([]byte, error) {
@@ -104,7 +126,7 @@ func docGo() (*asset, error) {
 		return nil, err
 	}
 
-	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0644), modTime: time.Unix(1692687412, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 74, mode: os.FileMode(0664), modTime: time.Unix(1692342414, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xde, 0x7c, 0x28, 0xcd, 0x47, 0xf2, 0xfa, 0x7c, 0x51, 0x2d, 0xd8, 0x38, 0xb, 0xb0, 0x34, 0x9d, 0x4c, 0x62, 0xa, 0x9e, 0x28, 0xc3, 0x31, 0x23, 0xd9, 0xbb, 0x89, 0x9f, 0xa0, 0x89, 0x1f, 0xe8}}
 	return a, nil
 }
@@ -201,23 +223,21 @@ func AssetNames() []string {
 // _bindata is a table, holding each asset generator, mapped to its name.
 var _bindata = map[string]func() (*asset, error){
 	"1691753758_initial.up.sql": _1691753758_initialUpSql,
-	"doc.go": docGo,
-}
-
-// AssetDebug is true if the assets were built with the debug flag enabled.
-const AssetDebug = false
+	"1692701329_add_collectibles_and_collections_data_cache.up.sql": _1692701329_add_collectibles_and_collections_data_cacheUpSql,
+	"doc.go": docGo,
+}
 
 // AssetDir returns the file names below a certain
 // directory embedded in the file by go-bindata.
 // For example if you run go-bindata on data/... and data contains the
 // following hierarchy:
-//
-//	data/
-//	  foo.txt
-//	  img/
-//	    a.png
-//	    b.png
-//
+// data/
+//   foo.txt
+//   img/
+//     a.png
+//     b.png
 // then AssetDir("data") would return []string{"foo.txt", "img"},
 // AssetDir("data/img") would return []string{"a.png", "b.png"},
 // AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
@@ -250,8 +270,9 @@ type bintree struct {
 }
 
 var _bintree = &bintree{nil, map[string]*bintree{
-	"1691753758_initial.up.sql": {_1691753758_initialUpSql, map[string]*bintree{}},
-	"doc.go": {docGo, map[string]*bintree{}},
+	"1691753758_initial.up.sql": &bintree{_1691753758_initialUpSql, map[string]*bintree{}},
+	"1692701329_add_collectibles_and_collections_data_cache.up.sql": &bintree{_1692701329_add_collectibles_and_collections_data_cacheUpSql, map[string]*bintree{}},
+	"doc.go": &bintree{docGo, map[string]*bintree{}},
 }}
 
 // RestoreAsset restores an asset under the given directory.
@@ -268,7 +289,7 @@ func RestoreAsset(dir, name string) error {
 	if err != nil {
 		return err
 	}
-	err = os.WriteFile(_filePath(dir, name), data, info.Mode())
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
 	if err != nil {
 		return err
 	}
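
Reviewer note: a quick way to confirm the restored migration really is embedded is a small test in the same migrations package. The sketch below is illustrative only; it assumes go-bindata's standard generated accessor Asset, which is not visible in the hunks above (only AssetNames and AssetDir are), and checks against the size recorded in the bindataFileInfo.

package migrations

import "testing"

// TestCollectiblesMigrationEmbedded checks that the migration restored by
// this commit is present and has the size recorded in its bindataFileInfo.
func TestCollectiblesMigrationEmbedded(t *testing.T) {
	const name = "1692701329_add_collectibles_and_collections_data_cache.up.sql"

	// Asset is go-bindata's standard generated accessor; assumed to exist
	// in the unchanged part of this file.
	data, err := Asset(name)
	if err != nil {
		t.Fatalf("embedded migration missing: %v", err)
	}
	if len(data) != 1808 {
		t.Fatalf("unexpected migration size: got %d, want 1808", len(data))
	}

	// AssetNames (updated above) should now report all three sources.
	if got := len(AssetNames()); got != 3 {
		t.Fatalf("expected 3 embedded assets, got %d", got)
	}
}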

1692701329_add_collectibles_and_collections_data_cache.up.sql

@@ -0,0 +1,51 @@
+CREATE TABLE IF NOT EXISTS collectible_data_cache (
+    chain_id UNSIGNED BIGINT NOT NULL,
+    contract_address VARCHAR NOT NULL,
+    token_id BLOB NOT NULL,
+    provider VARCHAR NOT NULL,
+    name VARCHAR NOT NULL,
+    description VARCHAR NOT NULL,
+    permalink VARCHAR NOT NULL,
+    image_url VARCHAR NOT NULL,
+    animation_url VARCHAR NOT NULL,
+    animation_media_type VARCHAR NOT NULL,
+    background_color VARCHAR NOT NULL,
+    token_uri VARCHAR NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS collectible_data_identify_entry ON collectible_data_cache (chain_id, contract_address, token_id);
+
+CREATE TABLE IF NOT EXISTS collectible_traits_cache (
+    chain_id UNSIGNED BIGINT NOT NULL,
+    contract_address VARCHAR NOT NULL,
+    token_id BLOB NOT NULL,
+    trait_type VARCHAR NOT NULL,
+    trait_value VARCHAR NOT NULL,
+    display_type VARCHAR NOT NULL,
+    max_value VARCHAR NOT NULL,
+    FOREIGN KEY(chain_id, contract_address, token_id) REFERENCES collectible_data_cache(chain_id, contract_address, token_id)
+        ON UPDATE CASCADE
+        ON DELETE CASCADE
+);
+
+CREATE TABLE IF NOT EXISTS collection_data_cache (
+    chain_id UNSIGNED BIGINT NOT NULL,
+    contract_address VARCHAR NOT NULL,
+    provider VARCHAR NOT NULL,
+    name VARCHAR NOT NULL,
+    slug VARCHAR NOT NULL,
+    image_url VARCHAR NOT NULL
+);
+
+CREATE UNIQUE INDEX IF NOT EXISTS collection_data_identify_entry ON collection_data_cache (chain_id, contract_address);
+
+CREATE TABLE IF NOT EXISTS collection_traits_cache (
+    chain_id UNSIGNED BIGINT NOT NULL,
+    contract_address VARCHAR NOT NULL,
+    trait_type VARCHAR NOT NULL,
+    min REAL NOT NULL,
+    max REAL NOT NULL,
+    FOREIGN KEY(chain_id, contract_address) REFERENCES collection_data_cache(chain_id, contract_address)
+        ON UPDATE CASCADE
+        ON DELETE CASCADE
+);
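
The unique indexes are what allow a cached entry to be refreshed in place when a provider returns fresh data for a collectible it has already seen. The following standalone sketch shows that upsert pattern against the first table; it is not status-go code, and the in-memory SQLite database and the mattn/go-sqlite3 driver are assumptions for illustration only.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // hypothetical driver choice for this sketch
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Apply the migration DDL shown above (abbreviated to one table here).
	if _, err := db.Exec(`
		CREATE TABLE IF NOT EXISTS collectible_data_cache (
			chain_id UNSIGNED BIGINT NOT NULL,
			contract_address VARCHAR NOT NULL,
			token_id BLOB NOT NULL,
			provider VARCHAR NOT NULL,
			name VARCHAR NOT NULL,
			description VARCHAR NOT NULL,
			permalink VARCHAR NOT NULL,
			image_url VARCHAR NOT NULL,
			animation_url VARCHAR NOT NULL,
			animation_media_type VARCHAR NOT NULL,
			background_color VARCHAR NOT NULL,
			token_uri VARCHAR NOT NULL
		);
		CREATE UNIQUE INDEX IF NOT EXISTS collectible_data_identify_entry
			ON collectible_data_cache (chain_id, contract_address, token_id);
	`); err != nil {
		log.Fatal(err)
	}

	// The unique index lets ON CONFLICT target (chain_id, contract_address,
	// token_id), so re-fetching a collectible refreshes its cached row
	// instead of inserting a duplicate.
	_, err = db.Exec(`
		INSERT INTO collectible_data_cache
			(chain_id, contract_address, token_id, provider, name, description,
			 permalink, image_url, animation_url, animation_media_type,
			 background_color, token_uri)
		VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
		ON CONFLICT(chain_id, contract_address, token_id)
		DO UPDATE SET name = excluded.name, image_url = excluded.image_url`,
		1, "0x1234", []byte{0x01}, "opensea", "Example #1", "", "", "", "", "", "", "")
	if err != nil {
		log.Fatal(err)
	}

	var name string
	err = db.QueryRow(`
		SELECT name FROM collectible_data_cache
		WHERE chain_id = ? AND contract_address = ? AND token_id = ?`,
		1, "0x1234", []byte{0x01}).Scan(&name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // Example #1
}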