Mirror of https://github.com/logos-messaging/telemetry.git (synced 2026-01-02 14:13:05 +00:00)
Merge pull request #16 from waku-org/feat/batched-metrics
feat: add endpoint for batching different metrics in a single request
Commit 8fe36168a5
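The new /record-metrics route (wired to createTelemetryData in the server.go hunk further down) accepts a JSON array of TelemetryRequest items and dispatches each entry on its telemetry_type. A minimal client-side sketch of such a batched request follows; only the endpoint path, the TelemetryRequest shape, and the metric type names come from this diff, while the host, port, and payload values are illustrative assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// TelemetryRequest mirrors the request item type added in server.go.
type TelemetryRequest struct {
	Id            int              `json:"id"`
	TelemetryType string           `json:"telemetry_type"`
	TelemetryData *json.RawMessage `json:"telemetry_data"`
}

func main() {
	// Payload values below are made up for illustration; the field names follow
	// the SentEnvelope and ReceivedEnvelope JSON tags in this commit.
	sent := json.RawMessage(`{"messageHash":"0xaa","sentAt":1717560000,"pubsubTopic":"/waku/2/default-waku/proto","topic":"0x0001","senderKeyUID":"key-1","nodeName":"status-desktop","publishMethod":"LightPush","statusVersion":"2.30.0"}`)
	received := json.RawMessage(`{"messageHash":"0xbb","sentAt":1717560001,"pubsubTopic":"/waku/2/default-waku/proto","topic":"0x0001","receiverKeyUID":"key-2","nodeName":"status-desktop","processingError":"","statusVersion":"2.30.0"}`)

	batch := []TelemetryRequest{
		{Id: 1, TelemetryType: "SentEnvelope", TelemetryData: &sent},
		{Id: 2, TelemetryType: "ReceivedEnvelope", TelemetryData: &received},
	}

	body, err := json.Marshal(batch)
	if err != nil {
		panic(err)
	}

	// Host and port are assumptions; only the /record-metrics path is taken from the diff.
	resp, err := http.Post("http://localhost:8080/record-metrics", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The handler responds 201 Created with a JSON list of per-item errors (empty when all items were stored).
	fmt.Println("status:", resp.StatusCode)
}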
@@ -42,6 +42,11 @@ func dropTables(db *sql.DB) {
log.Fatalf("an error '%s' was not expected when dropping the table", err)
}

_, err = db.Exec("DROP TABLE IF EXISTS sentEnvelopes")
if err != nil {
log.Fatalf("an error '%s' was not expected when dropping the table", err)
}

_, err = db.Exec("DROP TABLE IF EXISTS protocolStatsRate")
if err != nil {
log.Fatalf("an error '%s' was not expected when dropping the table", err)

@@ -1,18 +1,21 @@
// Code generated for package telemetry by go-bindata DO NOT EDIT. (@generated)
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 000001_message_type.up.sql
// 000002_bandwidth_protocol.up.sql
// 000003_index_truncate.up.sql
// 000004_envelope.table.up.sql
// doc.go
// 000001_message_type.up.sql (66B)
// 000002_bandwidth_protocol.up.sql (719B)
// 000003_index_truncate.up.sql (598B)
// 000004_envelope.table.up.sql (531B)
// 000005_pushed_envelope.up.sql (574B)
// 000006_status_version.up.sql (198B)
// doc.go (73B)

package telemetry

import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -22,7 +25,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %w", name, err)
}

var buf bytes.Buffer
@@ -30,7 +33,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()

if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, err
@@ -40,8 +43,9 @@ func bindataRead(data []byte, name string) ([]byte, error) {
}

type asset struct {
bytes []byte
info os.FileInfo
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}

type bindataFileInfo struct {
@@ -51,32 +55,21 @@ type bindataFileInfo struct {
modTime time.Time
}

// Name return file name
func (fi bindataFileInfo) Name() string {
return fi.name
}

// Size return file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}

// Mode return file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}

// Mode return file modify time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}

// IsDir return file whether a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
return false
}

// Sys return file is sys mode
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
@@ -96,8 +89,8 @@ func _000001_message_typeUpSql() (*asset, error) {
return nil, err
}

info := bindataFileInfo{name: "000001_message_type.up.sql", size: 66, mode: os.FileMode(436), modTime: time.Unix(1715855770, 0)}
a := &asset{bytes: bytes, info: info}
info := bindataFileInfo{name: "000001_message_type.up.sql", size: 66, mode: os.FileMode(0644), modTime: time.Unix(1716427081, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe2, 0x43, 0xcc, 0xef, 0xad, 0x5f, 0x44, 0x58, 0x8d, 0x47, 0x70, 0x5d, 0x23, 0x30, 0xe2, 0x1f, 0xdb, 0x4d, 0xad, 0x6e, 0xd9, 0xe7, 0x50, 0x19, 0x43, 0x1c, 0x37, 0x57, 0xea, 0xc6, 0x57, 0xab}}
return a, nil
}

@@ -116,8 +109,8 @@ func _000002_bandwidth_protocolUpSql() (*asset, error) {
return nil, err
}

info := bindataFileInfo{name: "000002_bandwidth_protocol.up.sql", size: 719, mode: os.FileMode(436), modTime: time.Unix(1715855770, 0)}
a := &asset{bytes: bytes, info: info}
info := bindataFileInfo{name: "000002_bandwidth_protocol.up.sql", size: 719, mode: os.FileMode(0644), modTime: time.Unix(1716427081, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfe, 0x83, 0x69, 0xab, 0x3e, 0xf5, 0x8d, 0x44, 0xb2, 0x6e, 0x52, 0x8d, 0x27, 0xe8, 0x95, 0x28, 0x3c, 0xea, 0x29, 0x93, 0x6d, 0xa3, 0x10, 0xde, 0x9b, 0xc8, 0xa6, 0xb9, 0x80, 0xa1, 0x3, 0x6f}}
return a, nil
}

@@ -136,8 +129,8 @@ func _000003_index_truncateUpSql() (*asset, error) {
return nil, err
}

info := bindataFileInfo{name: "000003_index_truncate.up.sql", size: 598, mode: os.FileMode(436), modTime: time.Unix(1716989343, 0)}
a := &asset{bytes: bytes, info: info}
info := bindataFileInfo{name: "000003_index_truncate.up.sql", size: 598, mode: os.FileMode(0644), modTime: time.Unix(1716427081, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcf, 0x8, 0x4, 0x47, 0xc8, 0x65, 0x38, 0x79, 0x3e, 0x37, 0xec, 0x4e, 0x1a, 0x24, 0x50, 0x3c, 0x1c, 0x75, 0xe8, 0x3b, 0x2, 0x62, 0x2, 0x52, 0x50, 0xff, 0x4a, 0x8f, 0x9d, 0x71, 0x79, 0xf6}}
return a, nil
}

@@ -156,8 +149,48 @@ func _000004_envelopeTableUpSql() (*asset, error) {
return nil, err
}

info := bindataFileInfo{name: "000004_envelope.table.up.sql", size: 531, mode: os.FileMode(436), modTime: time.Unix(1715855770, 0)}
a := &asset{bytes: bytes, info: info}
info := bindataFileInfo{name: "000004_envelope.table.up.sql", size: 531, mode: os.FileMode(0644), modTime: time.Unix(1716524216, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x32, 0xee, 0x49, 0xa0, 0x48, 0x2b, 0x8b, 0xe8, 0xd3, 0x6a, 0xae, 0x7f, 0x62, 0x65, 0x8a, 0x45, 0xbb, 0x8a, 0xee, 0xcd, 0x13, 0xde, 0xd6, 0x33, 0xe2, 0x3f, 0x32, 0xff, 0xfe, 0xf4, 0xda, 0xe7}}
return a, nil
}

var __000005_pushed_envelopeUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x91\xcf\x6a\xf2\x40\x14\xc5\xf7\x79\x8a\xbb\x08\x24\x81\xac\x3e\x70\x95\xd5\x18\xef\xa7\x83\x71\x12\x26\x63\xab\x2b\xc9\x9f\x8b\x49\x89\x49\xea\xcc\x14\x7c\xfb\xa2\xa1\xa5\x16\xaa\xab\x81\x39\x87\xdf\x3d\x9c\xb3\x48\xc1\x75\x61\x8e\x4b\x2e\x1c\x00\x80\x58\x22\x53\x08\x6a\x9f\x21\x8c\xb6\xec\x5a\xdd\x1c\x4e\x64\x9a\xa1\x06\x96\x03\x8a\xed\x06\x7c\x2f\x69\x8f\x8d\xc9\xac\x6e\xbc\x10\x3c\x49\x5d\x71\xf1\x82\xc8\xc1\x5d\x8c\x99\xe2\xe9\x04\x7a\x5d\xa1\x80\xda\x8e\x5d\x5b\x15\x86\x0e\x43\xf9\x46\x95\x01\x75\xfd\xed\x6d\xd7\x45\x0e\x8a\x05\xb8\x6e\xe4\x38\x5f\x27\xd9\x3c\x41\xe0\xff\x41\xa4\x0a\x70\xc7\x73\x95\x83\xa6\xde\x60\xff\x41\xdd\x30\x92\x06\xff\x06\x6e\x6b\xc8\x51\x72\x96\x40\x26\xf9\x86\xc9\x3d\xac\x71\x1f\xde\xa4\x13\x69\x5d\x1c\x69\x55\xe8\x06\x5e\x98\x8c\x57\x4c\xfa\xff\x66\xb3\xe0\x86\x14\xdb\x24\x99\x6c\x57\x2a\x33\xc0\x85\xc2\x25\xca\x5f\x62\x75\xa6\xc2\x50\xfd\xa7\x6e\x86\xb1\xad\x1e\xd1\x47\x5b\xe6\xb6\x54\xcf\x6c\x9a\xfa\x9a\xce\x6b\xba\x6c\xf9\xe2\x91\xaf\x1f\x6a\x12\xc5\x89\x9e\x9c\xbc\x0e\xb5\x99\x76\xba\x9f\x6d\x72\xc4\xa9\xc8\x95\x64\x5c\xa8\xfb\x4e\x0f\xb6\x6f\xdf\x2d\xc1\xf4\xf8\x53\x33\xe1\xcf\x22\xc3\xbb\xa4\xe1\x77\x9e\xc0\x09\x22\xe7\x33\x00\x00\xff\xff\x3d\x18\x50\x60\x3e\x02\x00\x00")

func _000005_pushed_envelopeUpSqlBytes() ([]byte, error) {
return bindataRead(
__000005_pushed_envelopeUpSql,
"000005_pushed_envelope.up.sql",
)
}

func _000005_pushed_envelopeUpSql() (*asset, error) {
bytes, err := _000005_pushed_envelopeUpSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "000005_pushed_envelope.up.sql", size: 574, mode: os.FileMode(0644), modTime: time.Unix(1717560336, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7d, 0xaf, 0x8a, 0xcb, 0x97, 0x1e, 0xc6, 0xf6, 0x86, 0xe4, 0x1b, 0x67, 0x10, 0x87, 0x8e, 0x80, 0x1d, 0x5a, 0x7d, 0x64, 0xd0, 0x89, 0x3f, 0x1e, 0x6f, 0x93, 0x87, 0x4a, 0xd7, 0x87, 0xb8, 0x5e}}
return a, nil
}

var __000006_status_versionUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x4a\x4d\x4e\xcd\x2c\x4b\x4d\xf1\x4d\x2d\x2e\x4e\x4c\x4f\x2d\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x28\x2e\x49\x2c\x29\x2d\x0e\x4b\x2d\x2a\xce\xcc\xcf\x53\x08\x73\x0c\x72\xf6\x70\x0c\xd2\x30\x36\xd4\xb4\xe6\xc2\x66\x84\x6b\x5e\x59\x6a\x4e\x7e\x01\x59\x66\x14\xa7\xe6\x95\x90\xa8\x1f\x10\x00\x00\xff\xff\xeb\x4e\x39\x66\xc6\x00\x00\x00")

func _000006_status_versionUpSqlBytes() ([]byte, error) {
return bindataRead(
__000006_status_versionUpSql,
"000006_status_version.up.sql",
)
}

func _000006_status_versionUpSql() (*asset, error) {
bytes, err := _000006_status_versionUpSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "000006_status_version.up.sql", size: 198, mode: os.FileMode(0644), modTime: time.Unix(1717560330, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2b, 0x11, 0xee, 0x9f, 0x4f, 0xf5, 0x0, 0x9a, 0x98, 0xe9, 0x44, 0x21, 0x2e, 0x57, 0xf7, 0xae, 0xf3, 0xb2, 0x3d, 0x94, 0x40, 0x69, 0xa7, 0x1d, 0x62, 0x57, 0x31, 0x9f, 0x60, 0x6, 0xed, 0x80}}
return a, nil
}

@@ -176,8 +209,8 @@ func docGo() (*asset, error) {
return nil, err
}

info := bindataFileInfo{name: "doc.go", size: 73, mode: os.FileMode(436), modTime: time.Unix(1715855770, 0)}
a := &asset{bytes: bytes, info: info}
info := bindataFileInfo{name: "doc.go", size: 73, mode: os.FileMode(0644), modTime: time.Unix(1716427081, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0x4f, 0xb8, 0x11, 0x84, 0x79, 0xbb, 0x6c, 0xf, 0xed, 0xc, 0xfc, 0x18, 0x32, 0x9d, 0xf1, 0x7, 0x2c, 0x20, 0xde, 0xe9, 0x97, 0x0, 0x62, 0x9f, 0x5e, 0x24, 0xfc, 0x8e, 0xc2, 0xd9, 0x2d}}
return a, nil
}

@@ -185,8 +218,8 @@ func docGo() (*asset, error) {
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
@@ -196,6 +229,12 @@ func Asset(name string) ([]byte, error) {
return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
@@ -207,12 +246,18 @@ func MustAsset(name string) []byte {
return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
@@ -222,6 +267,33 @@ func AssetInfo(name string) (os.FileInfo, error) {
return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
@@ -237,27 +309,34 @@ var _bindata = map[string]func() (*asset, error){
"000002_bandwidth_protocol.up.sql": _000002_bandwidth_protocolUpSql,
"000003_index_truncate.up.sql": _000003_index_truncateUpSql,
"000004_envelope.table.up.sql": _000004_envelopeTableUpSql,
"000005_pushed_envelope.up.sql": _000005_pushed_envelopeUpSql,
"000006_status_version.up.sql": _000006_status_versionUpSql,
"doc.go": docGo,
}

// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
@@ -281,14 +360,16 @@ type bintree struct {
}

var _bintree = &bintree{nil, map[string]*bintree{
"000001_message_type.up.sql": &bintree{_000001_message_typeUpSql, map[string]*bintree{}},
"000002_bandwidth_protocol.up.sql": &bintree{_000002_bandwidth_protocolUpSql, map[string]*bintree{}},
"000003_index_truncate.up.sql": &bintree{_000003_index_truncateUpSql, map[string]*bintree{}},
"000004_envelope.table.up.sql": &bintree{_000004_envelopeTableUpSql, map[string]*bintree{}},
"doc.go": &bintree{docGo, map[string]*bintree{}},
"000001_message_type.up.sql": {_000001_message_typeUpSql, map[string]*bintree{}},
"000002_bandwidth_protocol.up.sql": {_000002_bandwidth_protocolUpSql, map[string]*bintree{}},
"000003_index_truncate.up.sql": {_000003_index_truncateUpSql, map[string]*bintree{}},
"000004_envelope.table.up.sql": {_000004_envelopeTableUpSql, map[string]*bintree{}},
"000005_pushed_envelope.up.sql": {_000005_pushed_envelopeUpSql, map[string]*bintree{}},
"000006_status_version.up.sql": {_000006_status_versionUpSql, map[string]*bintree{}},
"doc.go": {docGo, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory
// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
@@ -302,18 +383,14 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}

// RestoreAssets restores an asset under the given directory recursively
// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
@@ -331,6 +408,6 @@ func RestoreAssets(dir, name string) error {
}

func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}

@@ -72,9 +72,7 @@ func createTables(db *sql.DB) error {
sentAt INTEGER NOT NULL,
topic VARCHAR(255) NOT NULL,
createdAt INTEGER NOT NULL,

constraint receivedMessages_unique unique(chatId, messageHash, receiverKeyUID, nodeName)

);`
_, err := db.Exec(sqlStmt)

@@ -16,13 +16,14 @@ type ReceivedEnvelope struct {
ReceiverKeyUID string `json:"receiverKeyUID"`
NodeName string `json:"nodeName"`
ProcessingError string `json:"processingError"`
StatusVersion string `json:"statusVersion"`
}

func (r *ReceivedEnvelope) put(db *sql.DB) error {
r.CreatedAt = time.Now().Unix()
stmt, err := db.Prepare(`INSERT INTO receivedEnvelopes (messageHash, sentAt, createdAt, pubsubTopic,
topic, receiverKeyUID, nodeName, processingError)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
topic, receiverKeyUID, nodeName, processingError, statusVersion)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT ON CONSTRAINT receivedEnvelopes_unique DO NOTHING
RETURNING id;`)
if err != nil {
@@ -30,7 +31,7 @@ func (r *ReceivedEnvelope) put(db *sql.DB) error {
}

lastInsertId := 0
err = stmt.QueryRow(r.MessageHash, r.SentAt, r.CreatedAt, r.PubsubTopic, r.Topic, r.ReceiverKeyUID, r.NodeName, r.ProcessingError).Scan(&lastInsertId)
err = stmt.QueryRow(r.MessageHash, r.SentAt, r.CreatedAt, r.PubsubTopic, r.Topic, r.ReceiverKeyUID, r.NodeName, r.ProcessingError, r.StatusVersion).Scan(&lastInsertId)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil
@@ -60,3 +61,45 @@ func (r *ReceivedEnvelope) updateProcessingError(db *sql.DB) error {

return nil
}

type SentEnvelope struct {
ID int `json:"id"`
MessageHash string `json:"messageHash"`
SentAt int64 `json:"sentAt"`
CreatedAt int64 `json:"createdAt"`
PubsubTopic string `json:"pubsubTopic"`
Topic string `json:"topic"`
SenderKeyUID string `json:"senderKeyUID"`
NodeName string `json:"nodeName"`
ProcessingError string `json:"processingError"`
PublishMethod string `json:"publishMethod"`
StatusVersion string `json:"statusVersion"`
}

func (r *SentEnvelope) put(db *sql.DB) error {
r.CreatedAt = time.Now().Unix()
stmt, err := db.Prepare(`INSERT INTO sentEnvelopes (messageHash, sentAt, createdAt, pubsubTopic,
topic, senderKeyUID, nodeName, publishMethod, statusVersion)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
ON CONFLICT ON CONSTRAINT sentEnvelopes_unique DO NOTHING
RETURNING id;`)
if err != nil {
return err
}

lastInsertId := int64(0)
res, err := stmt.Exec(r.MessageHash, r.SentAt, r.CreatedAt, r.PubsubTopic, r.Topic, r.SenderKeyUID, r.NodeName, r.PublishMethod, r.StatusVersion)
if err != nil {
return err
}

lastInsertId, err = res.LastInsertId()
if err != nil {
return err
}

defer stmt.Close()
r.ID = int(lastInsertId)

return nil
}

@@ -27,6 +27,7 @@ type ReceivedMessage struct {
Topic string `json:"topic"`
PubsubTopic string `json:"pubsubTopic"`
CreatedAt int64 `json:"createdAt"`
StatusVersion string `json:"statusVersion"`
}

func queryReceivedMessagesBetween(db *sql.DB, startsAt time.Time, endsAt time.Time) ([]*ReceivedMessage, error) {
@@ -88,14 +89,14 @@ func didReceivedMessageBeforeAndAfterInChat(db *sql.DB, receiverPublicKey string
}

func (r *ReceivedMessage) put(db *sql.DB) error {
stmt, err := db.Prepare("INSERT INTO receivedMessages (chatId, messageHash, messageId, receiverKeyUID, nodeName, sentAt, topic, messageType, messageSize, createdAt, pubSubTopic) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING id;")
stmt, err := db.Prepare("INSERT INTO receivedMessages (chatId, messageHash, messageId, receiverKeyUID, nodeName, sentAt, topic, messageType, messageSize, createdAt, pubSubTopic, statusVersion) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id;")
if err != nil {
return err
}

r.CreatedAt = time.Now().Unix()
lastInsertId := 0
err = stmt.QueryRow(r.ChatID, r.MessageHash, r.MessageID, r.ReceiverKeyUID, r.NodeName, r.SentAt, r.Topic, r.MessageType, r.MessageSize, r.CreatedAt, r.PubsubTopic).Scan(&lastInsertId)
err = stmt.QueryRow(r.ChatID, r.MessageHash, r.MessageID, r.ReceiverKeyUID, r.NodeName, r.SentAt, r.Topic, r.MessageType, r.MessageSize, r.CreatedAt, r.PubsubTopic, r.StatusVersion).Scan(&lastInsertId)
if err != nil {
return err
}

@@ -42,8 +42,10 @@ func NewServer(db *sql.DB, logger *zap.Logger) *Server {
server.Router.HandleFunc("/protocol-stats", server.createProtocolStats).Methods("POST")
server.Router.HandleFunc("/received-messages", server.createReceivedMessages).Methods("POST")
server.Router.HandleFunc("/received-envelope", server.createReceivedEnvelope).Methods("POST")
server.Router.HandleFunc("/sent-envelope", server.createSentEnvelope).Methods("POST")
server.Router.HandleFunc("/update-envelope", server.updateEnvelope).Methods("POST")
server.Router.HandleFunc("/health", handleHealthCheck).Methods("GET")
server.Router.HandleFunc("/record-metrics", server.createTelemetryData).Methods("POST")
server.Router.Use(server.rateLimit)

return server
@@ -54,6 +56,88 @@ func handleHealthCheck(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "OK")
}

type TelemetryType string

const (
ProtocolStatsMetric TelemetryType = "ProtocolStats"
ReceivedEnvelopeMetric TelemetryType = "ReceivedEnvelope"
SentEnvelopeMetric TelemetryType = "SentEnvelope"
UpdateEnvelopeMetric TelemetryType = "UpdateEnvelope"
ReceivedMessagesMetric TelemetryType = "ReceivedMessages"
)

type TelemetryRequest struct {
Id int `json:"id"`
TelemetryType TelemetryType `json:"telemetry_type"`
TelemetryData *json.RawMessage `json:"telemetry_data"`
}

func (s *Server) createTelemetryData(w http.ResponseWriter, r *http.Request) {
start := time.Now()
var telemetryData []TelemetryRequest
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&telemetryData); err != nil {
log.Println(err)
http.Error(w, "Failed to decode telemetry data", http.StatusBadRequest)
return
}

var errorDetails []map[string]interface{}

for _, data := range telemetryData {
switch data.TelemetryType {
case ProtocolStatsMetric:
var stats ProtocolStats
if err := json.Unmarshal(*data.TelemetryData, &stats); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error decoding protocol stats: %v", err)})
continue
}
if err := stats.put(s.DB); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error saving protocol stats: %v", err)})
continue
}
case ReceivedEnvelopeMetric:
var envelope ReceivedEnvelope
if err := json.Unmarshal(*data.TelemetryData, &envelope); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error decoding received envelope: %v", err)})
continue
}
if err := envelope.put(s.DB); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error saving received envelope: %v", err)})
continue
}
case SentEnvelopeMetric:
var envelope SentEnvelope
if err := json.Unmarshal(*data.TelemetryData, &envelope); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error decoding sent envelope: %v", err)})
continue
}
if err := envelope.put(s.DB); err != nil {
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Error saving sent envelope: %v", err)})
continue
}
default:
errorDetails = append(errorDetails, map[string]interface{}{"Id": data.Id, "Error": fmt.Sprintf("Unknown telemetry type: %s", data.TelemetryType)})
}
}

if len(errorDetails) > 0 {
log.Printf("Errors encountered: %v", errorDetails)
}

err := respondWithJSON(w, http.StatusCreated, errorDetails)
if err != nil {
log.Println(err)
}

log.Printf(
"%s\t%s\t%s",
r.Method,
r.RequestURI,
time.Since(start),
)
}

func (s *Server) createReceivedMessages(w http.ResponseWriter, r *http.Request) {
start := time.Now()
var receivedMessages []ReceivedMessage
@@ -179,6 +263,39 @@ func (s *Server) updateEnvelope(w http.ResponseWriter, r *http.Request) {
)
}

func (s *Server) createSentEnvelope(w http.ResponseWriter, r *http.Request) {
start := time.Now()
var sentEnvelope SentEnvelope
decoder := json.NewDecoder(r.Body)
if err := decoder.Decode(&sentEnvelope); err != nil {
log.Println(err)

err := respondWithError(w, http.StatusBadRequest, "Invalid request payload")
if err != nil {
log.Println(err)
}
return
}
defer r.Body.Close()

err := sentEnvelope.put(s.DB)
if err != nil {
log.Println("could not save envelope", err, sentEnvelope)
}

err = respondWithJSON(w, http.StatusCreated, sentEnvelope)
if err != nil {
log.Println(err)
}

log.Printf(
"%s\t%s\t%s",
r.Method,
r.RequestURI,
time.Since(start),
)
}

func (s *Server) createProtocolStats(w http.ResponseWriter, r *http.Request) {
start := time.Now()
var protocolStats ProtocolStats

telemetry/sql/000005_pushed_envelope.up.sql (new file, 18 lines)
@@ -0,0 +1,18 @@
DO $$ BEGIN
CREATE TYPE publish_method AS ENUM ('LightPush', 'Relay');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;

CREATE TABLE IF NOT EXISTS sentEnvelopes (
id SERIAL PRIMARY KEY,
messageHash VARCHAR(255) NOT NULL,
sentAt INTEGER NOT NULL,
createdAt INTEGER NOT NULL,
topic VARCHAR(255) NOT NULL,
pubSubTopic VARCHAR(255) NOT NULL,
senderKeyUID VARCHAR(255) NOT NULL,
nodeName VARCHAR(255) NOT NULL,
publishMethod publish_method,
CONSTRAINT sentEnvelopes_unique unique(sentAt, messageHash, senderKeyUID, nodeName)
);

telemetry/sql/000006_status_version.up.sql (new file, 3 lines)
@@ -0,0 +1,3 @@
ALTER TABLE receivedMessages ADD COLUMN statusVersion VARCHAR(31);
ALTER TABLE receivedEnvelopes ADD COLUMN statusVersion VARCHAR(31);
ALTER TABLE sentEnvelopes ADD COLUMN statusVersion VARCHAR(31);
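
The regenerated bindata.go above exposes Asset, AssetNames, and now AssetDigest for these embedded SQL files. The diff does not show how the migrations are applied; assuming the common golang-migrate go-bindata wiring (an assumption about this project, not something shown in the commit), a runner could look roughly like the sketch below. The helper name migrateDB and the database URL are hypothetical.

package telemetry

import (
	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
)

// migrateDB is a hypothetical helper (not part of this commit) showing how the
// embedded migrations could be applied with golang-migrate's go-bindata source.
func migrateDB(databaseURL string) error {
	// Asset and AssetNames are the generated functions from bindata.go above.
	res := bindata.Resource(AssetNames(), func(name string) ([]byte, error) {
		return Asset(name)
	})

	src, err := bindata.WithInstance(res)
	if err != nil {
		return err
	}

	m, err := migrate.NewWithSourceInstance("go-bindata", src, databaseURL)
	if err != nil {
		return err
	}

	// Applies 000001 through 000006 in order; ErrNoChange means the schema is already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}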