mirror of https://github.com/status-im/op-geth.git

core, metrics, p2p: switch some invalid counters to gauges

commit 72d5a27a39 (parent 4f6bf2f1c5)

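The diff replaces metrics.Counter instances that were really tracking instantaneous levels — the ancient store size, the tx-pool pending/queued/local counts, and the active peer count — with metrics.Gauge, and extends the Gauge interface with Inc and Dec so the call sites keep their increment/decrement style. A minimal sketch of the gauge semantics the call sites move to (the metric name and the metrics.Enabled toggle are my assumptions; NewRegisteredGauge, Inc, Dec and Value are taken from the hunks below):

    package main

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/metrics" // assumed import path of the patched package
    )

    func main() {
    	metrics.Enabled = true // assumption: metrics collection is off unless explicitly enabled

    	// Same constructor the diff switches to; the metric name here is made up.
    	active := metrics.NewRegisteredGauge("example/peers/active", nil)
    	active.Inc(3)               // three peers connect
    	active.Dec(1)               // one disconnects
    	fmt.Println(active.Value()) // 2: a gauge reports the current level, not a running total
    }

Counters are conventionally expected to be monotonically increasing, so values that move both up and down are better modeled as gauges — which is presumably what the commit title means by "invalid counters".
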
@@ -82,7 +82,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 	var (
 		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
 		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
-		sizeCounter = metrics.NewRegisteredCounter(namespace+"ancient/size", nil)
+		sizeGauge  = metrics.NewRegisteredGauge(namespace+"ancient/size", nil)
 	)
 	// Ensure the datadir is not a symbolic link if it exists.
 	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
@@ -103,7 +103,7 @@ func newFreezer(datadir string, namespace string) (*freezer, error) {
 		instanceLock: lock,
 	}
 	for name, disableSnappy := range freezerNoSnappy {
-		table, err := newTable(datadir, name, readMeter, writeMeter, sizeCounter, disableSnappy)
+		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
 		if err != nil {
 			for _, table := range freezer.tables {
 				table.Close()

@@ -97,15 +97,15 @@ type freezerTable struct {
 	headBytes  uint32        // Number of bytes written to the head file
 	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
 	writeMeter metrics.Meter // Meter for measuring the effective amount of data written
-	sizeCounter metrics.Counter // Counter for tracking the combined size of all freezer tables
+	sizeGauge  metrics.Gauge // Gauge for tracking the combined size of all freezer tables

 	logger log.Logger   // Logger with database path and table name ambedded
 	lock   sync.RWMutex // Mutex protecting the data file descriptors
 }

 // newTable opens a freezer table with default settings - 2G files
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, disableSnappy bool) (*freezerTable, error) {
-	return newCustomTable(path, name, readMeter, writeMeter, sizeCounter, 2*1000*1000*1000, disableSnappy)
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
+	return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
 }

 // openFreezerFileForAppend opens a freezer table file and seeks to the end
@@ -149,7 +149,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
 // newCustomTable opens a freezer table, creating the data and index files if they are
 // non existent. Both files are truncated to the shortest common length to ensure
 // they don't go out of sync.
-func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeCounter metrics.Counter, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
 	// Ensure the containing directory exists and open the indexEntry file
 	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
@@ -172,7 +172,7 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
 		files:       make(map[uint32]*os.File),
 		readMeter:   readMeter,
 		writeMeter:  writeMeter,
-		sizeCounter: sizeCounter,
+		sizeGauge:   sizeGauge,
 		name:        name,
 		path:        path,
 		logger:      log.New("database", path, "table", name),
@@ -189,7 +189,7 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
 		tab.Close()
 		return nil, err
 	}
-	tab.sizeCounter.Inc(int64(size))
+	tab.sizeGauge.Inc(int64(size))

 	return tab, nil
 }
@@ -378,7 +378,7 @@ func (t *freezerTable) truncate(items uint64) error {
 	if err != nil {
 		return err
 	}
-	t.sizeCounter.Dec(int64(oldSize - newSize))
+	t.sizeGauge.Dec(int64(oldSize - newSize))

 	return nil
 }
@@ -510,7 +510,7 @@ func (t *freezerTable) Append(item uint64, blob []byte) error {
 	t.index.Write(idx.marshallBinary())

 	t.writeMeter.Mark(int64(bLen + indexEntrySize))
-	t.sizeCounter.Inc(int64(bLen + indexEntrySize))
+	t.sizeGauge.Inc(int64(bLen + indexEntrySize))

 	atomic.AddUint64(&t.items, 1)
 	return nil

@@ -56,7 +56,7 @@ func TestFreezerBasics(t *testing.T) {
 	// set cutoff at 50 bytes
 	f, err := newCustomTable(os.TempDir(),
 		fmt.Sprintf("unittest-%d", rand.Uint64()),
-		metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter(), 50, true)
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -99,11 +99,11 @@ func TestFreezerBasicsClosing(t *testing.T) {
 	// set cutoff at 50 bytes
 	var (
 		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
-		rm, wm, sc = metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 		f          *freezerTable
 		err        error
 	)
-	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -112,7 +112,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 		data := getChunk(15, x)
 		f.Append(uint64(x), data)
 		f.Close()
-		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -129,7 +129,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
 		}
 		f.Close()
-		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -139,11 +139,11 @@ func TestFreezerBasicsClosing(t *testing.T) {
 // TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
 func TestFreezerRepairDanglingHead(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -172,7 +172,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 	idxFile.Close()
 	// Now open it again
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -190,11 +190,11 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 // TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
 func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

 	{ // Fill a table and close it
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -222,7 +222,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	idxFile.Close()
 	// Now open it again
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -243,7 +243,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	}
 	// And if we open it, we should now be able to read all of them (new values)
 	{
-		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		for y := 1; y < 255; y++ {
 			exp := getChunk(15, ^y)
 			got, err := f.Retrieve(uint64(y))
@@ -260,11 +260,11 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 // TestSnappyDetection tests that we fail to open a snappy database and vice versa
 func TestSnappyDetection(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
 	// Open with snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -277,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
 	}
 	// Open without snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, false)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -289,7 +289,7 @@ func TestSnappyDetection(t *testing.T) {

 	// Open with snappy
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -317,11 +317,11 @@ func assertFileSize(f string, size int64) error {
 // the index is repaired
 func TestFreezerRepairDanglingIndex(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

 	{ // Fill a table and close it
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -357,7 +357,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 	// 45, 45, 15
 	// with 3+3+1 items
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -374,11 +374,11 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 func TestFreezerTruncate(t *testing.T) {

 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -395,7 +395,7 @@ func TestFreezerTruncate(t *testing.T) {
 	}
 	// Reopen, truncate
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -417,10 +417,10 @@ func TestFreezerTruncate(t *testing.T) {
 // That will rewind the index, and _should_ truncate the head file
 func TestFreezerRepairFirstFile(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -448,7 +448,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 	}
 	// Reopen
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -473,10 +473,10 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 // - check that we did not keep the rdonly file descriptors
 func TestFreezerReadAndTruncate(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -493,7 +493,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 	}
 	// Reopen and read all files
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 50, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -519,10 +519,10 @@ func TestFreezerReadAndTruncate(t *testing.T) {

 func TestOffset(t *testing.T) {
 	t.Parallel()
-	rm, wm, sc := metrics.NewMeter(), metrics.NewMeter(), metrics.NewCounter()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("offset-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 40, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -578,7 +578,7 @@ func TestOffset(t *testing.T) {
 	}
 	// Now open again
 	{
-		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sc, 40, true)
+		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
 		if err != nil {
 			t.Fatal(err)
 		}

@@ -101,9 +101,9 @@ var (
 	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
 	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)

-	pendingCounter = metrics.NewRegisteredCounter("txpool/pending", nil)
-	queuedCounter  = metrics.NewRegisteredCounter("txpool/queued", nil)
-	localCounter   = metrics.NewRegisteredCounter("txpool/local", nil)
+	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
+	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
+	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
 )

 // TxStatus is the current status of a transaction as seen by the pool.
@@ -628,7 +628,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
 		}
 	}
 	if local || pool.locals.contains(from) {
-		localCounter.Inc(1)
+		localGauge.Inc(1)
 	}
 	pool.journalTx(from, tx)

@@ -658,7 +658,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er
 		queuedReplaceMeter.Mark(1)
 	} else {
 		// Nothing was replaced, bump the queued counter
-		queuedCounter.Inc(1)
+		queuedGauge.Inc(1)
 	}
 	if pool.all.Get(hash) == nil {
 		pool.all.Add(tx)
@@ -707,7 +707,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
 		pendingReplaceMeter.Mark(1)
 	} else {
 		// Nothing was replaced, bump the pending counter
-		pendingCounter.Inc(1)
+		pendingGauge.Inc(1)
 	}
 	// Failsafe to work around direct pending inserts (tests)
 	if pool.all.Get(hash) == nil {
@@ -841,7 +841,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 		pool.priced.Removed(1)
 	}
 	if pool.locals.contains(addr) {
-		localCounter.Dec(1)
+		localGauge.Dec(1)
 	}
 	// Remove the transaction from the pending lists and reset the account nonce
 	if pending := pool.pending[addr]; pending != nil {
@@ -858,7 +858,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 			// Update the account nonce if needed
 			pool.pendingNonces.setIfLower(addr, tx.Nonce())
 			// Reduce the pending counter
-			pendingCounter.Dec(int64(1 + len(invalids)))
+			pendingGauge.Dec(int64(1 + len(invalids)))
 			return
 		}
 	}
@@ -866,7 +866,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
 	if future := pool.queue[addr]; future != nil {
 		if removed, _ := future.Remove(tx); removed {
 			// Reduce the queued counter
-			queuedCounter.Dec(1)
+			queuedGauge.Dec(1)
 		}
 		if future.Empty() {
 			delete(pool.queue, addr)
@@ -1164,7 +1164,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
 				promoted = append(promoted, tx)
 			}
 		}
-		queuedCounter.Dec(int64(len(readies)))
+		queuedGauge.Dec(int64(len(readies)))

 		// Drop all transactions over the allowed limit
 		var caps types.Transactions
@@ -1179,9 +1179,9 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Trans
 		}
 		// Mark all the items dropped as removed
 		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
-		queuedCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
 		if pool.locals.contains(addr) {
-			localCounter.Dec(int64(len(forwards) + len(drops) + len(caps)))
+			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
 		}
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {
@@ -1240,9 +1240,9 @@ func (pool *TxPool) truncatePending() {
 					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 				}
 				pool.priced.Removed(len(caps))
-				pendingCounter.Dec(int64(len(caps)))
+				pendingGauge.Dec(int64(len(caps)))
 				if pool.locals.contains(offenders[i]) {
-					localCounter.Dec(int64(len(caps)))
+					localGauge.Dec(int64(len(caps)))
 				}
 				pending--
 			}
@@ -1267,9 +1267,9 @@ func (pool *TxPool) truncatePending() {
 				log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
 			}
 			pool.priced.Removed(len(caps))
-			pendingCounter.Dec(int64(len(caps)))
+			pendingGauge.Dec(int64(len(caps)))
 			if pool.locals.contains(addr) {
-				localCounter.Dec(int64(len(caps)))
+				localGauge.Dec(int64(len(caps)))
 			}
 			pending--
 		}
@@ -1353,9 +1353,9 @@ func (pool *TxPool) demoteUnexecutables() {
 			log.Trace("Demoting pending transaction", "hash", hash)
 			pool.enqueueTx(hash, tx)
 		}
-		pendingCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
 		if pool.locals.contains(addr) {
-			localCounter.Dec(int64(len(olds) + len(drops) + len(invalids)))
+			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
 		}
 		// If there's a gap in front, alert (should never happen) and postpone all transactions
 		if list.Len() > 0 && list.txs.Get(nonce) == nil {
@@ -1365,7 +1365,7 @@ func (pool *TxPool) demoteUnexecutables() {
 				log.Error("Demoting invalidated transaction", "hash", hash)
 				pool.enqueueTx(hash, tx)
 			}
-			pendingCounter.Dec(int64(len(gapped)))
+			pendingGauge.Dec(int64(len(gapped)))
 		}
 		// Delete the entire queue entry if it became empty.
 		if list.Empty() {

@@ -6,6 +6,8 @@ import "sync/atomic"
 type Gauge interface {
 	Snapshot() Gauge
 	Update(int64)
+	Dec(int64)
+	Inc(int64)
 	Value() int64
 }

@@ -65,6 +67,16 @@ func (GaugeSnapshot) Update(int64) {
 	panic("Update called on a GaugeSnapshot")
 }

+// Dec panics.
+func (GaugeSnapshot) Dec(int64) {
+	panic("Dec called on a GaugeSnapshot")
+}
+
+// Inc panics.
+func (GaugeSnapshot) Inc(int64) {
+	panic("Inc called on a GaugeSnapshot")
+}
+
 // Value returns the value at the time the snapshot was taken.
 func (g GaugeSnapshot) Value() int64 { return int64(g) }

@@ -77,6 +89,12 @@ func (NilGauge) Snapshot() Gauge { return NilGauge{} }
 // Update is a no-op.
 func (NilGauge) Update(v int64) {}

+// Dec is a no-op.
+func (NilGauge) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilGauge) Inc(i int64) {}
+
 // Value is a no-op.
 func (NilGauge) Value() int64 { return 0 }

@@ -101,6 +119,16 @@ func (g *StandardGauge) Value() int64 {
 	return atomic.LoadInt64(&g.value)
 }

+// Dec decrements the gauge's current value by the given amount.
+func (g *StandardGauge) Dec(i int64) {
+	atomic.AddInt64(&g.value, -i)
+}
+
+// Inc increments the gauge's current value by the given amount.
+func (g *StandardGauge) Inc(i int64) {
+	atomic.AddInt64(&g.value, i)
+}
+
 // FunctionalGauge returns value from given function
 type FunctionalGauge struct {
 	value func() int64
@@ -118,3 +146,13 @@ func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
 func (FunctionalGauge) Update(int64) {
 	panic("Update called on a FunctionalGauge")
 }
+
+// Dec panics.
+func (FunctionalGauge) Dec(int64) {
+	panic("Dec called on a FunctionalGauge")
+}
+
+// Inc panics.
+func (FunctionalGauge) Inc(int64) {
+	panic("Inc called on a FunctionalGauge")
+}

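Since Dec and Inc are now part of the Gauge interface, every implementation in the package (GaugeSnapshot, NilGauge, StandardGauge, FunctionalGauge) gains the two methods above, and any third-party implementation has to follow. A minimal sketch of a conforming implementation, written against a local copy of the interface so it stands alone (atomicGauge and the peer scenario are illustrative, not part of the commit):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // Gauge mirrors the interface shape from the metrics/gauge.go hunk above.
    type Gauge interface {
    	Snapshot() Gauge
    	Update(int64)
    	Dec(int64)
    	Inc(int64)
    	Value() int64
    }

    // atomicGauge is an illustrative implementation in the spirit of StandardGauge.
    type atomicGauge struct{ v int64 }

    func (g *atomicGauge) Snapshot() Gauge { return &atomicGauge{v: g.Value()} }
    func (g *atomicGauge) Update(n int64)  { atomic.StoreInt64(&g.v, n) }
    func (g *atomicGauge) Dec(n int64)     { atomic.AddInt64(&g.v, -n) }
    func (g *atomicGauge) Inc(n int64)     { atomic.AddInt64(&g.v, n) }
    func (g *atomicGauge) Value() int64    { return atomic.LoadInt64(&g.v) }

    func main() {
    	var peers Gauge = new(atomicGauge)
    	peers.Inc(3)               // three peers connect
    	peers.Dec(1)               // one disconnects
    	fmt.Println(peers.Value()) // 2: the current level, not a running total
    }

Note that the snapshot and functional variants in the real package simply panic on Inc and Dec, mirroring how they already treat Update.
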
@@ -45,7 +45,7 @@ var (
 	ingressTrafficMeter = metrics.NewRegisteredMeter(MetricsInboundTraffic, nil)   // Meter metering the cumulative ingress traffic
 	egressConnectMeter  = metrics.NewRegisteredMeter(MetricsOutboundConnects, nil) // Meter counting the egress connections
 	egressTrafficMeter  = metrics.NewRegisteredMeter(MetricsOutboundTraffic, nil)  // Meter metering the cumulative egress traffic
-	activePeerCounter   = metrics.NewRegisteredCounter("p2p/peers", nil)           // Gauge tracking the current peer count
+	activePeerGauge     = metrics.NewRegisteredGauge("p2p/peers", nil)             // Gauge tracking the current peer count

 	PeerIngressRegistry = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsInboundTraffic+"/")  // Registry containing the peer ingress
 	PeerEgressRegistry  = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsOutboundTraffic+"/") // Registry containing the peer egress
@@ -124,7 +124,7 @@ func newMeteredConn(conn net.Conn, ingress bool, ip net.IP) net.Conn {
 	} else {
 		egressConnectMeter.Mark(1)
 	}
-	activePeerCounter.Inc(1)
+	activePeerGauge.Inc(1)

 	return &meteredConn{
 		Conn: conn,
@@ -200,7 +200,7 @@ func (c *meteredConn) Close() error {
 			IP:      c.ip,
 			Elapsed: time.Since(c.connected),
 		})
-		activePeerCounter.Dec(1)
+		activePeerGauge.Dec(1)
 		return err
 	}
 	id := c.id
@@ -212,7 +212,7 @@ func (c *meteredConn) Close() error {
 			IP: c.ip,
 			ID: id,
 		})
-		activePeerCounter.Dec(1)
+		activePeerGauge.Dec(1)
 		return err
 	}
 	ingress, egress := uint64(c.ingressMeter.Count()), uint64(c.egressMeter.Count())
@@ -233,6 +233,6 @@ func (c *meteredConn) Close() error {
 		Ingress: ingress,
 		Egress:  egress,
 	})
-	activePeerCounter.Dec(1)
+	activePeerGauge.Dec(1)
 	return err
 }