Repository: https://github.com/status-im/status-go.git

feat(logging)_: enable log namespaces configuration (#6161)

closes: #6128

Parent: f3eed58c78
Commit: 50933aa328
@@ -0,0 +1 @@
+ALTER TABLE log_config ADD COLUMN log_namespaces VARCHAR DEFAULT '';
@@ -34,7 +34,7 @@ func defaultLogger() *zap.Logger {
         zapcore.AddSync(io.Discard),
         zap.NewAtomicLevelAt(zap.InfoLevel),
     )
-    return zap.New(core, zap.AddCaller())
+    return zap.New(newNamespaceFilteringCore(core), zap.AddCaller())
 }
 
 func defaultEncoder() zapcore.Encoder {
@@ -12,7 +12,7 @@ func TestPrintOrigins(t *testing.T) {
     buffer := bytes.NewBuffer(nil)
 
     logger := defaultLogger()
-    logger.Core().(*Core).UpdateSyncer(zapcore.AddSync(buffer))
+    logger.Core().(*namespaceFilteringCore).Parent().(*Core).UpdateSyncer(zapcore.AddSync(buffer))
 
     logger.Info("hello")
 
@@ -12,6 +12,13 @@ import (
 var namespacesRegex = regexp.MustCompile(`^([a-zA-Z0-9_\.]+:(debug|info|warn|error))(,[a-zA-Z0-9_\.]+:(debug|info|warn|error))*$`)
 var errInvalidNamespacesFormat = fmt.Errorf("invalid namespaces format")
 
+func ValidateNamespaces(namespaces string) error {
+    if namespaces != "" && !namespacesRegex.MatchString(namespaces) {
+        return errInvalidNamespacesFormat
+    }
+    return nil
+}
+
 type namespacesTree struct {
     namespaces map[string]*namespaceNode
     minLevel   zapcore.Level
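For illustration only: the namespaces string accepted by ValidateNamespaces maps dotted logger names to per-namespace levels; loggers that are not listed keep the root level, and nested names inherit from their closest configured ancestor (this is the behaviour the functional test below asserts). A minimal Go sketch of that resolution follows; parseNamespaces and effectiveLevel are made-up helpers, not the namespacesTree implementation added in this change.

package main

import (
	"fmt"
	"strings"

	"go.uber.org/zap/zapcore"
)

// parseNamespaces splits "ns1:level,ns2.ns3:level" into a namespace -> level map.
func parseNamespaces(s string) map[string]zapcore.Level {
	levels := map[string]zapcore.Level{}
	if s == "" {
		return levels
	}
	for _, pair := range strings.Split(s, ",") {
		parts := strings.SplitN(pair, ":", 2)
		var lvl zapcore.Level
		if len(parts) == 2 && lvl.Set(parts[1]) == nil {
			levels[parts[0]] = lvl
		}
	}
	return levels
}

// effectiveLevel walks up the dotted name until a configured namespace is found,
// falling back to the root level for unmentioned namespaces.
func effectiveLevel(name string, levels map[string]zapcore.Level, root zapcore.Level) zapcore.Level {
	for ns := name; ns != ""; {
		if lvl, ok := levels[ns]; ok {
			return lvl
		}
		i := strings.LastIndex(ns, ".")
		if i < 0 {
			break
		}
		ns = ns[:i]
	}
	return root
}

func main() {
	levels := parseNamespaces("test1.test2:debug,test1.test2.test3:info")
	fmt.Println(effectiveLevel("test1.test2", levels, zapcore.ErrorLevel))       // debug
	fmt.Println(effectiveLevel("test1.test2.test3", levels, zapcore.ErrorLevel)) // info
	fmt.Println(effectiveLevel("test1", levels, zapcore.ErrorLevel))             // error
}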
@@ -12,6 +12,7 @@ import (
 type LogSettings struct {
     Enabled    bool   `json:"Enabled"`
     Level      string `json:"Level"`
+    Namespaces string `json:"Namespaces"`
     File       string `json:"File"`
     MaxSize    int    `json:"MaxSize"`
     MaxBackups int    `json:"MaxBackups"`
@@ -20,10 +21,11 @@ type LogSettings struct {
 }
 
 func OverrideRootLoggerWithConfig(settings LogSettings) error {
-    return overrideCoreWithConfig(ZapLogger().Core().(*Core), settings)
+    return overrideCoreWithConfig(ZapLogger().Core().(*namespaceFilteringCore), settings)
 }
 
-func overrideCoreWithConfig(core *Core, settings LogSettings) error {
+func overrideCoreWithConfig(filteringCore *namespaceFilteringCore, settings LogSettings) error {
+    core := filteringCore.Parent().(*Core)
     if !settings.Enabled {
         core.UpdateSyncer(zapcore.AddSync(io.Discard))
         return nil
@@ -38,6 +40,11 @@ func overrideCoreWithConfig(core *Core, settings LogSettings) error {
     }
     core.SetLevel(level)
 
+    err = filteringCore.Rebuild(settings.Namespaces)
+    if err != nil {
+        return err
+    }
+
     if settings.File != "" {
         if settings.MaxBackups == 0 {
             // Setting MaxBackups to 0 causes all log files to be kept. Even setting MaxAge to > 0 doesn't fix it
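Putting the pieces above together, namespace overrides flow in through LogSettings and OverrideRootLoggerWithConfig. A hedged usage sketch, assuming the field names shown above and using example level/namespace values only:

package main

import "github.com/status-im/status-go/logutils"

func main() {
	// Root level stays ERROR; the listed namespaces get their own minimum levels.
	settings := logutils.LogSettings{
		Enabled:    true,
		Level:      "ERROR",
		Namespaces: "test1.test2:debug,test1.test2.test3:info",
	}
	if err := logutils.OverrideRootLoggerWithConfig(settings); err != nil {
		panic(err) // e.g. an invalid namespaces format or an unknown level
	}
}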
@@ -65,7 +65,7 @@ func TestOverrideCoreWithConfig(t *testing.T) {
         zapcore.AddSync(io.Discard),
         zap.NewAtomicLevelAt(zap.InfoLevel),
     )
-    err := overrideCoreWithConfig(core, tt.settings)
+    err := overrideCoreWithConfig(newNamespaceFilteringCore(core), tt.settings)
     if tt.expectError {
         require.Error(t, err)
     } else {
@@ -89,10 +89,10 @@ func insertHTTPConfig(tx *sql.Tx, c *params.NodeConfig) error {
 func insertLogConfig(tx *sql.Tx, c *params.NodeConfig) error {
     _, err := tx.Exec(`
     INSERT OR REPLACE INTO log_config (
-        enabled, log_dir, log_level, max_backups, max_size,
+        enabled, log_dir, log_level, log_namespaces, max_backups, max_size,
         file, compress_rotated, log_to_stderr, synthetic_id
-    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, 'id' )`,
+    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, 'id')`,
-        c.LogEnabled, c.LogDir, c.LogLevel, c.LogMaxBackups, c.LogMaxSize,
+        c.LogEnabled, c.LogDir, c.LogLevel, c.LogNamespaces, c.LogMaxBackups, c.LogMaxSize,
         c.LogFile, c.LogCompressRotated, c.LogToStderr,
     )
 
@@ -488,8 +488,8 @@ func loadNodeConfig(tx *sql.Tx) (*params.NodeConfig, error) {
         return nil, err
     }
 
-    err = tx.QueryRow("SELECT enabled, log_dir, log_level, file, max_backups, max_size, compress_rotated, log_to_stderr FROM log_config WHERE synthetic_id = 'id'").Scan(
-        &nodecfg.LogEnabled, &nodecfg.LogDir, &nodecfg.LogLevel, &nodecfg.LogFile, &nodecfg.LogMaxBackups, &nodecfg.LogMaxSize, &nodecfg.LogCompressRotated, &nodecfg.LogToStderr)
+    err = tx.QueryRow("SELECT enabled, log_dir, log_level, log_namespaces, file, max_backups, max_size, compress_rotated, log_to_stderr FROM log_config WHERE synthetic_id = 'id'").Scan(
+        &nodecfg.LogEnabled, &nodecfg.LogDir, &nodecfg.LogLevel, &nodecfg.LogNamespaces, &nodecfg.LogFile, &nodecfg.LogMaxBackups, &nodecfg.LogMaxSize, &nodecfg.LogCompressRotated, &nodecfg.LogToStderr)
     if err != nil && err != sql.ErrNoRows {
         return nil, err
     }
@@ -791,6 +791,11 @@ func SetLogLevel(db *sql.DB, logLevel string) error {
     return err
 }
 
+func SetLogNamespaces(db *sql.DB, logNamespaces string) error {
+    _, err := db.Exec(`UPDATE log_config SET log_namespaces = ?`, logNamespaces)
+    return err
+}
+
 func SetMaxLogBackups(db *sql.DB, maxLogBackups uint) error {
     _, err := db.Exec(`UPDATE log_config SET max_backups = ?`, maxLogBackups)
     return err
@@ -426,6 +426,10 @@ type NodeConfig struct {
     // LogLevel defines minimum log level. Valid names are "ERROR", "WARN", "INFO", "DEBUG", and "TRACE".
     LogLevel string `validate:"eq=ERROR|eq=WARN|eq=INFO|eq=DEBUG|eq=TRACE"`
 
+    // LogNamespaces defines log level per namespace. Example: "namespace1:debug,namespace2.namespace3:error"
+    // It doesn't affect LogLevel for unmentioned namespaces.
+    LogNamespaces string
+
     // LogMaxBackups defines number of rotated log files that will be stored.
     LogMaxBackups int
 
@@ -1196,6 +1200,7 @@ func (c *NodeConfig) LogSettings() logutils.LogSettings {
     return logutils.LogSettings{
         Enabled:    c.LogEnabled,
         Level:      c.LogLevel,
+        Namespaces: c.LogNamespaces,
         File:       c.LogFile,
         MaxSize:    c.LogMaxSize,
         MaxBackups: c.LogMaxBackups,
@@ -38,6 +38,14 @@ func (m *Messenger) SetLogLevel(request *requests.SetLogLevel) error {
     return nodecfg.SetLogLevel(m.database, request.LogLevel)
 }
 
+func (m *Messenger) SetLogNamespaces(request *requests.SetLogNamespaces) error {
+    if err := request.Validate(); err != nil {
+        return err
+    }
+
+    return nodecfg.SetLogNamespaces(m.database, request.LogNamespaces)
+}
+
 func (m *Messenger) SetMaxLogBackups(request *requests.SetMaxLogBackups) error {
     return nodecfg.SetMaxLogBackups(m.database, request.MaxLogBackups)
 }
protocol/requests/set_log_namespaces.go (new file, 11 lines)
@@ -0,0 +1,11 @@
+package requests
+
+import "github.com/status-im/status-go/logutils"
+
+type SetLogNamespaces struct {
+    LogNamespaces string `json:"logNamespaces"`
+}
+
+func (c *SetLogNamespaces) Validate() error {
+    return logutils.ValidateNamespaces(c.LogNamespaces)
+}
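A quick, hedged illustration of the new request's Validate contract, which simply defers to logutils.ValidateNamespaces shown earlier; the namespace values below are examples only, and the import path is inferred from the file location above:

package main

import (
	"fmt"

	"github.com/status-im/status-go/protocol/requests"
)

func main() {
	ok := requests.SetLogNamespaces{LogNamespaces: "test1.test2:debug,test1.test2.test3:info"}
	fmt.Println(ok.Validate()) // <nil>: matches the comma-separated "name:level" format

	bad := requests.SetLogNamespaces{LogNamespaces: "test1=debug"}
	fmt.Println(bad.Validate()) // invalid namespaces format
}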
@@ -1808,10 +1808,28 @@ func (api *PublicAPI) SetLogLevel(request *requests.SetLogLevel) error {
     return api.service.messenger.SetLogLevel(request)
 }
 
+func (api *PublicAPI) SetLogNamespaces(request *requests.SetLogNamespaces) error {
+    return api.service.messenger.SetLogNamespaces(request)
+}
+
 func (api *PublicAPI) SetMaxLogBackups(request *requests.SetMaxLogBackups) error {
     return api.service.messenger.SetMaxLogBackups(request)
 }
 
+func (api *PublicAPI) LogTest() error {
+    l1 := logutils.ZapLogger().Named("test1")
+    l2 := l1.Named("test2")
+    l3 := l2.Named("test3")
+
+    for level := zap.DebugLevel; level <= zap.ErrorLevel; level++ {
+        for _, logger := range []*zap.Logger{l1, l2, l3} {
+            logger.Check(level, "test message").Write(zap.String("level", level.String()))
+        }
+    }
+
+    return l1.Sync()
+}
+
 func (api *PublicAPI) SetCustomNodes(request *requests.SetCustomNodes) error {
     return api.service.messenger.SetCustomNodes(request)
 }
@@ -63,30 +63,34 @@ class StatusBackend(RpcClient, SignalClient):
     def init_status_backend(self, data_dir="/"):
         method = "InitializeApplication"
         data = {
-            "dataDir": data_dir
+            "dataDir": data_dir,
         }
         return self.api_valid_request(method, data)
 
-    def create_account_and_login(self, display_name=DEFAULT_DISPLAY_NAME, password=user_1.password):
-        data_dir = f"dataDir_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+    def create_account_and_login(self, data_dir="/", display_name=DEFAULT_DISPLAY_NAME, password=user_1.password):
         method = "CreateAccountAndLogin"
         data = {
             "rootDataDir": data_dir,
             "kdfIterations": 256000,
             "displayName": display_name,
             "password": password,
-            "customizationColor": "primary"
+            "customizationColor": "primary",
+            "logEnabled": True,
+            "logLevel": "INFO",
         }
         return self.api_valid_request(method, data)
 
-    def restore_account_and_login(self, display_name=DEFAULT_DISPLAY_NAME, user=user_1):
+    def restore_account_and_login(self, data_dir="/", display_name=DEFAULT_DISPLAY_NAME, user=user_1):
         method = "RestoreAccountAndLogin"
         data = {
-            "rootDataDir": "/",
+            "rootDataDir": data_dir,
+            "kdfIterations": 256000,
             "displayName": display_name,
             "password": user.password,
             "mnemonic": user.passphrase,
             "customizationColor": "blue",
+            "logEnabled": True,
+            "logLevel": "INFO",
             "testNetworksEnabled": True,
             "networkId": 31337,
             "networksOverride": [
@@ -107,6 +111,19 @@ class StatusBackend(RpcClient, SignalClient):
         }
         return self.api_valid_request(method, data)
 
+    def login(self, keyUid, user=user_1):
+        method = "LoginAccount"
+        data = {
+            "password": user.password,
+            "keyUid": keyUid,
+            "kdfIterations": 256000,
+        }
+        return self.api_valid_request(method, data)
+
+    def logout(self, user=user_1):
+        method = "Logout"
+        return self.api_valid_request(method, {})
+
     def restore_account_and_wait_for_rpc_client_to_start(self, timeout=60):
         self.restore_account_and_login()
         start_time = time.time()
@@ -166,4 +183,4 @@ class StatusBackend(RpcClient, SignalClient):
         method = "wakuext_sendOneToOneMessage"
         response = self.rpc_request(method, params)
         self.verify_is_valid_json_rpc_response(response)
         return response.json()
tests-functional/tests/test_logging.py (new file, 63 lines)
@@ -0,0 +1,63 @@
+import re
+import time
+from test_cases import StatusBackend
+import pytest
+import os
+
+@pytest.mark.rpc
+@pytest.mark.skip("waiting for status-backend to be executed on the same host/container")
+class TestLogging:
+
+    @pytest.mark.init
+    def test_logging(self, tmp_path):
+        await_signals = [
+            "mediaserver.started",
+            "node.started",
+            "node.ready",
+            "node.login",
+        ]
+
+        backend_client = StatusBackend(await_signals)
+        assert backend_client is not None
+
+        # Init and login
+        backend_client.init_status_backend(data_dir=str(tmp_path))
+        backend_client.create_account_and_login(data_dir=str(tmp_path))
+        key_uid = self.ensure_logged_in(backend_client)
+
+        # Configure logging
+        backend_client.rpc_valid_request("wakuext_setLogLevel", [{"logLevel": "ERROR"}])
+        backend_client.rpc_valid_request("wakuext_setLogNamespaces", [{"logNamespaces": "test1.test2:debug,test1.test2.test3:info"}])
+
+        # Re-login (logging settings take effect after re-login)
+        backend_client.logout()
+        backend_client.login(str(key_uid))
+        self.ensure_logged_in(backend_client)
+
+        # Test logging
+        backend_client.rpc_valid_request("wakuext_logTest")
+        self.expect_logs(tmp_path / "geth.log", "test message", [
+            r"DEBUG\s+test1\.test2",
+            r"INFO\s+test1\.test2",
+            r"INFO\s+test1\.test2\.test3",
+            r"WARN\s+test1\.test2",
+            r"WARN\s+test1\.test2\.test3",
+            r"ERROR\s+test1",
+            r"ERROR\s+test1\.test2",
+            r"ERROR\s+test1\.test2\.test3",
+        ])
+
+    def expect_logs(self, log_file, filter_keyword, expected_logs):
+        with open(log_file, 'r') as f:
+            log_content = f.read()
+
+        filtered_logs = [line for line in log_content.splitlines() if filter_keyword in line]
+        for expected_log in expected_logs:
+            assert any(re.search(expected_log, log) for log in filtered_logs), f"Log entry not found: {expected_log}"
+
+    def ensure_logged_in(self, backend_client):
+        login_response = backend_client.wait_for_signal("node.login")
+        backend_client.verify_json_schema(login_response, "signal_node_login")
+        key_uid = login_response.get("event", {}).get("account", {}).get("key-uid")
+        assert key_uid is not None, "key-uid not found in login response"
+        return key_uid