update status-go to v0.23.0-beta.9
This commit is contained in:
parent
15f52ee627
commit
44f1c1bd44
1
go.mod
1
go.mod
|
@ -123,6 +123,7 @@ require (
|
|||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
|
||||
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6 // indirect
|
||||
gopkg.in/go-playground/validator.v9 v9.24.0 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20181125150206-ccb656ba24c2 // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
|
|
4
go.sum
4
go.sum
|
@ -333,6 +333,8 @@ github.com/status-im/status-go v0.16.4 h1:8pED8nOgQgHFhN+nIGDTvyK/aXA6VPQ6ZVYnEs
|
|||
github.com/status-im/status-go v0.16.4/go.mod h1:3ky7GKNvGXC46HmjcVnDe+3cCkzP+JTzPRz4d50AQ4w=
|
||||
github.com/status-im/status-go v0.23.0-beta.7 h1:62WVrbtRhmvWfUO/7q8maqMuC9mN/qoH4o8CIi5yqgY=
|
||||
github.com/status-im/status-go v0.23.0-beta.7/go.mod h1:3ky7GKNvGXC46HmjcVnDe+3cCkzP+JTzPRz4d50AQ4w=
|
||||
github.com/status-im/status-go v0.23.0-beta.9 h1:ZeBtlWgVQuI4qG/u5ffsziMxmgL9iIXi+ou5HjaVbEs=
|
||||
github.com/status-im/status-go v0.23.0-beta.9/go.mod h1:3ky7GKNvGXC46HmjcVnDe+3cCkzP+JTzPRz4d50AQ4w=
|
||||
github.com/status-im/whisper v1.4.5 h1:+B64WCnvIwlbYhuIQ7kEaJspPgcEwMDtk953U2JqxR8=
|
||||
github.com/status-im/whisper v1.4.5/go.mod h1:WS6z39YJQ8WJa9s+DmTuEM/s2nVF6Iz3B1SZYw5cYf0=
|
||||
github.com/status-im/whisper v1.4.8 h1:YA7FS9F7SFYn/SvrQta0gAu206/HXX8JpwCmZImxn6E=
|
||||
|
@ -397,6 +399,8 @@ gopkg.in/go-playground/validator.v9 v9.24.0 h1:4pXadp8xZVW4WR1Ygw8zDqeCMVHxTGI9t
|
|||
gopkg.in/go-playground/validator.v9 v9.24.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/ini.v1 v1.38.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20181125150206-ccb656ba24c2 h1:Z7a/sQcuVyTiDPnrKvrSZCe0G1L8wxZtqs/UQCuGtGY=
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
|
|||
)
|
||||
|
||||
func init() {
|
||||
if err := logutils.OverrideRootLog(true, "INFO", "", false); err != nil {
|
||||
if err := logutils.OverrideRootLog(true, "INFO", logutils.FileOptions{}, false); err != nil {
|
||||
stdlog.Fatalf("failed to override root log: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -193,7 +193,13 @@ func (a *WhisperServiceAdapter) Request(ctx context.Context, options RequestOpti
|
|||
return err
|
||||
}
|
||||
|
||||
return a.requestMessages(ctx, enode, options)
|
||||
// TODO: handle cursor from the response.
|
||||
resp, err := a.requestMessages(ctx, enode, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return resp.Error
|
||||
}
|
||||
|
||||
func (a *WhisperServiceAdapter) selectAndAddMailServer() (string, error) {
|
||||
|
@ -223,16 +229,16 @@ func (a *WhisperServiceAdapter) selectAndAddMailServer() (string, error) {
|
|||
return enode, err
|
||||
}
|
||||
|
||||
func (a *WhisperServiceAdapter) requestMessages(ctx context.Context, enode string, params RequestOptions) error {
|
||||
func (a *WhisperServiceAdapter) requestMessages(ctx context.Context, enode string, params RequestOptions) (resp shhext.MessagesResponse, err error) {
|
||||
shhextService, err := a.node.ShhExtService()
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
shhextAPI := shhext.NewPublicAPI(shhextService)
|
||||
|
||||
req, err := a.createMessagesRequest(enode, params)
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
|
||||
return shhextAPI.RequestMessagesSync(shhext.RetryConfig{
|
||||
|
|
|
@ -0,0 +1,29 @@
|
|||
package logutils
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
lumberjack "gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// FileOptions are all options supported by internal rotation module.
|
||||
type FileOptions struct {
|
||||
// Base name for log file.
|
||||
Filename string
|
||||
// Size in megabytes.
|
||||
MaxSize int
|
||||
// Number of rotated log files.
|
||||
MaxBackups int
|
||||
// If true rotated log files will be gzipped.
|
||||
Compress bool
|
||||
}
|
||||
|
||||
// FileHandlerWithRotation instantiates log.Handler with a configured rotation
|
||||
func FileHandlerWithRotation(opts FileOptions, format log.Format) log.Handler {
|
||||
logger := &lumberjack.Logger{
|
||||
Filename: opts.Filename,
|
||||
MaxSize: opts.MaxSize,
|
||||
MaxBackups: opts.MaxBackups,
|
||||
Compress: opts.Compress,
|
||||
}
|
||||
return log.StreamHandler(logger, format)
|
||||
}
|
|
@ -5,38 +5,55 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/status-im/status-go/params"
|
||||
)
|
||||
|
||||
// OverrideWithStdLogger overwrites ethereum's root logger with a logger from golang std lib.
|
||||
func OverrideWithStdLogger(config *params.NodeConfig) error {
|
||||
return enableRootLog(config.LogLevel, NewStdHandler(log.LogfmtFormat()))
|
||||
}
|
||||
|
||||
// OverrideRootLogWithConfig derives all configuration from params.NodeConfig and configures logger using it.
|
||||
func OverrideRootLogWithConfig(config *params.NodeConfig, colors bool) error {
|
||||
if !config.LogEnabled {
|
||||
return nil
|
||||
}
|
||||
if config.LogMobileSystem {
|
||||
return OverrideWithStdLogger(config)
|
||||
}
|
||||
return OverrideRootLog(config.LogEnabled, config.LogLevel, FileOptions{
|
||||
Filename: config.LogFile,
|
||||
MaxSize: config.LogMaxSize,
|
||||
MaxBackups: config.LogMaxBackups,
|
||||
Compress: config.LogCompressRotated,
|
||||
}, colors)
|
||||
|
||||
}
|
||||
|
||||
// OverrideRootLog overrides root logger with file handler, if defined,
|
||||
// and log level (defaults to INFO).
|
||||
func OverrideRootLog(enabled bool, levelStr string, logFile string, terminal bool) error {
|
||||
func OverrideRootLog(enabled bool, levelStr string, fileOpts FileOptions, terminal bool) error {
|
||||
if !enabled {
|
||||
disableRootLog()
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
handler log.Handler
|
||||
)
|
||||
if fileOpts.Filename != "" {
|
||||
handler = FileHandlerWithRotation(fileOpts, log.LogfmtFormat())
|
||||
} else {
|
||||
handler = log.StreamHandler(os.Stderr, log.TerminalFormat(terminal))
|
||||
}
|
||||
|
||||
return enableRootLog(levelStr, logFile, terminal)
|
||||
return enableRootLog(levelStr, handler)
|
||||
}
|
||||
|
||||
func disableRootLog() {
|
||||
log.Root().SetHandler(log.DiscardHandler())
|
||||
}
|
||||
|
||||
func enableRootLog(levelStr string, logFile string, terminal bool) error {
|
||||
var (
|
||||
handler log.Handler
|
||||
err error
|
||||
)
|
||||
|
||||
if logFile != "" {
|
||||
handler, err = log.FileHandler(logFile, log.LogfmtFormat())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
handler = log.StreamHandler(os.Stderr, log.TerminalFormat(terminal))
|
||||
}
|
||||
|
||||
func enableRootLog(levelStr string, handler log.Handler) error {
|
||||
if levelStr == "" {
|
||||
levelStr = "INFO"
|
||||
}
|
||||
|
|
|
@ -0,0 +1,17 @@
|
|||
package logutils
|
||||
|
||||
import (
|
||||
stdlog "log"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// NewStdHandler returns handler that uses logger from golang std lib.
|
||||
func NewStdHandler(fmtr log.Format) log.Handler {
|
||||
return log.FuncHandler(func(r *log.Record) error {
|
||||
line := fmtr.Format(r)
|
||||
// 8 is a number of frames that will be skipped when log is printed.
|
||||
// this is needed to show the file (with line number) where call to a logger was made
|
||||
return stdlog.Output(8, string(line))
|
||||
})
|
||||
}
|
|
@ -40,7 +40,10 @@ import (
|
|||
|
||||
const (
|
||||
maxQueryRange = 24 * time.Hour
|
||||
noLimits = 0
|
||||
defaultLimit = 2000
|
||||
// When we default the upper limit, we want to extend the range a bit
|
||||
// to accommodate for envelopes with slightly higher timestamp, in seconds
|
||||
whisperTTLSafeThreshold = 60
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -265,6 +268,10 @@ func (s *WMailServer) DeliverMail(peer *whisper.Peer, request *whisper.Envelope)
|
|||
lower, upper, bloom, limit, cursor, err = s.validateRequest(peer.ID(), request)
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
limit = defaultLimit
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
requestValidationErrorsCounter.Inc(1)
|
||||
log.Error("[mailserver:DeliverMail] request failed validaton",
|
||||
|
@ -535,7 +542,7 @@ func (s *WMailServer) processRequestInBundles(
|
|||
lastEnvelopeHash = envelope.Hash()
|
||||
processedEnvelopes++
|
||||
envelopeSize := whisper.EnvelopeHeaderLength + uint32(len(envelope.Data))
|
||||
limitReached := limit != noLimits && processedEnvelopes == limit
|
||||
limitReached := processedEnvelopes == limit
|
||||
newSize := bundleSize + envelopeSize
|
||||
|
||||
// If we still have some room for messages, add and continue
|
||||
|
@ -676,18 +683,15 @@ func (s *WMailServer) decodeRequest(peerID []byte, request *whisper.Envelope) (M
|
|||
return payload, fmt.Errorf("failed to decode data: %v", err)
|
||||
}
|
||||
|
||||
if payload.Upper == 0 {
|
||||
payload.Upper = uint32(time.Now().Unix() + whisperTTLSafeThreshold)
|
||||
}
|
||||
|
||||
if payload.Upper < payload.Lower {
|
||||
log.Error("Query range is invalid: lower > upper", "lower", payload.Lower, "upper", payload.Upper)
|
||||
return payload, errors.New("query range is invalid: lower > upper")
|
||||
}
|
||||
|
||||
lowerTime := time.Unix(int64(payload.Lower), 0)
|
||||
upperTime := time.Unix(int64(payload.Upper), 0)
|
||||
if upperTime.Sub(lowerTime) > maxQueryRange {
|
||||
log.Warn("Query range too long", "peerID", peerIDBytesString(peerID), "length", upperTime.Sub(lowerTime), "max", maxQueryRange)
|
||||
return payload, fmt.Errorf("query range must be shorted than %d", maxQueryRange)
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -259,12 +259,24 @@ type NodeConfig struct {
|
|||
// LogEnabled enables the logger
|
||||
LogEnabled bool `json:"LogEnabled"`
|
||||
|
||||
// LogMobileSystem enables log redirection to android/ios system logger.
|
||||
LogMobileSystem bool
|
||||
|
||||
// LogFile is filename where exposed logs get written to
|
||||
LogFile string
|
||||
|
||||
// LogLevel defines minimum log level. Valid names are "ERROR", "WARN", "INFO", "DEBUG", and "TRACE".
|
||||
LogLevel string `validate:"eq=ERROR|eq=WARN|eq=INFO|eq=DEBUG|eq=TRACE"`
|
||||
|
||||
// LogMaxBackups defines number of rotated log files that will be stored.
|
||||
LogMaxBackups int
|
||||
|
||||
// LogMaxSize after current size is reached log file will be rotated
|
||||
LogMaxSize int
|
||||
|
||||
// LogCompressRotated if true all rotated files will be gzipped.
|
||||
LogCompressRotated bool
|
||||
|
||||
// LogToStderr defines whether logged info should also be output to os.Stderr
|
||||
LogToStderr bool
|
||||
|
||||
|
@ -307,8 +319,7 @@ type ShhextConfig struct {
|
|||
// BackupDisabledDataDir is the file system folder the node should use for any data storage needs that it doesn't want backed up.
|
||||
BackupDisabledDataDir string
|
||||
// InstallationId id of the current installation
|
||||
InstallationID string
|
||||
DebugAPIEnabled bool
|
||||
InstallationID string
|
||||
// MailServerConfirmations should be true if client wants to receive confirmatons only from a selected mail servers.
|
||||
MailServerConfirmations bool
|
||||
// EnableConnectionManager turns on management of the mail server connections if true.
|
||||
|
@ -322,6 +333,9 @@ type ShhextConfig struct {
|
|||
|
||||
// MaxServerFailures defines maximum allowed expired requests before server will be swapped to another one.
|
||||
MaxServerFailures int
|
||||
|
||||
// MaxMessageDeliveryAttempts defines how many times we will try to deliver not-acknowledged envelopes.
|
||||
MaxMessageDeliveryAttempts int
|
||||
}
|
||||
|
||||
// Validate validates the ShhextConfig struct and returns an error if inconsistent values are found
|
||||
|
@ -378,6 +392,9 @@ func NewNodeConfigWithDefaults(dataDir string, networkID uint64, opts ...Option)
|
|||
c.ListenAddr = ":30303"
|
||||
c.LogEnabled = true
|
||||
c.LogLevel = "INFO"
|
||||
c.LogMaxSize = 100
|
||||
c.LogCompressRotated = true
|
||||
c.LogMaxBackups = 3
|
||||
c.LogToStderr = true
|
||||
c.WhisperConfig.Enabled = true
|
||||
c.WhisperConfig.EnableNTPSync = true
|
||||
|
|
|
@ -106,6 +106,17 @@ func (r *MessagesRequest) setDefaults(now time.Time) {
|
|||
}
|
||||
}
|
||||
|
||||
// MessagesResponse is a response for shhext_requestMessages2 method.
|
||||
type MessagesResponse struct {
|
||||
// Cursor from the response can be used to retrieve more messages
|
||||
// for the previous request.
|
||||
Cursor string `json:"cursor"`
|
||||
|
||||
// Error indicates that something wrong happened when sending messages
|
||||
// to the requester.
|
||||
Error error `json:"error"`
|
||||
}
|
||||
|
||||
// SyncMessagesRequest is a SyncMessages() request payload.
|
||||
type SyncMessagesRequest struct {
|
||||
// MailServerPeer is MailServer's enode address.
|
||||
|
@ -164,14 +175,15 @@ func NewPublicAPI(s *Service) *PublicAPI {
|
|||
}
|
||||
|
||||
// Post shamelessly copied from whisper codebase with slight modifications.
|
||||
func (api *PublicAPI) Post(ctx context.Context, req whisper.NewMessage) (hash hexutil.Bytes, err error) {
|
||||
hash, err = api.publicAPI.Post(ctx, req)
|
||||
func (api *PublicAPI) Post(ctx context.Context, req whisper.NewMessage) (hexutil.Bytes, error) {
|
||||
hexID, err := api.publicAPI.Post(ctx, req)
|
||||
if err == nil {
|
||||
var envHash common.Hash
|
||||
copy(envHash[:], hash[:]) // slice can't be used as key
|
||||
api.service.envelopesMonitor.Add(envHash)
|
||||
api.service.envelopesMonitor.Add(common.BytesToHash(hexID), req)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
return hash, err
|
||||
mID := messageID(req)
|
||||
return mID[:], err
|
||||
}
|
||||
|
||||
func (api *PublicAPI) getPeer(rawurl string) (*enode.Node, error) {
|
||||
|
@ -190,7 +202,9 @@ type RetryConfig struct {
|
|||
}
|
||||
|
||||
// RequestMessagesSync repeats MessagesRequest using configuration in retry conf.
|
||||
func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) error {
|
||||
func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) (MessagesResponse, error) {
|
||||
var resp MessagesResponse
|
||||
|
||||
shh := api.service.w
|
||||
events := make(chan whisper.EnvelopeEvent, 10)
|
||||
sub := shh.SubscribeEnvelopeEvents(events)
|
||||
|
@ -206,19 +220,21 @@ func (api *PublicAPI) RequestMessagesSync(conf RetryConfig, r MessagesRequest) e
|
|||
r.Timeout = time.Duration(int(r.Timeout.Seconds()))
|
||||
requestID, err = api.RequestMessages(context.Background(), r)
|
||||
if err != nil {
|
||||
return err
|
||||
return resp, err
|
||||
}
|
||||
err = waitForExpiredOrCompleted(common.BytesToHash(requestID), events)
|
||||
mailServerResp, err := waitForExpiredOrCompleted(common.BytesToHash(requestID), events)
|
||||
if err == nil {
|
||||
return nil
|
||||
resp.Cursor = hex.EncodeToString(mailServerResp.Cursor)
|
||||
resp.Error = mailServerResp.Error
|
||||
return resp, nil
|
||||
}
|
||||
retries++
|
||||
api.log.Error("History request failed with %s. Making retry #%d", retries)
|
||||
api.log.Error("[RequestMessagesSync] failed", "err", err, "retries", retries)
|
||||
}
|
||||
return fmt.Errorf("failed to request messages after %d retries", retries)
|
||||
return resp, fmt.Errorf("failed to request messages after %d retries", retries)
|
||||
}
|
||||
|
||||
func waitForExpiredOrCompleted(requestID common.Hash, events chan whisper.EnvelopeEvent) error {
|
||||
func waitForExpiredOrCompleted(requestID common.Hash, events chan whisper.EnvelopeEvent) (*whisper.MailServerResponse, error) {
|
||||
for {
|
||||
ev := <-events
|
||||
if ev.Hash != requestID {
|
||||
|
@ -226,9 +242,13 @@ func waitForExpiredOrCompleted(requestID common.Hash, events chan whisper.Envelo
|
|||
}
|
||||
switch ev.Event {
|
||||
case whisper.EventMailServerRequestCompleted:
|
||||
return nil
|
||||
data, ok := ev.Data.(*whisper.MailServerResponse)
|
||||
if ok {
|
||||
return data, nil
|
||||
}
|
||||
return nil, errors.New("invalid event data type")
|
||||
case whisper.EventMailServerRequestExpired:
|
||||
return errors.New("request expired")
|
||||
return nil, errors.New("request expired")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,74 +0,0 @@
|
|||
package shhext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/status-im/status-go/services"
|
||||
whisper "github.com/status-im/whisper/whisperv6"
|
||||
)
|
||||
|
||||
var (
|
||||
postSyncTimeout = 60 * time.Second
|
||||
errEnvelopeExpired = errors.New("envelope expired before being sent")
|
||||
errNoShhextAttachedAPI = errors.New("No shhext attached")
|
||||
)
|
||||
|
||||
// DebugAPI represents a set of APIs from the `web3.debug` namespace.
|
||||
type DebugAPI struct {
|
||||
s *Service
|
||||
}
|
||||
|
||||
// NewDebugAPI creates an instance of the debug API.
|
||||
func NewDebugAPI(s *Service) *DebugAPI {
|
||||
return &DebugAPI{s: s}
|
||||
}
|
||||
|
||||
// PostSync sends an envelope through shhext_post and waits until it's sent.
|
||||
func (api *DebugAPI) PostSync(ctx context.Context, req whisper.NewMessage) (hash hexutil.Bytes, err error) {
|
||||
shhAPI := services.APIByNamespace(api.s.APIs(), "shhext")
|
||||
if shhAPI == nil {
|
||||
err = errNoShhextAttachedAPI
|
||||
return
|
||||
}
|
||||
s, ok := shhAPI.(*PublicAPI)
|
||||
if !ok {
|
||||
err = errNoShhextAttachedAPI
|
||||
return
|
||||
}
|
||||
hash, err = s.Post(ctx, req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ctxTimeout, cancel := context.WithTimeout(ctx, postSyncTimeout)
|
||||
defer cancel()
|
||||
err = api.waitForHash(ctxTimeout, hash)
|
||||
return
|
||||
}
|
||||
|
||||
// waitForHash waits for a specific hash to be sent
|
||||
func (api *DebugAPI) waitForHash(ctx context.Context, hash hexutil.Bytes) error {
|
||||
h := common.BytesToHash(hash)
|
||||
events := make(chan whisper.EnvelopeEvent, 100)
|
||||
sub := api.s.w.SubscribeEnvelopeEvents(events)
|
||||
defer sub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case ev := <-events:
|
||||
if ev.Hash == h {
|
||||
if ev.Event == whisper.EventEnvelopeSent {
|
||||
return nil
|
||||
}
|
||||
if ev.Event == whisper.EventEnvelopeExpired {
|
||||
return errEnvelopeExpired
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return fmt.Errorf("wait for hash canceled: %v", ctx.Err())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,6 +1,8 @@
|
|||
package shhext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"hash/fnv"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
@ -10,15 +12,51 @@ import (
|
|||
whisper "github.com/status-im/whisper/whisperv6"
|
||||
)
|
||||
|
||||
func messageID(message whisper.NewMessage) common.Hash {
|
||||
hash := fnv.New32()
|
||||
_, _ = hash.Write(message.Payload)
|
||||
_, _ = hash.Write(message.Topic[:])
|
||||
return common.BytesToHash(hash.Sum(nil))
|
||||
}
|
||||
|
||||
// NewEnvelopesMonitor returns a pointer to an instance of the EnvelopesMonitor.
|
||||
func NewEnvelopesMonitor(w *whisper.Whisper, handler EnvelopeEventsHandler, mailServerConfirmation bool, mailPeers *mailservers.PeerStore, maxAttempts int) *EnvelopesMonitor {
|
||||
return &EnvelopesMonitor{
|
||||
w: w,
|
||||
whisperAPI: whisper.NewPublicWhisperAPI(w),
|
||||
handler: handler,
|
||||
mailServerConfirmation: mailServerConfirmation,
|
||||
mailPeers: mailPeers,
|
||||
maxAttempts: maxAttempts,
|
||||
|
||||
// key is envelope hash (event.Hash)
|
||||
envelopes: map[common.Hash]EnvelopeState{},
|
||||
messages: map[common.Hash]whisper.NewMessage{},
|
||||
attempts: map[common.Hash]int{},
|
||||
|
||||
// key is messageID
|
||||
messageToEnvelope: map[common.Hash]common.Hash{},
|
||||
|
||||
// key is hash of the batch (event.Batch)
|
||||
batches: map[common.Hash]map[common.Hash]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
// EnvelopesMonitor is responsible for monitoring whisper envelopes state.
|
||||
type EnvelopesMonitor struct {
|
||||
w *whisper.Whisper
|
||||
whisperAPI *whisper.PublicWhisperAPI
|
||||
handler EnvelopeEventsHandler
|
||||
mailServerConfirmation bool
|
||||
maxAttempts int
|
||||
|
||||
mu sync.Mutex
|
||||
cache map[common.Hash]EnvelopeState
|
||||
batches map[common.Hash]map[common.Hash]struct{}
|
||||
mu sync.Mutex
|
||||
envelopes map[common.Hash]EnvelopeState
|
||||
batches map[common.Hash]map[common.Hash]struct{}
|
||||
|
||||
messageToEnvelope map[common.Hash]common.Hash
|
||||
messages map[common.Hash]whisper.NewMessage
|
||||
attempts map[common.Hash]int
|
||||
|
||||
mailPeers *mailservers.PeerStore
|
||||
|
||||
|
@ -43,16 +81,33 @@ func (m *EnvelopesMonitor) Stop() {
|
|||
}
|
||||
|
||||
// Add hash to a tracker.
|
||||
func (m *EnvelopesMonitor) Add(hash common.Hash) {
|
||||
func (m *EnvelopesMonitor) Add(envelopeHash common.Hash, message whisper.NewMessage) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.cache[hash] = EnvelopePosted
|
||||
m.envelopes[envelopeHash] = EnvelopePosted
|
||||
m.messages[envelopeHash] = message
|
||||
m.attempts[envelopeHash] = 1
|
||||
m.messageToEnvelope[messageID(message)] = envelopeHash
|
||||
}
|
||||
|
||||
func (m *EnvelopesMonitor) GetState(hash common.Hash) EnvelopeState {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
state, exist := m.cache[hash]
|
||||
state, exist := m.envelopes[hash]
|
||||
if !exist {
|
||||
return NotRegistered
|
||||
}
|
||||
return state
|
||||
}
|
||||
|
||||
func (m *EnvelopesMonitor) GetMessageState(mID common.Hash) EnvelopeState {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
envelope, exist := m.messageToEnvelope[mID]
|
||||
if !exist {
|
||||
return NotRegistered
|
||||
}
|
||||
state, exist := m.envelopes[envelope]
|
||||
if !exist {
|
||||
return NotRegistered
|
||||
}
|
||||
|
@ -83,7 +138,6 @@ func (m *EnvelopesMonitor) handleEvent(event whisper.EnvelopeEvent) {
|
|||
whisper.EventBatchAcknowledged: m.handleAcknowledgedBatch,
|
||||
whisper.EventEnvelopeReceived: m.handleEventEnvelopeReceived,
|
||||
}
|
||||
|
||||
if handler, ok := handlers[event.Event]; ok {
|
||||
handler(event)
|
||||
}
|
||||
|
@ -99,7 +153,7 @@ func (m *EnvelopesMonitor) handleEventEnvelopeSent(event whisper.EnvelopeEvent)
|
|||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
state, ok := m.cache[event.Hash]
|
||||
state, ok := m.envelopes[event.Hash]
|
||||
// if we didn't send a message using extension - skip it
|
||||
// if message was already confirmed - skip it
|
||||
if !ok || state == EnvelopeSent {
|
||||
|
@ -113,9 +167,9 @@ func (m *EnvelopesMonitor) handleEventEnvelopeSent(event whisper.EnvelopeEvent)
|
|||
m.batches[event.Batch][event.Hash] = struct{}{}
|
||||
log.Debug("waiting for a confirmation", "batch", event.Batch)
|
||||
} else {
|
||||
m.cache[event.Hash] = EnvelopeSent
|
||||
m.envelopes[event.Hash] = EnvelopeSent
|
||||
if m.handler != nil {
|
||||
m.handler.EnvelopeSent(event.Hash)
|
||||
m.handler.EnvelopeSent(messageID(m.messages[event.Hash]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -140,13 +194,13 @@ func (m *EnvelopesMonitor) handleAcknowledgedBatch(event whisper.EnvelopeEvent)
|
|||
}
|
||||
log.Debug("received a confirmation", "batch", event.Batch, "peer", event.Peer)
|
||||
for hash := range envelopes {
|
||||
state, ok := m.cache[hash]
|
||||
state, ok := m.envelopes[hash]
|
||||
if !ok || state == EnvelopeSent {
|
||||
continue
|
||||
}
|
||||
m.cache[hash] = EnvelopeSent
|
||||
m.envelopes[hash] = EnvelopeSent
|
||||
if m.handler != nil {
|
||||
m.handler.EnvelopeSent(hash)
|
||||
m.handler.EnvelopeSent(messageID(m.messages[hash]))
|
||||
}
|
||||
}
|
||||
delete(m.batches, event.Batch)
|
||||
|
@ -155,15 +209,33 @@ func (m *EnvelopesMonitor) handleAcknowledgedBatch(event whisper.EnvelopeEvent)
|
|||
func (m *EnvelopesMonitor) handleEventEnvelopeExpired(event whisper.EnvelopeEvent) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if state, ok := m.cache[event.Hash]; ok {
|
||||
delete(m.cache, event.Hash)
|
||||
if state, ok := m.envelopes[event.Hash]; ok {
|
||||
message, exist := m.messages[event.Hash]
|
||||
if !exist {
|
||||
log.Error("message was deleted erroneously", "envelope hash", event.Hash)
|
||||
}
|
||||
mID := messageID(message)
|
||||
attempt := m.attempts[event.Hash]
|
||||
m.clearMessageState(event.Hash)
|
||||
if state == EnvelopeSent {
|
||||
return
|
||||
}
|
||||
log.Debug("envelope expired", "hash", event.Hash, "state", state)
|
||||
if m.handler != nil {
|
||||
m.handler.EnvelopeExpired(event.Hash)
|
||||
if attempt < m.maxAttempts {
|
||||
log.Debug("retrying to send a message", "message id", mID, "attempt", attempt+1)
|
||||
hex, err := m.whisperAPI.Post(context.TODO(), message)
|
||||
if err != nil {
|
||||
log.Error("failed to retry sending message", "message id", mID, "attempt", attempt+1)
|
||||
}
|
||||
envelopeID := common.BytesToHash(hex)
|
||||
m.messageToEnvelope[mID] = envelopeID
|
||||
m.envelopes[envelopeID] = EnvelopePosted
|
||||
m.messages[envelopeID] = message
|
||||
m.attempts[envelopeID] = attempt + 1
|
||||
} else {
|
||||
log.Debug("envelope expired", "hash", event.Hash, "state", state)
|
||||
if m.handler != nil {
|
||||
m.handler.EnvelopeExpired(mID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -176,13 +248,23 @@ func (m *EnvelopesMonitor) handleEventEnvelopeReceived(event whisper.EnvelopeEve
|
|||
}
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
state, ok := m.cache[event.Hash]
|
||||
state, ok := m.envelopes[event.Hash]
|
||||
if !ok || state != EnvelopePosted {
|
||||
return
|
||||
}
|
||||
log.Debug("expected envelope received", "hash", event.Hash, "peer", event.Peer)
|
||||
delete(m.cache, event.Hash)
|
||||
m.envelopes[event.Hash] = EnvelopeSent
|
||||
if m.handler != nil {
|
||||
m.handler.EnvelopeSent(event.Hash)
|
||||
m.handler.EnvelopeSent(messageID(m.messages[event.Hash]))
|
||||
}
|
||||
}
|
||||
|
||||
// clearMessageState removes all message and envelope state.
|
||||
// not thread-safe, should be protected on a higher level.
|
||||
func (m *EnvelopesMonitor) clearMessageState(envelopeID common.Hash) {
|
||||
delete(m.envelopes, envelopeID)
|
||||
mID := messageID(m.messages[envelopeID])
|
||||
delete(m.messageToEnvelope, mID)
|
||||
delete(m.messages, envelopeID)
|
||||
delete(m.attempts, envelopeID)
|
||||
}
|
||||
|
|
|
@ -50,7 +50,6 @@ type Service struct {
|
|||
nodeID *ecdsa.PrivateKey
|
||||
deduplicator *dedup.Deduplicator
|
||||
protocol *chat.ProtocolService
|
||||
debug bool
|
||||
dataDir string
|
||||
installationID string
|
||||
pfsEnabled bool
|
||||
|
@ -79,14 +78,7 @@ func New(w *whisper.Whisper, handler EnvelopeEventsHandler, db *leveldb.DB, conf
|
|||
cache: map[common.Hash]EnvelopeState{},
|
||||
requestsRegistry: requestsRegistry,
|
||||
}
|
||||
envelopesMonitor := &EnvelopesMonitor{
|
||||
w: w,
|
||||
handler: handler,
|
||||
cache: map[common.Hash]EnvelopeState{},
|
||||
batches: map[common.Hash]map[common.Hash]struct{}{},
|
||||
mailPeers: ps,
|
||||
mailServerConfirmation: config.MailServerConfirmations,
|
||||
}
|
||||
envelopesMonitor := NewEnvelopesMonitor(w, handler, config.MailServerConfirmations, ps, config.MaxMessageDeliveryAttempts)
|
||||
return &Service{
|
||||
w: w,
|
||||
config: config,
|
||||
|
@ -94,7 +86,6 @@ func New(w *whisper.Whisper, handler EnvelopeEventsHandler, db *leveldb.DB, conf
|
|||
mailMonitor: mailMonitor,
|
||||
requestsRegistry: requestsRegistry,
|
||||
deduplicator: dedup.NewDeduplicator(w, db),
|
||||
debug: config.DebugAPIEnabled,
|
||||
dataDir: config.BackupDisabledDataDir,
|
||||
installationID: config.InstallationID,
|
||||
pfsEnabled: config.PFSEnabled,
|
||||
|
@ -250,16 +241,6 @@ func (s *Service) APIs() []rpc.API {
|
|||
Public: true,
|
||||
},
|
||||
}
|
||||
|
||||
if s.debug {
|
||||
apis = append(apis, rpc.API{
|
||||
Namespace: "debug",
|
||||
Version: "1.0",
|
||||
Service: NewDebugAPI(s),
|
||||
Public: true,
|
||||
})
|
||||
}
|
||||
|
||||
return apis
|
||||
}
|
||||
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
package services
|
||||
|
||||
import "github.com/ethereum/go-ethereum/rpc"
|
||||
|
||||
// APIByNamespace retrieve an api by its namespace or returns nil.
|
||||
func APIByNamespace(apis []rpc.API, namespace string) interface{} {
|
||||
for _, api := range apis {
|
||||
if api.Namespace == namespace {
|
||||
return api.Service
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
|
@ -0,0 +1,6 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
- 1.7
|
||||
- 1.6
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Nate Finch
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,179 @@
|
|||
# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://travis-ci.org/natefinch/lumberjack.svg?branch=v2.0)](https://travis-ci.org/natefinch/lumberjack) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
|
||||
|
||||
### Lumberjack is a Go package for writing logs to rolling files.
|
||||
|
||||
Package lumberjack provides a rolling logger.
|
||||
|
||||
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
thusly:
|
||||
|
||||
import "gopkg.in/natefinch/lumberjack.v2"
|
||||
|
||||
The package name remains simply lumberjack, and the code resides at
|
||||
https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
|
||||
Lumberjack is intended to be one part of a logging infrastructure.
|
||||
It is not an all-in-one solution, but instead is a pluggable
|
||||
component at the bottom of the logging stack that simply controls the files
|
||||
to which logs are written.
|
||||
|
||||
Lumberjack plays well with any logging package that can write to an
|
||||
io.Writer, including the standard library's log package.
|
||||
|
||||
Lumberjack assumes that only one process is writing to the output files.
|
||||
Using the same lumberjack configuration from multiple processes on the same
|
||||
machine will result in improper behavior.
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
log.SetOutput(&lumberjack.Logger{
|
||||
Filename: "/var/log/myapp/foo.log",
|
||||
MaxSize: 500, // megabytes
|
||||
MaxBackups: 3,
|
||||
MaxAge: 28, //days
|
||||
Compress: true, // disabled by default
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
|
||||
## type Logger
|
||||
``` go
|
||||
type Logger struct {
|
||||
// Filename is the file to write logs to. Backup log files will be retained
|
||||
// in the same directory. It uses <processname>-lumberjack.log in
|
||||
// os.TempDir() if empty.
|
||||
Filename string `json:"filename" yaml:"filename"`
|
||||
|
||||
// MaxSize is the maximum size in megabytes of the log file before it gets
|
||||
// rotated. It defaults to 100 megabytes.
|
||||
MaxSize int `json:"maxsize" yaml:"maxsize"`
|
||||
|
||||
// MaxAge is the maximum number of days to retain old log files based on the
|
||||
// timestamp encoded in their filename. Note that a day is defined as 24
|
||||
// hours and may not exactly correspond to calendar days due to daylight
|
||||
// savings, leap seconds, etc. The default is not to remove old log files
|
||||
// based on age.
|
||||
MaxAge int `json:"maxage" yaml:"maxage"`
|
||||
|
||||
// MaxBackups is the maximum number of old log files to retain. The default
|
||||
// is to retain all old log files (though MaxAge may still cause them to get
|
||||
// deleted.)
|
||||
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
|
||||
|
||||
// LocalTime determines if the time used for formatting the timestamps in
|
||||
// backup files is the computer's local time. The default is to use UTC
|
||||
// time.
|
||||
LocalTime bool `json:"localtime" yaml:"localtime"`
|
||||
|
||||
// Compress determines if the rotated log files should be compressed
|
||||
// using gzip. The default is not to perform compression.
|
||||
Compress bool `json:"compress" yaml:"compress"`
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Logger is an io.WriteCloser that writes to the specified filename.
|
||||
|
||||
Logger opens or creates the logfile on first Write. If the file exists and
|
||||
is less than MaxSize megabytes, lumberjack will open and append to that file.
|
||||
If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
||||
by putting the current time in a timestamp in the name immediately before the
|
||||
file's extension (or the end of the filename if there's no extension). A new
|
||||
log file is then created using the original filename.
|
||||
|
||||
Whenever a write would cause the current log file to exceed MaxSize megabytes,
|
||||
the current file is closed, renamed, and a new log file created with the
|
||||
original name. Thus, the filename you give Logger is always the "current" log
|
||||
file.
|
||||
|
||||
Backups use the log file name given to Logger, in the form `name-timestamp.ext`
|
||||
where name is the filename without the extension, timestamp is the time at which
|
||||
the log was rotated formatted with the time.Time format of
|
||||
`2006-01-02T15-04-05.000` and the extension is the original extension. For
|
||||
example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
|
||||
at 6:30pm on Nov 4 2016 would use the filename
|
||||
`/var/log/foo/server-2016-11-04T18-30-00.000.log`
|
||||
|
||||
### Cleaning Up Old Log Files
|
||||
Whenever a new logfile gets created, old log files may be deleted. The most
|
||||
recent files according to the encoded timestamp will be retained, up to a
|
||||
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
||||
with an encoded timestamp older than MaxAge days are deleted, regardless of
|
||||
MaxBackups. Note that the time encoded in the timestamp is the rotation
|
||||
time, which may differ from the last time that file was written to.
|
||||
|
||||
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Close
|
||||
``` go
|
||||
func (l *Logger) Close() error
|
||||
```
|
||||
Close implements io.Closer, and closes the current logfile.
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Rotate
|
||||
``` go
|
||||
func (l *Logger) Rotate() error
|
||||
```
|
||||
Rotate causes Logger to close the existing log file and immediately create a
|
||||
new one. This is a helper function for applications that want to initiate
|
||||
rotations outside of the normal rotation rules, such as in response to
|
||||
SIGHUP. After rotating, this initiates a cleanup of old log files according
|
||||
to the normal rules.
|
||||
|
||||
**Example**
|
||||
|
||||
Example of how to rotate in response to SIGHUP.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
l := &lumberjack.Logger{}
|
||||
log.SetOutput(l)
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGHUP)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
<-c
|
||||
l.Rotate()
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
### func (\*Logger) Write
|
||||
``` go
|
||||
func (l *Logger) Write(p []byte) (n int, err error)
|
||||
```
|
||||
Write implements io.Writer. If a write would cause the log file to be larger
|
||||
than MaxSize, the file is closed, renamed to include a timestamp of the
|
||||
current time, and a new log file is created using the original log file name.
|
||||
If the length of the write is greater than MaxSize, an error is returned.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
|
@ -0,0 +1,11 @@
|
|||
// +build !linux
|
||||
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// chown is the non-linux stub: lumberjack only preserves file ownership
// across rotations on linux (see the linux build of this function), so on
// every other platform this is a deliberate no-op that always succeeds.
func chown(_ string, _ os.FileInfo) error {
	return nil
}
|
|
@ -0,0 +1,19 @@
|
|||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown

// chown ensures the file at name exists — creating (and truncating) it with
// the same permission bits as info if necessary — and then transfers its
// ownership to the uid/gid of the original file described by info.
func chown(name string, info os.FileInfo) error {
	fd, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
	if err != nil {
		return err
	}
	fd.Close()
	// On linux, Sys() is always a *syscall.Stat_t for files from os.Stat.
	st := info.Sys().(*syscall.Stat_t)
	return os_Chown(name, int(st.Uid), int(st.Gid))
}
|
|
@ -0,0 +1,541 @@
|
|||
// Package lumberjack provides a rolling logger.
|
||||
//
|
||||
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
// thusly:
|
||||
//
|
||||
// import "gopkg.in/natefinch/lumberjack.v2"
|
||||
//
|
||||
// The package name remains simply lumberjack, and the code resides at
|
||||
// https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
//
|
||||
// Lumberjack is intended to be one part of a logging infrastructure.
|
||||
// It is not an all-in-one solution, but instead is a pluggable
|
||||
// component at the bottom of the logging stack that simply controls the files
|
||||
// to which logs are written.
|
||||
//
|
||||
// Lumberjack plays well with any logging package that can write to an
|
||||
// io.Writer, including the standard library's log package.
|
||||
//
|
||||
// Lumberjack assumes that only one process is writing to the output files.
|
||||
// Using the same lumberjack configuration from multiple processes on the same
|
||||
// machine will result in improper behavior.
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// backupTimeFormat is the time.Time reference layout used to embed the
	// rotation timestamp in backup file names.
	backupTimeFormat = "2006-01-02T15-04-05.000"
	// compressSuffix is appended to a backup's name once it has been gzipped.
	compressSuffix = ".gz"
	// defaultMaxSize is the fallback MaxSize in megabytes, used when the
	// Logger's MaxSize field is zero.
	defaultMaxSize = 100
)

// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)
||||
|
||||
// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write.  If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using the original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize
// megabytes, the current file is closed, renamed, and a new log file created
// with the original name. Thus, the filename you give Logger is always the
// "current" log file.
//
// Backups use the log file name given to Logger, in the form
// `name-timestamp.ext` where name is the filename without the extension,
// timestamp is the time at which the log was rotated formatted with the
// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
// original extension.  For example, if your Logger.Filename is
// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 4 2016 would
// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted.  The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups.  Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// Compress determines if the rotated log files should be compressed
	// using gzip. The default is not to perform compression.
	Compress bool `json:"compress" yaml:"compress"`

	// size is the number of bytes written to the currently open file.
	size int64
	// file is the currently open log file; nil before the first Write and
	// after Close.
	file *os.File
	// mu serializes Write, Close and Rotate.
	mu sync.Mutex

	// millCh signals the mill goroutine to run one cleanup pass.
	millCh chan bool
	// startMill lazily starts the mill goroutine exactly once.
	startMill sync.Once
}
|
||||
|
||||
var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// os_Stat exists so it can be mocked out by tests.
	// NOTE(review): the underscore name predates modern Go naming style and
	// is kept as-is because it is shared across files in this package.
	os_Stat = os.Stat

	// megabyte is the conversion factor between MaxSize and bytes.  It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)
|
||||
|
||||
// Write implements io.Writer. If a write would cause the log file to be larger
|
||||
// than MaxSize, the file is closed, renamed to include a timestamp of the
|
||||
// current time, and a new log file is created using the original log file name.
|
||||
// If the length of the write is greater than MaxSize, an error is returned.
|
||||
func (l *Logger) Write(p []byte) (n int, err error) {
|
||||
l.mu.Lock()
|
||||
defer l.mu.Unlock()
|
||||
|
||||
writeLen := int64(len(p))
|
||||
if writeLen > l.max() {
|
||||
return 0, fmt.Errorf(
|
||||
"write length %d exceeds maximum file size %d", writeLen, l.max(),
|
||||
)
|
||||
}
|
||||
|
||||
if l.file == nil {
|
||||
if err = l.openExistingOrNew(len(p)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if l.size+writeLen > l.max() {
|
||||
if err := l.rotate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
n, err = l.file.Write(p)
|
||||
l.size += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close implements io.Closer, and closes the current logfile.
// It takes the logger mutex, so it serializes with Write and Rotate;
// a Logger that has been closed starts a fresh file on the next Write.
func (l *Logger) Close() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.close()
}
||||
|
||||
// close closes the file if it is open.
|
||||
func (l *Logger) close() error {
|
||||
if l.file == nil {
|
||||
return nil
|
||||
}
|
||||
err := l.file.Close()
|
||||
l.file = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Rotate causes Logger to close the existing log file and immediately create a
// new one.  This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP.  After rotating, this initiates compression and removal of old log
// files according to the configuration.
// It takes the logger mutex, so it serializes with Write and Close.
func (l *Logger) Rotate() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.rotate()
}
|
||||
|
||||
// rotate closes the current file, moves it aside with a timestamp in the name,
|
||||
// (if it exists), opens a new file with the original filename, and then runs
|
||||
// post-rotation processing and removal.
|
||||
func (l *Logger) rotate() error {
|
||||
if err := l.close(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.openNew(); err != nil {
|
||||
return err
|
||||
}
|
||||
l.mill()
|
||||
return nil
|
||||
}
|
||||
|
||||
// openNew opens a new log file for writing, moving any old log file out of the
|
||||
// way. This methods assumes the file has already been closed.
|
||||
func (l *Logger) openNew() error {
|
||||
err := os.MkdirAll(l.dir(), 0744)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't make directories for new logfile: %s", err)
|
||||
}
|
||||
|
||||
name := l.filename()
|
||||
mode := os.FileMode(0644)
|
||||
info, err := os_Stat(name)
|
||||
if err == nil {
|
||||
// Copy the mode off the old logfile.
|
||||
mode = info.Mode()
|
||||
// move the existing file
|
||||
newname := backupName(name, l.LocalTime)
|
||||
if err := os.Rename(name, newname); err != nil {
|
||||
return fmt.Errorf("can't rename log file: %s", err)
|
||||
}
|
||||
|
||||
// this is a no-op anywhere but linux
|
||||
if err := chown(name, info); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// we use truncate here because this should only get called when we've moved
|
||||
// the file ourselves. if someone else creates the file in the meantime,
|
||||
// just wipe out the contents.
|
||||
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't open new logfile: %s", err)
|
||||
}
|
||||
l.file = f
|
||||
l.size = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// backupName creates a new filename from the given name, inserting a timestamp
|
||||
// between the filename and the extension, using the local time if requested
|
||||
// (otherwise UTC).
|
||||
func backupName(name string, local bool) string {
|
||||
dir := filepath.Dir(name)
|
||||
filename := filepath.Base(name)
|
||||
ext := filepath.Ext(filename)
|
||||
prefix := filename[:len(filename)-len(ext)]
|
||||
t := currentTime()
|
||||
if !local {
|
||||
t = t.UTC()
|
||||
}
|
||||
|
||||
timestamp := t.Format(backupTimeFormat)
|
||||
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
|
||||
}
|
||||
|
||||
// openExistingOrNew opens the logfile if it exists and if the current write
|
||||
// would not put it over MaxSize. If there is no such file or the write would
|
||||
// put it over the MaxSize, a new file is created.
|
||||
func (l *Logger) openExistingOrNew(writeLen int) error {
|
||||
l.mill()
|
||||
|
||||
filename := l.filename()
|
||||
info, err := os_Stat(filename)
|
||||
if os.IsNotExist(err) {
|
||||
return l.openNew()
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting log file info: %s", err)
|
||||
}
|
||||
|
||||
if info.Size()+int64(writeLen) >= l.max() {
|
||||
return l.rotate()
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
// if we fail to open the old log file for some reason, just ignore
|
||||
// it and open a new log file.
|
||||
return l.openNew()
|
||||
}
|
||||
l.file = file
|
||||
l.size = info.Size()
|
||||
return nil
|
||||
}
|
||||
|
||||
// genFilename generates the name of the logfile from the current time.
|
||||
func (l *Logger) filename() string {
|
||||
if l.Filename != "" {
|
||||
return l.Filename
|
||||
}
|
||||
name := filepath.Base(os.Args[0]) + "-lumberjack.log"
|
||||
return filepath.Join(os.TempDir(), name)
|
||||
}
|
||||
|
||||
// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
//
// Errors from individual remove/compress operations are collected
// first-error-wins; the pass continues past failures so one bad file
// does not block cleanup of the rest.
func (l *Logger) millRunOnce() error {
	// Nothing to do when no retention or compression policy is configured.
	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
		return nil
	}

	// oldLogFiles returns backups sorted newest-first, which is what the
	// MaxBackups partitioning below relies on.
	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var compress, remove []logInfo

	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		preserved := make(map[string]bool)
		var remaining []logInfo
		for _, f := range files {
			// Only count the uncompressed log file or the
			// compressed log file, not both.
			fn := f.Name()
			if strings.HasSuffix(fn, compressSuffix) {
				fn = fn[:len(fn)-len(compressSuffix)]
			}
			preserved[fn] = true

			if len(preserved) > l.MaxBackups {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}
	if l.MaxAge > 0 {
		// A "day" here is a fixed 24h window, per the MaxAge docs.
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
		cutoff := currentTime().Add(-1 * diff)

		var remaining []logInfo
		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				remove = append(remove, f)
			} else {
				remaining = append(remaining, f)
			}
		}
		files = remaining
	}

	// Anything that survived retention and is not yet gzipped gets queued
	// for compression.
	if l.Compress {
		for _, f := range files {
			if !strings.HasSuffix(f.Name(), compressSuffix) {
				compress = append(compress, f)
			}
		}
	}

	for _, f := range remove {
		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
		if err == nil && errRemove != nil {
			err = errRemove
		}
	}
	for _, f := range compress {
		fn := filepath.Join(l.dir(), f.Name())
		errCompress := compressLogFile(fn, fn+compressSuffix)
		if err == nil && errCompress != nil {
			err = errCompress
		}
	}

	return err
}
|
||||
|
||||
// millRun runs in a goroutine to manage post-rotation compression and removal
|
||||
// of old log files.
|
||||
func (l *Logger) millRun() {
|
||||
for _ = range l.millCh {
|
||||
// what am I going to do, log this?
|
||||
_ = l.millRunOnce()
|
||||
}
|
||||
}
|
||||
|
||||
// mill performs post-rotation compression and removal of stale log files,
// starting the mill goroutine if necessary.
func (l *Logger) mill() {
	// Start the worker goroutine lazily, exactly once per Logger.
	l.startMill.Do(func() {
		l.millCh = make(chan bool, 1)
		go l.millRun()
	})
	// Non-blocking send: if a cleanup signal is already queued in the
	// 1-slot buffer, this request is redundant and is dropped.
	select {
	case l.millCh <- true:
	default:
	}
}
|
||||
|
||||
// oldLogFiles returns the list of backup log files stored in the same
|
||||
// directory as the current log file, sorted by ModTime
|
||||
func (l *Logger) oldLogFiles() ([]logInfo, error) {
|
||||
files, err := ioutil.ReadDir(l.dir())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't read log file directory: %s", err)
|
||||
}
|
||||
logFiles := []logInfo{}
|
||||
|
||||
prefix, ext := l.prefixAndExt()
|
||||
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
|
||||
logFiles = append(logFiles, logInfo{t, f})
|
||||
continue
|
||||
}
|
||||
if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
|
||||
logFiles = append(logFiles, logInfo{t, f})
|
||||
continue
|
||||
}
|
||||
// error parsing means that the suffix at the end was not generated
|
||||
// by lumberjack, and therefore it's not a backup file.
|
||||
}
|
||||
|
||||
sort.Sort(byFormatTime(logFiles))
|
||||
|
||||
return logFiles, nil
|
||||
}
|
||||
|
||||
// timeFromName extracts the formatted time from the filename by stripping off
|
||||
// the filename's prefix and extension. This prevents someone's filename from
|
||||
// confusing time.parse.
|
||||
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
|
||||
if !strings.HasPrefix(filename, prefix) {
|
||||
return time.Time{}, errors.New("mismatched prefix")
|
||||
}
|
||||
if !strings.HasSuffix(filename, ext) {
|
||||
return time.Time{}, errors.New("mismatched extension")
|
||||
}
|
||||
ts := filename[len(prefix) : len(filename)-len(ext)]
|
||||
return time.Parse(backupTimeFormat, ts)
|
||||
}
|
||||
|
||||
// max returns the maximum size in bytes of log files before rolling.
|
||||
func (l *Logger) max() int64 {
|
||||
if l.MaxSize == 0 {
|
||||
return int64(defaultMaxSize * megabyte)
|
||||
}
|
||||
return int64(l.MaxSize) * int64(megabyte)
|
||||
}
|
||||
|
||||
// dir returns the directory for the current filename (which may be the
// default os.TempDir() path when Filename is empty).
func (l *Logger) dir() string {
	return filepath.Dir(l.filename())
}
|
||||
|
||||
// prefixAndExt returns the filename part and extension part from the Logger's
|
||||
// filename.
|
||||
func (l *Logger) prefixAndExt() (prefix, ext string) {
|
||||
filename := filepath.Base(l.filename())
|
||||
ext = filepath.Ext(filename)
|
||||
prefix = filename[:len(filename)-len(ext)] + "-"
|
||||
return prefix, ext
|
||||
}
|
||||
|
||||
// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
//
// The named return err feeds the deferred cleanup below: any failure after
// the gzip writer is created removes the partial dst and wraps the error.
func compressLogFile(src, dst string) (err error) {
	f, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open log file: %v", err)
	}
	defer f.Close()

	fi, err := os_Stat(src)
	if err != nil {
		return fmt.Errorf("failed to stat log file: %v", err)
	}

	// chown on linux also creates dst with src's mode; no-op elsewhere.
	if err := chown(dst, fi); err != nil {
		return fmt.Errorf("failed to chown compressed log file: %v", err)
	}

	// If this file already exists, we presume it was created by
	// a previous attempt to compress the log file.
	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
	if err != nil {
		return fmt.Errorf("failed to open compressed log file: %v", err)
	}
	// Deferred closes are harmless double-closes on the success path,
	// where the files are closed explicitly below.
	defer gzf.Close()

	gz := gzip.NewWriter(gzf)

	// On error, remove the partial dst so a later attempt starts clean.
	// The shadowed errs returned below assign to the named return, so
	// this defer sees them.
	defer func() {
		if err != nil {
			os.Remove(dst)
			err = fmt.Errorf("failed to compress log file: %v", err)
		}
	}()

	if _, err := io.Copy(gz, f); err != nil {
		return err
	}
	if err := gz.Close(); err != nil {
		return err
	}
	if err := gzf.Close(); err != nil {
		return err
	}

	// Only remove src once the compressed copy is safely flushed and closed.
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Remove(src); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// logInfo pairs a backup file's os.FileInfo with the rotation timestamp
// parsed out of its name.
type logInfo struct {
	timestamp time.Time
	os.FileInfo
}

// byFormatTime sorts logInfos newest-first by the time encoded in the name.
type byFormatTime []logInfo

func (b byFormatTime) Len() int { return len(b) }

func (b byFormatTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] }

func (b byFormatTime) Less(i, j int) bool {
	return b[i].timestamp.After(b[j].timestamp)
}
|
|
@ -31,9 +31,9 @@ github.com/ethereum/go-ethereum/common
|
|||
github.com/ethereum/go-ethereum/crypto
|
||||
github.com/ethereum/go-ethereum/rpc
|
||||
github.com/ethereum/go-ethereum/crypto/secp256k1
|
||||
github.com/ethereum/go-ethereum/crypto/sha3
|
||||
github.com/ethereum/go-ethereum/p2p
|
||||
github.com/ethereum/go-ethereum/common/hexutil
|
||||
github.com/ethereum/go-ethereum/crypto/sha3
|
||||
github.com/ethereum/go-ethereum/common/math
|
||||
github.com/ethereum/go-ethereum/rlp
|
||||
github.com/ethereum/go-ethereum/log
|
||||
|
@ -297,7 +297,7 @@ github.com/status-im/migrate/source/go_bindata
|
|||
# github.com/status-im/rendezvous v1.1.0
|
||||
github.com/status-im/rendezvous
|
||||
github.com/status-im/rendezvous/protocol
|
||||
# github.com/status-im/status-go v0.23.0-beta.7
|
||||
# github.com/status-im/status-go v0.23.0-beta.9
|
||||
github.com/status-im/status-go/logutils
|
||||
github.com/status-im/status-go/node
|
||||
github.com/status-im/status-go/params
|
||||
|
@ -314,7 +314,6 @@ github.com/status-im/status-go/services/personal
|
|||
github.com/status-im/status-go/services/status
|
||||
github.com/status-im/status-go/static
|
||||
github.com/status-im/status-go/timesource
|
||||
github.com/status-im/status-go/services
|
||||
github.com/status-im/status-go/services/shhext/chat
|
||||
github.com/status-im/status-go/services/shhext/dedup
|
||||
github.com/status-im/status-go/services/shhext/mailservers
|
||||
|
@ -333,6 +332,7 @@ github.com/stretchr/testify/require
|
|||
github.com/stretchr/testify/assert
|
||||
# github.com/syndtr/goleveldb v0.0.0-20181128100959-b001fa50d6b2
|
||||
github.com/syndtr/goleveldb/leveldb
|
||||
github.com/syndtr/goleveldb/leveldb/storage
|
||||
github.com/syndtr/goleveldb/leveldb/util
|
||||
github.com/syndtr/goleveldb/leveldb/cache
|
||||
github.com/syndtr/goleveldb/leveldb/comparer
|
||||
|
@ -342,7 +342,6 @@ github.com/syndtr/goleveldb/leveldb/iterator
|
|||
github.com/syndtr/goleveldb/leveldb/journal
|
||||
github.com/syndtr/goleveldb/leveldb/memdb
|
||||
github.com/syndtr/goleveldb/leveldb/opt
|
||||
github.com/syndtr/goleveldb/leveldb/storage
|
||||
github.com/syndtr/goleveldb/leveldb/table
|
||||
# github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc
|
||||
github.com/whyrusleeping/go-logging
|
||||
|
@ -404,6 +403,8 @@ golang.org/x/text/runes
|
|||
golang.org/x/text/internal/tag
|
||||
# gopkg.in/go-playground/validator.v9 v9.24.0
|
||||
gopkg.in/go-playground/validator.v9
|
||||
# gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
gopkg.in/natefinch/lumberjack.v2
|
||||
# gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
|
||||
gopkg.in/natefinch/npipe.v2
|
||||
# gopkg.in/olebedev/go-duktape.v3 v3.0.0-20181125150206-ccb656ba24c2
|
||||
|
|
Loading…
Reference in New Issue