logging: Add subsystem to log API (#19002)

Create new code paths for multiple subsystems in the code. This will
make maintaining this easier later.

Also introduce bugLogIf() for errors that should not happen in the first
place.
This commit is contained in:
Anis Eleuch
2024-04-04 13:04:40 +01:00
committed by GitHub
parent 2228eb61cb
commit 95bf4a57b6
123 changed files with 972 additions and 786 deletions

View File

@@ -37,6 +37,14 @@ import (
"github.com/minio/pkg/v2/env"
)
const (
// logSubsys tags every log entry emitted by this package with the
// "locking" subsystem, so operators can filter logs per subsystem.
logSubsys = "locking"
)
// lockLogIf forwards err (if non-nil) to the central logger, stamped with
// the "locking" subsystem. It is a thin wrapper so call sites in this
// package do not repeat the subsystem string.
func lockLogIf(ctx context.Context, err error) {
logger.LogIf(ctx, logSubsys, err)
}
// Enabled indicates object locking is enabled
const Enabled = "Enabled"
@@ -153,7 +161,7 @@ type Retention struct {
func (r Retention) Retain(created time.Time) bool {
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
lockLogIf(context.Background(), err)
// Retain
return true
}
@@ -262,7 +270,7 @@ func (config *Config) ToRetention() Retention {
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
lockLogIf(context.Background(), err)
// Do not change any configuration
// upon NTP failure.
return r
@@ -364,7 +372,7 @@ func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) {
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
lockLogIf(context.Background(), err)
return &ret, ErrPastObjectLockRetainDate
}
@@ -427,7 +435,7 @@ func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionD
t, err := UTCNowNTP()
if err != nil {
logger.LogIf(context.Background(), err)
lockLogIf(context.Background(), err)
return rmode, r, ErrPastObjectLockRetainDate
}

View File

@@ -38,6 +38,10 @@ import (
xnet "github.com/minio/pkg/v2/net"
)
// authNLogIf forwards err (if non-nil) to the central logger, stamped with
// the "authN" subsystem tag, so authentication-plugin errors are
// attributable in aggregated logs.
func authNLogIf(ctx context.Context, err error) {
logger.LogIf(ctx, "authN", err)
}
// Authentication Plugin config and env variables
const (
URL = "url"
@@ -434,7 +438,7 @@ func (o *AuthNPlugin) checkConnectivity(ctx context.Context) bool {
req, err := http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil)
if err != nil {
logger.LogIf(ctx, err)
authNLogIf(ctx, err)
return false
}

View File

@@ -31,6 +31,14 @@ import (
xnet "github.com/minio/pkg/v2/net"
)
const (
// logSubsys tags log entries from this package with the "notify"
// subsystem for per-subsystem filtering.
logSubsys = "notify"
)
// logOnceIf logs err at most once per unique id, stamped with the "notify"
// subsystem. It matches the logger-callback signature expected by the
// notification target constructors (e.g. target.NewWebhookTarget below).
func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, logSubsys, err, id, errKind...)
}
// ErrTargetsOffline - Indicates single/multiple target failures.
var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets")
@@ -76,7 +84,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport)
t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport)
if err != nil {
return nil, err
}

View File

@@ -40,6 +40,14 @@ const (
formatNamespace = "namespace"
)
const (
// logSubsys tags log entries from this package with the "notify"
// subsystem for per-subsystem filtering.
logSubsys = "notify"
)
// logOnceIf logs err at most once per unique id, stamped with the "notify"
// subsystem. Its signature matches the logger callback taken by the
// target constructors (NewAMQPTarget, NewKafkaTarget, etc. below).
func logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, logSubsys, err, id, errKind...)
}
// ErrTargetsOffline - Indicates single/multiple target failures.
var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets")
@@ -97,7 +105,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewAMQPTarget(id, args, logger.LogOnceIf)
t, err := target.NewAMQPTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -112,7 +120,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewElasticsearchTarget(id, args, logger.LogOnceIf)
t, err := target.NewElasticsearchTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -129,7 +137,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
continue
}
args.TLS.RootCAs = transport.TLSClientConfig.RootCAs
t, err := target.NewKafkaTarget(id, args, logger.LogOnceIf)
t, err := target.NewKafkaTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -147,7 +155,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
continue
}
args.RootCAs = transport.TLSClientConfig.RootCAs
t, err := target.NewMQTTTarget(id, args, logger.LogOnceIf)
t, err := target.NewMQTTTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -162,7 +170,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewMySQLTarget(id, args, logger.LogOnceIf)
t, err := target.NewMySQLTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -177,7 +185,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewNATSTarget(id, args, logger.LogOnceIf)
t, err := target.NewNATSTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -192,7 +200,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewNSQTarget(id, args, logger.LogOnceIf)
t, err := target.NewNSQTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -207,7 +215,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewPostgreSQLTarget(id, args, logger.LogOnceIf)
t, err := target.NewPostgreSQLTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -222,7 +230,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewRedisTarget(id, args, logger.LogOnceIf)
t, err := target.NewRedisTarget(id, args, logOnceIf)
if err != nil {
return nil, err
}
@@ -237,7 +245,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t
if !args.Enable {
continue
}
t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport)
t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport)
if err != nil {
return nil, err
}

View File

@@ -397,7 +397,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
return cfg, err
}
if inlineBlock > 128*humanize.KiByte {
logger.LogOnceIf(context.Background(), fmt.Errorf("inline block value bigger than recommended max of 128KiB -> %s, performance may degrade for PUT please benchmark the changes", inlineBlockStr), inlineBlockStr)
configLogOnceIf(context.Background(), fmt.Errorf("inline block value bigger than recommended max of 128KiB -> %s, performance may degrade for PUT please benchmark the changes", inlineBlockStr), inlineBlockStr)
}
cfg.inlineBlock = int64(inlineBlock)
} else {
@@ -408,3 +408,7 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
return cfg, nil
}
// configLogOnceIf logs err at most once per unique id, stamped with the
// "config" subsystem tag, deduplicating repeated configuration warnings
// (e.g. the inline-block size warning in LookupConfig above).
func configLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, "config", err, id, errKind...)
}

View File

@@ -30,6 +30,8 @@ import (
)
const (
logSubsys = "notify"
// The maximum allowed number of concurrent Send() calls to all configured notifications targets
maxConcurrentAsyncSend = 50000
)
@@ -290,7 +292,7 @@ func (list *TargetList) sendSync(event Event, targetIDset TargetIDSet) {
list.incFailedEvents(id)
reqInfo := &logger.ReqInfo{}
reqInfo.AppendTags("targetID", id.String())
logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String())
logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String())
}
}(id, target)
}
@@ -313,7 +315,7 @@ func (list *TargetList) sendAsync(event Event, targetIDset TargetIDSet) {
for id := range targetIDset {
reqInfo := &logger.ReqInfo{}
reqInfo.AppendTags("targetID", id.String())
logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String())
logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String())
}
return
}

View File

@@ -47,6 +47,18 @@ import (
"github.com/zeebo/xxh3"
)
// gridLogIf forwards err (if non-nil) to the central logger, stamped with
// the "grid" subsystem tag.
func gridLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "grid", err, errKind...)
}
// gridLogIfNot logs err under the "grid" subsystem unless it matches one of
// the ignored errors (used to suppress expected errors such as net.ErrClosed
// and io.EOF during connection teardown).
func gridLogIfNot(ctx context.Context, err error, ignored ...error) {
logger.LogIfNot(ctx, "grid", err, ignored...)
}
// gridLogOnceIf logs err at most once per unique id under the "grid"
// subsystem, preventing log floods from repeating conditions such as
// reconnect loops.
func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
logger.LogOnceIf(ctx, "grid", err, id, errKind...)
}
// A Connection is a remote connection.
// There is no distinction externally whether the connection was initiated from
// this server or from the remote.
@@ -667,7 +679,7 @@ func (c *Connection) connect() {
if gotState != StateConnecting {
// Don't print error on first attempt,
// and after that only once per hour.
logger.LogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial)
gridLogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial)
}
c.updateState(StateConnectionError)
time.Sleep(sleep)
@@ -898,7 +910,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
go func() {
defer func() {
if rec := recover(); rec != nil {
logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec))
gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec))
debug.PrintStack()
}
c.connChange.L.Lock()
@@ -960,7 +972,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
msg, err = readDataInto(msg, conn, c.side, ws.OpBinary)
if err != nil {
cancel(ErrDisconnected)
logger.LogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF)
gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF)
return
}
if c.incomingBytes != nil {
@@ -971,7 +983,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
var m message
subID, remain, err := m.parse(msg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws parse package: %w", err))
gridLogIf(ctx, fmt.Errorf("ws parse package: %w", err))
cancel(ErrDisconnected)
return
}
@@ -992,7 +1004,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
var next []byte
next, remain, err = msgp.ReadBytesZC(remain)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws read merged: %w", err))
gridLogIf(ctx, fmt.Errorf("ws read merged: %w", err))
cancel(ErrDisconnected)
return
}
@@ -1000,7 +1012,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
m.Payload = nil
subID, _, err = m.parse(next)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws parse merged: %w", err))
gridLogIf(ctx, fmt.Errorf("ws parse merged: %w", err))
cancel(ErrDisconnected)
return
}
@@ -1012,7 +1024,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
// Write function.
defer func() {
if rec := recover(); rec != nil {
logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec))
gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec))
debug.PrintStack()
}
if debugPrint {
@@ -1058,14 +1070,14 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
if lastPong > 0 {
lastPongTime := time.Unix(lastPong, 0)
if d := time.Since(lastPongTime); d > connPingInterval*2 {
logger.LogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond)))
gridLogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond)))
return
}
}
var err error
toSend, err = pingFrame.MarshalMsg(GetByteBuffer()[:0])
if err != nil {
logger.LogIf(ctx, err)
gridLogIf(ctx, err)
// Fake it...
atomic.StoreInt64(&c.LastPong, time.Now().Unix())
continue
@@ -1107,18 +1119,18 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
buf.Reset()
err := wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
return
}
PutByteBuffer(toSend)
err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout))
if err != nil {
logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
return
}
_, err = buf.WriteTo(conn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws write: %w", err))
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
return
}
continue
@@ -1135,7 +1147,7 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
var err error
toSend, err = m.MarshalMsg(toSend)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err))
gridLogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err))
return
}
// Append as byte slices.
@@ -1151,18 +1163,18 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
buf.Reset()
err = wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
return
}
// buf is our local buffer, so we can reuse it.
err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout))
if err != nil {
logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
return
}
_, err = buf.WriteTo(conn)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("ws write: %w", err))
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
return
}
@@ -1202,7 +1214,7 @@ func (c *Connection) handleMsg(ctx context.Context, m message, subID *subHandler
case OpMuxConnectError:
c.handleConnectMuxError(ctx, m)
default:
logger.LogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op))
gridLogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op))
}
}
@@ -1211,7 +1223,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub
if m.Flags&FlagStateless != 0 {
// Reject for now, so we can safely add it later.
if true {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"}))
return
}
@@ -1222,7 +1234,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub
handler = c.handlers.subStateless[*subID]
}
if handler == nil {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
return
}
_, _ = c.inStream.LoadOrCompute(m.MuxID, func() *muxServer {
@@ -1233,7 +1245,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub
var handler *StreamHandler
if subID == nil {
if !m.Handler.valid() {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"}))
return
}
handler = c.handlers.streams[m.Handler]
@@ -1241,7 +1253,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub
handler = c.handlers.subStreams[*subID]
}
if handler == nil {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
return
}
@@ -1257,7 +1269,7 @@ func (c *Connection) handleConnectMuxError(ctx context.Context, m message) {
if v, ok := c.outgoing.Load(m.MuxID); ok {
var cErr muxConnectError
_, err := cErr.UnmarshalMsg(m.Payload)
logger.LogIf(ctx, err)
gridLogIf(ctx, err)
v.error(RemoteErr(cErr.Error))
return
}
@@ -1269,7 +1281,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) {
v, ok := c.outgoing.Load(m.MuxID)
if !ok {
if m.Flags&FlagEOF == 0 {
logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
}
return
}
@@ -1281,7 +1293,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) {
func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHandlerID) {
if !m.Handler.valid() {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"}))
return
}
if debugReqs {
@@ -1295,7 +1307,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan
handler = c.handlers.subSingle[*subID]
}
if handler == nil {
logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"}))
return
}
@@ -1313,7 +1325,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan
if rec := recover(); rec != nil {
err = NewRemoteErrString(fmt.Sprintf("handleMessages: panic recovered: %v", rec))
debug.PrintStack()
logger.LogIf(ctx, err)
gridLogIf(ctx, err)
}
}()
b, err = handler(m.Payload)
@@ -1346,7 +1358,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan
m.Payload = b
m.setZeroPayloadFlag()
}
logger.LogIf(ctx, c.queueMsg(m, nil))
gridLogIf(ctx, c.queueMsg(m, nil))
}(m)
}
@@ -1354,7 +1366,7 @@ func (c *Connection) handlePong(ctx context.Context, m message) {
var pong pongMsg
_, err := pong.UnmarshalMsg(m.Payload)
PutByteBuffer(m.Payload)
logger.LogIf(ctx, err)
gridLogIf(ctx, err)
if m.MuxID == 0 {
atomic.StoreInt64(&c.LastPong, time.Now().Unix())
return
@@ -1364,22 +1376,22 @@ func (c *Connection) handlePong(ctx context.Context, m message) {
} else {
// We don't care if the client was removed in the meantime,
// but we send a disconnect message to the server just in case.
logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
}
}
func (c *Connection) handlePing(ctx context.Context, m message) {
if m.MuxID == 0 {
logger.LogIf(ctx, c.queueMsg(m, &pongMsg{}))
gridLogIf(ctx, c.queueMsg(m, &pongMsg{}))
return
}
// Single calls do not support pinging.
if v, ok := c.inStream.Load(m.MuxID); ok {
pong := v.ping(m.Seq)
logger.LogIf(ctx, c.queueMsg(m, &pong))
gridLogIf(ctx, c.queueMsg(m, &pong))
} else {
pong := pongMsg{NotFound: true}
logger.LogIf(ctx, c.queueMsg(m, &pong))
gridLogIf(ctx, c.queueMsg(m, &pong))
}
return
}
@@ -1442,7 +1454,7 @@ func (c *Connection) handleMuxClientMsg(ctx context.Context, m message) {
if debugPrint {
fmt.Println(c.Local, "OpMuxClientMsg: Unknown Mux:", m.MuxID)
}
logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
PutByteBuffer(m.Payload)
return
}
@@ -1486,7 +1498,7 @@ func (c *Connection) handleMuxServerMsg(ctx context.Context, m message) {
v, ok := c.outgoing.Load(m.MuxID)
if !ok {
if m.Flags&FlagEOF == 0 {
logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil))
}
PutByteBuffer(m.Payload)
return
@@ -1522,7 +1534,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) {
}
v, loaded := c.inStream.LoadAndDelete(muxID)
if loaded && v != nil {
logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil))
gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil))
v.close()
}
} else {
@@ -1535,7 +1547,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) {
fmt.Println(muxID, c.String(), "deleteMux: DELETING MUX")
}
v.close()
logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil))
gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil))
}
}
}

View File

@@ -27,7 +27,6 @@ import (
"github.com/minio/minio/internal/hash/sha256"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
)
@@ -466,7 +465,7 @@ func (h *SingleHandler[Req, Resp]) AllowCallRequestPool(b bool) *SingleHandler[R
// This may only be set ONCE before use.
func (h *SingleHandler[Req, Resp]) IgnoreNilConn() *SingleHandler[Req, Resp] {
if h.ignoreNilConn {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn")
gridLogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn")
}
h.ignoreNilConn = true
return h
@@ -767,7 +766,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func
input := h.NewRequest()
_, err := input.UnmarshalMsg(v)
if err != nil {
logger.LogOnceIf(ctx, err, err.Error())
gridLogOnceIf(ctx, err, err.Error())
}
PutByteBuffer(v)
// Send input
@@ -791,7 +790,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func
}
dst, err := v.MarshalMsg(GetByteBufferCap(v.Msgsize()))
if err != nil {
logger.LogOnceIf(ctx, err, err.Error())
gridLogOnceIf(ctx, err, err.Error())
}
if !h.sharedResponse {
h.PutResponse(v)
@@ -877,7 +876,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Stre
for req := range reqT {
b, err := req.MarshalMsg(GetByteBufferCap(req.Msgsize()))
if err != nil {
logger.LogOnceIf(ctx, err, err.Error())
gridLogOnceIf(ctx, err, err.Error())
}
h.PutRequest(req)
stream.Requests <- b

View File

@@ -29,7 +29,6 @@ import (
"github.com/gobwas/ws/wsutil"
"github.com/google/uuid"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/mux"
)
@@ -142,7 +141,7 @@ func (m *Manager) Handler() http.HandlerFunc {
if r := recover(); r != nil {
debug.PrintStack()
err := fmt.Errorf("grid: panic: %v\n", r)
logger.LogIf(context.Background(), err, err.Error())
gridLogIf(context.Background(), err, err.Error())
w.WriteHeader(http.StatusInternalServerError)
}
}()
@@ -151,7 +150,7 @@ func (m *Manager) Handler() http.HandlerFunc {
}
ctx := req.Context()
if err := m.authRequest(req); err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr+err.Error())
gridLogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr+err.Error())
w.WriteHeader(http.StatusForbidden)
return
}
@@ -168,7 +167,7 @@ func (m *Manager) Handler() http.HandlerFunc {
if err == nil {
return
}
logger.LogOnceIf(ctx, err, err.Error())
gridLogOnceIf(ctx, err, err.Error())
resp := connectResp{
ID: m.ID,
Accepted: false,

View File

@@ -27,7 +27,6 @@ import (
"time"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/zeebo/xxh3"
)
@@ -289,7 +288,7 @@ func (m *muxClient) addErrorNonBlockingClose(respHandler chan<- Response, err er
xioutil.SafeClose(respHandler)
}()
}
logger.LogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID}))
gridLogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID}))
m.closed = true
}
}
@@ -336,7 +335,7 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer <
case respHandler <- resp:
m.respMu.Lock()
if !m.closed {
logger.LogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, MuxID: m.MuxID}))
gridLogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, MuxID: m.MuxID}))
}
m.respMu.Unlock()
case <-m.ctx.Done():
@@ -349,7 +348,7 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer <
return
}
// Send new ping.
logger.LogIf(m.ctx, m.send(message{Op: OpPing, MuxID: m.MuxID}))
gridLogIf(m.ctx, m.send(message{Op: OpPing, MuxID: m.MuxID}))
}
}
}
@@ -509,7 +508,7 @@ func (m *muxClient) unblockSend(seq uint32) {
select {
case m.outBlock <- struct{}{}:
default:
logger.LogIf(m.ctx, errors.New("output unblocked overflow"))
gridLogIf(m.ctx, errors.New("output unblocked overflow"))
}
}
@@ -548,7 +547,7 @@ func (m *muxClient) addResponse(r Response) (ok bool) {
return
}
err := errors.New("INTERNAL ERROR: Response was blocked")
logger.LogIf(m.ctx, err)
gridLogIf(m.ctx, err)
m.closeLocked()
return false
}

View File

@@ -26,7 +26,6 @@ import (
"time"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)
const lastPingThreshold = 4 * clientPingInterval
@@ -220,7 +219,7 @@ func (m *muxServer) handleRequests(ctx context.Context, msg message, send chan<-
fmt.Println("Mux", m.ID, "Handler took", time.Since(start).Round(time.Millisecond))
}
if r := recover(); r != nil {
logger.LogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r))
gridLogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r))
err := RemoteErr(fmt.Sprintf("handler panic: %v", r))
handlerErr = &err
}
@@ -244,7 +243,7 @@ func (m *muxServer) checkRemoteAlive() {
case <-t.C:
last := time.Since(time.Unix(atomic.LoadInt64(&m.LastPing), 0))
if last > lastPingThreshold {
logger.LogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last))
gridLogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last))
m.close()
return
}
@@ -281,7 +280,7 @@ func (m *muxServer) message(msg message) {
// Note, on EOF no value can be sent.
if msg.Flags&FlagEOF != 0 {
if len(msg.Payload) > 0 {
logger.LogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload"))
gridLogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload"))
}
if m.inbound != nil {
xioutil.SafeClose(m.inbound)
@@ -314,7 +313,7 @@ func (m *muxServer) unblockSend(seq uint32) {
select {
case m.outBlock <- struct{}{}:
default:
logger.LogIf(m.ctx, errors.New("output unblocked overflow"))
gridLogIf(m.ctx, errors.New("output unblocked overflow"))
}
}
@@ -354,7 +353,7 @@ func (m *muxServer) send(msg message) {
if debugPrint {
fmt.Printf("Mux %d, Sending %+v\n", m.ID, msg)
}
logger.LogIf(m.ctx, m.parent.queueMsg(msg, nil))
gridLogIf(m.ctx, m.parent.queueMsg(msg, nil))
}
func (m *muxServer) close() {

View File

@@ -34,6 +34,10 @@ import (
"github.com/minio/minio/internal/logger"
)
// hashLogIf forwards err (if non-nil) to the central logger, stamped with
// the "hash" subsystem tag, for checksum-related internal errors.
func hashLogIf(ctx context.Context, err error) {
logger.LogIf(ctx, "hash", err)
}
// MinIOMultipartChecksum is as metadata on multipart uploads to indicate checksum type.
const MinIOMultipartChecksum = "x-minio-multipart-checksum"
@@ -323,7 +327,7 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte {
var checksums int
// Ensure we don't divide by 0:
if c.Type.RawByteLen() == 0 || len(parts)%c.Type.RawByteLen() != 0 {
logger.LogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen()))
hashLogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen()))
checksums = 0
parts = nil
} else {

View File

@@ -36,7 +36,7 @@ const contextAuditKey = contextKeyType("audit-entry")
// SetAuditEntry sets Audit info in the context.
func SetAuditEntry(ctx context.Context, audit *audit.Entry) context.Context {
if ctx == nil {
LogIf(context.Background(), fmt.Errorf("context is nil"))
LogIf(context.Background(), "audit", fmt.Errorf("context is nil"))
return nil
}
return context.WithValue(ctx, contextAuditKey, audit)
@@ -144,7 +144,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl
// Send audit logs only to http targets.
for _, t := range auditTgts {
if err := t.Send(ctx, entry); err != nil {
LogOnceIf(ctx, fmt.Errorf("Unable to send an audit event to the target `%v`: %v", t, err), "send-audit-event-failure")
LogOnceIf(ctx, "logging", fmt.Errorf("Unable to send an audit event to the target `%v`: %v", t, err), "send-audit-event-failure")
}
}
}

View File

@@ -299,7 +299,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config {
}
url, err := xnet.ParseHTTPURL(endpoint)
if err != nil {
LogOnceIf(ctx, err, "logger-webhook-"+endpoint)
LogOnceIf(ctx, "logging", err, "logger-webhook-"+endpoint)
continue
}
cfg.HTTP[target] = http.Config{
@@ -327,7 +327,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config {
}
url, err := xnet.ParseHTTPURL(endpoint)
if err != nil {
LogOnceIf(ctx, err, "audit-webhook-"+endpoint)
LogOnceIf(ctx, "logging", err, "audit-webhook-"+endpoint)
continue
}
cfg.AuditWebhook[target] = http.Config{

View File

@@ -242,27 +242,26 @@ func HashString(input string) string {
// LogAlwaysIf prints a detailed error message during
// the execution of the server.
func LogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) {
func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
if err == nil {
return
}
logIf(ctx, err, errKind...)
logIf(ctx, subsystem, err, errKind...)
}
// LogIf prints a detailed error message during
// the execution of the server, if it is not an
// ignored error.
func LogIf(ctx context.Context, err error, errKind ...interface{}) {
func LogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
if logIgnoreError(err) {
return
}
logIf(ctx, err, errKind...)
logIf(ctx, subsystem, err, errKind...)
}
// LogIfNot prints a detailed error message during
// the execution of the server, if it is not an ignored error (either internal or given).
func LogIfNot(ctx context.Context, err error, ignored ...error) {
func LogIfNot(ctx context.Context, subsystem string, err error, ignored ...error) {
if logIgnoreError(err) {
return
}
@@ -271,24 +270,24 @@ func LogIfNot(ctx context.Context, err error, ignored ...error) {
return
}
}
logIf(ctx, err)
logIf(ctx, subsystem, err)
}
func errToEntry(ctx context.Context, err error, errKind ...interface{}) log.Entry {
func errToEntry(ctx context.Context, subsystem string, err error, errKind ...interface{}) log.Entry {
var l string
if anonFlag {
l = reflect.TypeOf(err).String()
} else {
l = fmt.Sprintf("%v (%T)", err, err)
}
return buildLogEntry(ctx, l, getTrace(3), errKind...)
return buildLogEntry(ctx, subsystem, l, getTrace(3), errKind...)
}
func logToEntry(ctx context.Context, message string, errKind ...interface{}) log.Entry {
return buildLogEntry(ctx, message, nil, errKind...)
func logToEntry(ctx context.Context, subsystem, message string, errKind ...interface{}) log.Entry {
return buildLogEntry(ctx, subsystem, message, nil, errKind...)
}
func buildLogEntry(ctx context.Context, message string, trace []string, errKind ...interface{}) log.Entry {
func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...interface{}) log.Entry {
logKind := madmin.LogKindError
if len(errKind) > 0 {
if ek, ok := errKind[0].(madmin.LogKind); ok {
@@ -307,8 +306,11 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind
defer req.RUnlock()
API := "SYSTEM"
if req.API != "" {
switch {
case req.API != "":
API = req.API
case subsystem != "":
API += "." + subsystem
}
// Copy tags. We hold read lock already.
@@ -374,7 +376,7 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind
// consoleLogIf prints a detailed error message during
// the execution of the server.
func consoleLogIf(ctx context.Context, err error, errKind ...interface{}) {
func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
if DisableErrorLog {
return
}
@@ -382,20 +384,22 @@ func consoleLogIf(ctx context.Context, err error, errKind ...interface{}) {
return
}
if consoleTgt != nil {
consoleTgt.Send(ctx, errToEntry(ctx, err, errKind...))
entry := errToEntry(ctx, subsystem, err, errKind...)
consoleTgt.Send(ctx, entry)
}
}
// logIf prints a detailed error message during
// the execution of the server.
func logIf(ctx context.Context, err error, errKind ...interface{}) {
func logIf(ctx context.Context, subsystem string, err error, errKind ...interface{}) {
if DisableErrorLog {
return
}
if err == nil {
return
}
sendLog(ctx, errToEntry(ctx, err, errKind...))
entry := errToEntry(ctx, subsystem, err, errKind...)
sendLog(ctx, entry)
}
func sendLog(ctx context.Context, entry log.Entry) {
@@ -416,11 +420,12 @@ func sendLog(ctx context.Context, entry log.Entry) {
}
// Event sends a event log to log targets
func Event(ctx context.Context, msg string, args ...interface{}) {
func Event(ctx context.Context, subsystem, msg string, args ...interface{}) {
if DisableErrorLog {
return
}
sendLog(ctx, logToEntry(ctx, fmt.Sprintf(msg, args...), EventKind))
entry := logToEntry(ctx, subsystem, fmt.Sprintf(msg, args...), EventKind)
sendLog(ctx, entry)
}
// ErrCritical is the value panic'd whenever CriticalIf is called.
@@ -430,7 +435,7 @@ var ErrCritical struct{}
// current go-routine by causing a `panic(ErrCritical)`.
func CriticalIf(ctx context.Context, err error, errKind ...interface{}) {
if err != nil {
LogIf(ctx, err, errKind...)
LogIf(ctx, "", err, errKind...)
panic(ErrCritical)
}
}

View File

@@ -38,7 +38,7 @@ type logOnceType struct {
sync.Mutex
}
func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
if err == nil {
return
}
@@ -61,7 +61,7 @@ func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string
l.Unlock()
if shouldLog {
consoleLogIf(ctx, err, errKind...)
consoleLogIf(ctx, subsystem, err, errKind...)
}
}
@@ -92,7 +92,7 @@ func unwrapErrs(err error) (leafErr error) {
}
// One log message per error.
func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
if err == nil {
return
}
@@ -115,7 +115,7 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKi
l.Unlock()
if shouldLog {
logIf(ctx, err, errKind...)
logIf(ctx, subsystem, err, errKind...)
}
}
@@ -142,17 +142,17 @@ var logOnce = newLogOnceType()
// LogOnceIf - Logs notification errors - once per error.
// id is a unique identifier for related log messages, refer to cmd/notification.go
// on how it is used.
func LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
if logIgnoreError(err) {
return
}
logOnce.logOnceIf(ctx, err, id, errKind...)
logOnce.logOnceIf(ctx, subsystem, err, id, errKind...)
}
// LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target.
func LogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) {
func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...interface{}) {
if logIgnoreError(err) {
return
}
logOnce.logOnceConsoleIf(ctx, err, id, errKind...)
logOnce.logOnceConsoleIf(ctx, subsystem, err, id, errKind...)
}

View File

@@ -153,7 +153,7 @@ func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]interface{}) {
// SetReqInfo sets ReqInfo in the context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
if ctx == nil {
LogIf(context.Background(), fmt.Errorf("context is nil"))
LogIf(context.Background(), "", fmt.Errorf("context is nil"))
return nil
}
return context.WithValue(ctx, contextLogKey, req)

View File

@@ -88,22 +88,25 @@ func (c *Target) Send(e interface{}) error {
var apiString string
if entry.API != nil {
apiString = "API: " + entry.API.Name + "("
apiString = "API: " + entry.API.Name
if entry.API.Args != nil {
args := ""
if entry.API.Args.Bucket != "" {
apiString = apiString + "bucket=" + entry.API.Args.Bucket
args = args + "bucket=" + entry.API.Args.Bucket
}
if entry.API.Args.Object != "" {
apiString = apiString + ", object=" + entry.API.Args.Object
args = args + ", object=" + entry.API.Args.Object
}
if entry.API.Args.VersionID != "" {
apiString = apiString + ", versionId=" + entry.API.Args.VersionID
args = args + ", versionId=" + entry.API.Args.VersionID
}
if len(entry.API.Args.Objects) > 0 {
apiString = apiString + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects))
args = args + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects))
}
if len(args) > 0 {
apiString += "(" + args + ")"
}
}
apiString += ")"
} else {
apiString = "INTERNAL"
}

View File

@@ -39,6 +39,8 @@ import (
xnet "github.com/minio/pkg/v2/net"
)
const logSubsys = "internodes"
// DefaultTimeout - default REST timeout is 10 seconds.
const DefaultTimeout = 10 * time.Second
@@ -316,7 +318,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
atomic.AddUint64(&globalStats.errs, 1)
}
if c.MarkOffline(err) {
logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host)
logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host)
}
}
return nil, &NetworkError{err}
@@ -340,7 +342,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
// instead, see cmd/storage-rest-server.go for ideas.
if c.HealthCheckFn != nil && resp.StatusCode == http.StatusPreconditionFailed {
err = fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with drive ID mismatch", c.url.Host)
logger.LogOnceIf(ctx, err, c.url.Host)
logger.LogOnceIf(ctx, logSubsys, err, c.url.Host)
c.MarkOffline(err)
}
defer xhttp.DrainBody(resp.Body)
@@ -352,7 +354,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
atomic.AddUint64(&globalStats.errs, 1)
}
if c.MarkOffline(err) {
logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host)
logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host)
}
}
return nil, err