Documentation ¶
Index ¶
- Constants
- Variables
- func GetLogger() zerolog.Logger
- func LogDebug(msg string)
- func LogError(err error, msg string)
- func LogInfo(msg string)
- func LogWarn(msg string)
- func LogWarningWithDetails(warning error)
- func SetLoggerProvider(provider LoggerProvider)
- func SetupLogger(loglevel string)
- func ToLogLevel(level string) zerolog.Level
- type Level
- type Logger
- type LoggerProvider
- type TestLogger
- func (t *TestLogger) Clear()
- func (t *TestLogger) ContainsField(key string, value interface{}) bool
- func (t *TestLogger) ContainsMessage(message string) bool
- func (t *TestLogger) Debug(msg string, fields ...any)
- func (t *TestLogger) Enabled(ctx context.Context, level Level) bool
- func (t *TestLogger) Error(msg string, fields ...any)
- func (t *TestLogger) GetBuffer() *bytes.Buffer
- func (t *TestLogger) GetLogEntries() ([]map[string]interface{}, error)
- func (t *TestLogger) Info(msg string, fields ...any)
- func (t *TestLogger) Warn(msg string, fields ...any)
- func (t *TestLogger) With(fields ...any) Logger
- type TestLoggerProvider
Constants ¶
const (
	// ModelNameKey identifies the type of machine learning model.
	// Examples: "LinearRegression", "StandardScaler", "RandomForest"
	ModelNameKey = "model.name"

	// EstimatorIDKey provides a unique identifier for a specific model instance.
	// This is useful for tracking multiple instances of the same model type.
	// Examples: "lr-001", "scaler-abc123", UUID strings
	EstimatorIDKey = "estimator.id"

	// OperationKey specifies the machine learning operation being performed.
	// Standard values: "fit", "predict", "transform", "fit_transform", "score"
	OperationKey = "ml.operation"

	// ComponentKey identifies which component or package is performing the operation.
	// Examples: "linear", "preprocessing", "metrics"
	ComponentKey = "ml.component"

	// PhaseKey indicates the phase of the model lifecycle.
	// Examples: "training", "inference", "validation", "preprocessing"
	PhaseKey = "ml.phase"
)
Model and Operation Context. These attributes identify the model type, instance, and operation being performed.
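For illustration, a minimal sketch of passing these keys as structured fields; the import path is a placeholder for this package, not its real module path:

package main

import (
	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	logger := log.GetDefaultLogger()

	// Attach model and operation context as alternating key/value fields.
	logger.Info("starting fit",
		log.ModelNameKey, "LinearRegression",
		log.EstimatorIDKey, "lr-001",
		log.OperationKey, "fit",
		log.ComponentKey, "linear",
		log.PhaseKey, "training",
	)
}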
const (
	// SamplesKey indicates the number of samples (rows) in the dataset.
	// This is crucial for understanding the scale of data being processed.
	SamplesKey = "data.samples"

	// FeaturesKey indicates the number of features (columns) in the dataset.
	// Important for dimensionality tracking and debugging shape mismatches.
	FeaturesKey = "data.features"

	// TargetsKey indicates the number of target variables for supervised learning.
	// Usually 1 for single-target problems, >1 for multi-target problems.
	TargetsKey = "data.targets"

	// DataTypeKey specifies the type of data being processed.
	// Examples: "float64", "int32", "categorical", "mixed"
	DataTypeKey = "data.type"

	// DataSizeKey indicates the memory size of the data in bytes.
	// Useful for memory usage monitoring and optimization.
	DataSizeKey = "data.size_bytes"

	// BatchSizeKey indicates the size of processing batches.
	// Relevant for streaming or mini-batch processing scenarios.
	BatchSizeKey = "data.batch_size"
)
Data Shape and Characteristics. These attributes describe the structure and properties of the data being processed.
const (
	// DurationMsKey records the execution time of an operation in milliseconds.
	// This is essential for performance monitoring and optimization.
	DurationMsKey = "perf.duration_ms"

	// DurationSecondsKey records the execution time in seconds for longer operations.
	// Useful for training operations that take minutes or hours.
	DurationSecondsKey = "perf.duration_seconds"

	// MemoryUsageKey records memory usage in bytes during the operation.
	// Important for memory optimization and resource planning.
	MemoryUsageKey = "perf.memory_bytes"

	// AccuracyKey records model accuracy for evaluation operations.
	// Range typically [0.0, 1.0] for classification accuracy.
	AccuracyKey = "metrics.accuracy"

	// LossKey records the loss value during training or evaluation.
	// Lower values typically indicate better model performance.
	LossKey = "metrics.loss"

	// R2ScoreKey records the R² coefficient of determination for regression.
	// Range typically [-∞, 1.0], with 1.0 being perfect prediction.
	R2ScoreKey = "metrics.r2_score"

	// IterationKey records the current iteration number during iterative processes.
	// Useful for tracking convergence in iterative algorithms.
	IterationKey = "training.iteration"

	// EpochKey records the current epoch number during training.
	// Standard in neural networks and iterative learning algorithms.
	EpochKey = "training.epoch"
)
Performance Metrics. These attributes capture timing, accuracy, and resource usage information.
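As a sketch (with the same placeholder import path), timing an operation and logging it alongside evaluation metrics might look like this:

package main

import (
	"time"

	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	logger := log.GetDefaultLogger()

	start := time.Now()
	// ... training work happens here ...
	elapsed := time.Since(start)

	// Record timing and evaluation metrics as structured fields.
	logger.Info("training finished",
		log.DurationMsKey, elapsed.Milliseconds(),
		log.AccuracyKey, 0.95,
		log.IterationKey, 100,
	)
}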
const (
	// PredsKey indicates the number of predictions made.
	// Useful for throughput monitoring and batch size optimization.
	PredsKey = "preds.count"

	// PredsBatchKey indicates the batch number for prediction operations.
	// Relevant for streaming or large-scale batch prediction scenarios.
	PredsBatchKey = "preds.batch"

	// ConfidenceKey records prediction confidence or probability.
	// Range typically [0.0, 1.0] for classification confidence.
	ConfidenceKey = "preds.confidence"

	// ThresholdKey records decision thresholds used for classification.
	// Important for understanding model decision boundaries.
	ThresholdKey = "preds.threshold"
)
Prediction and Output Context. These attributes describe prediction operations and their results.
const (
	// ErrorCodeKey provides a structured error code for programmatic handling.
	// Examples: "DIMENSION_MISMATCH", "NOT_FITTED", "CONVERGENCE_FAILURE"
	ErrorCodeKey = "error.code"

	// ErrorTypeKey categorizes the type of error encountered.
	// Examples: "ValidationError", "ConvergenceError", "DataError"
	ErrorTypeKey = "error.type"

	// StacktraceKey contains stack trace information for debugging.
	// Automatically populated by the error logging functions.
	StacktraceKey = "error.stacktrace"

	// SuggestionKey provides helpful suggestions for resolving issues.
	// Examples: "Check input data shape", "Increase max_iterations"
	SuggestionKey = "error.suggestion"
)
Error and Warning Context. These attributes provide additional context for error and warning messages.
const (
	// HyperParamsKey contains model hyperparameters as a structured object.
	// Useful for tracking model configuration and reproducibility.
	HyperParamsKey = "model.hyperparams"

	// LearningRateKey records the learning rate for gradient-based algorithms.
	// Critical hyperparameter for training stability and convergence.
	LearningRateKey = "hyperparams.learning_rate"

	// RegularizationKey records regularization strength (L1, L2, etc.).
	// Important for understanding model complexity and overfitting prevention.
	RegularizationKey = "hyperparams.regularization"

	// RandomSeedKey records the random seed for reproducibility.
	// Essential for debugging and ensuring reproducible results.
	RandomSeedKey = "config.random_seed"

	// ConfigVersionKey tracks the configuration or model version.
	// Useful for A/B testing and model versioning.
	ConfigVersionKey = "config.version"
)
Hyperparameters and Configuration. These attributes capture model configuration and hyperparameters.
const (
	// HostnameKey identifies the machine or container running the operation.
	// Useful for distributed systems and debugging deployment issues.
	HostnameKey = "infra.hostname"

	// ProcessIDKey records the process ID for the operation.
	// Helpful for debugging and resource tracking.
	ProcessIDKey = "infra.pid"

	// ThreadIDKey records the thread or goroutine ID.
	// Useful for concurrent processing debugging.
	ThreadIDKey = "infra.thread_id"

	// GPUIDKey identifies which GPU device is being used (if applicable).
	// Important for GPU resource management and debugging.
	GPUIDKey = "infra.gpu_id"

	// WorkerIDKey identifies worker processes in distributed systems.
	// Relevant for parameter servers and distributed training.
	WorkerIDKey = "infra.worker_id"
)
Infrastructure and Environment. These attributes describe the execution environment and resource usage.
const (
	// Standard ML operations
	OperationFit          = "fit"
	OperationPredict      = "predict"
	OperationTransform    = "transform"
	OperationFitTransform = "fit_transform"
	OperationScore        = "score"
	OperationPartialFit   = "partial_fit"

	// Standard ML phases
	PhaseTraining      = "training"
	PhaseValidation    = "validation"
	PhaseTesting       = "testing"
	PhaseInference     = "inference"
	PhasePreprocessing = "preprocessing"

	// Standard error codes
	ErrorNotFitted         = "NOT_FITTED"
	ErrorDimensionMismatch = "DIMENSION_MISMATCH"
	ErrorEmptyData         = "EMPTY_DATA"
	ErrorInvalidInput      = "INVALID_INPUT"
	ErrorConvergence       = "CONVERGENCE_FAILURE"
	ErrorSingularMatrix    = "SINGULAR_MATRIX"
)
Standard attribute value constants for common operations. Using these constants ensures consistency across the codebase.
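A brief sketch of how these value constants combine with the attribute keys when reporting a typical failure; the helper function is purely illustrative, and logger is any value implementing log.Logger:

// logNotFitted is a hypothetical helper, not part of this package.
func logNotFitted(logger log.Logger) {
	logger.Error("predict called before fit",
		log.OperationKey, log.OperationPredict,
		log.PhaseKey, log.PhaseInference,
		log.ErrorCodeKey, log.ErrorNotFitted,
		log.SuggestionKey, "call Fit before Predict",
	)
}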
const (
	ErrAttrKey        = "error"
	StacktraceAttrKey = "stacktrace"
)
Legacy constants for backward compatibility
Variables ¶
var GlobalLogger zerolog.Logger
GlobalLogger is the zerolog instance used throughout the project.
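A minimal sketch, assuming SetupLogger initializes GlobalLogger and that level strings follow common zerolog names; the import path is a placeholder:

package main

import (
	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	// Assumed: SetupLogger configures GlobalLogger with the given level string.
	log.SetupLogger("debug")

	// GlobalLogger is a plain zerolog.Logger, so the zerolog fluent API applies.
	log.GlobalLogger.Info().
		Str(log.ModelNameKey, "StandardScaler").
		Msg("scaler initialized")
}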
Functions ¶
func LogError ¶
func LogError(err error, msg string)
LogError emits an error log integrated with cockroachdb/errors, recording detailed, scikit-learn-style error information as structured fields.
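A short usage sketch with a cockroachdb/errors error value; the import path for this package is a placeholder:

package main

import (
	"github.com/cockroachdb/errors"

	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	err := errors.Newf("dimension mismatch: got %d features, expected %d", 3, 4)

	// The error and its stack trace are emitted as structured fields.
	log.LogError(err, "failed to fit model")
}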
func LogWarningWithDetails ¶
func LogWarningWithDetails(warning error)
LogWarningWithDetails emits a structured warning log.
func SetLoggerProvider ¶
func SetLoggerProvider(provider LoggerProvider)
SetLoggerProvider sets the global logger provider. This is useful for testing or using different logging backends.
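A sketch of swapping in the in-memory test provider during a test, assuming GetDefaultLogger resolves through the provider set here; the import path is a placeholder:

package mypkg_test

import (
	"strings"
	"testing"

	"github.com/example/project/log" // hypothetical import path for this package
)

func TestTrainingLogs(t *testing.T) {
	// Route package logging through an in-memory provider for inspection.
	provider, buf := log.NewTestLoggerProvider(log.LevelDebug)
	log.SetLoggerProvider(provider)

	// Assumed: the default logger now comes from the provider set above.
	log.GetDefaultLogger().Info("fit complete", log.OperationKey, log.OperationFit)

	if !strings.Contains(buf.String(), "fit complete") {
		t.Errorf("expected captured output to mention fit, got %q", buf.String())
	}
}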
func ToLogLevel ¶
func ToLogLevel(level string) zerolog.Level
ToLogLevel converts a log level string to the corresponding zerolog level.
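A minimal sketch; the accepted level strings (e.g. "debug", "info", "warn") are assumed from common zerolog conventions, and the import path is a placeholder:

package main

import (
	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	// Convert a level string and use it to build a zerolog-backed provider.
	level := log.ToLogLevel("warn")
	log.SetLoggerProvider(log.NewZerologProvider(level))
}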
Types ¶
type Level ¶
type Level int
Level represents a logging level, compatible with slog.Level. This type allows for level-based filtering of log messages.
type Logger ¶
type Logger interface {
// Debug logs a debug-level message with optional structured fields.
// Debug logs are typically used for detailed diagnostic information
// and are usually disabled in production environments.
//
// Parameters:
// - msg: The primary log message
// - fields: Optional key-value pairs for structured logging
//
// Example:
// logger.Debug("Processing data batch",
// "batch_id", 42,
// "size", 100,
// )
Debug(msg string, fields ...any)
// Info logs an info-level message with optional structured fields.
// Info logs are used for general operational information about
// the application's execution flow.
//
// Parameters:
// - msg: The primary log message
// - fields: Optional key-value pairs for structured logging
//
// Example:
// logger.Info("Model training completed",
// log.DurationMsKey, 5432,
// log.AccuracyKey, 0.95,
// )
Info(msg string, fields ...any)
// Warn logs a warning-level message with optional structured fields.
// Warning logs indicate potentially problematic situations that
// don't prevent the application from continuing.
//
// Parameters:
// - msg: The primary log message
// - fields: Optional key-value pairs for structured logging
//
// Example:
// logger.Warn("Model performance below threshold",
// log.AccuracyKey, 0.65,
// "threshold", 0.8,
// )
Warn(msg string, fields ...any)
// Error logs an error-level message with optional structured fields.
// Error logs indicate error conditions that should be investigated.
// If an error value is provided as the first field, stack trace
// information may be automatically included.
//
// Parameters:
// - msg: The primary log message
// - fields: Optional key-value pairs for structured logging
// If the first field is an error, it will be handled specially
//
// Example:
// logger.Error("Model training failed",
// err,
// log.OperationKey, "fit",
// log.SamplesKey, 1000,
// )
Error(msg string, fields ...any)
// With returns a new Logger with the given fields pre-populated.
// This method enables creation of contextual loggers that automatically
// include common fields in all subsequent log messages.
//
// Parameters:
// - fields: Key-value pairs to include in all future log messages
//
// Returns:
// - Logger: A new logger instance with the specified fields
//
// Example:
// contextLogger := logger.With(
// log.ModelNameKey, "RandomForest",
// log.EstimatorIDKey, "rf-123",
// )
// contextLogger.Info("Starting training") // Automatically includes model info
With(fields ...any) Logger
// Enabled reports whether the logger emits log records at the given level.
// This method can be used to avoid expensive operations when constructing
// log messages that won't be emitted.
//
// Parameters:
// - ctx: Context for the logging operation
// - level: The log level to check
//
// Returns:
// - bool: true if the logger would emit a record at the given level
//
// Example:
// if logger.Enabled(ctx, LevelDebug) {
// expensiveData := calculateExpensiveMetrics()
// logger.Debug("Detailed metrics", "metrics", expensiveData)
// }
Enabled(ctx context.Context, level Level) bool
}
Logger defines a structured logging interface compatible with Go's log/slog.
This interface provides the core logging methods with structured field support, allowing for rich contextual information to be included with log messages. It's designed to be implementation-agnostic, enabling easy switching between different logging backends while maintaining a consistent API.
The interface supports method chaining through the With method, allowing for creation of contextual loggers with pre-populated fields.
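Putting the interface together, a contextual logger created with With carries its fields into every subsequent message (placeholder import path):

package main

import (
	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	// Build a contextual logger once; every message it emits carries these fields.
	modelLog := log.GetDefaultLogger().With(
		log.ModelNameKey, "RandomForest",
		log.EstimatorIDKey, "rf-123",
	)

	modelLog.Info("starting training", log.SamplesKey, 1000, log.FeaturesKey, 20)
	modelLog.Info("training finished", log.DurationMsKey, 5432)
}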
func GetDefaultLogger ¶
func GetDefaultLogger() Logger
GetDefaultLogger returns the default logger instance using the new interface.
func GetLoggerWithName ¶
GetLoggerWithName returns a logger with a specific component name.
type LoggerProvider ¶
type LoggerProvider interface {
// GetLogger returns the default logger instance.
GetLogger() Logger
// GetLoggerWithName returns a logger with a specific name/component identifier.
GetLoggerWithName(name string) Logger
// SetLevel sets the minimum log level for all loggers created by this provider.
SetLevel(level Level)
}
LoggerProvider defines an interface for creating and configuring loggers. This interface allows for dependency injection and testing with different logger implementations.
func NewZerologProvider ¶
func NewZerologProvider(level zerolog.Level) LoggerProvider
NewZerologProvider creates a new LoggerProvider using zerolog.
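A sketch of wiring a zerolog-backed provider and handing out component-scoped loggers; zerolog.InfoLevel comes from github.com/rs/zerolog, and the import path for this package is a placeholder:

package main

import (
	"github.com/rs/zerolog"

	"github.com/example/project/log" // hypothetical import path for this package
)

func main() {
	provider := log.NewZerologProvider(zerolog.InfoLevel)
	log.SetLoggerProvider(provider)

	// Component-scoped logger obtained from the provider.
	linearLog := provider.GetLoggerWithName("linear")
	linearLog.Info("solver selected", "solver", "qr")

	// Lower the threshold so debug output is emitted as well.
	provider.SetLevel(log.LevelDebug)
}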
type TestLogger ¶
type TestLogger struct {
// contains filtered or unexported fields
}
TestLogger is a logger implementation designed for testing. It captures all log messages in memory for later inspection and verification.
func NewTestLogger ¶
func NewTestLogger(level Level) (*TestLogger, *bytes.Buffer)
NewTestLogger creates a new TestLogger with the specified minimum level. All log messages are captured in an internal buffer for later examination.
Parameters:
- level: Minimum log level to capture
Returns:
- *TestLogger: A new test logger instance
- *bytes.Buffer: The buffer containing captured log output
Example:
logger, buffer := log.NewTestLogger(log.LevelDebug)
logger.Info("test message", "key", "value")
output := buffer.String()
// Verify output contains expected content
func (*TestLogger) Clear ¶
func (t *TestLogger) Clear()
Clear clears all captured log content. Useful for resetting state between test cases.
func (*TestLogger) ContainsField ¶
func (t *TestLogger) ContainsField(key string, value interface{}) bool
ContainsField checks if the captured logs contain an entry with the specified field and value.
Parameters:
- key: The field key to search for
- value: The expected field value
Returns:
- bool: true if the field with the specified value is found
Example:
if !testLogger.ContainsField("ml.operation", "fit") {
t.Error("Expected fit operation in logs")
}
func (*TestLogger) ContainsMessage ¶
func (t *TestLogger) ContainsMessage(message string) bool
ContainsMessage checks if the captured logs contain a message with the specified content. This is a convenience method for common test assertions.
Parameters:
- message: The message content to search for
Returns:
- bool: true if the message is found in any log entry
Example:
if !testLogger.ContainsMessage("Training completed") {
t.Error("Expected training completion log message")
}
func (*TestLogger) Debug ¶
func (t *TestLogger) Debug(msg string, fields ...any)
Debug implements Logger.Debug.
func (*TestLogger) Enabled ¶
func (t *TestLogger) Enabled(ctx context.Context, level Level) bool
Enabled implements Logger.Enabled.
func (*TestLogger) Error ¶
func (t *TestLogger) Error(msg string, fields ...any)
Error implements Logger.Error.
func (*TestLogger) GetBuffer ¶
func (t *TestLogger) GetBuffer() *bytes.Buffer
GetBuffer returns the internal buffer for direct access to captured logs.
func (*TestLogger) GetLogEntries ¶
func (t *TestLogger) GetLogEntries() ([]map[string]interface{}, error)
GetLogEntries parses the captured log output and returns structured log entries. This is useful for programmatic verification of log content.
Returns:
- []map[string]interface{}: Slice of parsed log entries
- error: Error if log parsing fails
Example:
entries, err := testLogger.GetLogEntries()
if err != nil {
t.Fatal(err)
}
if len(entries) != 2 {
t.Errorf("Expected 2 log entries, got %d", len(entries))
}
func (*TestLogger) Info ¶
func (t *TestLogger) Info(msg string, fields ...any)
Info implements Logger.Info.
func (*TestLogger) Warn ¶
func (t *TestLogger) Warn(msg string, fields ...any)
Warn implements Logger.Warn.
func (*TestLogger) With ¶
func (t *TestLogger) With(fields ...any) Logger
With implements Logger.With.
type TestLoggerProvider ¶
type TestLoggerProvider struct {
// contains filtered or unexported fields
}
TestLoggerProvider implements LoggerProvider for testing scenarios.
func NewTestLoggerProvider ¶
func NewTestLoggerProvider(level Level) (*TestLoggerProvider, *bytes.Buffer)
NewTestLoggerProvider creates a new test logger provider.
Parameters:
- level: Minimum log level to capture
Returns:
- *TestLoggerProvider: A new test provider instance
- *bytes.Buffer: Buffer for accessing captured logs
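A short test sketch using the provider's shared buffer to verify output from a component-named logger (placeholder import path):

package mypkg_test

import (
	"strings"
	"testing"

	"github.com/example/project/log" // hypothetical import path for this package
)

func TestComponentLogging(t *testing.T) {
	provider, buf := log.NewTestLoggerProvider(log.LevelDebug)

	// Loggers handed out by the test provider write into the shared buffer.
	scalerLog := provider.GetLoggerWithName("preprocessing")
	scalerLog.Warn("constant feature detected", log.FeaturesKey, 1)

	if !strings.Contains(buf.String(), "constant feature detected") {
		t.Errorf("expected warning in captured output, got %q", buf.String())
	}
}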
func (*TestLoggerProvider) GetBuffer ¶
func (p *TestLoggerProvider) GetBuffer() *bytes.Buffer
GetBuffer returns the buffer for accessing captured logs.
func (*TestLoggerProvider) GetLogger ¶
func (p *TestLoggerProvider) GetLogger() Logger
GetLogger implements LoggerProvider.GetLogger.
func (*TestLoggerProvider) GetLoggerWithName ¶
func (p *TestLoggerProvider) GetLoggerWithName(name string) Logger
GetLoggerWithName implements LoggerProvider.GetLoggerWithName.
func (*TestLoggerProvider) SetLevel ¶
func (p *TestLoggerProvider) SetLevel(level Level)
SetLevel implements LoggerProvider.SetLevel.