cache Package
Redis caching with automatic JSON serialization, TTL management, and connection pooling.
Features
- Automatic Serialization: JSON marshaling/unmarshaling
- TTL Support: Automatic expiration of cached data
- Connection Pooling: Efficient connection management
- Key Prefixing: Namespace support for multi-tenant setups
- Health Checks: Monitor Redis connectivity
- Error Wrapping: Consistent error handling
- Logging Integration: Automatic operation logging with duration
Installation
import "github.com/LaRestoOU/laresto-go-common/pkg/cache"
Quick Start
Connect to Redis
cfg := cache.Config{
Host: "localhost",
Port: 6379,
Password: "redis_dev_password",
DB: 0,
Prefix: "laresto:",
}
log := logger.New(logger.Config{...})
cache, err := cache.NewRedis(cfg, log)
if err != nil {
log.Fatal("Failed to connect to Redis", err)
}
defer cache.Close()
Set and Get
type User struct {
ID int64 `json:"id"`
Email string `json:"email"`
Name string `json:"name"`
}
// Set with TTL
user := User{ID: 123, Email: "user@example.com", Name: "John"}
err := cache.Set(ctx, "user:123", user, 1*time.Hour)
// Get
var cachedUser User
err = cache.Get(ctx, "user:123", &cachedUser)
if err == errors.ErrNotFound {
// Cache miss - fetch from database
}
Configuration
type Config struct {
// Connection
Host string // Redis host
Port int // Redis port
Password string // Redis password (empty for no auth)
DB int // Redis database (0-15)
Prefix string // Key prefix for namespacing
// Connection pool
MaxRetries int // Max retries (default: 3)
PoolSize int // Max connections (default: 10)
MinIdleConns int // Min idle connections (default: 2)
// Timeouts
DialTimeout time.Duration // Connection timeout (default: 5s)
ReadTimeout time.Duration // Read timeout (default: 3s)
WriteTimeout time.Duration // Write timeout (default: 3s)
}
Connection Pool Sizing
Guidelines:
- PoolSize: Upper bound on concurrent Redis operations (one connection per in-flight command)
- MinIdleConns: Connections kept warm to avoid dial latency (roughly 20-30% of PoolSize)
Example:
cfg := cache.Config{
PoolSize: 20, // Up to 20 concurrent operations
MinIdleConns: 5, // Keep 5 connections ready
}
Core Operations
Set with TTL
// Set with 1 hour TTL
cache.Set(ctx, "session:abc123", sessionData, 1*time.Hour)
// Set with 5 minutes TTL
cache.Set(ctx, "otp:user123", otpCode, 5*time.Minute)
// Set with no expiration (use with caution!)
cache.Set(ctx, "config:app", config, 0)
Get
var data MyType
err := cache.Get(ctx, "my-key", &data)
if err == errors.ErrNotFound {
// Key doesn't exist or expired
// Fetch from database and cache it
} else if err != nil {
// Redis error
return err
}
// Use data
Delete
err := cache.Delete(ctx, "user:123")
Check Existence
exists, err := cache.Exists(ctx, "user:123")
if exists {
// Key exists in cache
}
Update TTL
// Extend TTL on existing key
err := cache.Expire(ctx, "session:abc123", 2*time.Hour)
Find Keys
// Get all user keys
keys, err := cache.Keys(ctx, "user:*")
// Get all session keys
keys, err := cache.Keys(ctx, "session:*")
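Pattern lookups scan the keyspace, so keep patterns narrow and avoid them on hot paths. A sketch of bulk invalidation, assuming the keys returned by Keys can be passed straight back to Delete (i.e. the configured prefix is handled symmetrically):
// Invalidate every cached entry for one user, e.g. "user:123" and "user:123:profile"
keys, err := cache.Keys(ctx, fmt.Sprintf("user:%d*", userID))
if err != nil {
    return err
}
for _, key := range keys {
    if err := cache.Delete(ctx, key); err != nil {
        return err
    }
}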
Common Patterns
Cache-Aside (Lazy Loading)
func (s *UserService) GetUser(ctx context.Context, id int64) (*User, error) {
cacheKey := fmt.Sprintf("user:%d", id)
// Try cache first
var user User
err := s.cache.Get(ctx, cacheKey, &user)
if err == nil {
return &user, nil // Cache hit
}
// Cache miss - fetch from database
user, err = s.db.FindUserByID(ctx, id)
if err != nil {
return nil, err
}
// Store in cache
s.cache.Set(ctx, cacheKey, user, 1*time.Hour)
return &user, nil
}
Write-Through
func (s *UserService) UpdateUser(ctx context.Context, user *User) error {
// Update database
if err := s.db.UpdateUser(ctx, user); err != nil {
return err
}
// Update cache immediately
cacheKey := fmt.Sprintf("user:%d", user.ID)
s.cache.Set(ctx, cacheKey, user, 1*time.Hour)
return nil
}
Cache Invalidation
func (s *UserService) DeleteUser(ctx context.Context, id int64) error {
// Delete from database
if err := s.db.DeleteUser(ctx, id); err != nil {
return err
}
// Invalidate cache
cacheKey := fmt.Sprintf("user:%d", id)
s.cache.Delete(ctx, cacheKey)
return nil
}
Multi-Key Invalidation
func (s *UserService) UpdateUserProfile(ctx context.Context, user *User) error {
// Update database
if err := s.db.UpdateUser(ctx, user); err != nil {
return err
}
// Invalidate related caches
s.cache.Delete(ctx, fmt.Sprintf("user:%d", user.ID))
s.cache.Delete(ctx, fmt.Sprintf("user:email:%s", user.Email))
s.cache.Delete(ctx, "users:list") // Invalidate list cache
return nil
}
Session Management
type SessionService struct {
cache cache.Cache
}
func (s *SessionService) CreateSession(ctx context.Context, userID int64) (string, error) {
sessionID := generateSessionID()
session := Session{
UserID: userID,
CreatedAt: time.Now(),
}
// Store with 24 hour expiration
err := s.cache.Set(ctx, fmt.Sprintf("session:%s", sessionID), session, 24*time.Hour)
return sessionID, err
}
func (s *SessionService) GetSession(ctx context.Context, sessionID string) (*Session, error) {
var session Session
if err := s.cache.Get(ctx, fmt.Sprintf("session:%s", sessionID), &session); err != nil {
return nil, err
}
return &session, nil
}
func (s *SessionService) ExtendSession(ctx context.Context, sessionID string) error {
return s.cache.Expire(ctx, fmt.Sprintf("session:%s", sessionID), 24*time.Hour)
}
func (s *SessionService) DeleteSession(ctx context.Context, sessionID string) error {
return s.cache.Delete(ctx, fmt.Sprintf("session:%s", sessionID))
}
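A sketch of how the SessionService above might guard HTTP handlers (uses net/http; the session_id cookie name, the 401 responses, and the sliding-expiration choice are illustrative assumptions, not part of this package):
// RequireSession rejects requests without a valid cached session
func (s *SessionService) RequireSession(next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        cookie, err := r.Cookie("session_id")
        if err != nil {
            http.Error(w, "unauthorized", http.StatusUnauthorized)
            return
        }
        session, err := s.GetSession(r.Context(), cookie.Value)
        if err != nil {
            // Missing or expired session - treat as unauthenticated
            http.Error(w, "unauthorized", http.StatusUnauthorized)
            return
        }
        // Sliding expiration: refresh the TTL on every authenticated request
        _ = s.ExtendSession(r.Context(), cookie.Value)
        _ = session // e.g. put session.UserID into the request context here
        next(w, r)
    }
}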
Rate Limiting
type RateLimiter struct {
cache cache.Cache
}
// NOTE: this read-then-set sequence is not atomic, so two concurrent requests
// can both pass the check, and each Set restarts the 1-minute window. It is a
// simple illustration; strict limits need an atomic counter (e.g. Redis INCR).
func (r *RateLimiter) CheckLimit(ctx context.Context, userID int64, limit int) (bool, error) {
key := fmt.Sprintf("ratelimit:%d", userID)
var count int
err := r.cache.Get(ctx, key, &count)
if err == errors.ErrNotFound {
// First request in this window - start the counter
r.cache.Set(ctx, key, 1, 1*time.Minute)
return true, nil
}
if err != nil {
return false, err // Redis error
}
if count >= limit {
return false, nil // Rate limit exceeded
}
// Increment counter
count++
r.cache.Set(ctx, key, count, 1*time.Minute)
return true, nil
}
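A hypothetical call site for the limiter above; the 60-requests-per-minute limit, the userID value, and the fail-open choice are illustrative:
allowed, err := limiter.CheckLimit(ctx, userID, 60)
if err != nil {
    // Redis problem - decide whether to fail open or closed
    log.Error("Rate limit check failed", err, "user_id", userID)
    allowed = true // this sketch fails open
}
if !allowed {
    // Reject the request, e.g. respond with HTTP 429 Too Many Requests
    return
}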
Key Naming Conventions
Best practices:
// Use hierarchical keys with colons
"user:123"
"user:email:john@example.com"
"session:abc123"
"product:456"
"order:789:items"
// Use prefixes for namespacing
"laresto:user:123"
"laresto:session:abc123"
// Include type and ID
"user:123" // Good
"123" // Bad - ambiguous
"u123" // Bad - unclear
// Use descriptive names
"user:123:profile" // Good
"user:123:settings" // Good
"user:123:data" // Bad - vague
TTL Strategy
Recommended TTL values:
// Short-lived data (seconds to minutes)
OTP codes: 5 * time.Minute
API rate limits: 1 * time.Minute
Temporary tokens: 15 * time.Minute
// Medium-lived data (hours)
User sessions: 24 * time.Hour
API responses: 1 * time.Hour
Search results: 30 * time.Minute
// Long-lived data (days)
User profiles: 7 * 24 * time.Hour
Product catalog: 24 * time.Hour
Configuration: 12 * time.Hour
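To keep these values consistent across a service, they can be declared once as named constants. A minimal sketch (constant names are illustrative; values mirror the guidance above):
const (
    TTLOTPCode     = 5 * time.Minute
    TTLRateLimit   = 1 * time.Minute
    TTLSession     = 24 * time.Hour
    TTLAPIResponse = 1 * time.Hour
    TTLUserProfile = 7 * 24 * time.Hour
)

// Usage
cache.Set(ctx, "otp:user123", otpCode, TTLOTPCode)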
Health Checks
func (s *Service) HealthCheck() error {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
// Cast to *RedisCache to access HealthCheck
redisCache := s.cache.(*cache.RedisCache)
return redisCache.HealthCheck(ctx)
}
Monitoring
// Get Redis connection pool statistics
redisCache := s.cache.(*cache.RedisCache) // same type assertion as in HealthCheck above
stats := redisCache.Stats()
// Example output:
// {
//   "hits": 1234,       // Pool hits: a free connection was reused
//   "misses": 56,       // Pool misses: a new connection had to be dialed
//   "timeouts": 0,      // Waits for a free connection that timed out
//   "total_conns": 10,  // Total connections in the pool
//   "idle_conns": 7,    // Idle connections
//   "stale_conns": 0    // Stale connections closed by the pool
// }
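A sketch of periodic pool monitoring built on Stats(). It assumes the logger exposes an Info method with the same message/key-value style as the Error calls shown in this README, and that the stats value can be logged as a single field; verify both against the actual APIs:
// Log pool statistics once a minute in a background goroutine
go func() {
    ticker := time.NewTicker(1 * time.Minute)
    defer ticker.Stop()
    for range ticker.C {
        // log.Info is an assumed method; adjust to the real logger API
        log.Info("Redis pool stats", "stats", redisCache.Stats())
    }
}()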
Best Practices
DO ✅
// Always use context with timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
cache.Get(ctx, key, &data)
// Use appropriate TTLs
cache.Set(ctx, key, data, 1*time.Hour) // Not too short, not too long
// Handle cache misses gracefully
if err := cache.Get(ctx, key, &data); err == errors.ErrNotFound {
// Fetch from database
}
// Use key prefixes for organization
cache.Set(ctx, "user:123", data, ttl)
// Close cache on shutdown
defer cache.Close()
DON'T ❌
// Don't cache without TTL unless necessary
cache.Set(ctx, key, data, 0) // Never expires!
// Don't ignore errors
cache.Set(ctx, key, data, ttl) // Error ignored!
// Don't cache everything
// Cache only frequently accessed, expensive-to-compute data
// Don't use cache as primary storage
// Cache can be cleared - always have database as source of truth
// Don't cache sensitive data without encryption
cache.Set(ctx, "password", password, ttl) // BAD!
Error Handling
err := cache.Get(ctx, key, &data)
if err == errors.ErrNotFound {
// Key doesn't exist - normal cache miss
// Fetch from database
} else if err != nil {
// Redis error - log and handle gracefully
log.Error("Cache error", err, "key", key)
// Fallback to database
}
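The miss-versus-error distinction above can be captured once in a small generic helper so call sites stay short. A minimal sketch (requires Go 1.18+; getOrLoad is illustrative, not part of the package):
// getOrLoad tries the cache first, falls back to the loader on a miss or a
// Redis error, and repopulates the cache on success (graceful degradation).
func getOrLoad[T any](ctx context.Context, c cache.Cache, key string, ttl time.Duration, load func(context.Context) (T, error)) (T, error) {
    var value T
    if err := c.Get(ctx, key, &value); err == nil {
        return value, nil // cache hit
    }
    // Cache miss or Redis error - either way, fall back to the loader
    // (log non-miss errors here if you need visibility)
    value, err := load(ctx)
    if err != nil {
        return value, err
    }
    _ = c.Set(ctx, key, value, ttl) // best effort: a failed write only costs a future miss
    return value, nil
}

// Usage, e.g. inside UserService.GetUser
user, err := getOrLoad(ctx, s.cache, "user:123", 1*time.Hour, func(ctx context.Context) (User, error) {
    return s.db.FindUserByID(ctx, 123)
})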
Testing
Integration Tests
func TestUserService_Integration(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration test")
}
cfg := cache.Config{
Host: "localhost",
Port: 6379,
Password: "redis_dev_password",
Prefix: "test:",
}
cache, err := cache.NewRedis(cfg, logger.NewDefault())
require.NoError(t, err)
defer cache.Close()
// Test cache operations
ctx := context.Background()
err = cache.Set(ctx, "test-key", "test-value", 1*time.Minute)
require.NoError(t, err)
// Cleanup
cache.Delete(ctx, "test-key")
}
Mock Cache
type MockCache struct{}
func (m *MockCache) Get(ctx context.Context, key string, dest interface{}) error {
// Return mock data
return nil
}
func (m *MockCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
return nil
}
// Implement other methods...
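The remaining methods follow the same shape. The signatures below are inferred from the calls shown earlier in this README; check them against the actual Cache interface before relying on the mock:
func (m *MockCache) Delete(ctx context.Context, key string) error { return nil }
func (m *MockCache) Exists(ctx context.Context, key string) (bool, error) { return false, nil }
func (m *MockCache) Expire(ctx context.Context, key string, ttl time.Duration) error { return nil }
func (m *MockCache) Keys(ctx context.Context, pattern string) ([]string, error) { return nil, nil }
func (m *MockCache) Close() error { return nil }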
Performance Tips
- Use connection pooling: Reuse connections (handled by the package)
- Set appropriate TTLs: Balance freshness vs database load
- Batch operations: Use pipelines for multiple operations
- Monitor cache hit ratio: Aim for >80% hit rate (see the sketch below)
- Use key prefixes: Keeps pattern matching with Keys() targeted
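An illustrative way to track the hit ratio at the application level is to wrap the Cache interface and count hits and misses around Get. The wrapper type and counters below are assumptions, not package features (uses sync/atomic; atomic.Int64 needs Go 1.19+):
// instrumentedCache wraps any cache.Cache and counts Get hits and misses
type instrumentedCache struct {
    cache.Cache
    hits, misses atomic.Int64
}

func (c *instrumentedCache) Get(ctx context.Context, key string, dest interface{}) error {
    err := c.Cache.Get(ctx, key, dest)
    switch err {
    case nil:
        c.hits.Add(1)
    case errors.ErrNotFound:
        c.misses.Add(1)
    }
    return err
}

// HitRatio returns hits / (hits + misses); aim for roughly 0.8 or better
func (c *instrumentedCache) HitRatio() float64 {
    h, m := c.hits.Load(), c.misses.Load()
    if h+m == 0 {
        return 0
    }
    return float64(h) / float64(h+m)
}

// Usage: wrap the real cache once at startup, e.g. &instrumentedCache{Cache: redisCache}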
Security
// ✅ SAFE: Environment variable
cfg := cache.Config{
Password: os.Getenv("REDIS_PASSWORD"),
}
// ❌ UNSAFE: Hardcoded password
cfg := cache.Config{
Password: "mysecretpassword",
}
iOS Developer Notes
Cache package is similar to:
- NSCache for in-memory caching
- UserDefaults for key-value storage
- URLCache for HTTP response caching
Comparison:
// iOS NSCache
let cache = NSCache<NSString, AnyObject>()
cache.setObject(user, forKey: "user:123")
let cachedUser = cache.object(forKey: "user:123")
// Go Redis cache
cache.Set(ctx, "user:123", user, 1*time.Hour)
cache.Get(ctx, "user:123", &user)
Key differences:
- Redis = Distributed: Shared across all service instances
- NSCache = Local: Only in current process
- TTL = Automatic: Redis handles expiration
- Serialization = Automatic: JSON marshaling built-in
License
MIT License - see LICENSE file for details