Caching Patterns
Real-world caching patterns using grub stores.
Cache-Aside Pattern
The most common caching pattern: check cache first, fall back to source.
package cache
import (
"context"
"errors"
"time"
"github.com/zoobz-io/grub"
)
// CacheAside pairs a grub store with a source-of-truth loader: reads check
// the cache first and fall back to the source on a miss (cache-aside pattern).
type CacheAside[T any] struct {
	cache  *grub.Store[T]                                    // fast-path cache
	source func(ctx context.Context, key string) (*T, error) // authoritative loader on cache miss
	ttl    time.Duration                                     // TTL applied when populating the cache
}
// NewCacheAside wires a cache store, a source loader, and a fill TTL into a
// ready-to-use CacheAside.
func NewCacheAside[T any](
	cache *grub.Store[T],
	source func(ctx context.Context, key string) (*T, error),
	ttl time.Duration,
) *CacheAside[T] {
	ca := &CacheAside[T]{cache: cache, source: source, ttl: ttl}
	return ca
}
// Get returns the value for key, consulting the cache before the source.
// On a miss the value is loaded from the source and written back to the
// cache on a best-effort basis.
func (c *CacheAside[T]) Get(ctx context.Context, key string) (*T, error) {
	cached, cacheErr := c.cache.Get(ctx, key)
	switch {
	case cacheErr == nil:
		// Cache hit.
		return cached, nil
	case !errors.Is(cacheErr, grub.ErrNotFound):
		// Real cache failure (not just a miss) — surface it.
		return nil, cacheErr
	}
	// Cache miss: consult the source of truth.
	fresh, err := c.source(ctx, key)
	if err != nil {
		return nil, err
	}
	// Best-effort write-back; a failed Set only costs a future miss.
	_ = c.cache.Set(ctx, key, fresh, c.ttl)
	return fresh, nil
}
// Invalidate removes key from the cache. A missing key is not an error.
func (c *CacheAside[T]) Invalidate(ctx context.Context, key string) error {
	if err := c.cache.Delete(ctx, key); err != nil && !errors.Is(err, grub.ErrNotFound) {
		return err
	}
	return nil
}
Usage
// Source: database lookup used on cache misses.
source := func(ctx context.Context, key string) (*User, error) {
	return db.Get(ctx, key)
}
// Cache-aside wrapper with a 5-minute fill TTL.
userCache := NewCacheAside(
	grub.NewStore[User](redis.New(client)),
	source,
	5*time.Minute,
)
// Get user (cache first, then database).
user, err := userCache.Get(ctx, "user:123")
// Invalidate on update. NOTE: the returned error is ignored here; check it
// in production code.
userCache.Invalidate(ctx, "user:123")
Read-Through Cache
Cache handles loading automatically with configurable staleness.
package cache
import (
"context"
"errors"
"sync"
"time"
"github.com/zoobz-io/grub"
)
// CachedValue wraps a cached payload with explicit timing metadata so
// staleness can be evaluated independently of the store's own TTL.
type CachedValue[T any] struct {
	Value     T         `json:"value"`
	CachedAt  time.Time `json:"cached_at"`  // when the value was loaded
	ExpiresAt time.Time `json:"expires_at"` // after this instant the value is stale
}
// ReadThrough is a cache that loads missing or stale entries itself via
// loader, deduplicating concurrent loads per key.
type ReadThrough[T any] struct {
	cache   *grub.Store[CachedValue[T]]
	loader  func(ctx context.Context, key string) (*T, error)
	ttl     time.Duration
	loading sync.Map // key -> chan struct{}; prevents thundering herd
}
// NewReadThrough builds a ReadThrough around the given cache, loader, and TTL.
func NewReadThrough[T any](
	cache *grub.Store[CachedValue[T]],
	loader func(ctx context.Context, key string) (*T, error),
	ttl time.Duration,
) *ReadThrough[T] {
	rt := &ReadThrough[T]{cache: cache, loader: loader, ttl: ttl}
	return rt
}
// Get returns the value for key. Fresh cached values are served directly;
// expired or missing entries are reloaded via the loader. A sync.Map of
// in-flight loads ensures only one goroutine loads a given key at a time
// (thundering-herd protection); other callers wait for that load to finish.
func (r *ReadThrough[T]) Get(ctx context.Context, key string) (*T, error) {
	// Fast path: fresh cache hit.
	cached, err := r.cache.Get(ctx, key)
	if err == nil {
		if time.Now().Before(cached.ExpiresAt) {
			return &cached.Value, nil
		}
		// Entry exists but is stale — fall through and reload.
	} else if !errors.Is(err, grub.ErrNotFound) {
		return nil, err // real cache failure, not a miss
	}
	// Coordinate concurrent loads of the same key.
	ch := make(chan struct{})
	actual, loaded := r.loading.LoadOrStore(key, ch)
	if loaded {
		// Another goroutine owns the load — wait for it, but respect
		// cancellation so a stuck loader cannot block callers forever.
		select {
		case <-actual.(chan struct{}):
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		// The owner has finished; re-read whatever it cached. If the load
		// failed, nothing was cached and this returns grub.ErrNotFound.
		cached, err := r.cache.Get(ctx, key)
		if err == nil {
			return &cached.Value, nil
		}
		return nil, err
	}
	// This goroutine owns the load; release waiters when done.
	defer func() {
		r.loading.Delete(key)
		close(ch)
	}()
	// Load from source.
	val, err := r.loader(ctx, key)
	if err != nil {
		return nil, err
	}
	// Store with an explicit ExpiresAt so staleness can be checked without
	// relying on the backing store's TTL granularity.
	now := time.Now()
	_ = r.cache.Set(ctx, key, &CachedValue[T]{
		Value:     *val,
		CachedAt:  now,
		ExpiresAt: now.Add(r.ttl),
	}, r.ttl)
	return val, nil
}
Write-Through Cache
Update cache and source together.
package cache
import (
"context"
"time"
"github.com/zoobz-io/grub"
)
// WriteThrough keeps a cache and a backing source in sync: writes go to the
// source first (system of record), then the cache. The source interface is
// declared here, at the consumer, per Go convention.
type WriteThrough[T any] struct {
	cache  *grub.Store[T]
	source interface {
		Get(ctx context.Context, key string) (*T, error)
		Set(ctx context.Context, key string, val *T) error
		Delete(ctx context.Context, key string) error
	}
	ttl time.Duration // TTL applied to cache writes
}
// Get serves from the cache when possible; any cache failure (including a
// plain miss) falls through to the source, and the result is cached
// best-effort afterwards.
func (w *WriteThrough[T]) Get(ctx context.Context, key string) (*T, error) {
	if hit, err := w.cache.Get(ctx, key); err == nil {
		return hit, nil
	}
	// Cache miss or cache error: the source is authoritative either way.
	val, err := w.source.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	_ = w.cache.Set(ctx, key, val, w.ttl)
	return val, nil
}
// Set writes to the source first (it is the system of record), then mirrors
// the value into the cache with the configured TTL.
func (w *WriteThrough[T]) Set(ctx context.Context, key string, val *T) error {
	if err := w.source.Set(ctx, key, val); err != nil {
		return err
	}
	return w.cache.Set(ctx, key, val, w.ttl)
}
// Delete removes the key from the source first, then best-effort
// invalidates the cached copy.
func (w *WriteThrough[T]) Delete(ctx context.Context, key string) error {
	if err := w.source.Delete(ctx, key); err != nil {
		return err
	}
	// Cache invalidation is best-effort; a stale miss is acceptable.
	_ = w.cache.Delete(ctx, key)
	return nil
}
TTL Strategies
Fixed TTL
Simple, predictable cache duration.
// cacheTTL is the single fixed lifetime applied to every cached entry.
const cacheTTL = 5 * time.Minute
// Set stores val under key with the fixed TTL.
func (c *Cache[T]) Set(ctx context.Context, key string, val *T) error {
	return c.store.Set(ctx, key, val, cacheTTL)
}
Sliding Window
Reset TTL on each access.
// Get returns the cached value and refreshes its TTL, giving
// frequently-read keys a sliding expiration window.
func (c *SlidingCache[T]) Get(ctx context.Context, key string) (*T, error) {
	v, err := c.store.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	// Re-Set to push the expiry out; a failure here only shortens the
	// window, so the error is deliberately ignored.
	_ = c.store.Set(ctx, key, v, c.ttl)
	return v, nil
}
Tiered TTL
Different TTLs based on data type or access patterns.
var ttlByType = map[string]time.Duration{
"session": 24 * time.Hour,
"config": 1 * time.Hour,
"user": 5 * time.Minute,
"product": 15 * time.Minute,
}
func getTTL(keyType string) time.Duration {
if ttl, ok := ttlByType[keyType]; ok {
return ttl
}
return 5 * time.Minute // Default
}
Jittered TTL
Prevent cache stampedes by adding randomness.
import "math/rand"
func jitteredTTL(base time.Duration) time.Duration {
// Add 0-20% jitter
jitter := time.Duration(rand.Float64() * 0.2 * float64(base))
return base + jitter
}
// Set stores val with a jittered TTL to avoid synchronized expiry
// (cache stampedes) across entries written at the same time.
func (c *Cache[T]) Set(ctx context.Context, key string, val *T) error {
	return c.store.Set(ctx, key, val, jitteredTTL(c.baseTTL))
}
Cache Invalidation
Event-Based Invalidation
// User service
// Update persists user, then invalidates its cache entry and publishes an
// event so other services can drop their own derived caches.
func (s *UserService) Update(ctx context.Context, user *User) error {
	if err := s.db.Set(ctx, user.ID, user); err != nil {
		return err
	}
	// Drop the stale cache entry (best-effort).
	_ = s.cache.Delete(ctx, "user:"+user.ID)
	// Let interested services react to the change.
	s.events.Publish(ctx, "user.updated", user.ID)
	return nil
}
// Cache listener
// HandleUserUpdated drops the user's cache entry plus every cache derived
// from it when a user.updated event arrives.
func (c *CacheInvalidator) HandleUserUpdated(ctx context.Context, userID string) {
	// All deletions are best-effort; a miss just means nothing was cached.
	for _, key := range []string{
		"user:" + userID,
		"user-profile:" + userID,
		"user-permissions:" + userID,
	} {
		_ = c.store.Delete(ctx, key)
	}
}
Tag-Based Invalidation
Group cached items by tags for bulk invalidation.
type TaggedCache[T any] struct {
data *grub.Store[T]
tags *grub.Store[[]string] // tag -> keys
}
// Set stores val under key and registers key in each tag's index so the
// entry can later be bulk-invalidated via InvalidateTag. Keys are added to
// a tag index at most once, so re-Setting the same key does not grow the
// index unboundedly.
//
// NOTE(review): the tag-index update below is a non-atomic read-modify-write;
// concurrent Sets for the same tag can lose keys. If the backing store
// offers an atomic list/set primitive, prefer it here.
func (c *TaggedCache[T]) Set(ctx context.Context, key string, val *T, tags []string, ttl time.Duration) error {
	// Store the value first; tag bookkeeping is secondary.
	if err := c.data.Set(ctx, key, val, ttl); err != nil {
		return err
	}
	// Update tag indexes (best-effort).
	for _, tag := range tags {
		tagKey := "tag:" + tag
		existing, _ := c.tags.Get(ctx, tagKey)
		var keys []string
		if existing != nil {
			keys = *existing
		}
		// Avoid duplicate entries when the same key is re-Set with this tag.
		if !containsKey(keys, key) {
			keys = append(keys, key)
		}
		_ = c.tags.Set(ctx, tagKey, &keys, ttl)
	}
	return nil
}

// containsKey reports whether keys already includes key.
func containsKey(keys []string, key string) bool {
	for _, k := range keys {
		if k == key {
			return true
		}
	}
	return false
}
// InvalidateTag deletes every cached entry registered under tag, then the
// tag index itself. An unreadable or missing index is treated as "no keys".
func (c *TaggedCache[T]) InvalidateTag(ctx context.Context, tag string) error {
	tagKey := "tag:" + tag
	indexed, err := c.tags.Get(ctx, tagKey)
	if err != nil {
		// No index means nothing to invalidate.
		return nil
	}
	for _, k := range *indexed {
		_ = c.data.Delete(ctx, k)
	}
	return c.tags.Delete(ctx, tagKey)
}
Usage
cache := NewTaggedCache[Product](dataStore, tagStore)
// Cache product with tags
cache.Set(ctx, "product:123", &product, []string{"category:electronics", "vendor:acme"}, time.Hour)
// Invalidate all products in category
cache.InvalidateTag(ctx, "category:electronics")
Multi-Level Cache
Local memory cache with Redis backing.
package cache
import (
"context"
"sync"
"time"
"github.com/zoobz-io/grub"
)
// L1L2Cache layers a per-process in-memory cache (L1) over a shared remote
// store such as Redis (L2). L1 typically uses a shorter TTL than L2.
type L1L2Cache[T any] struct {
	l1    sync.Map       // key -> l1Entry[T]; in-memory (L1)
	l2    *grub.Store[T] // shared backing store, e.g. Redis (L2)
	l1TTL time.Duration  // lifetime of L1 entries
	l2TTL time.Duration  // lifetime of L2 entries
}
// l1Entry is an L1 cache slot: the value plus its local expiry instant.
type l1Entry[T any] struct {
	value     *T
	expiresAt time.Time
}
// Get checks the in-process L1 first, then the shared L2, promoting L2 hits
// into L1 with the (shorter) L1 TTL.
func (c *L1L2Cache[T]) Get(ctx context.Context, key string) (*T, error) {
	if raw, ok := c.l1.Load(key); ok {
		entry := raw.(l1Entry[T])
		if time.Now().Before(entry.expiresAt) {
			return entry.value, nil // L1 hit
		}
		// Stale L1 entry: drop it and consult L2.
		c.l1.Delete(key)
	}
	val, err := c.l2.Get(ctx, key)
	if err != nil {
		return nil, err
	}
	// Promote into L1 so the next read skips the network round trip.
	c.l1.Store(key, l1Entry[T]{value: val, expiresAt: time.Now().Add(c.l1TTL)})
	return val, nil
}
// Set writes through to L2 first; only on success is L1 updated, keeping
// L1 a strict subset of successfully persisted values.
func (c *L1L2Cache[T]) Set(ctx context.Context, key string, val *T) error {
	if err := c.l2.Set(ctx, key, val, c.l2TTL); err != nil {
		return err
	}
	c.l1.Store(key, l1Entry[T]{value: val, expiresAt: time.Now().Add(c.l1TTL)})
	return nil
}
// Delete evicts key from the local L1 and then from the shared L2.
func (c *L1L2Cache[T]) Delete(ctx context.Context, key string) error {
	c.l1.Delete(key) // local eviction cannot fail
	return c.l2.Delete(ctx, key)
}
Warming the Cache
Pre-populate cache on startup.
// WarmCache pre-populates cache for the given keys using loader, fanning
// the work out across a fixed worker pool. Individual load failures are
// collected rather than aborting the whole warm-up, and cache Set failures
// are ignored (warming is best-effort). Returns ctx.Err() if the context is
// cancelled, otherwise an error wrapping the first load failure (if any).
func WarmCache[T any](ctx context.Context, cache *grub.Store[T], keys []string, loader func(string) (*T, error), ttl time.Duration) error {
	const workers = 10
	keyCh := make(chan string, len(keys))
	var (
		wg sync.WaitGroup
		mu sync.Mutex
		// Renamed from "errors" so it cannot shadow the errors package.
		errs []error // guarded by mu
	)
	// Start workers.
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for key := range keyCh {
				// Stop doing real work once the caller gives up; keep
				// draining the channel so the send loop cannot wedge.
				if ctx.Err() != nil {
					continue
				}
				val, err := loader(key)
				if err != nil {
					mu.Lock()
					errs = append(errs, err)
					mu.Unlock()
					continue
				}
				_ = cache.Set(ctx, key, val, ttl)
			}
		}()
	}
	// Enqueue all keys; the channel is buffered to len(keys) so this never blocks.
	for _, key := range keys {
		keyCh <- key
	}
	close(keyCh)
	wg.Wait()
	if err := ctx.Err(); err != nil {
		return err
	}
	if len(errs) > 0 {
		// Surface the first underlying cause so callers can diagnose,
		// along with the total failure count.
		return fmt.Errorf("cache warming had %d errors: %w", len(errs), errs[0])
	}
	return nil
}
Usage
// On application startup: warm frequently-read config entries.
keys := []string{"config:app", "config:features", "config:limits"}
loader := func(key string) (*Config, error) {
	return db.Get(ctx, key)
}
// NOTE: WarmCache's error is ignored here; log or handle it in production code.
WarmCache(ctx, configCache, keys, loader, time.Hour)