zoobzio January 6, 2025 Edit this page

Best Practices

This guide covers recommended patterns for building applications with grub.

Key Design

Use Hierarchical Keys

Structure keys for efficient listing and logical grouping:

// Good: hierarchical, scannable
"tenant:acme:user:123"
"tenant:acme:session:abc"
"tenant:beta:user:456"

// List all for tenant
store.List(ctx, "tenant:acme:", 100)

// List all users for tenant
store.List(ctx, "tenant:acme:user:", 100)

Don't Put Dynamic Key Segments First

// Bad: random prefix prevents efficient scanning
key := fmt.Sprintf("%s:user:data", uuid.New())

// Good: type prefix first
key := fmt.Sprintf("user:%s:data", uuid.New())

Keep Keys Short

// Verbose
"application:users:profile:data:user_id_12345"

// Concise
"u:12345:profile"

Use Consistent Separators

// Pick one and stick with it
"user:123:session:abc"  // Colons
"user/123/session/abc"  // Slashes (good for blob storage)
"user.123.session.abc"  // Dots

Type Design

Keep Types Focused

// Good: single purpose
type Session struct {
    UserID    string    `json:"user_id"`
    Token     string    `json:"token"`
    ExpiresAt time.Time `json:"expires_at"`
}

// Bad: kitchen sink
type UserEverything struct {
    ID       string
    Profile  Profile
    Settings Settings
    Sessions []Session
    Orders   []Order
    // ...
}

Use Pointers Sparingly

Grub returns pointers to avoid copying. Internal fields don't need to be pointers:

// Good: value fields
type Config struct {
    Debug   bool   `json:"debug"`
    Version string `json:"version"`
}

// Unnecessary: pointer fields
type Config struct {
    Debug   *bool   `json:"debug"`
    Version *string `json:"version"`
}

Design for Serialization

// Good: explicit JSON tags
type User struct {
    ID        string    `json:"id"`
    Email     string    `json:"email"`
    CreatedAt time.Time `json:"created_at"`
}

// Problematic: unexported fields ignored
type User struct {
    id    string // Won't serialize
    Email string
}

Error Handling

Always Check Errors

// Good
user, err := store.Get(ctx, key)
if err != nil {
    return fmt.Errorf("loading user %s: %w", key, err)
}

// Bad: ignoring errors
user, _ := store.Get(ctx, key)

Use errors.Is for Semantic Errors

// Good
if errors.Is(err, grub.ErrNotFound) {
    return createDefault()
}

// Bad: string matching
if err.Error() == "grub: record not found" {
    return createDefault()
}

Wrap Errors with Context

user, err := store.Get(ctx, key)
if err != nil {
    return fmt.Errorf("user %s: %w", key, err)
}

Performance

Batch When Possible

// Good: single round trip
users, _ := store.GetBatch(ctx, keys)

// Bad: N round trips
for _, key := range keys {
    user, _ := store.Get(ctx, key)
}

Set Appropriate TTLs

// Session: expires
store.Set(ctx, "session:abc", &session, 24*time.Hour)

// Config: no expiration
store.Set(ctx, "config:app", &config, 0)

Use List Limits

// Good: bounded
keys, _ := store.List(ctx, prefix, 100)

// Risky: unbounded on large datasets
keys, _ := store.List(ctx, prefix, 0)

Choose the Right Codec

| Scenario             | Codec                 |
|----------------------|-----------------------|
| Interoperability     | JSONCodec             |
| Go-only, performance | GobCodec              |
| Custom needs         | Custom implementation |
// Performance-sensitive path
store := grub.NewStoreWithCodec[Data](provider, grub.GobCodec{})

Provider Selection

Match Provider to Use Case

| Use Case          | Provider           |
|-------------------|--------------------|
| Distributed cache | Redis              |
| Embedded/local    | BadgerDB           |
| Simple config     | BoltDB             |
| Files/media       | S3/GCS/Azure       |
| Structured data   | PostgreSQL/MariaDB |
| Embedded SQL      | SQLite             |

Plan for Provider Limitations

// BoltDB doesn't support TTL
if ttl > 0 {
    // Handle expiration at application level
    data.ExpiresAt = time.Now().Add(ttl)
}
store.Set(ctx, key, data, 0)

Test with Target Provider

// TestWithProductionProvider runs integration checks against the real
// backing provider; it is skipped under `go test -short`.
// (Original had a space in the identifier — "TestWithProduction Provider" —
// which is invalid Go and would not compile.)
func TestWithProductionProvider(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping integration test")
    }
    // Test with actual provider
}

Concurrency

Stores are Thread-Safe

// Safe: concurrent access
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
    wg.Add(1)
    go func(i int) {
        defer wg.Done()
        store.Set(ctx, fmt.Sprintf("key:%d", i), &data, 0)
    }(i)
}
wg.Wait()

Avoid Read-Modify-Write Races

// Dangerous: race condition
val, _ := store.Get(ctx, key)
val.Counter++
store.Set(ctx, key, val, 0)

// Safer: use provider-specific atomics or transactions
// Or design to avoid conflicts

Use Context Timeouts

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

user, err := store.Get(ctx, key)
if errors.Is(err, context.DeadlineExceeded) {
    // Handle timeout
}

Operational Guidelines

Log Provider Errors

user, err := store.Get(ctx, key)
if err != nil && !errors.Is(err, grub.ErrNotFound) {
    log.Error("store error", "key", key, "error", err)
}

Monitor Key Metrics

  • Operation latency (p50, p95, p99)
  • Error rates by type
  • Cache hit/miss ratio
  • Connection pool usage

Handle Provider Failures Gracefully

// GetWithFallback loads the value for key from store. When the key is
// missing, or the provider itself fails, it returns the result of
// fallback instead; a provider failure is additionally logged.
func GetWithFallback[T any](ctx context.Context, store *grub.Store[T], key string, fallback func() (*T, error)) (*T, error) {
    val, err := store.Get(ctx, key)
    switch {
    case err == nil:
        // Happy path: value found in the store.
        return val, nil
    case errors.Is(err, grub.ErrNotFound):
        // Absent key is expected; delegate quietly to the fallback.
        return fallback()
    default:
        // Provider error - log and fallback
        log.Warn("store unavailable", "error", err)
        return fallback()
    }
}

Plan for Schema Evolution

// Version your types
type UserV1 struct {
    ID   string `json:"id"`
    Name string `json:"name"`
}

type UserV2 struct {
    ID        string `json:"id"`
    FirstName string `json:"first_name"` // Split from Name
    LastName  string `json:"last_name"`
}

// migrateUser decodes stored JSON, preferring the current UserV2 shape
// and upgrading a legacy UserV1 payload on the fly (migrate-on-read).
// It returns an error only if the data is not valid JSON for either shape.
func migrateUser(data []byte) (*UserV2, error) {
    var v2 UserV2
    if err := json.Unmarshal(data, &v2); err == nil && v2.FirstName != "" {
        return &v2, nil
    }

    var v1 UserV1
    if err := json.Unmarshal(data, &v1); err != nil {
        return nil, err
    }

    // Migrate v1 to v2: split Name at the first space. strings.Cut
    // handles a single-word Name safely (the original indexed parts[1]
    // unconditionally and panicked when Name contained no space).
    first, last, _ := strings.Cut(v1.Name, " ")
    return &UserV2{
        ID:        v1.ID,
        FirstName: first,
        LastName:  last,
    }, nil
}

Anti-Patterns

Don't Store Large Blobs in Key-Value Stores

// Bad: large files in Redis
store.Set(ctx, "file:video.mp4", &LargeFile{Data: videoBytes}, 0)

// Good: use blob storage
bucket.Put(ctx, &grub.Object[FileMetadata]{
    Key:  "video.mp4",
    Data: FileMetadata{Name: "video.mp4", Size: len(videoBytes)},
})

Don't Create Stores Per-Request

// Bad: creates store on every request
func Handler(w http.ResponseWriter, r *http.Request) {
    store := grub.NewStore[Session](redis.New(client))
    // ...
}

// Good: reuse store
var sessionStore = grub.NewStore[Session](redis.New(client))

func Handler(w http.ResponseWriter, r *http.Request) {
    session, _ := sessionStore.Get(r.Context(), sessionID)
    // ...
}

Don't Ignore Context Cancellation

// Good: check context
select {
case <-ctx.Done():
    return ctx.Err()
default:
    return store.Set(ctx, key, value, 0)
}