Warren Test Framework

Status: Foundation Complete (Week 1) ✅ · Version: 1.0.0 · Last Updated: 2025-10-12

Overview

The Warren test framework provides reusable infrastructure for writing Go-based end-to-end, integration, and chaos tests for Warren. It replaces bash-based testing with type-safe, maintainable Go tests.

Features

  • Cluster Management: Create and manage multi-node test clusters
  • Multiple Runtimes: Support for Lima VMs, Docker containers, and local processes
  • Process Lifecycle: Start, stop, restart, and monitor Warren processes
  • Rich Assertions: Comprehensive test assertion helpers
  • Polling Utilities: Wait for conditions with timeouts
  • Log Capture: Automatic log capture with searching and filtering
  • Type Safety: Compile-time checks prevent test breakage
  • Parallel Execution: Tests can run in parallel with t.Parallel()

Architecture

test/framework/
├── types.go       # Core type definitions
├── cluster.go     # Cluster management
├── process.go     # Process lifecycle & logging
├── assertions.go  # Test assertions
├── waiters.go     # Polling utilities
├── vm.go          # VM interface (TODO)
├── lima_vm.go     # Lima implementation (TODO)
└── docker_vm.go   # Docker implementation (TODO)

Quick Start

1. Basic Cluster Test
package e2e

import (
    "context"
    "testing"
    "time"

    "github.com/cuemby/warren/test/framework"
)

func TestBasicCluster(t *testing.T) {
    // Skip in short mode
    if testing.Short() {
        t.Skip("Skipping e2e test in short mode")
    }

    // Create context
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
    defer cancel()

    // Configure cluster
    config := framework.DefaultClusterConfig()
    config.NumManagers = 3
    config.NumWorkers = 2
    config.Runtime = framework.RuntimeLocal

    // Create cluster
    cluster, err := framework.NewCluster(config)
    if err != nil {
        t.Fatalf("Failed to create cluster: %v", err)
    }
    defer cluster.Cleanup()

    // Start cluster
    if err := cluster.Start(); err != nil {
        t.Fatalf("Failed to start cluster: %v", err)
    }

    // Create assertions helper
    assert := framework.NewAssertions(t)
    assert.HasLeader(cluster)
    assert.QuorumSize(3, cluster)

    t.Log("✓ Cluster started successfully")
}
2. Service Lifecycle Test
func TestServiceLifecycle(t *testing.T) {
    // Setup cluster (omitted for brevity)
    cluster := setupTestCluster(t)
    defer cluster.Cleanup()

    // Get leader
    leader, err := cluster.GetLeader()
    if err != nil {
        t.Fatalf("No leader: %v", err)
    }

    // Create assertions and waiter
    assert := framework.NewAssertions(t)
    waiter := framework.DefaultWaiter()

    // Create service
    err = leader.Client.CreateService("nginx", "nginx:alpine", 3)
    assert.NoError(err, "Failed to create service")

    // Wait for replicas to be running
    ctx := context.Background()
    err = waiter.WaitForReplicas(ctx, leader.Client, "nginx", 3)
    assert.NoError(err, "Failed to wait for replicas")

    // Verify service is running
    assert.ServiceRunning("nginx", leader.Client)
    assert.ServiceReplicas("nginx", 3, leader.Client)

    // Delete service
    err = leader.Client.DeleteService("nginx")
    assert.NoError(err, "Failed to delete service")

    // Wait for deletion
    err = waiter.WaitForServiceDeleted(ctx, leader.Client, "nginx")
    assert.NoError(err, "Service not deleted")

    t.Log("✓ Service lifecycle test passed")
}
3. Failover Test
func TestLeaderFailover(t *testing.T) {
    cluster := setupTestCluster(t)
    defer cluster.Cleanup()

    assert := framework.NewAssertions(t)
    waiter := framework.DefaultWaiter()

    // Get current leader
    leader, err := cluster.GetLeader()
    assert.NoError(err, "Failed to get leader")

    oldLeaderID := leader.ID
    t.Logf("Current leader: %s", oldLeaderID)

    // Kill the leader
    err = cluster.KillManager(oldLeaderID)
    assert.NoError(err, "Failed to kill leader")

    // Wait for new leader election
    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
    defer cancel()

    err = waiter.WaitForLeaderElection(ctx, cluster)
    assert.NoError(err, "New leader not elected")

    // Verify new leader
    newLeader, err := cluster.GetLeader()
    assert.NoError(err, "Failed to get new leader")
    assert.NotEqual(oldLeaderID, newLeader.ID, "Leader should have changed")

    t.Logf("✓ Leader failover: %s → %s", oldLeaderID, newLeader.ID)
}

API Reference

Cluster Management
Creating a Cluster
// Default configuration (3 managers, 2 workers, Lima runtime)
config := framework.DefaultClusterConfig()

// Custom configuration
config := &framework.ClusterConfig{
    NumManagers:   3,
    NumWorkers:    2,
    Runtime:       framework.RuntimeLocal,
    DataDir:       "/tmp/warren-test",
    WarrenBinary:  "bin/warren",
    KeepOnFailure: false,  // Set to true to keep the cluster running after a failed test (for debugging)
    LogLevel:      "info",
}

cluster, err := framework.NewCluster(config)
Cluster Operations
// Start entire cluster
err := cluster.Start()

// Stop cluster gracefully
err := cluster.Stop()

// Clean up all resources
err := cluster.Cleanup()

// Get current leader
leader, err := cluster.GetLeader()

// Wait for quorum
err := cluster.WaitForQuorum()

// Wait for workers to connect
err := cluster.WaitForWorkers()

// Kill a manager (simulate crash)
err := cluster.KillManager("manager-1")

// Restart a manager
err := cluster.RestartManager("manager-1")
Process Management
// Create process
process := framework.NewProcess("bin/warren")
process.Args = []string{"cluster", "init", "--node-id=test"}
process.Env = []string{"LOG_LEVEL=debug"}
process.LogFile = "/tmp/warren.log"

// Start process
err := process.Start()

// Stop gracefully (SIGTERM)
err := process.Stop()

// Kill forcefully (SIGKILL)
err := process.Kill()

// Restart
err := process.Restart()

// Check if running
isRunning := process.IsRunning()

// Get logs
logs := process.Logs()

// Wait for specific log line
err := process.WaitForLog("cluster started", 30*time.Second)
Assertions
assert := framework.NewAssertions(t)

// Service assertions
assert.ServiceExists("nginx", client)
assert.ServiceRunning("nginx", client)
assert.ServiceReplicas("nginx", 3, client)
assert.ServiceDeleted("nginx", client)

// Container assertions
assert.ContainerRunning("container-123", client)
assert.ContainerHealthy("container-123", client)

// Cluster assertions
assert.HasLeader(cluster)
assert.QuorumSize(3, cluster)
assert.NodeCount(5, client)

// Generic assertions
assert.NoError(err, "Operation failed")
assert.Error(err, "Expected error")
assert.Equal(3, count, "Replica count mismatch")
assert.True(condition, "Condition should be true")
assert.Contains(logs, "started", "Logs should contain 'started'")

// Eventually (polling assertion)
assert.Eventually(func() bool {
    svc, _ := client.GetService("nginx")
    return svc.Status == "running"
}, 30*time.Second, 1*time.Second, "service should be running")

// Logging (non-failing)
assert.Step("Creating service")
assert.Info("Starting manager-1")
assert.Success("Cluster started")
assert.Warning("High memory usage detected")
Waiters
waiter := framework.DefaultWaiter()  // 30s timeout, 1s interval

// Custom waiter
waiter := framework.NewWaiter(60*time.Second, 2*time.Second)

// Wait for service
ctx := context.Background()
err := waiter.WaitForServiceRunning(ctx, client, "nginx")
err := waiter.WaitForServiceDeleted(ctx, client, "nginx")

// Wait for replicas
err := waiter.WaitForReplicas(ctx, client, "nginx", 3)

// Wait for container
err := waiter.WaitForContainerRunning(ctx, client, "container-123")
err := waiter.WaitForContainerHealthy(ctx, client, "container-123")

// Wait for cluster state
err := waiter.WaitForLeaderElection(ctx, cluster)
err := waiter.WaitForQuorum(ctx, cluster)
err := waiter.WaitForNodeCount(ctx, client, 5)
err := waiter.WaitForWorkerNodes(ctx, client, 2)
err := waiter.WaitForClusterHealthy(ctx, client)

// Wait for secrets/volumes
err := waiter.WaitForSecret(ctx, client, "my-secret")
err := waiter.WaitForVolume(ctx, client, "my-volume")

// Generic wait with condition
err := waiter.WaitFor(ctx, func() bool {
    // Custom condition
    return someCondition()
}, "custom condition description")

// Retry with exponential backoff
err := framework.Retry(ctx, 5, time.Second, func() error {
    return someOperation()
})

Test Patterns

Table-Driven Tests
func TestServiceCreation(t *testing.T) {
    cluster := setupTestCluster(t)
    defer cluster.Cleanup()

    leader, _ := cluster.GetLeader()

    tests := []struct {
        name     string
        image    string
        replicas int
        wantErr  bool
    }{
        {"nginx", "nginx:alpine", 1, false},
        {"redis", "redis:alpine", 3, false},
        {"invalid", "nonexistent:image", 1, true},
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := leader.Client.CreateService(tt.name, tt.image, tt.replicas)
            if (err != nil) != tt.wantErr {
                t.Errorf("CreateService() error = %v, wantErr %v", err, tt.wantErr)
            }
        })
    }
}
Parallel Tests
func TestParallelServices(t *testing.T) {
    cluster := setupTestCluster(t)
    defer cluster.Cleanup()

    services := []string{"nginx", "redis", "postgres"}

    for _, svc := range services {
        svc := svc  // Capture range variable
        t.Run(svc, func(t *testing.T) {
            t.Parallel()  // Run in parallel

            leader, _ := cluster.GetLeader()
            // Create and test a service via leader.Client
        })
    }
}
Subtests
func TestServiceLifecycle(t *testing.T) {
    cluster := setupTestCluster(t)
    defer cluster.Cleanup()

    leader, _ := cluster.GetLeader()
    assert := framework.NewAssertions(t)

    t.Run("create", func(t *testing.T) {
        err := leader.Client.CreateService("nginx", "nginx:alpine", 3)
        assert.NoError(err, "Failed to create service")
    })

    t.Run("scale", func(t *testing.T) {
        _, err := leader.Client.UpdateService("nginx", 5)
        assert.NoError(err, "Failed to scale service")
    })

    t.Run("delete", func(t *testing.T) {
        err := leader.Client.DeleteService("nginx")
        assert.NoError(err, "Failed to delete service")
    })
}

Configuration

Environment Variables
# Warren binary path
export WARREN_BINARY="bin/warren"

# Test data directory
export WARREN_TEST_DATA_DIR="/tmp/warren-test"

# Runtime type
export WARREN_TEST_RUNTIME="local"  # local, lima, or docker
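
A sketch of a helper that overlays these variables onto the default configuration (testConfigFromEnv is a hypothetical name; only the standard-library os package is used):

package e2e

import (
    "os"

    "github.com/cuemby/warren/test/framework"
)

// testConfigFromEnv overlays the environment variables above onto
// the default cluster configuration.
func testConfigFromEnv() *framework.ClusterConfig {
    config := framework.DefaultClusterConfig()
    if v := os.Getenv("WARREN_BINARY"); v != "" {
        config.WarrenBinary = v
    }
    if v := os.Getenv("WARREN_TEST_DATA_DIR"); v != "" {
        config.DataDir = v
    }
    if v := os.Getenv("WARREN_TEST_RUNTIME"); v != "" {
        config.Runtime = framework.RuntimeType(v)
    }
    return config
}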
Test Flags
# Run all tests
go test ./test/e2e/...

# Run specific test
go test -run TestClusterFormation ./test/e2e/...

# Skip long-running tests
go test -short ./test/e2e/...

# Enable verbose output
go test -v ./test/e2e/...

# Run with race detector
go test -race ./test/e2e/...

# Set timeout
go test -timeout 10m ./test/e2e/...

# Run in parallel
go test -parallel 4 ./test/e2e/...

Best Practices

1. Always Use defer for Cleanup
cluster, err := framework.NewCluster(config)
if err != nil {
    t.Fatalf("Failed to create cluster: %v", err)
}
defer cluster.Cleanup()  // Ensure cleanup even if test fails
2. Use Context with Timeout
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()

err := waiter.WaitForServiceRunning(ctx, client, "nginx")
3. Skip Long Tests in Short Mode
if testing.Short() {
    t.Skip("Skipping e2e test in short mode")
}
4. Use t.Helper() in Helper Functions
func setupTestCluster(t *testing.T) *framework.Cluster {
    t.Helper()  // Mark as helper for better error reporting

    cluster, err := framework.NewCluster(framework.DefaultClusterConfig())
    if err != nil {
        t.Fatalf("Failed to create cluster: %v", err)
    }

    return cluster
}
5. Log Test Steps
assert := framework.NewAssertions(t)
assert.Step("Step 1: Creating cluster")
// ...
assert.Step("Step 2: Starting services")
// ...
assert.Success("Test completed successfully")
6. Keep Tests Fast
  • Use t.Parallel() where possible
  • Use smaller clusters for unit-like tests (1 manager, 1 worker; see the sketch after this list)
  • Use timeouts to fail fast
  • Consider using local runtime over Lima/Docker for speed
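
For example, a minimal single-node configuration for fast, unit-like tests (a sketch; the values shown are illustrative):

config := framework.DefaultClusterConfig()
config.NumManagers = 1
config.NumWorkers = 1
config.Runtime = framework.RuntimeLocal  // local processes start faster than VMs

cluster, err := framework.NewCluster(config)
if err != nil {
    t.Fatalf("Failed to create cluster: %v", err)
}
defer cluster.Cleanup()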
7. Test in CI
# .github/workflows/e2e.yml
name: E2E Tests

on: [push, pull_request]

jobs:
  e2e:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.24'

      - name: Build Warren
        run: make build

      - name: Run E2E Tests
        run: go test -v -timeout 30m ./test/e2e/...

Troubleshooting

Test Hangs
  • Check timeouts in Context and Waiter
  • Use t.Log() to add visibility
  • Run with -v flag for verbose output
  • Check process logs: cluster.Managers[0].Process.Logs() (see the sketch below)
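
For example, a sketch that dumps a manager's captured logs before failing when a wait times out:

if err := waiter.WaitForQuorum(ctx, cluster); err != nil {
    // Surface recent manager output in the test log before failing
    t.Log(cluster.Managers[0].Process.Logs())
    t.Fatalf("Quorum not reached: %v", err)
}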
Port Conflicts
  • Ensure no other Warren instances running
  • Use unique ports for each test
  • Clean up previous test resources
Permission Errors
  • Check file permissions on data directories
  • Ensure Warren binary is executable
  • Run with appropriate privileges if needed
Cleanup Failures
  • Check KeepOnFailure option for debugging
  • Manually clean up: rm -rf /tmp/warren-test*
  • Kill lingering processes: pkill warren

Future Enhancements

  • Lima VM implementation (vm.go, lima_vm.go)
  • Docker container implementation (docker_vm.go)
  • Chaos testing utilities (chaos/)
  • Benchmark helpers (benchmark/)
  • Test fixtures and factories
  • Network utilities (port allocation, TCP checks)
  • File utilities (temp dirs, file ops)
  • Enhanced logging (structured, colored)

Contributing

When adding new framework features:

  1. Add tests for framework code (framework/*_test.go)
  2. Update this README with examples
  3. Follow existing patterns and conventions
  4. Ensure backward compatibility
  5. Document any breaking changes

Resources

  • Main Repo: github.com/cuemby/warren
  • Test Migration Plan: tasks/todo.md (Phase 1 Stabilization)
  • Example Tests: test/e2e/ (coming soon)
  • Integration Tests: test/integration/ (existing Go tests)

Maintained by: Warren Test Infrastructure Team · Version: 1.0.0 (Foundation - Week 1 Complete) · Last Updated: 2025-10-12

Documentation

Constants

This section is empty.

Variables

This section is empty.

Functions

func PollUntil

func PollUntil(ctx context.Context, interval time.Duration, condition func() bool) error

PollUntil polls a condition until it returns true or context is cancelled

func PollUntilWithError

func PollUntilWithError(ctx context.Context, interval time.Duration, condition func() (bool, error)) error

PollUntilWithError polls a condition that can return an error

func Retry

func Retry(ctx context.Context, attempts int, initialDelay time.Duration, operation func() error) error

Retry retries an operation with exponential backoff
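
A sketch of typical use (checkHealth and createService stand in for caller-supplied logic):

// Poll every second until the condition reports true or ctx is cancelled.
err := PollUntil(ctx, time.Second, func() bool {
    return checkHealth()
})

// Retry a flaky operation up to 5 times, starting with a 1s delay
// that grows exponentially between attempts.
err = Retry(ctx, 5, time.Second, func() error {
    return createService()
})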

Types

type Assertions

type Assertions struct {
	// contains filtered or unexported fields
}

Assertions provides test assertion helpers

func NewAssertions

func NewAssertions(t TestingT) *Assertions

NewAssertions creates a new Assertions instance

func (*Assertions) ContainerHealthy

func (a *Assertions) ContainerHealthy(containerID string, client *Client)

ContainerHealthy asserts that a container is healthy. TODO: the Container proto doesn't have a health_status field yet; actual_state is used as a proxy.

func (*Assertions) ContainerRunning

func (a *Assertions) ContainerRunning(containerID string, client *Client)

ContainerRunning asserts that a container is running

func (*Assertions) Contains

func (a *Assertions) Contains(haystack, needle, msg string)

Contains asserts that a string contains a substring

func (*Assertions) Equal

func (a *Assertions) Equal(expected, actual interface{}, msg string)

Equal asserts that two values are equal

func (*Assertions) Error

func (a *Assertions) Error(err error, msg string)

Error asserts that the error is not nil

func (*Assertions) Errorf

func (a *Assertions) Errorf(format string, args ...interface{})

Errorf logs an error and fails the test

func (*Assertions) Eventually

func (a *Assertions) Eventually(condition func() bool, timeout, interval time.Duration, msg string)

Eventually repeatedly runs a condition until it returns true or timeout occurs

func (*Assertions) EventuallyWithContext

func (a *Assertions) EventuallyWithContext(ctx context.Context, condition func() bool, interval time.Duration, msg string)

EventuallyWithContext is like Eventually but uses a provided context

func (*Assertions) Fail

func (a *Assertions) Fail(msg string)

Fail marks the test as failed but continues execution

func (*Assertions) FailNow

func (a *Assertions) FailNow()

FailNow fails the test immediately without logging

func (*Assertions) False

func (a *Assertions) False(condition bool, msg string)

False asserts that a condition is false

func (*Assertions) Fatalf

func (a *Assertions) Fatalf(format string, args ...interface{})

Fatalf logs a fatal error and stops the test immediately

func (*Assertions) HasLeader

func (a *Assertions) HasLeader(cluster *Cluster)

HasLeader asserts that the cluster has a leader

func (*Assertions) Info

func (a *Assertions) Info(msg string)

Info logs an informational message

func (*Assertions) Len

func (a *Assertions) Len(obj interface{}, expected int, msg string)

Len asserts that a slice or map has a specific length

func (*Assertions) Log

func (a *Assertions) Log(msg string)

Log logs a message (non-failing)

func (*Assertions) Logf

func (a *Assertions) Logf(format string, args ...interface{})

Logf logs a formatted message (non-failing)

func (*Assertions) Nil

func (a *Assertions) Nil(obj interface{}, msg string)

Nil asserts that a value is nil

func (*Assertions) NoError

func (a *Assertions) NoError(err error, msg string)

NoError asserts that the error is nil

func (*Assertions) NodeCount

func (a *Assertions) NodeCount(expected int, client *Client)

NodeCount asserts that the cluster has the expected number of nodes

func (*Assertions) NodeRole

func (a *Assertions) NodeRole(nodeID, expectedRole string, client *Client)

NodeRole asserts that a node has a specific role

func (*Assertions) NotContains

func (a *Assertions) NotContains(haystack, needle, msg string)

NotContains asserts that a string does not contain a substring

func (*Assertions) NotEqual

func (a *Assertions) NotEqual(expected, actual interface{}, msg string)

NotEqual asserts that two values are not equal

func (*Assertions) NotNil

func (a *Assertions) NotNil(obj interface{}, msg string)

NotNil asserts that a value is not nil

func (*Assertions) QuorumSize

func (a *Assertions) QuorumSize(expected int, cluster *Cluster)

QuorumSize asserts that the cluster has the expected quorum size

func (*Assertions) ServiceDeleted

func (a *Assertions) ServiceDeleted(name string, client *Client)

ServiceDeleted asserts that a service no longer exists

func (*Assertions) ServiceExists

func (a *Assertions) ServiceExists(name string, client *Client)

ServiceExists asserts that a service exists

func (*Assertions) ServiceReplicas

func (a *Assertions) ServiceReplicas(name string, expected int, client *Client)

ServiceReplicas asserts that a service has the expected number of running replicas

func (*Assertions) ServiceRunning

func (a *Assertions) ServiceRunning(name string, client *Client)

ServiceRunning asserts that a service has at least one running task

func (*Assertions) Step

func (a *Assertions) Step(step string)

Step logs a test step (for visibility in test output)

func (*Assertions) Success

func (a *Assertions) Success(msg string)

Success logs a success message

func (*Assertions) True

func (a *Assertions) True(condition bool, msg string)

True asserts that a condition is true

func (*Assertions) Warning

func (a *Assertions) Warning(msg string)

Warning logs a warning message

type Client

type Client struct {
	*client.Client
}

Client wraps the Warren client with test-friendly methods

func NewClient

func NewClient(c *client.Client) *Client

NewClient creates a new test client wrapper

func (*Client) CreateIngress

func (c *Client) CreateIngress(name string, spec *IngressSpec) error

CreateIngress creates an ingress rule
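
A sketch of creating an ingress using the IngressSpec, IngressBackend, and IngressTLS types documented below (host, backend, and secret name are illustrative):

err := client.CreateIngress("web", &IngressSpec{
    Host:     "app.example.test",
    Path:     "/",
    PathType: "Prefix",
    Backend: IngressBackend{
        Service: "nginx",
        Port:    80,
    },
    TLS: &IngressTLS{
        Enabled:    true,
        SecretName: "web-tls",
    },
})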

func (*Client) CreateService

func (c *Client) CreateService(name, image string, replicas int) error

CreateService creates a service with default environment

func (*Client) CreateServiceWithEnv

func (c *Client) CreateServiceWithEnv(name, image string, replicas int, env map[string]string) error

CreateServiceWithEnv creates a service with custom environment variables

func (*Client) DeleteIngress

func (c *Client) DeleteIngress(name string) error

DeleteIngress deletes an ingress rule

func (*Client) ListIngresses

func (c *Client) ListIngresses() ([]*proto.Ingress, error)

ListIngresses lists all ingress rules

type Cluster

type Cluster struct {
	// Config is the cluster configuration
	Config *ClusterConfig
	// Managers contains all manager nodes in the cluster
	Managers []*Manager
	// Workers contains all worker nodes in the cluster
	Workers []*Worker
	// contains filtered or unexported fields
}

Cluster represents a test Warren cluster

func NewCluster

func NewCluster(config *ClusterConfig) (*Cluster, error)

NewCluster creates a new test cluster with the given configuration

func (*Cluster) Cleanup

func (c *Cluster) Cleanup() error

Cleanup cleans up all cluster resources

func (*Cluster) GetLeader

func (c *Cluster) GetLeader() (*Manager, error)

GetLeader returns the current Raft leader manager

func (*Cluster) KillManager

func (c *Cluster) KillManager(id string) error

KillManager kills a specific manager (simulates crash)

func (*Cluster) RestartManager

func (c *Cluster) RestartManager(id string) error

RestartManager restarts a specific manager

func (*Cluster) Start

func (c *Cluster) Start() error

Start starts the entire cluster (managers and workers)

func (*Cluster) Stop

func (c *Cluster) Stop() error

Stop stops the entire cluster gracefully

func (*Cluster) WaitForQuorum

func (c *Cluster) WaitForQuorum() error

WaitForQuorum waits for Raft quorum to be established

func (*Cluster) WaitForWorkers

func (c *Cluster) WaitForWorkers() error

WaitForWorkers waits for all workers to connect to the cluster

type ClusterConfig

type ClusterConfig struct {
	// NumManagers is the number of manager nodes to create
	NumManagers int
	// NumWorkers is the number of worker nodes to create
	NumWorkers int
	// UseLima indicates whether to use Lima VMs (default: true)
	UseLima bool
	// Runtime specifies which runtime to use (Lima, Docker, Local)
	Runtime RuntimeType
	// ManagerVMConfig is the VM configuration for manager nodes
	ManagerVMConfig *VMConfig
	// WorkerVMConfig is the VM configuration for worker nodes
	WorkerVMConfig *VMConfig
	// DataDir is the base directory for cluster data
	DataDir string
	// WarrenBinary is the path to the Warren binary
	WarrenBinary string
	// KeepOnFailure keeps the cluster running if tests fail (for debugging)
	KeepOnFailure bool
	// LogLevel sets the logging level for Warren processes
	LogLevel string
}

ClusterConfig defines the configuration for a test cluster

func DefaultClusterConfig

func DefaultClusterConfig() *ClusterConfig

DefaultClusterConfig returns a default cluster configuration

type IngressBackend

type IngressBackend struct {
	Service string
	Port    int
}

IngressBackend defines the backend service for an ingress rule

type IngressSpec

type IngressSpec struct {
	Host     string
	Path     string
	PathType string // "Exact" or "Prefix"
	Backend  IngressBackend
	TLS      *IngressTLS
}

IngressSpec defines an ingress rule for testing

type IngressTLS

type IngressTLS struct {
	Enabled    bool
	SecretName string
}

IngressTLS defines TLS configuration for an ingress

type LogBuffer

type LogBuffer struct {
	// contains filtered or unexported fields
}

LogBuffer provides thread-safe log buffering with timestamps

func (*LogBuffer) Append

func (lb *LogBuffer) Append(line string)

Append adds a log line to the buffer

func (*LogBuffer) Clear

func (lb *LogBuffer) Clear()

Clear clears all logs

func (*LogBuffer) Contains

func (lb *LogBuffer) Contains(pattern string) bool

Contains checks if the logs contain a specific pattern

func (*LogBuffer) Lines

func (lb *LogBuffer) Lines() int

Lines returns the number of log lines

func (*LogBuffer) Since

func (lb *LogBuffer) Since(since time.Time) string

Since returns logs since the given timestamp

func (*LogBuffer) String

func (lb *LogBuffer) String() string

String returns all logs as a single string
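
A sketch of typical use (this assumes the zero value of LogBuffer is ready to use, since no constructor is exported):

var buf LogBuffer
buf.Append("manager started")
buf.Append("raft: leader elected")

if buf.Contains("leader elected") {
    fmt.Printf("captured %d lines:\n%s", buf.Lines(), buf.String())
}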

type Manager

type Manager struct {
	// ID is the unique identifier for this manager
	ID string
	// VM is the virtual machine or container running this manager
	VM VM
	// APIAddr is the gRPC API address (host:port)
	APIAddr string
	// RaftAddr is the Raft consensus address (host:port)
	RaftAddr string
	// Client is the Warren client connected to this manager
	Client *Client
	// RawClient is the underlying Warren client (for advanced use)
	RawClient *client.Client
	// Process is the Warren process (if running locally)
	Process *Process
	// DataDir is the data directory for this manager
	DataDir string
	// IsLeader indicates if this manager is the Raft leader
	IsLeader bool
}

Manager represents a manager node in the test cluster

type Process

type Process struct {
	Binary  string
	Args    []string
	Env     []string
	Ctx     context.Context
	Cancel  context.CancelFunc
	LogFile string
	PID     int
	// contains filtered or unexported fields
}

Process manages a Warren process with logging and lifecycle control

func NewProcess

func NewProcess(binary string) *Process

NewProcess creates a new Process instance

func (*Process) IsRunning

func (p *Process) IsRunning() bool

IsRunning returns true if the process is currently running

func (*Process) Kill

func (p *Process) Kill() error

Kill forcefully kills the process with SIGKILL

func (*Process) Logs

func (p *Process) Logs() string

Logs returns all captured logs as a string

func (*Process) LogsSince

func (p *Process) LogsSince(since time.Time) string

LogsSince returns logs since the given timestamp

func (*Process) Restart

func (p *Process) Restart() error

Restart restarts the process

func (*Process) Start

func (p *Process) Start() error

Start starts the process

func (*Process) Stop

func (p *Process) Stop() error

Stop stops the process gracefully with SIGTERM

func (*Process) Wait

func (p *Process) Wait() error

Wait waits for the process to exit

func (*Process) WaitForLog

func (p *Process) WaitForLog(pattern string, timeout time.Duration) error

WaitForLog waits for a specific log line to appear
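
A sketch of starting a process and blocking until a known log line appears (the pattern and timeout are illustrative):

process := NewProcess("bin/warren")
process.Args = []string{"cluster", "init", "--node-id=test"}

if err := process.Start(); err != nil {
    t.Fatalf("start: %v", err)
}
defer process.Stop()

// Fails the wait if the line does not appear within 30 seconds.
if err := process.WaitForLog("cluster started", 30*time.Second); err != nil {
    t.Fatalf("process never reported ready: %v", err)
}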

type RuntimeType

type RuntimeType string

RuntimeType defines the type of runtime for test clusters

const (
	// RuntimeLima uses Lima VMs for testing
	RuntimeLima RuntimeType = "lima"
	// RuntimeDocker uses Docker containers for testing
	RuntimeDocker RuntimeType = "docker"
	// RuntimeLocal uses local processes for testing
	RuntimeLocal RuntimeType = "local"
)

type ServicePort

type ServicePort struct {
	ContainerPort int
	Protocol      string
}

ServicePort defines a port mapping

type ServiceSpec

type ServiceSpec struct {
	Name     string
	Image    string
	Replicas int
	Env      map[string]string
	Ports    []ServicePort
}

ServiceSpec defines a service for testing

type TestContext

type TestContext struct {
	// T is the testing.T instance
	T TestingT
	// Ctx is the context for test operations
	Ctx context.Context
	// Cancel cancels the test context
	Cancel context.CancelFunc
	// Timeout is the default timeout for operations
	Timeout time.Duration
	// contains filtered or unexported fields
}

TestContext provides utilities for test execution

type TestingT

type TestingT interface {
	Logf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	FailNow()
	Failed() bool
	Name() string
	Helper()
}

TestingT is an interface matching testing.T
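
Because Assertions depends on this interface rather than on *testing.T directly, assertion behavior can itself be unit-tested with a recorder. A sketch (recorderT is hypothetical):

// recorderT records failures instead of aborting the test run.
type recorderT struct {
    failed bool
}

func (r *recorderT) Logf(format string, args ...interface{})   {}
func (r *recorderT) Errorf(format string, args ...interface{}) { r.failed = true }
func (r *recorderT) Fatalf(format string, args ...interface{}) { r.failed = true }
func (r *recorderT) FailNow()                                  { r.failed = true }
func (r *recorderT) Failed() bool                              { return r.failed }
func (r *recorderT) Name() string                              { return "recorder" }
func (r *recorderT) Helper()                                   {}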

type VM

type VM interface {
	// ID returns the unique identifier for this VM
	ID() string
	// Start starts the VM
	Start(ctx context.Context) error
	// Stop stops the VM gracefully
	Stop(ctx context.Context) error
	// Kill forcefully terminates the VM
	Kill(ctx context.Context) error
	// IsRunning returns true if the VM is currently running
	IsRunning() bool
	// Exec executes a command in the VM and returns the output
	Exec(ctx context.Context, command string, args ...string) (string, error)
	// CopyFile copies a file from the host to the VM
	CopyFile(ctx context.Context, src, dst string) error
	// GetIP returns the IP address of the VM
	GetIP() (string, error)
	// WaitForBoot waits for the VM to finish booting
	WaitForBoot(ctx context.Context) error
}

VM represents a virtual machine or container for testing
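
Until the Lima and Docker implementations land, a no-op stub can satisfy the interface for tests that run Warren as local processes. A sketch (stubVM is hypothetical):

// stubVM satisfies VM without managing any real machine.
type stubVM struct {
    id      string
    running bool
}

func (v *stubVM) ID() string                      { return v.id }
func (v *stubVM) Start(ctx context.Context) error { v.running = true; return nil }
func (v *stubVM) Stop(ctx context.Context) error  { v.running = false; return nil }
func (v *stubVM) Kill(ctx context.Context) error  { v.running = false; return nil }
func (v *stubVM) IsRunning() bool                 { return v.running }
func (v *stubVM) Exec(ctx context.Context, command string, args ...string) (string, error) {
    return "", nil
}
func (v *stubVM) CopyFile(ctx context.Context, src, dst string) error { return nil }
func (v *stubVM) GetIP() (string, error)                { return "127.0.0.1", nil }
func (v *stubVM) WaitForBoot(ctx context.Context) error { return nil }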

type VMConfig

type VMConfig struct {
	// CPUs is the number of CPUs to allocate
	CPUs int
	// Memory is the amount of memory to allocate (e.g., "2GiB")
	Memory string
	// Disk is the disk size (e.g., "10GiB")
	Disk string
}

VMConfig defines the configuration for a VM

type Waiter

type Waiter struct {
	// contains filtered or unexported fields
}

Waiter provides utilities for waiting on conditions with timeouts

func DefaultWaiter

func DefaultWaiter() *Waiter

DefaultWaiter returns a waiter with sensible defaults (30s timeout, 1s interval)

func NewWaiter

func NewWaiter(timeout, interval time.Duration) *Waiter

NewWaiter creates a new Waiter with the given timeout and polling interval

func (*Waiter) WaitFor

func (w *Waiter) WaitFor(ctx context.Context, condition func() bool, description string) error

WaitFor waits for a condition to become true

func (*Waiter) WaitForClusterHealthy

func (w *Waiter) WaitForClusterHealthy(ctx context.Context, client *Client) error

WaitForClusterHealthy waits for all nodes in the cluster to be healthy

func (*Waiter) WaitForConditionWithRetry

func (w *Waiter) WaitForConditionWithRetry(ctx context.Context, condition func() (bool, error), description string) error

WaitForConditionWithRetry waits for a condition with exponential backoff retry

func (*Waiter) WaitForContainer

func (w *Waiter) WaitForContainer(ctx context.Context, client *Client, containerID string, status string) error

WaitForContainer waits for a specific container to reach a status

func (*Waiter) WaitForContainerHealthy

func (w *Waiter) WaitForContainerHealthy(ctx context.Context, client *Client, containerID string) error

WaitForContainerHealthy waits for a container to become healthy. TODO: use the actual health_status field once it is added to the proto.

func (*Waiter) WaitForContainerRunning

func (w *Waiter) WaitForContainerRunning(ctx context.Context, client *Client, containerID string) error

WaitForContainerRunning waits for a container to be running

func (*Waiter) WaitForLeaderElection

func (w *Waiter) WaitForLeaderElection(ctx context.Context, cluster *Cluster) error

WaitForLeaderElection waits for a leader to be elected in the cluster

func (*Waiter) WaitForManagerNodes

func (w *Waiter) WaitForManagerNodes(ctx context.Context, client *Client, count int) error

WaitForManagerNodes waits for a specific number of manager nodes

func (*Waiter) WaitForNodeCount

func (w *Waiter) WaitForNodeCount(ctx context.Context, client *Client, count int) error

WaitForNodeCount waits for a specific number of nodes to join the cluster

func (*Waiter) WaitForQuorum

func (w *Waiter) WaitForQuorum(ctx context.Context, cluster *Cluster) error

WaitForQuorum waits for Raft quorum to be established

func (*Waiter) WaitForReplicas

func (w *Waiter) WaitForReplicas(ctx context.Context, client *Client, serviceName string, count int) error

WaitForReplicas waits for a service to have a specific number of running replicas

func (*Waiter) WaitForSecret

func (w *Waiter) WaitForSecret(ctx context.Context, client *Client, name string) error

WaitForSecret waits for a secret to exist

func (*Waiter) WaitForSecretDeleted

func (w *Waiter) WaitForSecretDeleted(ctx context.Context, client *Client, name string) error

WaitForSecretDeleted waits for a secret to be deleted

func (*Waiter) WaitForServiceDeleted

func (w *Waiter) WaitForServiceDeleted(ctx context.Context, client *Client, name string) error

WaitForServiceDeleted waits for a service to be deleted

func (*Waiter) WaitForServiceRunning

func (w *Waiter) WaitForServiceRunning(ctx context.Context, client *Client, name string) error

WaitForServiceRunning waits for a service to have at least one running task

func (*Waiter) WaitForVolume

func (w *Waiter) WaitForVolume(ctx context.Context, client *Client, name string) error

WaitForVolume waits for a volume to exist

func (*Waiter) WaitForVolumeDeleted

func (w *Waiter) WaitForVolumeDeleted(ctx context.Context, client *Client, name string) error

WaitForVolumeDeleted waits for a volume to be deleted

func (*Waiter) WaitForWorkerNodes

func (w *Waiter) WaitForWorkerNodes(ctx context.Context, client *Client, count int) error

WaitForWorkerNodes waits for a specific number of worker nodes

type Worker

type Worker struct {
	// ID is the unique identifier for this worker
	ID string
	// VM is the virtual machine or container running this worker
	VM VM
	// ManagerAddr is the address of the manager this worker connects to
	ManagerAddr string
	// Process is the Warren process (if running locally)
	Process *Process
	// DataDir is the data directory for this worker
	DataDir string
}

Worker represents a worker node in the test cluster
