The Sleeping Barber Problem

The Sleeping Barber is a classic synchronization problem proposed by Edsger Dijkstra in 1965. It elegantly demonstrates resource management, waiting, and signaling in concurrent systems.

The Scenario

A barbershop has:

  • 1 barber
  • 1 barber chair
  • N waiting room chairs

The rules:

  1. If no customers, the barber sleeps
  2. When a customer arrives:
    • If barber is sleeping → wake him up
    • If barber is busy → sit in waiting room (if space available)
    • If waiting room is full → leave
  3. When barber finishes a customer → check waiting room

Real-World Applications

This pattern appears in many systems:

  • Thread pools: Workers sleep when no jobs, wake on new jobs
  • Connection pools: Idle connections vs waiting requests
  • Server request handling: Worker threads and request queues
  • Event loops: Sleep when idle, wake on events
  • Resource pools: Limited resources with waiting clients

Implementation in Go

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

type BarberShop struct {
	waitingRoom    chan int       // Buffered channel = waiting chairs
	barberChair    chan int       // Unbuffered = single chair
	barberSleeping bool
	mu             sync.Mutex
	customersServed int
	customersLeft  int
}

func NewBarberShop(waitingChairs int) *BarberShop {
	return &BarberShop{
		waitingRoom:    make(chan int, waitingChairs),
		barberChair:    make(chan int),
		barberSleeping: true,
	}
}

// Barber runs continuously, serving customers
func (bs *BarberShop) Barber() {
	for {
		fmt.Println("[Barber] 😴 Sleeping... (no customers)")

		// Wait for customer (blocking)
		customerID, ok := <-bs.waitingRoom
		if !ok {
			fmt.Println("[Barber] 🚪 Shop closing, going home")
			return
		}

		bs.mu.Lock()
		bs.barberSleeping = false
		bs.mu.Unlock()

		fmt.Printf("[Barber] 👋 Woke up! Customer %d in chair\n", customerID)

		// Put customer in barber chair
		bs.barberChair <- customerID

		// Cut hair
		fmt.Printf("[Barber] ✂️  Cutting hair for customer %d\n", customerID)
		time.Sleep(time.Duration(200+rand.Intn(300)) * time.Millisecond)

		// Finish
		fmt.Printf("[Barber] ✓ Done with customer %d\n", customerID)
		<-bs.barberChair // Release chair

		bs.mu.Lock()
		bs.customersServed++
		bs.barberSleeping = len(bs.waitingRoom) == 0
		bs.mu.Unlock()
	}
}

// Customer tries to get a haircut
func (bs *BarberShop) Customer(id int) {
	fmt.Printf("[Customer %d] 🚶 Entering shop...\n", id)

	select {
	case bs.waitingRoom <- id:
		// Got a seat in waiting room
		fmt.Printf("[Customer %d] 💺 Waiting (waiting room: %d/%d)\n",
			id, len(bs.waitingRoom), cap(bs.waitingRoom))

		bs.mu.Lock()
		if bs.barberSleeping {
			fmt.Printf("[Customer %d] 🔔 Waking up barber!\n", id)
		}
		bs.mu.Unlock()

		// Wait for barber chair
		<-bs.barberChair

		// Getting haircut
		fmt.Printf("[Customer %d] 💇 Getting haircut...\n", id)

	default:
		// Waiting room full
		fmt.Printf("[Customer %d] 😞 Waiting room full, leaving!\n", id)
		bs.mu.Lock()
		bs.customersLeft++
		bs.mu.Unlock()
	}
}

// Stats returns shop statistics
func (bs *BarberShop) Stats() (served, left int) {
	bs.mu.Lock()
	defer bs.mu.Unlock()
	return bs.customersServed, bs.customersLeft
}

func main() {
	fmt.Println("=== Sleeping Barber Problem ===")
	fmt.Println("Barbershop simulation\n")

	const (
		waitingChairs  = 3
		numCustomers   = 10
		arrivalDelay   = 200 // ms between customer arrivals
	)

	shop := NewBarberShop(waitingChairs)

	// Start barber
	go shop.Barber()

	// Customers arrive over time
	var wg sync.WaitGroup
	for i := 0; i < numCustomers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			time.Sleep(time.Duration(id*arrivalDelay) * time.Millisecond)
			shop.Customer(id)
		}(i)
	}

	// Wait for all customers to finish or leave
	wg.Wait()

	// Give barber time to finish
	time.Sleep(1 * time.Second)

	// Close shop
	close(shop.waitingRoom)
	time.Sleep(500 * time.Millisecond)

	served, left := shop.Stats()
	fmt.Printf("\n📊 Statistics:\n")
	fmt.Printf("   Customers served: %d\n", served)
	fmt.Printf("   Customers left:   %d\n", left)
	fmt.Printf("   Total:           %d\n", served+left)
	fmt.Println("\n✓ Simulation complete!")
}

How It Works

1. Waiting Room as Buffered Channel

waitingRoom: make(chan int, waitingChairs)
  • Capacity = number of waiting chairs
  • Full channel = full waiting room
  • Customers leave if can’t send to channel

2. Barber Chair as Unbuffered Channel

barberChair: make(chan int)
  • Only one customer at a time
  • Synchronizes barber and customer

3. Customer Arrival

select {
case bs.waitingRoom <- id:
    // Got a seat
default:
    // Full, leave
}

Non-blocking send - customer leaves if no space!

4. Barber Waiting

customerID := <-bs.waitingRoom  // Blocks until customer arrives

Blocking receive - barber sleeps until woken!

Advanced: Multiple Barbers

Let’s extend to multiple barbers:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type MultiBarberShop struct {
	waitingRoom     chan int
	barberAvailable chan struct{}
	customersServed atomic.Int64
	customersLeft   atomic.Int64
	numBarbers      int
}

func NewMultiBarberShop(numBarbers, waitingChairs int) *MultiBarberShop {
	return &MultiBarberShop{
		waitingRoom:     make(chan int, waitingChairs),
		barberAvailable: make(chan struct{}, numBarbers),
		numBarbers:      numBarbers,
	}
}

// Barber goroutine
func (mbs *MultiBarberShop) Barber(id int) {
	for {
		fmt.Printf("[Barber %d] 😴 Sleeping...\n", id)

		// Signal availability
		mbs.barberAvailable <- struct{}{}

		// Wait for customer
		customerID, ok := <-mbs.waitingRoom
		if !ok {
			fmt.Printf("[Barber %d] 🚪 Going home\n", id)
			return
		}

		// Take barber off available list
		<-mbs.barberAvailable

		fmt.Printf("[Barber %d] ✂️  Cutting hair for customer %d\n", id, customerID)
		time.Sleep(time.Duration(150+id*50) * time.Millisecond)

		fmt.Printf("[Barber %d] ✓ Done with customer %d\n", id, customerID)
		mbs.customersServed.Add(1)
	}
}

// Customer arrival
func (mbs *MultiBarberShop) Customer(id int) {
	select {
	case mbs.waitingRoom <- id:
		available := len(mbs.barberAvailable)
		fmt.Printf("[Customer %d] 💺 Waiting (barbers available: %d)\n", id, available)

	default:
		fmt.Printf("[Customer %d] 😞 Full, leaving\n", id)
		mbs.customersLeft.Add(1)
	}
}

func (mbs *MultiBarberShop) Stats() (served, left int64) {
	return mbs.customersServed.Load(), mbs.customersLeft.Load()
}

func RunMultiBarberShop() {
	fmt.Println("\n=== Multiple Barbers Variant ===\n")

	const (
		numBarbers    = 3
		waitingChairs = 5
		numCustomers  = 15
	)

	shop := NewMultiBarberShop(numBarbers, waitingChairs)

	// Start barbers
	for i := 0; i < numBarbers; i++ {
		go shop.Barber(i)
	}

	// Customers arrive
	var wg sync.WaitGroup
	for i := 0; i < numCustomers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			time.Sleep(time.Duration(id*100) * time.Millisecond)
			shop.Customer(id)
		}(i)
	}

	wg.Wait()
	time.Sleep(1 * time.Second)

	close(shop.waitingRoom)
	time.Sleep(500 * time.Millisecond)

	served, left := shop.Stats()
	fmt.Printf("\n📊 Multi-Barber Stats:\n")
	fmt.Printf("   Served: %d, Left: %d, Total: %d\n", served, left, served+left)
}

func main() {
	RunMultiBarberShop()
}

Real-World Example: HTTP Server Request Handler

package main

import (
	"fmt"
	"net/http"
	"sync"
	"sync/atomic"
	"time"
)

type RequestHandler struct {
	workers     int
	queue       chan *http.Request
	processed   atomic.Int64
	rejected    atomic.Int64
	workerPool  sync.WaitGroup
}

func NewRequestHandler(workers, queueSize int) *RequestHandler {
	rh := &RequestHandler{
		workers: workers,
		queue:   make(chan *http.Request, queueSize),
	}

	// Start worker pool (barbers)
	for i := 0; i < workers; i++ {
		rh.workerPool.Add(1)
		go rh.worker(i)
	}

	return rh
}

// Worker processes requests (like a barber)
func (rh *RequestHandler) worker(id int) {
	defer rh.workerPool.Done()

	for req := range rh.queue {
		fmt.Printf("[Worker %d] Processing request: %s\n", id, req.URL.Path)

		// Simulate processing
		time.Sleep(100 * time.Millisecond)

		rh.processed.Add(1)
	}
}

// ServeHTTP tries to enqueue request (like a customer arriving)
func (rh *RequestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	select {
	case rh.queue <- r:
		w.WriteHeader(http.StatusAccepted)
		fmt.Fprintf(w, "Request queued (queue: %d/%d)\n",
			len(rh.queue), cap(rh.queue))

	default:
		// Queue full (waiting room full)
		rh.rejected.Add(1)
		w.WriteHeader(http.StatusServiceUnavailable)
		fmt.Fprintf(w, "Server busy, try again later\n")
	}
}

// Shutdown gracefully stops the handler
func (rh *RequestHandler) Shutdown() {
	close(rh.queue)
	rh.workerPool.Wait()

	fmt.Printf("\n📊 Handler Stats:\n")
	fmt.Printf("   Processed: %d\n", rh.processed.Load())
	fmt.Printf("   Rejected:  %d\n", rh.rejected.Load())
}

Key Go Patterns

1. Buffered Channel = Waiting Room

waitingRoom := make(chan Customer, capacity)

Perfect metaphor for bounded queues!

2. Non-Blocking Send with Select

select {
case queue <- customer:
    // Got in
default:
    // Full, reject
}

3. Blocking Receive = Sleep/Wake

customer := <-queue  // Sleep until customer arrives

Go’s channels naturally model this!

4. Channel Close = Shutdown

close(waitingRoom)  // Signal workers to finish

Performance Considerations

Optimal Parameters

  • Waiting room size: Based on arrival rate vs service rate
  • Number of barbers: Match to CPU cores for CPU-bound work
  • Service time: Should be » arrival time to prevent filling

Monitoring

Track these metrics:

  • Queue depth over time
  • Worker utilization
  • Rejection rate
  • Average wait time

Variations

1. Priority Customers (VIP Queue)

type PriorityBarberShop struct {
	vipQueue    chan int
	normalQueue chan int
}

func (pbs *PriorityBarberShop) Barber() {
	for {
		select {
		case customer := <-pbs.vipQueue:
			// Serve VIP first
		case customer := <-pbs.normalQueue:
			// Serve normal customer
		}
	}
}

2. Timeout-Based Waiting

func (bs *BarberShop) CustomerWithTimeout(id int, timeout time.Duration) {
	select {
	case bs.waitingRoom <- id:
		// Got seat
	case <-time.After(timeout):
		// Waited too long, leave
	}
}

Advantages of This Pattern

Resource bounded - Can’t overflow ✓ Self-regulating - Natural backpressure ✓ Clear semantics - Easy to understand ✓ Graceful degradation - Rejects cleanly when full

When to Use

Use when:

  • You have limited workers/resources
  • Work items can be queued
  • You can reject when overloaded
  • Workers can sleep when idle

Avoid when:

  • All requests must be processed
  • Workers should always be busy
  • Unbounded queue is acceptable
  • Rejection is not an option

Try It Yourself

  1. Tune parameters - Find optimal waiting room size
  2. Add priorities - VIP vs normal customers
  3. Monitor metrics - Track utilization
  4. Stress test - High arrival rate
  5. Add timeouts - Customers leave if wait too long

This is part 10 of “Golang Experiments: Classic Concurrency Problems”