The Fairness Problem
We’ve seen two extremes:
- Readers preference: Writers can starve
- Writers preference: Readers can starve
The fair solution ensures no starvation - everyone gets served in the order they arrive.
The Solution: FIFO Ordering
Key idea: Use a queue to serve requests in arrival order. This prevents both reader and writer starvation.
Arrival Order: R1, R2, W1, R3, R4, W2
Execution: R1+R2 (batch) → W1 (exclusive) → R3+R4 (batch) → W2 (exclusive)
Consecutive readers can still batch together, but writers don’t get skipped!
Implementation
package main
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// FairRWLock implements FIFO readers-writers
type FairRWLock struct {
queueMu sync.Mutex // Protects the queue
dataLock sync.RWMutex // Actual data protection
queue []chan struct{} // Queue of waiting goroutines
activeReaders int
requestID atomic.Int64 // For tracking order
}
func NewFairRWLock() *FairRWLock {
return &FairRWLock{
queue: make([]chan struct{}, 0),
}
}
// RLock with fair ordering
func (f *FairRWLock) RLock() int64 {
requestID := f.requestID.Add(1)
f.queueMu.Lock()
// Check if we can proceed immediately
if len(f.queue) == 0 {
f.activeReaders++
f.queueMu.Unlock()
f.dataLock.RLock()
return requestID
}
// Need to wait our turn
wait := make(chan struct{})
f.queue = append(f.queue, wait)
f.queueMu.Unlock()
// Wait for our turn
<-wait
// The goroutine that woke us already removed us from the queue;
// register as an active reader before taking the read lock. Without
// this, RUnlock's count drifts negative and later waiters are never woken.
f.queueMu.Lock()
f.activeReaders++
f.queueMu.Unlock()
f.dataLock.RLock()
return requestID
}
func (f *FairRWLock) RUnlock() {
f.dataLock.RUnlock()
f.queueMu.Lock()
defer f.queueMu.Unlock()
f.activeReaders--
if f.activeReaders == 0 && len(f.queue) > 0 {
// Wake next in queue
close(f.queue[0])
f.queue = f.queue[1:]
}
}
func (f *FairRWLock) Lock() int64 {
requestID := f.requestID.Add(1)
f.queueMu.Lock()
// Check if we can proceed
if len(f.queue) == 0 && f.activeReaders == 0 {
f.queueMu.Unlock()
f.dataLock.Lock()
return requestID
}
// Wait in queue
wait := make(chan struct{})
f.queue = append(f.queue, wait)
f.queueMu.Unlock()
<-wait
f.dataLock.Lock()
return requestID
}
func (f *FairRWLock) Unlock() {
f.dataLock.Unlock()
f.queueMu.Lock()
defer f.queueMu.Unlock()
if len(f.queue) > 0 {
close(f.queue[0])
f.queue = f.queue[1:]
}
}
// SharedData with fair locking
type SharedData struct {
value int
lock *FairRWLock
readOps atomic.Int64
writeOps atomic.Int64
}
func NewSharedData() *SharedData {
return &SharedData{
lock: NewFairRWLock(),
}
}
func (d *SharedData) Read(readerID int) int {
requestID := d.lock.RLock()
defer d.lock.RUnlock()
ops := d.readOps.Add(1)
fmt.Printf("[Reader %d] 🔍 Reading value=%d (request #%d, read ops: %d)\n",
readerID, d.value, requestID, ops)
time.Sleep(50 * time.Millisecond)
return d.value
}
func (d *SharedData) Write(writerID int, newValue int) {
fmt.Printf("[Writer %d] ⏳ Requesting write lock...\n", writerID)
start := time.Now()
requestID := d.lock.Lock()
waitTime := time.Since(start)
defer d.lock.Unlock()
ops := d.writeOps.Add(1)
oldValue := d.value
d.value = newValue
fmt.Printf("[Writer %d] ✏️ Got lock after %v, writing %d→%d (request #%d, write ops: %d)\n",
writerID, waitTime, oldValue, newValue, requestID, ops)
time.Sleep(100 * time.Millisecond)
}
func main() {
fmt.Println("=== Readers-Writers: Fair Solution ===")
fmt.Println("FIFO ordering prevents starvation\n")
data := NewSharedData()
numReaders := 4
numWriters := 3
var wg sync.WaitGroup
// Start readers and writers interleaved
for i := 0; i < 5; i++ {
// Reader
if i < numReaders {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 3; j++ {
time.Sleep(time.Duration(50*id) * time.Millisecond)
data.Read(id)
}
fmt.Printf("[Reader %d] Finished\n", id)
}(i)
}
// Writer
if i < numWriters {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 2; j++ {
time.Sleep(time.Duration(70*id) * time.Millisecond)
data.Write(id, id*100+j)
}
fmt.Printf("[Writer %d] Finished\n", id)
}(i)
}
}
wg.Wait()
fmt.Printf("\n✓ Complete! Final value: %d\n", data.value)
fmt.Printf("Read ops: %d, Write ops: %d\n",
data.readOps.Load(), data.writeOps.Load())
fmt.Println("\n✓ Notice: Operations completed in fair FIFO order!")
}
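As a quick sanity check that writers cannot be starved, here is a small sketch of my own (not part of the original program; it assumes it is compiled in the same package as FairRWLock above): a pool of readers hammers the lock continuously while a single writer records its worst observed wait, which should stay bounded under FIFO ordering.

// starvationCheck is a hypothetical helper: readers loop until told to
// stop, while one writer measures its worst wait. Call it from main to try it.
func starvationCheck() {
    lock := NewFairRWLock()
    stop := make(chan struct{})
    var wg sync.WaitGroup

    // Constant stream of readers.
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                select {
                case <-stop:
                    return
                default:
                }
                lock.RLock()
                time.Sleep(time.Millisecond)
                lock.RUnlock()
            }
        }()
    }

    // One writer: record the worst wait over a handful of writes.
    var worst time.Duration
    for i := 0; i < 10; i++ {
        start := time.Now()
        lock.Lock()
        if w := time.Since(start); w > worst {
            worst = w
        }
        lock.Unlock()
        time.Sleep(5 * time.Millisecond)
    }

    close(stop)
    wg.Wait()
    fmt.Printf("Worst writer wait: %v (should stay bounded)\n", worst)
}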
Simplified Fair Lock with Ticket System
Here’s a simpler implementation using a ticket counter:
package main
import (
"sync"
"sync/atomic"
)
// TicketRWLock uses a ticket system for fairness
type TicketRWLock struct {
nextTicket atomic.Int64
nowServing atomic.Int64
readersActive atomic.Int32
writerActive atomic.Bool
mu sync.Mutex
cond *sync.Cond
}
func NewTicketRWLock() *TicketRWLock {
lock := &TicketRWLock{}
lock.cond = sync.NewCond(&lock.mu)
return lock
}
func (t *TicketRWLock) RLock() {
myTicket := t.nextTicket.Add(1) - 1
t.mu.Lock()
defer t.mu.Unlock()
// Wait for our ticket number
for t.nowServing.Load() != myTicket || t.writerActive.Load() {
t.cond.Wait()
}
t.readersActive.Add(1)
t.nowServing.Add(1)
t.cond.Broadcast() // Allow other readers with next ticket
}
func (t *TicketRWLock) RUnlock() {
// Update state and broadcast under mu, so the change can't slip
// between a waiter's condition check and its cond.Wait (missed wakeup).
t.mu.Lock()
defer t.mu.Unlock()
if t.readersActive.Add(-1) == 0 {
t.cond.Broadcast()
}
}
func (t *TicketRWLock) Lock() {
myTicket := t.nextTicket.Add(1) - 1
t.mu.Lock()
defer t.mu.Unlock()
// Wait for ticket and no active readers/writers
for t.nowServing.Load() != myTicket ||
t.readersActive.Load() > 0 ||
t.writerActive.Load() {
t.cond.Wait()
}
t.writerActive.Store(true)
t.nowServing.Add(1)
}
func (t *TicketRWLock) Unlock() {
// Same as RUnlock: change state and broadcast under mu to avoid
// missed wakeups.
t.mu.Lock()
defer t.mu.Unlock()
t.writerActive.Store(false)
t.cond.Broadcast()
}
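A minimal usage sketch of my own (it assumes it is compiled in the same package as TicketRWLock above, with fmt and time added to the imports): a few readers and one writer take tickets and are served in arrival order, with consecutive readers overlapping.

func main() {
    lock := NewTicketRWLock()
    value := 0
    var wg sync.WaitGroup

    // Three readers take tickets first and can read concurrently,
    // since each one advances nowServing as soon as it is admitted.
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            lock.RLock()
            fmt.Printf("[Reader %d] sees value=%d\n", id, value)
            time.Sleep(20 * time.Millisecond)
            lock.RUnlock()
        }(i)
    }

    // The writer arrives a little later and waits for its ticket.
    wg.Add(1)
    go func() {
        defer wg.Done()
        time.Sleep(10 * time.Millisecond)
        lock.Lock()
        value = 42
        fmt.Println("[Writer] wrote value=42")
        lock.Unlock()
    }()

    wg.Wait()
    fmt.Println("Final value:", value)
}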
Demonstrating Fairness
package main
import (
"fmt"
"sync"
"time"
)
func DemonstrateFairness() {
fmt.Println("=== Demonstrating Fair FIFO Ordering ===\n")
data := NewSharedData()
var wg sync.WaitGroup
// Start readers
for i := 0; i < 3; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
time.Sleep(time.Duration(id*20) * time.Millisecond)
fmt.Printf("[Reader %d] Arriving at t=%v\n", id, time.Now().Format("15:04:05.000"))
data.Read(id)
}(i)
}
// Start writer in the middle
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(30 * time.Millisecond)
fmt.Printf("[Writer 0] Arriving at t=%v\n", time.Now().Format("15:04:05.000"))
data.Write(0, 100)
}()
// More readers after writer
for i := 3; i < 5; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
time.Sleep(time.Duration(id*20) * time.Millisecond)
fmt.Printf("[Reader %d] Arriving at t=%v\n", id, time.Now().Format("15:04:05.000"))
data.Read(id)
}(i)
}
wg.Wait()
fmt.Println("\n✓ All operations completed in FIFO order!")
}
func main() {
DemonstrateFairness()
}
Real-World Example: Document Editing System
package main
import (
"fmt"
"sync"
"time"
)
type Document struct {
content string
version int
lock *FairRWLock
editLog []string
logMu sync.Mutex
}
func NewDocument(initialContent string) *Document {
return &Document{
content: initialContent,
version: 1,
lock: NewFairRWLock(),
editLog: make([]string, 0),
}
}
// Read views the document (can be concurrent with other reads)
func (d *Document) Read(userID int) string {
requestID := d.lock.RLock()
defer d.lock.RUnlock()
d.logEdit(fmt.Sprintf("User %d read v%d (request #%d)", userID, d.version, requestID))
time.Sleep(30 * time.Millisecond) // Simulate read time
return d.content
}
// Edit modifies the document (exclusive access, but fair!)
func (d *Document) Edit(userID int, newContent string) {
fmt.Printf("[User %d] Requesting edit...\n", userID)
start := time.Now()
requestID := d.lock.Lock()
waitTime := time.Since(start)
defer d.lock.Unlock()
d.version++
d.content = newContent
d.logEdit(fmt.Sprintf("User %d edited to v%d (waited %v, request #%d)",
userID, d.version, waitTime, requestID))
time.Sleep(80 * time.Millisecond) // Simulate edit time
fmt.Printf("[User %d] Edit complete (v%d)\n", userID, d.version)
}
func (d *Document) logEdit(msg string) {
d.logMu.Lock()
defer d.logMu.Unlock()
d.editLog = append(d.editLog, fmt.Sprintf("%s - %s",
time.Now().Format("15:04:05.000"), msg))
}
func (d *Document) PrintLog() {
d.logMu.Lock()
defer d.logMu.Unlock()
fmt.Println("\n📋 Edit Log:")
for _, entry := range d.editLog {
fmt.Println(" ", entry)
}
}
func main() {
doc := NewDocument("Initial content")
var wg sync.WaitGroup
// Multiple users reading and editing
// User 0: reads
for i := 0; i < 2; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
time.Sleep(time.Duration(id*25) * time.Millisecond)
content := doc.Read(0)
fmt.Printf("[User 0] Read: %q\n", content)
}(i)
}
// User 1: edits
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(30 * time.Millisecond)
doc.Edit(1, "User 1's changes")
}()
// User 2: reads
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(50 * time.Millisecond)
content := doc.Read(2)
fmt.Printf("[User 2] Read: %q\n", content)
}()
// User 3: edits
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(60 * time.Millisecond)
doc.Edit(3, "User 3's changes")
}()
wg.Wait()
doc.PrintLog()
fmt.Println("\n✓ All edits completed fairly!")
}
Performance Characteristics
Latency
- Average case: Slightly higher than unfair solutions
- Worst case: Much better - no starvation!
- Variance: Lower - more predictable
Throughput
- Read throughput: Lower than readers preference
- Write throughput: Lower than writers preference
- Overall: Balanced, predictable
Comparison Table
| Solution | Read Throughput | Write Throughput | Fairness | Complexity |
|---|---|---|---|---|
| Readers Pref | ⭐⭐⭐⭐⭐ | ⭐ | ⭐ | ⭐⭐ |
| Writers Pref | ⭐⭐ | ⭐⭐⭐⭐⭐ | ⭐⭐ | ⭐⭐⭐ |
| Fair FIFO | ⭐⭐⭐ | ⭐⭐⭐ | ⭐⭐⭐⭐⭐ | ⭐⭐⭐⭐ |
Advantages of Fair Solution
✓ No starvation - Everyone gets served eventually
✓ Predictable latency - FIFO ordering
✓ Fair to all - No preferential treatment
✓ Better worst-case - Bounded wait times
Disadvantages
✗ More complex - Harder to implement correctly
✗ Lower throughput - Can’t optimize for common case
✗ Higher overhead - Queue management cost
✗ Not always needed - Many workloads don’t need fairness
When to Use Fair Solution
✓ Use when:
- Starvation is unacceptable
- All operations are equally important
- Predictable latency matters
- Workload is mixed (reads and writes)
✗ Use optimized solutions when:
- Workload is heavily skewed (90%+ reads or writes)
- Throughput is critical
- Starvation is acceptable
- Simpler solution works
Implementation Tips
- Use tickets - Simpler than queue management
- Batch readers - Consecutive readers can share
- Monitor fairness - Track wait time variance (see the monitoring sketch after this list)
- Consider timeouts - Prevent infinite waits
- Profile - Measure actual performance
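As a sketch of tip 3, here is a hypothetical wrapper of my own (the type and method names are not from the original code) that records how long every acquisition waited. It wraps the standard sync.RWMutex here, but the same idea applies to FairRWLock or TicketRWLock.

package main

import (
    "fmt"
    "sync"
    "time"
)

// MonitoredRWMutex records how long each Lock/RLock call waited
// before acquiring, as a rough starvation/fairness signal.
type MonitoredRWMutex struct {
    mu      sync.RWMutex
    statsMu sync.Mutex
    waits   []time.Duration
}

func (m *MonitoredRWMutex) record(d time.Duration) {
    m.statsMu.Lock()
    m.waits = append(m.waits, d)
    m.statsMu.Unlock()
}

func (m *MonitoredRWMutex) Lock()    { t := time.Now(); m.mu.Lock(); m.record(time.Since(t)) }
func (m *MonitoredRWMutex) Unlock()  { m.mu.Unlock() }
func (m *MonitoredRWMutex) RLock()   { t := time.Now(); m.mu.RLock(); m.record(time.Since(t)) }
func (m *MonitoredRWMutex) RUnlock() { m.mu.RUnlock() }

// MaxWait reports the worst wait observed so far.
func (m *MonitoredRWMutex) MaxWait() time.Duration {
    m.statsMu.Lock()
    defer m.statsMu.Unlock()
    var worst time.Duration
    for _, d := range m.waits {
        if d > worst {
            worst = d
        }
    }
    return worst
}

func main() {
    var m MonitoredRWMutex
    var wg sync.WaitGroup

    // Four readers hold the lock briefly in a loop.
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for j := 0; j < 100; j++ {
                m.RLock()
                time.Sleep(time.Millisecond)
                m.RUnlock()
            }
        }()
    }

    // A writer competes with them; its waits are recorded too.
    for i := 0; i < 50; i++ {
        m.Lock()
        m.Unlock()
    }

    wg.Wait()
    fmt.Println("Worst observed wait:", m.MaxWait())
}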
Try It Yourself
- Benchmark all three - Compare readers pref, writers pref, fair (a benchmark sketch follows this list)
- Add timeout detection - Alert on long waits
- Implement batching - Optimize consecutive same-type requests
- Test starvation - Verify no goroutine waits forever
- Add metrics - Track queue depth, wait times
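For the first exercise, here is a minimal benchmark sketch of my own (it assumes a _test.go file in the same package as FairRWLock above; the 10% write ratio is arbitrary) comparing the standard sync.RWMutex with the fair lock.

package main

import (
    "sync"
    "testing"
)

// benchmarkMixed runs a mixed read/write workload (about 10% writes)
// against whichever lock operations are passed in.
func benchmarkMixed(b *testing.B, rlock, runlock, lock, unlock func()) {
    b.RunParallel(func(pb *testing.PB) {
        i := 0
        for pb.Next() {
            if i%10 == 0 {
                lock()
                unlock()
            } else {
                rlock()
                runlock()
            }
            i++
        }
    })
}

func BenchmarkStdRWMutex(b *testing.B) {
    var mu sync.RWMutex
    benchmarkMixed(b, mu.RLock, mu.RUnlock, mu.Lock, mu.Unlock)
}

func BenchmarkFairRWLock(b *testing.B) {
    l := NewFairRWLock()
    benchmarkMixed(b,
        func() { l.RLock() }, l.RUnlock,
        func() { l.Lock() }, l.Unlock)
}

Run it with go test -bench=. and expect the fair lock to trade some raw throughput for bounded, more predictable waits.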
This is part 9 of “Golang Experiments: Classic Concurrency Problems”