The Writer Starvation Problem
In the readers preference solution, we saw how continuous readers can starve writers. The writers preference solution fixes this by giving writers priority.
Key idea: When a writer is waiting, no new readers are allowed to start, even if other readers are currently reading.
Real-World Need for Writer Priority
Some systems need writer priority:
- Databases with critical updates (billing, inventory)
- Real-time systems (sensor updates must not be delayed)
- Logging systems (log writes can’t be delayed)
- Configuration systems (updates must propagate quickly)
- Leader election (state changes need priority)
The Solution
Go’s sync.RWMutex already blocks new readers once a writer is waiting for the lock, so it avoids writer starvation in practice. To see exactly how that policy works, let’s implement writer preference ourselves using explicit synchronization:
┌──────────────────────────────┐
│ Writer Waiting?              │
│   Yes → Block new readers    │
│   No  → Allow readers        │
└──────────────────────────────┘
Implementation with Writer Priority
package main
import (
"fmt"
"sync"
"sync/atomic"
"time"
)
// WriterPriorityRWLock implements readers-writers with writer priority
type WriterPriorityRWLock struct {
readerMu sync.Mutex // Protects reader count
writerMu sync.Mutex // Exclusive lock for writers
noReaders *sync.Cond // Signal when all readers done
noWriters *sync.Cond // Signal when writer done
readers int // Active reader count
writers int // Active writer count (0 or 1)
waitingWriters int // Writers waiting
}
func NewWriterPriorityRWLock() *WriterPriorityRWLock {
lock := &WriterPriorityRWLock{}
lock.noReaders = sync.NewCond(&lock.readerMu)
lock.noWriters = sync.NewCond(&lock.readerMu)
return lock
}
// RLock acquires a read lock (blocks if writer is waiting/active)
func (rw *WriterPriorityRWLock) RLock() {
rw.readerMu.Lock()
defer rw.readerMu.Unlock()
// Wait if any writers are active or waiting
for rw.writers > 0 || rw.waitingWriters > 0 {
rw.noWriters.Wait()
}
rw.readers++
}
// RUnlock releases a read lock
func (rw *WriterPriorityRWLock) RUnlock() {
rw.readerMu.Lock()
defer rw.readerMu.Unlock()
rw.readers--
// If no more readers, wake the writers waiting for the reader count to
// reach zero. Broadcast rather than Signal: more than one writer may be
// parked on noReaders, and waking only one would leave the rest stuck.
if rw.readers == 0 {
rw.noReaders.Broadcast()
}
}
// Lock acquires a write lock (exclusive access)
func (rw *WriterPriorityRWLock) Lock() {
rw.readerMu.Lock()
rw.waitingWriters++
// Wait for all readers to finish
for rw.readers > 0 {
rw.noReaders.Wait()
}
rw.readerMu.Unlock()
// Get exclusive write lock
rw.writerMu.Lock()
rw.readerMu.Lock()
rw.waitingWriters--
rw.writers++
rw.readerMu.Unlock()
}
// Unlock releases a write lock
func (rw *WriterPriorityRWLock) Unlock() {
rw.readerMu.Lock()
defer rw.readerMu.Unlock()
rw.writers--
rw.writerMu.Unlock()
// Wake up readers that were blocked while a writer was active or waiting
// (writers queue on writerMu/noReaders, not on noWriters)
rw.noWriters.Broadcast()
}
// SharedData with writer priority
type SharedData struct {
value int
lock *WriterPriorityRWLock
readOps atomic.Int64
writeOps atomic.Int64
}
func NewSharedData() *SharedData {
return &SharedData{
lock: NewWriterPriorityRWLock(),
}
}
func (d *SharedData) Read(readerID int) int {
d.lock.RLock()
defer d.lock.RUnlock()
ops := d.readOps.Add(1)
fmt.Printf("[Reader %d] 🔍 Reading value=%d (read ops: %d)\n",
readerID, d.value, ops)
time.Sleep(50 * time.Millisecond)
return d.value
}
func (d *SharedData) Write(writerID int, newValue int) {
fmt.Printf("[Writer %d] ⏳ Requesting write lock...\n", writerID)
start := time.Now()
d.lock.Lock()
waitTime := time.Since(start)
defer d.lock.Unlock()
ops := d.writeOps.Add(1)
oldValue := d.value
d.value = newValue
fmt.Printf("[Writer %d] ✏️ Got lock after %v, writing %d→%d (write ops: %d)\n",
writerID, waitTime, oldValue, newValue, ops)
time.Sleep(100 * time.Millisecond)
}
func main() {
fmt.Println("=== Readers-Writers: Writers Preference ===")
fmt.Println("Writers get priority over readers\n")
data := NewSharedData()
numReaders := 5
numWriters := 3
var wg sync.WaitGroup
// Start continuous readers
for i := 0; i < numReaders; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 4; j++ {
time.Sleep(80 * time.Millisecond)
data.Read(id)
}
fmt.Printf("[Reader %d] Finished\n", id)
}(i)
}
// Start writers (they should get priority)
for i := 0; i < numWriters; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 3; j++ {
time.Sleep(150 * time.Millisecond)
data.Write(id, id*100+j)
}
fmt.Printf("[Writer %d] Finished\n", id)
}(i)
}
wg.Wait()
fmt.Printf("\n✓ Complete! Final value: %d\n", data.value)
fmt.Printf("Read ops: %d, Write ops: %d\n",
data.readOps.Load(), data.writeOps.Load())
}
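Before building on a hand-rolled lock, it is worth checking it under Go’s race detector. Below is a minimal test sketch, assuming it lives in a _test.go file in the same package as WriterPriorityRWLock; the test name and the constants are illustrative only.

package main

import (
	"sync"
	"testing"
)

// TestWriterPriorityRWLock_Counter checks basic mutual exclusion: concurrent
// writers incrementing a plain int must not lose updates, and concurrent
// readers must not trip the race detector. Run with: go test -race
func TestWriterPriorityRWLock_Counter(t *testing.T) {
	lock := NewWriterPriorityRWLock()
	counter := 0

	const writers, readers, iterations = 8, 8, 1000

	var wg sync.WaitGroup
	for i := 0; i < writers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				lock.Lock()
				counter++
				lock.Unlock()
			}
		}()
	}
	for i := 0; i < readers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < iterations; j++ {
				lock.RLock()
				_ = counter
				lock.RUnlock()
			}
		}()
	}
	wg.Wait()

	if counter != writers*iterations {
		t.Fatalf("lost updates: want %d, got %d", writers*iterations, counter)
	}
}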
Demonstrating Writer Priority
Let’s prove writers block new readers:
package main
import (
"fmt"
"sync"
"time"
)
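// Note: this listing reuses SharedData, NewSharedData, and their Read/Write
// methods from the previous program.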
func DemonstrateWriterPriority() {
fmt.Println("=== Demonstrating Writer Priority ===\n")
data := NewSharedData()
var wg sync.WaitGroup
// Start initial readers
for i := 0; i < 3; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
fmt.Printf("[Reader %d] 🔍 Reading...\n", id)
data.Read(id)
}(i)
}
// Let readers start
time.Sleep(50 * time.Millisecond)
// Now a writer arrives
wg.Add(1)
go func() {
defer wg.Done()
fmt.Println("[Writer 0] 📝 Arriving (should block new readers)")
data.Write(0, 100)
}()
// Wait a bit for writer to start waiting
time.Sleep(50 * time.Millisecond)
// Try to start new readers - they should be blocked!
for i := 3; i < 6; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
fmt.Printf("[Reader %d] 🚫 Blocked by waiting writer\n", id)
data.Read(id)
}(i)
}
wg.Wait()
fmt.Println("\n✓ Notice how new readers waited for the writer!")
}
func main() {
DemonstrateWriterPriority()
}
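As noted earlier, the standard library’s sync.RWMutex applies the same rule: once a Lock call is blocked, new RLock calls wait behind it. Here is a small standalone sketch you can run to see the waiting writer go ahead of the second reader (the sleeps only force the interleaving for the demo):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var mu sync.RWMutex

	mu.RLock() // an active reader holds the lock

	go func() {
		mu.Lock() // writer arrives and waits for the active reader
		fmt.Println("writer acquired the lock")
		time.Sleep(100 * time.Millisecond)
		mu.Unlock()
	}()

	time.Sleep(50 * time.Millisecond) // let the writer start waiting

	done := make(chan struct{})
	go func() {
		mu.RLock() // new reader: blocked until the waiting writer is done
		fmt.Println("new reader acquired the lock")
		mu.RUnlock()
		close(done)
	}()

	time.Sleep(50 * time.Millisecond)
	mu.RUnlock() // first reader leaves; the waiting writer goes first
	<-done
}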
Alternative: Single Condition Variable Implementation
Another approach keeps all of the state behind one mutex and one condition variable:
package main
import (
"sync"
)
// PriorityRWLock tracks active and waiting counts behind a single mutex and condition variable
type PriorityRWLock struct {
mu sync.Mutex
cond *sync.Cond
activeReaders int
activeWriters int
waitingWriters int
}
func NewPriorityRWLock() *PriorityRWLock {
lock := &PriorityRWLock{}
lock.cond = sync.NewCond(&lock.mu)
return lock
}
func (l *PriorityRWLock) RLock() {
l.mu.Lock()
defer l.mu.Unlock()
// Readers wait if writer is active OR writers are waiting
for l.activeWriters > 0 || l.waitingWriters > 0 {
l.cond.Wait()
}
l.activeReaders++
}
func (l *PriorityRWLock) RUnlock() {
l.mu.Lock()
defer l.mu.Unlock()
l.activeReaders--
if l.activeReaders == 0 {
// Last reader out: wake everyone. Waiting writers proceed; any readers
// that wake re-check and keep waiting while writers are queued.
l.cond.Broadcast()
}
}
func (l *PriorityRWLock) Lock() {
l.mu.Lock()
defer l.mu.Unlock()
l.waitingWriters++
// Wait for active readers and writers to finish
for l.activeReaders > 0 || l.activeWriters > 0 {
l.cond.Wait()
}
l.waitingWriters--
l.activeWriters++
}
func (l *PriorityRWLock) Unlock() {
l.mu.Lock()
defer l.mu.Unlock()
l.activeWriters--
// Wake everyone - writers will get priority
l.cond.Broadcast()
}
Performance Implications
Reader Latency Increases
With writer priority:
- Readers: May wait longer (blocked by waiting writers)
- Writers: Lower latency, no starvation
- Throughput: Lower read throughput, higher write throughput
Use Case Comparison
| Workload | Readers Pref | Writers Pref |
|---|---|---|
| 90% reads | ✓ Optimal | ✗ Slower |
| 50/50 | ~ Good | ~ Good |
| 10% reads | ✗ Writer starvation | ✓ Optimal |
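The right choice depends on your actual read/write mix, so it pays to measure. Here is a rough benchmark sketch; the rwLocker interface and benchmarkMixed helper are names introduced here for illustration, and the file is assumed to sit in the same package as both lock implementations:

package main

import (
	"sync"
	"testing"
)

// rwLocker is satisfied by *WriterPriorityRWLock, *PriorityRWLock,
// and *sync.RWMutex alike.
type rwLocker interface {
	Lock()
	Unlock()
	RLock()
	RUnlock()
}

// benchmarkMixed runs a mixed workload: roughly one write per ten reads.
func benchmarkMixed(b *testing.B, l rwLocker) {
	var value int
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for pb.Next() {
			if i%10 == 0 {
				l.Lock()
				value++
				l.Unlock()
			} else {
				l.RLock()
				_ = value
				l.RUnlock()
			}
			i++
		}
	})
}

func BenchmarkWriterPriorityLock(b *testing.B) { benchmarkMixed(b, NewWriterPriorityRWLock()) }
func BenchmarkSingleCondLock(b *testing.B)     { benchmarkMixed(b, NewPriorityRWLock()) }
func BenchmarkStdRWMutex(b *testing.B)         { benchmarkMixed(b, &sync.RWMutex{}) }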
Real-World Example: Configuration Manager
package main
import (
"fmt"
"sync"
"time"
)
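// Note: this listing reuses WriterPriorityRWLock and NewWriterPriorityRWLock
// from the first program.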
type Config struct {
settings map[string]string
version int
lock *WriterPriorityRWLock
}
func NewConfig() *Config {
return &Config{
settings: make(map[string]string),
lock: NewWriterPriorityRWLock(),
}
}
// Get reads a config value (can be concurrent)
func (c *Config) Get(key string) (string, bool) {
c.lock.RLock()
defer c.lock.RUnlock()
val, ok := c.settings[key]
return val, ok
}
// Reload updates entire config (gets priority!)
func (c *Config) Reload(newSettings map[string]string) {
fmt.Println("📥 Config reload requested (will block new readers)")
c.lock.Lock()
defer c.lock.Unlock()
fmt.Println("📝 Reloading config...")
c.settings = newSettings
c.version++
// Simulate reload time
time.Sleep(200 * time.Millisecond)
fmt.Printf("✓ Config reloaded (version %d)\n", c.version)
}
// Set updates a single key (gets priority!)
func (c *Config) Set(key, value string) {
c.lock.Lock()
defer c.lock.Unlock()
c.settings[key] = value
c.version++
}
func main() {
config := NewConfig()
config.Set("api_url", "https://api.example.com")
config.Set("timeout", "30s")
var wg sync.WaitGroup
// Many readers
for i := 0; i < 10; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
for j := 0; j < 5; j++ {
val, _ := config.Get("api_url")
fmt.Printf("[Worker %d] Using API: %s\n", id, val)
time.Sleep(50 * time.Millisecond)
}
}(i)
}
// Critical config update (should not starve!)
time.Sleep(100 * time.Millisecond)
wg.Add(1)
go func() {
defer wg.Done()
newSettings := map[string]string{
"api_url": "https://api-v2.example.com",
"timeout": "60s",
}
config.Reload(newSettings)
}()
wg.Wait()
}
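One practical tip for this pattern: because a waiting writer holds back all new readers, keep read critical sections as short as possible, for example by copying data out under the read lock instead of iterating while holding it. A sketch of a hypothetical GetAll helper for the Config type above:

// GetAll returns a copy of the current settings so callers can iterate or
// log them without holding the read lock. (Hypothetical helper, not part of
// the original example.)
func (c *Config) GetAll() map[string]string {
	c.lock.RLock()
	defer c.lock.RUnlock()

	out := make(map[string]string, len(c.settings))
	for k, v := range c.settings {
		out[k] = v
	}
	return out
}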
Advantages of Writers Preference
✓ No writer starvation - Writers always make progress
✓ Bounded write latency - Predictable write times
✓ Good for write-critical systems - Updates aren’t delayed
✓ Fair to writers - They don’t wait indefinitely
Disadvantages
✗ Readers can starve - Continuous writes block readers
✗ Lower read throughput - Waiting writers block new readers
✗ More complex - More moving parts than sync.RWMutex
✗ Higher read latency - Readers wait behind writers
When to Use Writer Priority
✓ Use when:
- Writes are critical and time-sensitive
- Writer starvation is unacceptable
- Write frequency is moderate to high
- Read latency can tolerate some delays
✗ Use reader priority when:
- Reads vastly outnumber writes (90%+)
- Read latency is critical
- Writes are infrequent
- Eventual consistency is acceptable
Next Up: Fair Solution
Neither readers nor writers should starve! In the next article, we’ll implement the Fair Solution where everyone gets a turn in order.
Try It Yourself
- Compare latencies - Measure reader vs writer wait times
- Stress test - Many writers, see reader impact
- Add timeouts - Detect starvation
- Benchmark - Compare both approaches
- Add metrics - Track lock hold times
This is part 8 of “Golang Experiments: Classic Concurrency Problems”