What are HTTP Polling Patterns?
HTTP polling patterns are techniques for achieving near-real-time communication between clients and servers using the standard HTTP request-response model. While they lack the full-duplex, truly real-time channel of WebSockets, these patterns are simpler to implement, easier to debug, and work reliably across virtually all networks and proxies.
The four main patterns are:
- Short Polling - Regular repeated requests at fixed intervals
- Long Polling - Server holds connection open until data is available
- HTTP/2 Server Push - Server proactively pushes resources to the client (covered only in the comparison below; major browsers have since dropped support)
- Chunked Transfer Encoding - Streaming data in HTTP response body
Each pattern offers different trade-offs between latency, resource consumption, and implementation complexity.
Real-World Use Cases
- Progress Tracking - File uploads, report generation, batch processing
- Dashboard Metrics - System health, user counts, server stats
- Notification Systems - New messages, alerts, updates
- Live Scoreboards - Sports scores, stock tickers, leaderboards
- Job Status Monitoring - Background task completion tracking
- Form Validation - Real-time availability checks (usernames, emails)
Implementation in Go
Project Structure
http-polling/
├── main.go
├── handlers/
│ ├── polling.go
│ ├── longpolling.go
│ └── chunked.go
├── notifier/
│ └── notifier.go
└── go.mod
1. Short Polling Implementation
handlers/polling.go
package handlers
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"sync"
	"time"
)
// MessageStore simulates a data source
type MessageStore struct {
mu sync.RWMutex
messages []Message
}
type Message struct {
ID int `json:"id"`
Content string `json:"content"`
Timestamp time.Time `json:"timestamp"`
}
var store = &MessageStore{messages: []Message{}}
// AddMessage adds a new message to the store
func (ms *MessageStore) AddMessage(content string) {
ms.mu.Lock()
defer ms.mu.Unlock()
msg := Message{
ID: len(ms.messages) + 1,
Content: content,
Timestamp: time.Now(),
}
ms.messages = append(ms.messages, msg)
}

// AddMessage is an exported wrapper so other packages (such as the test
// endpoint in main.go below) can publish into the unexported store.
func AddMessage(content string) {
	store.AddMessage(content)
}
// GetMessagesSince returns messages after a given ID
func (ms *MessageStore) GetMessagesSince(lastID int) []Message {
ms.mu.RLock()
defer ms.mu.RUnlock()
var newMessages []Message
for _, msg := range ms.messages {
if msg.ID > lastID {
newMessages = append(newMessages, msg)
}
}
return newMessages
}
// ShortPollingHandler handles short polling requests
func ShortPollingHandler(w http.ResponseWriter, r *http.Request) {
// Parse last seen message ID from query params
lastID := 0
if id := r.URL.Query().Get("last_id"); id != "" {
fmt.Sscanf(id, "%d", &lastID)
}
// Get new messages
messages := store.GetMessagesSince(lastID)
// Set headers
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
// Return messages (even if empty)
json.NewEncoder(w).Encode(map[string]interface{}{
"messages": messages,
"count": len(messages),
"polled_at": time.Now(),
})
log.Printf("Short polling: returned %d messages (after ID %d)", len(messages), lastID)
}
Client Example (JavaScript):
// Short polling client
let lastMessageId = 0;
async function pollForUpdates() {
try {
const response = await fetch(`/api/poll?last_id=${lastMessageId}`);
const data = await response.json();
if (data.messages && data.messages.length > 0) {
data.messages.forEach(msg => {
console.log('New message:', msg.content);
lastMessageId = msg.id;
});
}
} catch (error) {
console.error('Polling error:', error);
}
// Poll every 5 seconds
setTimeout(pollForUpdates, 5000);
}
pollForUpdates();
2. Long Polling Implementation
handlers/longpolling.go
package handlers
import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"sync"
	"time"
)
// Notifier manages long polling connections
type Notifier struct {
mu sync.RWMutex
waiters map[int]chan []Message
waiterID int
}
func NewNotifier() *Notifier {
return &Notifier{
waiters: make(map[int]chan []Message),
}
}
// Notify sends messages to all waiting clients
func (n *Notifier) Notify(messages []Message) {
n.mu.Lock()
defer n.mu.Unlock()
for id, ch := range n.waiters {
select {
case ch <- messages:
log.Printf("Notified waiter %d with %d messages", id, len(messages))
default:
log.Printf("Waiter %d channel full, skipping", id)
}
}
}
// Wait registers a waiter and returns a channel
func (n *Notifier) Wait(ctx context.Context) (<-chan []Message, int) {
n.mu.Lock()
defer n.mu.Unlock()
ch := make(chan []Message, 1)
id := n.waiterID
n.waiterID++
n.waiters[id] = ch
// Clean up when context is done
go func() {
<-ctx.Done()
n.mu.Lock()
delete(n.waiters, id)
close(ch)
n.mu.Unlock()
log.Printf("Cleaned up waiter %d", id)
}()
return ch, id
}
var notifier = NewNotifier()
// LongPollingHandler handles long polling requests
func LongPollingHandler(w http.ResponseWriter, r *http.Request) {
// Parse last seen message ID
lastID := 0
if id := r.URL.Query().Get("last_id"); id != "" {
fmt.Sscanf(id, "%d", &lastID)
}
// Check for immediate messages
messages := store.GetMessagesSince(lastID)
if len(messages) > 0 {
respondWithMessages(w, messages)
return
}
// No messages yet - wait for up to 30 seconds
ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
defer cancel()
msgChan, waiterID := notifier.Wait(ctx)
// Re-check the store after registering: a message published between the
// first check and registration would otherwise be missed until the next
// notification.
if messages := store.GetMessagesSince(lastID); len(messages) > 0 {
	respondWithMessages(w, messages)
	return
}
log.Printf("Long polling: waiter %d waiting (after ID %d)", waiterID, lastID)
select {
case messages := <-msgChan:
// New messages arrived
respondWithMessages(w, messages)
log.Printf("Long polling: waiter %d received %d messages", waiterID, len(messages))
case <-ctx.Done():
// Timeout or client disconnect
if ctx.Err() == context.DeadlineExceeded {
// Send empty response on timeout
respondWithMessages(w, []Message{})
log.Printf("Long polling: waiter %d timed out", waiterID)
} else {
log.Printf("Long polling: waiter %d disconnected", waiterID)
}
}
}
func respondWithMessages(w http.ResponseWriter, messages []Message) {
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
json.NewEncoder(w).Encode(map[string]interface{}{
"messages": messages,
"count": len(messages),
"timestamp": time.Now(),
})
}
// MessagePublisher simulates message publishing
func MessagePublisher(interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
counter := 0
for range ticker.C {
counter++
content := fmt.Sprintf("Broadcast message #%d", counter)
store.AddMessage(content)
// Notify all long-polling waiters (read the new ID under the lock
// to avoid racing with concurrent readers)
store.mu.RLock()
id := len(store.messages)
store.mu.RUnlock()
notifier.Notify([]Message{{
	ID:        id,
	Content:   content,
	Timestamp: time.Now(),
}})
log.Printf("Published: %s", content)
}
}
Client Example (JavaScript):
// Long polling client
let lastMessageId = 0;
async function longPoll() {
try {
const response = await fetch(`/api/longpoll?last_id=${lastMessageId}`);
const data = await response.json();
if (data.messages && data.messages.length > 0) {
data.messages.forEach(msg => {
console.log('New message:', msg.content);
lastMessageId = msg.id;
});
}
} catch (error) {
console.error('Long polling error:', error);
await new Promise(resolve => setTimeout(resolve, 5000));
}
// Immediately reconnect
longPoll();
}
longPoll();
3. Chunked Transfer Encoding (Streaming)
handlers/chunked.go
package handlers
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"time"
)
// ChunkedStreamHandler streams data using chunked transfer encoding
func ChunkedStreamHandler(w http.ResponseWriter, r *http.Request) {
// Headers for newline-delimited JSON streaming. Go's HTTP server applies
// chunked transfer encoding automatically when no Content-Length is set,
// so Transfer-Encoding must not be set by hand; the server likewise
// manages the Connection header.
w.Header().Set("Content-Type", "application/x-ndjson")
w.Header().Set("Cache-Control", "no-cache")
// Ensure we can flush
flusher, ok := w.(http.Flusher)
if !ok {
http.Error(w, "Streaming not supported", http.StatusInternalServerError)
return
}
ctx := r.Context()
clientID := time.Now().UnixNano()
log.Printf("Stream client %d connected", clientID)
// Send initial connection message
sendChunk(w, flusher, map[string]interface{}{
"type": "connected",
"client_id": clientID,
"timestamp": time.Now(),
})
// Stream updates every 2 seconds
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
counter := 0
for {
select {
case <-ctx.Done():
log.Printf("Stream client %d disconnected", clientID)
return
case <-ticker.C:
counter++
data := map[string]interface{}{
"type": "update",
"counter": counter,
"timestamp": time.Now(),
"message": fmt.Sprintf("Update #%d", counter),
}
if err := sendChunk(w, flusher, data); err != nil {
log.Printf("Stream client %d: send error: %v", clientID, err)
return
}
log.Printf("Stream client %d: sent update #%d", clientID, counter)
}
}
}
func sendChunk(w http.ResponseWriter, flusher http.Flusher, data interface{}) error {
jsonData, err := json.Marshal(data)
if err != nil {
return err
}
// Write chunk with newline delimiter
_, err = fmt.Fprintf(w, "%s\n", jsonData)
if err != nil {
return err
}
// Flush immediately
flusher.Flush()
return nil
}
// ProgressHandler demonstrates progress streaming
func ProgressHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
flusher, ok := w.(http.Flusher)
if !ok {
http.Error(w, "Streaming not supported", http.StatusInternalServerError)
return
}
// Simulate a long-running task with progress updates
totalSteps := 10
for i := 1; i <= totalSteps; i++ {
progress := map[string]interface{}{
"step": i,
"total": totalSteps,
"percentage": (i * 100) / totalSteps,
"message": fmt.Sprintf("Processing step %d of %d", i, totalSteps),
"timestamp": time.Now(),
}
if err := sendChunk(w, flusher, progress); err != nil {
log.Printf("Progress stream error: %v", err)
return
}
// Simulate work
time.Sleep(time.Second)
}
// Send completion message
sendChunk(w, flusher, map[string]interface{}{
"step": totalSteps,
"total": totalSteps,
"percentage": 100,
"message": "Complete!",
"status": "done",
"timestamp": time.Now(),
})
}
Client Example (JavaScript):
// Chunked streaming client. A read may end mid-line (or even mid-character),
// so decode with {stream: true} and buffer the partial tail.
async function streamUpdates() {
  const response = await fetch('/api/stream');
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  while (true) {
    const {value, done} = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, {stream: true});
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep any trailing partial line for the next read
    for (const line of lines) {
      if (!line.trim()) continue;
      try {
        const data = JSON.parse(line);
        console.log('Stream update:', data);
      } catch (e) {
        console.error('Parse error:', e);
      }
    }
  }
}
streamUpdates();
4. Complete Main Application
main.go
package main
import (
"log"
"net/http"
"time"
"http-polling/handlers"
)
func main() {
// Setup routes
mux := http.NewServeMux()
// Short polling endpoint
mux.HandleFunc("/api/poll", handlers.ShortPollingHandler)
// Long polling endpoint
mux.HandleFunc("/api/longpoll", handlers.LongPollingHandler)
// Chunked streaming endpoints
mux.HandleFunc("/api/stream", handlers.ChunkedStreamHandler)
mux.HandleFunc("/api/progress", handlers.ProgressHandler)
// Test endpoint to add messages
mux.HandleFunc("/api/message", func(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
message := r.URL.Query().Get("content")
if message == "" {
message = "Test message"
}
handlers.AddMessage(message)
w.WriteHeader(http.StatusCreated)
w.Write([]byte("Message added"))
})
// Start message publisher for long polling demo
go handlers.MessagePublisher(10 * time.Second)
// Start server
log.Println("Server starting on :8080")
log.Println("Endpoints:")
log.Println(" - GET /api/poll?last_id=0 (short polling)")
log.Println(" - GET /api/longpoll?last_id=0 (long polling)")
log.Println(" - GET /api/stream (chunked streaming)")
log.Println(" - GET /api/progress (progress streaming)")
log.Println(" - POST /api/message?content=X (add message)")
if err := http.ListenAndServe(":8080", mux); err != nil {
log.Fatal(err)
}
}
go.mod
module http-polling
go 1.21
Pattern Comparison
Performance Characteristics
| Pattern | Latency | Server Load | Network Efficiency | Complexity |
|---|---|---|---|---|
| Short Polling | High (up to the poll interval) | High (many wasted requests) | Poor (empty responses) | Very Low |
| Long Polling | Low (< 1s) | Medium | Good | Medium |
| Chunked Transfer | Very Low (instant) | Medium | Excellent | Medium |
| HTTP/2 Push | Very Low | Low | Excellent | High |
Best Practices
1. Short Polling
- Use exponential backoff on errors
- Add jitter to prevent a thundering herd (both shown in the sketch after this list)
- Set appropriate polling intervals (5-30 seconds)
- Implement client-side caching with ETags
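The first three points can be combined in one small client loop. A minimal Go sketch, where the endpoint URL, base interval, and cap are illustrative choices:
import (
	"context"
	"math/rand"
	"net/http"
	"time"
)

// pollLoop polls with a fixed base interval, exponential backoff on
// errors (capped), and random jitter so clients don't fire in lockstep.
func pollLoop(ctx context.Context) {
	const base = 5 * time.Second
	const maxDelay = 60 * time.Second
	delay := base
	for {
		resp, err := http.Get("http://localhost:8080/api/poll?last_id=0")
		if err != nil {
			delay *= 2 // back off on failure...
			if delay > maxDelay {
				delay = maxDelay // ...but cap the delay
			}
		} else {
			resp.Body.Close() // response decoding omitted for brevity
			delay = base      // reset to the base interval on success
		}
		// Add up to 20% random jitter on top of the delay
		jitter := time.Duration(rand.Int63n(int64(delay / 5)))
		select {
		case <-ctx.Done():
			return
		case <-time.After(delay + jitter):
		}
	}
}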
2. Long Polling
- Set reasonable timeouts (20-60 seconds)
- Implement proper cleanup on disconnect
- Use context for cancellation
- Limit concurrent connections per client (a sketch follows this list)
- Handle reconnection with exponential backoff
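For the per-client connection limit, one option is a counting middleware; keying by remote IP and the limit of 2 are illustrative assumptions, not requirements:
import (
	"net"
	"net/http"
	"sync"
)

// perClientLimiter caps concurrent long-poll requests per remote IP.
type perClientLimiter struct {
	mu    sync.Mutex
	conns map[string]int
	limit int
}

var longPollLimiter = &perClientLimiter{conns: map[string]int{}, limit: 2}

func (l *perClientLimiter) middleware(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ip, _, _ := net.SplitHostPort(r.RemoteAddr)
		l.mu.Lock()
		if l.conns[ip] >= l.limit {
			l.mu.Unlock()
			http.Error(w, "Too many open polls", http.StatusTooManyRequests)
			return
		}
		l.conns[ip]++
		l.mu.Unlock()
		defer func() {
			l.mu.Lock()
			l.conns[ip]--
			l.mu.Unlock()
		}()
		next(w, r)
	}
}
If defined alongside main.go, it wraps the route as mux.HandleFunc("/api/longpoll", longPollLimiter.middleware(handlers.LongPollingHandler)).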
3. Chunked Streaming
- Always check for Flusher support
- Send heartbeat messages to detect disconnects (see the loop sketched after this list)
- Implement proper error handling
- Use structured data format (JSON lines)
- Set appropriate buffer sizes
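Heartbeats let the server notice dead clients: a write to a broken connection fails and ends the handler. A sketch of how a heartbeat case would slot into the select loop of a handler like ChunkedStreamHandler above (the 15-second interval is an arbitrary choice):
// Alongside the existing update ticker inside the streaming loop:
heartbeat := time.NewTicker(15 * time.Second)
defer heartbeat.Stop()
for {
	select {
	case <-ctx.Done():
		return // client disconnected
	case <-heartbeat.C:
		// A tiny "heartbeat" chunk; a failed write means the client is gone
		if err := sendChunk(w, flusher, map[string]interface{}{
			"type":      "heartbeat",
			"timestamp": time.Now(),
		}); err != nil {
			return
		}
	case <-ticker.C:
		// ... regular updates as before ...
	}
}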
4. Connection Management
// Rate limiting example
import "golang.org/x/time/rate"
var limiter = rate.NewLimiter(rate.Limit(10), 20) // 10 req/s, burst 20
func rateLimitMiddleware(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !limiter.Allow() {
http.Error(w, "Rate limit exceeded", http.StatusTooManyRequests)
return
}
next(w, r)
}
}
Common Pitfalls
1. Not Handling Client Disconnects
// Bad: Doesn't check context
func badHandler(w http.ResponseWriter, r *http.Request) {
for {
time.Sleep(time.Second)
// Client may have disconnected!
sendUpdate(w)
}
}
// Good: Monitors context
func goodHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return // Client disconnected
case <-ticker.C:
sendUpdate(w)
}
}
}
2. Memory Leaks with Long Polling
// Bad: No cleanup
var waiters []chan Message
// Good: Cleanup on disconnect
type Notifier struct {
waiters map[int]chan Message
}
func (n *Notifier) Wait(ctx context.Context) <-chan Message {
ch := make(chan Message)
id := generateID() // hypothetical ID helper; map locking omitted for brevity
n.waiters[id] = ch
go func() {
<-ctx.Done()
delete(n.waiters, id)
close(ch)
}()
return ch
}
3. Proxy Buffering Issues
// Add headers to prevent buffering
w.Header().Set("X-Accel-Buffering", "no") // Nginx
w.Header().Set("Cache-Control", "no-cache")
4. Not Using HTTP/1.1 Keep-Alive
// Configure HTTP client for reuse
var client = &http.Client{
Transport: &http.Transport{
MaxIdleConns: 100,
MaxIdleConnsPerHost: 10,
IdleConnTimeout: 90 * time.Second,
},
}
When to Use Each Pattern
Short Polling
✅ Use When:
- Updates are infrequent (every 30+ seconds)
- Clients are behind strict proxies/firewalls
- Simple implementation is priority
- You need guaranteed compatibility
❌ Avoid When:
- Need low latency (< 5 seconds)
- High update frequency
- Many concurrent clients
- Network efficiency matters
Long Polling
✅ Use When:
- Need near-real-time updates (< 1 second latency)
- Updates are sporadic/unpredictable
- WebSockets are not available
- Working with RESTful architecture
❌ Avoid When:
- Very high message frequency (> 1/second)
- Server resources are limited
- Need bidirectional communication
- Proxy servers cause issues
Chunked Transfer
✅ Use When:
- Streaming large responses
- Progress updates for long tasks
- Real-time logs or metrics
- Server-to-client only communication
❌ Avoid When:
- Need client-to-server messages
- Proxy servers buffer responses
- Clients need message replay
- Working with HTTP/2 Server Push
Advantages
Short Polling
- Simplest to implement and debug
- Works everywhere (proxies, firewalls, old browsers)
- Stateless - no server-side connection management
- Easy to scale horizontally
Long Polling
- Lower latency than short polling
- More efficient network usage
- Compatible with RESTful design
- Works through most proxies
Chunked Transfer
- True streaming capability
- Instant delivery of updates
- Great for progress indicators
- Single HTTP connection
Disadvantages
Short Polling
- High latency (polling interval delay)
- Wasteful (many empty responses)
- Not suitable for real-time needs
- Unnecessary server load
Long Polling
- Server resources tied up in connections
- Complex connection management
- Difficult to scale with many clients
- May still have slight delays
Chunked Transfer
- Uni-directional only (server → client)
- Proxy buffering issues
- No automatic reconnection
- Client must handle parsing (the client sketch below addresses both)
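The last two points are manageable on the client. A Go sketch that consumes the newline-delimited JSON stream from the handlers above and reconnects whenever the connection drops (the URL and retry delays are illustrative):
import (
	"bufio"
	"encoding/json"
	"log"
	"net/http"
	"time"
)

// consumeStream reads newline-delimited JSON updates and reconnects
// with a short pause whenever the stream ends or the connect fails.
func consumeStream(url string) {
	for {
		resp, err := http.Get(url)
		if err != nil {
			log.Printf("connect failed: %v; retrying", err)
			time.Sleep(5 * time.Second)
			continue
		}
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() { // one JSON object per line
			var update map[string]interface{}
			if err := json.Unmarshal(scanner.Bytes(), &update); err != nil {
				log.Printf("parse error: %v", err)
				continue
			}
			log.Printf("stream update: %v", update)
		}
		resp.Body.Close()
		log.Println("stream ended; reconnecting")
		time.Sleep(time.Second)
	}
}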
Scaling Considerations
Horizontal Scaling with Redis
package main
import (
"context"
"encoding/json"
"github.com/redis/go-redis/v9"
"log"
)
type RedisNotifier struct {
client *redis.Client
pubsub *redis.PubSub
}
func NewRedisNotifier(addr string) *RedisNotifier {
client := redis.NewClient(&redis.Options{
Addr: addr,
})
return &RedisNotifier{
client: client,
pubsub: client.Subscribe(context.Background(), "updates"),
}
}
func (rn *RedisNotifier) Publish(message Message) error {
data, err := json.Marshal(message)
if err != nil {
return err
}
return rn.client.Publish(context.Background(), "updates", data).Err()
}
func (rn *RedisNotifier) Subscribe(ctx context.Context) <-chan Message {
ch := make(chan Message)
go func() {
defer close(ch)
for {
select {
case <-ctx.Done():
return
case msg := <-rn.pubsub.Channel():
var message Message
if err := json.Unmarshal([]byte(msg.Payload), &message); err != nil {
log.Printf("Error unmarshaling: %v", err)
continue
}
ch <- message
}
}
}()
return ch
}
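To connect this to the long-polling code above, each instance can run a bridge goroutine that forwards Redis messages into its local Notifier. A sketch, assuming the Message and Notifier types from the handlers package are visible here (in a real project they would live in a shared package), with error handling omitted:
// bridge wakes local long-poll waiters for messages published by any
// instance in the cluster.
func bridge(ctx context.Context, rn *RedisNotifier, n *Notifier) {
	for msg := range rn.Subscribe(ctx) {
		n.Notify([]Message{msg})
	}
}
Started once at boot with go bridge(ctx, redisNotifier, notifier); the publisher then calls rn.Publish instead of notifier.Notify directly.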
Testing
package handlers
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
func TestShortPolling(t *testing.T) {
// Add test message
store.AddMessage("Test message")
// Create request
req := httptest.NewRequest("GET", "/api/poll?last_id=0", nil)
w := httptest.NewRecorder()
// Call handler
ShortPollingHandler(w, req)
// Check response
if w.Code != http.StatusOK {
t.Errorf("Expected 200, got %d", w.Code)
}
if w.Header().Get("Content-Type") != "application/json" {
t.Error("Expected JSON content type")
}
}
func TestLongPollingTimeout(t *testing.T) {
req := httptest.NewRequest("GET", "/api/longpoll?last_id=999", nil)
w := httptest.NewRecorder()
// The handler times out after 30 seconds; in a real suite, make that
// timeout configurable so this test can run faster.
done := make(chan bool)
go func() {
LongPollingHandler(w, req)
done <- true
}()
select {
case <-done:
if w.Code != http.StatusOK {
t.Errorf("Expected 200, got %d", w.Code)
}
case <-time.After(35 * time.Second):
t.Error("Handler didn't timeout")
}
}
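A sketch of a test for the streaming handler: httptest.ResponseRecorder satisfies http.Flusher, and cancelling the request context is what ends the handler's loop (context and strings would need to be added to the imports above):
func TestChunkedStream(t *testing.T) {
	// Cancel the request context so the otherwise-endless stream exits
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	req := httptest.NewRequest("GET", "/api/stream", nil).WithContext(ctx)
	w := httptest.NewRecorder() // implements http.Flusher
	ChunkedStreamHandler(w, req)
	if !strings.Contains(w.Body.String(), `"type":"connected"`) {
		t.Error("Expected initial connected chunk")
	}
}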