← Back to Index

Chapter 4: Concurrency

goroutines, channels & concurrency patterns

1. goroutines

A goroutine is a lightweight thread managed by the Go runtime. Creating one is as simple as adding the go keyword before a function call.

package main

import (
    "fmt"
    "time"
)

// sayHello prints three numbered greetings for name, pausing 100ms
// between them so concurrent callers visibly interleave.
func sayHello(name string) {
	round := 0
	for round < 3 {
		fmt.Printf("Hello from %s (%d)\n", name, round)
		time.Sleep(100 * time.Millisecond)
		round++
	}
}

func main() {
	// Launch the two named goroutines; the arguments are evaluated
	// at the go statement, so each call gets its own name.
	for _, name := range []string{"goroutine-1", "goroutine-2"} {
		go sayHello(name)
	}

	// A goroutine can also wrap an anonymous function literal.
	go func() {
		fmt.Println("Hello from anonymous goroutine")
	}()

	// main must outlive the goroutines, otherwise the program exits
	// before they get a chance to run.
	time.Sleep(500 * time.Millisecond)
	fmt.Println("Main done")
}

πŸ’‘ A goroutine starts with only ~2KB of stack (grows as needed). You can easily run millions of goroutines on a single machine. A Java thread typically uses ~1MB of stack.

πŸ”„ Concurrency Model Comparison

Language Mechanism Cost
Go go func() ~2KB, M:N scheduling
Java new Thread() / Virtual Threads ~1MB / lightweight (JDK 21+)
Python asyncio / threading GIL limits true parallelism
Node.js Event loop / Worker threads Single-threaded async

2. Channels

Channels are Go's primary mechanism for goroutine communication. The Go mantra: "Don't communicate by sharing memory; share memory by communicating."

Unbuffered Channel

func main() {
	// An unbuffered channel: every send must rendezvous with a receive.
	ch := make(chan string)

	// The sender blocks on ch <- until main is ready to receive.
	go func() { ch <- "hello" }()

	// The receive blocks until the goroutine sends; printing the
	// expression directly avoids the intermediate variable.
	fmt.Println(<-ch) // "hello"
}

Buffered Channel

func main() {
	// A buffered channel: up to 3 sends complete without a receiver.
	ch := make(chan int, 3)

	for v := 1; v <= 3; v++ {
		ch <- v // buffer has room, so none of these block
	}
	// A fourth send here would block: the buffer is full.

	// Values come out in FIFO order: 1, 2, 3.
	for i := 0; i < 3; i++ {
		fmt.Println(<-ch)
	}
}

Close & Range

// producer sends the integers 0 through 4 on ch, then closes it so
// receivers ranging over the channel know no more values are coming.
func producer(ch chan<- int) {
	defer close(ch) // signal: no more values
	i := 0
	for i < 5 {
		ch <- i
		i++
	}
}

func main() {
	ch := make(chan int)
	go producer(ch)

	// Receive with the two-value form until the channel is closed;
	// equivalent to ranging over ch.
	for {
		val, ok := <-ch
		if !ok {
			break // producer closed the channel
		}
		fmt.Println(val) // 0, 1, 2, 3, 4
	}
}

Channel Direction

// sender demonstrates a send-only parameter: the chan<- type lets
// this function publish on ch but the compiler rejects any receive.
func sender(ch chan<- string) {
	ch <- "data"
}

// receiver demonstrates a receive-only parameter: the <-chan type
// lets this function consume from ch but never send on it.
func receiver(ch <-chan string) {
	fmt.Println(<-ch) // blocks until a value is available
}

πŸ’‘ Unbuffered channels synchronize goroutines (rendezvous). Buffered channels decouple sender and receiver up to the buffer size.

3. select

select lets a goroutine wait on multiple channel operations simultaneously.

Multiplexing

func main() {
	ch1 := make(chan string)
	ch2 := make(chan string)

	// emit sleeps, then delivers msg — simulating work of varying length.
	emit := func(c chan<- string, delay time.Duration, msg string) {
		time.Sleep(delay)
		c <- msg
	}
	go emit(ch1, 100*time.Millisecond, "from ch1")
	go emit(ch2, 200*time.Millisecond, "from ch2")

	// select blocks until one of its cases is ready, so each pass
	// prints whichever message arrives first.
	received := 0
	for received < 2 {
		select {
		case msg := <-ch1:
			fmt.Println(msg)
		case msg := <-ch2:
			fmt.Println(msg)
		}
		received++
	}
}

Timeout Pattern

// fetchWithTimeout runs doFetch(url) in a goroutine and returns its
// result, or an error if it takes longer than 3 seconds.
func fetchWithTimeout(url string) (string, error) {
	// Buffer of 1 matters: if we return on timeout, the fetch
	// goroutine can still complete its send and exit instead of
	// blocking forever (a goroutine leak).
	done := make(chan string, 1)

	go func() {
		done <- doFetch(url)
	}()

	select {
	case result := <-done:
		return result, nil
	case <-time.After(3 * time.Second):
		return "", fmt.Errorf("timeout fetching %s", url)
	}
}

Non-blocking with default

// Non-blocking receive: select runs a channel case only if it is
// ready right now; otherwise it falls through to default immediately
// instead of blocking. Useful for polling a channel mid-work.
select {
case msg := <-ch:
    fmt.Println("Received:", msg)
default:
    fmt.Println("No message available, doing other work...")
}

4. sync Package

WaitGroup

func main() {
	var wg sync.WaitGroup

	// worker reports completion; Done is deferred so the WaitGroup
	// is decremented even if the body panics.
	worker := func(n int) {
		defer wg.Done()
		fmt.Printf("Worker %d done\n", n)
	}

	for id := 0; id < 5; id++ {
		wg.Add(1) // register before launching, never inside the goroutine
		go worker(id)
	}

	wg.Wait() // block until every worker has called Done()
	fmt.Println("All workers finished")
}

Mutex

// SafeCounter is a set of named counters safe for concurrent use.
// The zero value is ready to use: Inc lazily allocates the map, so
// no constructor is required. (Without the lazy init, incrementing a
// zero-value SafeCounter would panic on the nil-map write.)
type SafeCounter struct {
	mu    sync.Mutex
	count map[string]int
}

// Inc atomically increments the counter stored under key.
func (c *SafeCounter) Inc(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.count == nil {
		// Lazy init under the lock so the zero value works.
		c.count = make(map[string]int)
	}
	c.count[key]++
}

// Get returns the current count for key, or 0 if it was never
// incremented. Reading a nil map is safe and yields the zero value.
func (c *SafeCounter) Get(key string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.count[key]
}

Once

// Package-level singleton state: instance is created at most once,
// guarded by once.
var (
    instance *Database
    once     sync.Once
)

// GetDB returns the process-wide *Database, connecting on first use.
// sync.Once guarantees connectToDatabase runs exactly once even when
// GetDB is called concurrently; later callers block until the first
// call's function returns, then every call yields the same instance.
func GetDB() *Database {
    once.Do(func() {
        instance = connectToDatabase()
    })
    return instance
}

RWMutex

// Cache is a concurrency-safe string map using a read/write lock:
// any number of readers may hold RLock at once, while Lock gives a
// writer exclusive access. The zero value is ready to use: Set
// lazily allocates the map, so no constructor is required. (Without
// the lazy init, Set on a zero-value Cache would panic on the
// nil-map write.)
type Cache struct {
	mu   sync.RWMutex
	data map[string]string
}

// Get returns the cached value for key and whether it was present.
func (c *Cache) Get(key string) (string, bool) {
	c.mu.RLock() // multiple readers allowed
	defer c.mu.RUnlock()
	val, ok := c.data[key]
	return val, ok
}

// Set stores value under key, allocating the map on first use.
func (c *Cache) Set(key, value string) {
	c.mu.Lock() // exclusive write access
	defer c.mu.Unlock()
	if c.data == nil {
		// Lazy init under the write lock so the zero value works.
		c.data = make(map[string]string)
	}
	c.data[key] = value
}

5. Concurrency Patterns

Fan-out / Fan-in

// fanOut starts `workers` parallel stages, each reading from the
// shared input channel, and returns their output channels.
func fanOut(input <-chan int, workers int) []<-chan int {
	outs := make([]<-chan int, 0, workers)
	for w := 0; w < workers; w++ {
		outs = append(outs, process(input))
	}
	return outs
}

// fanIn merges any number of input channels into a single output
// channel, closing it once every input has been drained and closed.
func fanIn(channels ...<-chan int) <-chan int {
	out := make(chan int)
	var wg sync.WaitGroup
	wg.Add(len(channels))

	// forward drains one input channel into the merged output.
	forward := func(c <-chan int) {
		defer wg.Done()
		for v := range c {
			out <- v
		}
	}
	for _, ch := range channels {
		go forward(ch)
	}

	// Close the output only after every forwarder has finished,
	// so receivers see all values before the close signal.
	go func() {
		wg.Wait()
		close(out)
	}()

	return out
}

Worker Pool

// workerPool starts numWorkers goroutines that each pull jobs from
// the jobs channel and send job*2 on results. results is closed
// once all workers have finished (i.e. jobs is closed and drained).
func workerPool(jobs <-chan int, results chan<- int, numWorkers int) {
	var wg sync.WaitGroup
	wg.Add(numWorkers)

	// work loops until the jobs channel is closed and empty.
	work := func(id int) {
		defer wg.Done()
		for job := range jobs {
			fmt.Printf("Worker %d processing job %d\n", id, job)
			results <- job * 2
		}
	}
	for w := 0; w < numWorkers; w++ {
		go work(w)
	}

	// Only close results after every worker is done sending.
	go func() {
		wg.Wait()
		close(results)
	}()
}

func main() {
	jobs := make(chan int, 100)
	results := make(chan int, 100)

	// Start 3 workers before any jobs are queued.
	workerPool(jobs, results, 3)

	// Enqueue 10 jobs, then close to tell workers no more are coming.
	for job := 1; job <= 10; job++ {
		jobs <- job
	}
	close(jobs)

	// Drain until workerPool closes results.
	for {
		result, ok := <-results
		if !ok {
			break
		}
		fmt.Println("Result:", result)
	}
}

Pipeline

// generate is the pipeline source: it emits each of nums on the
// returned channel from a goroutine, then closes it.
func generate(nums ...int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out) // always signal end-of-stream
		for _, n := range nums {
			out <- n
		}
	}()
	return out
}

// square is a pipeline stage: it reads ints from in, emits each
// value squared on the returned channel, and closes it when in is
// exhausted.
func square(in <-chan int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out) // propagate end-of-stream downstream
		for n := range in {
			out <- n * n
		}
	}()
	return out
}

func main() {
    // Pipeline: generate -> square -> print
    for val := range square(generate(2, 3, 4)) {
        fmt.Println(val) // 4, 9, 16
    }
}

Context Cancellation

// longTask performs 100 small steps, checking before each one
// whether ctx has been canceled. It returns ctx.Err() (either
// context.Canceled or context.DeadlineExceeded) when interrupted,
// or nil after all steps complete.
func longTask(ctx context.Context) error {
	step := 0
	for step < 100 {
		// Non-blocking cancellation check at the top of each step.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		fmt.Printf("Step %d\n", step)
		time.Sleep(100 * time.Millisecond)
		step++
	}
	return nil
}

func main() {
	// Deadline of 500ms; cancel releases the timer's resources
	// even if the deadline never fires.
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()

	err := longTask(ctx)
	if err != nil {
		fmt.Println("Task stopped:", err)
	}
}

πŸ’‘ context.Context is passed as the first parameter to functions that do I/O, network calls, or long-running operations. It provides cancellation, timeouts, and request-scoped values.

πŸ“‹ Chapter Summary

goroutines

Lightweight (~2KB), managed by Go runtime. Use go func() to launch. Millions can run concurrently.

Channels

Type-safe communication between goroutines. Unbuffered for sync, buffered for async.

select

Multiplex multiple channels. Use with time.After for timeouts, default for non-blocking.

sync Package

WaitGroup for coordination, Mutex for exclusive access, Once for singleton init.

Patterns

Fan-out/Fan-in, Worker Pool, Pipeline β€” composable patterns for real-world concurrency.

Context

context.Context propagates cancellation and deadlines across goroutine trees.