Fragment Description:



The recycler keeps a list of returned buffers and periodically throws away those that are too old and are unlikely to be reused (in the example code that's buffers that are older than one minute).
That allows the program to cope with bursts of demand for buffers dynamically.
Using a channel it's possible to keep a separate pool of buffers that are no longer used and use that pool to retrieve a buffer (or make a new one if the channel is empty).
see:
https://blog.cloudflare.com/recycling-memory-buffers-in-go/ and the interesting comments.


makeBuffersRecycler-1

Go Playground

Last update, on 2015, Fri 9 Oct, 16:15:29

/* ... <== see fragment description ... */

package main

import (
    "fmt"
    "math/rand"
    "runtime"
    "time"
)

var makes int
var frees int

func makeBuffer() []byte {
    makes += 1
    return make([]byte, rand.Intn(5000000)+5000000)
}

type queued struct {
    when  time.Time
    slice []byte
}

const timeout = 500 * time.Millisecond

func makeRecycler() (get, give chan []byte) {
    get = make(chan []byte)
    give = make(chan []byte)
    go func() {
        timer := time.NewTimer(timeout)
        var q queue
        for {
            if len(q) == 0 {
                q.add(makeBuffer())
            }
            select {
            case b := <-give:
                q.add(b)
            case get <- q.newest():
                q.deleteNewest()
            case <-timer.C:
                q.deleteOlderThan(time.Now().Add(-timeout))
                timer.Reset(timeout)
            }
        }
    }()
    return
}

type queue []queued

func (q *queue) add(b []byte) {
    *q = append(*q, queued{
        when:  time.Now(),
        slice: b,
    })
}
func (qp *queue) deleteNewest() {
    q := *qp
    q[len(q)-1] = queued{}
    *qp = q[0 : len(q)-1]
}
func (q queue) newest() []byte {
    return q[len(q)-1].slice
}
func (qp *queue) deleteOlderThan(t time.Time) {
    q := *qp
    inactiveCount := len(q)
    for i, e := range q {
        if e.when.After(t) {
            inactiveCount = i
            break
        }
    }
    // Copy all active elements to the start of the slice.
    copy(q, q[inactiveCount:])
    activeCount := len(q) - inactiveCount
    // Zero out all inactive elements of the queue
    for j := activeCount; j < len(q); j++ {
        q[j] = queued{}
    }
    *qp = q[0:activeCount]
}
func main() {
    pool := make([][]byte, 100)
    get, give := makeRecycler()
    var m runtime.MemStats
    for x := 0; x < 10000; x++ {
        b := <-get
        i := rand.Intn(len(pool))
        if pool[i] != nil {
            give <- pool[i]
        }
        pool[i] = b
        // working set shrinks after a while.
        if x > 1200 && len(pool) != 5 {
            for _, p := range pool {
                give <- p
            }
            pool = make([][]byte, 5)
        }
        time.Sleep(time.Millisecond)
        bytes := 0
        for i := 0; i < len(pool); i++ {
            if pool[i] != nil {
                bytes += len(pool[i])
            }
        }
        runtime.ReadMemStats(&m)
        fmt.Printf("%d,%d,%d,%d,%d,%d,%d\n", m.HeapSys, bytes, m.HeapAlloc,
            m.HeapIdle, m.HeapReleased, makes, frees)
    }
}

/* Expected Output:
Interesting use of runtime.MemStats to visualize:
HeapSys, bytes, HeapAlloc, HeapIdle, HeapReleased, makes, frees.
*/



Comments