Fragment Description:

The recycler keeps a list of returned buffers and periodically throws away those that are too old and unlikely to be reused. This lets the program cope dynamically with bursts of demand for buffers.
Improvement suggested:
Presumably the makeRecycler loop would be more effective if it didn't allocate memory itself on every iteration (I'm looking at NewTimer and PushFront here).
I'd be tempted to use Timer.Reset, and perhaps just a slice of queued items to amortise queue allocs over time (append to add to the queue; reslice to delete old items).
Then it might work well for smaller byte slices too.
I'm also unconvinced that you want to be resetting the timer on every pool operation; if there's never a full minute of waiting, you'll never GC the pool.
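
To make the timer point concrete, here is a minimal sketch (illustrative only; recycleLoop and idle are made-up names, and the real loop appears in the fragment below). The timer is allocated once and re-armed only when it fires, never on a pool operation:

// Illustrative sketch, not part of the fragment.
func recycleLoop(give <-chan []byte, timeout time.Duration) {
    var idle [][]byte
    timer := time.NewTimer(timeout)
    for {
        select {
        case b := <-give:
            idle = append(idle, b) // note: no timer.Reset here
        case <-timer.C:
            idle = nil           // toy expiry: drop every reference at once
            timer.Reset(timeout) // the channel was just drained, so Reset is safe
        }
    }
}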
Approach suggested:
A slice-as-a-stack approach: buffers are appended to and popped from the end of a slice, rather than inserted at the front of a doubly-linked list as in the original (see makeBufferRecycler-1). A short sketch follows below.
This example, together with the comment above, was provided by Roger Peppe.
see:
https://blog.cloudflare.com/recycling-memory-buffers-in-go/ and the interesting comments.
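
A minimal sketch of the slice-as-a-stack idea (stack, push and pop are made-up names; the queue type below is the real version). append pushes, reslicing pops, and the popped slot is nilled so the garbage collector can reclaim the buffer once the caller drops it:

// Illustrative sketch, not part of the fragment.
type stack [][]byte

func (s *stack) push(b []byte) {
    *s = append(*s, b) // amortised O(1); the backing array is reused across pushes
}

func (s *stack) pop() []byte {
    old := *s
    b := old[len(old)-1]
    old[len(old)-1] = nil // drop the stack's reference to the buffer
    *s = old[:len(old)-1]
    return b
}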


makeBuffersRecycler-2

Go Playground

Last update: Fri 9 Oct 2015, 16:15:29

/* ... <== see fragment description ... */

package main

import (
    "fmt"
    "math/rand"
    "runtime"
    "time"
)

// makes counts buffer allocations; frees counts buffers the recycler discards.
var makes int
var frees int

// makeBuffer allocates a fresh buffer of 5-10 MB.
func makeBuffer() []byte {
    makes++
    return make([]byte, rand.Intn(5000000)+5000000)
}

type queued struct {
    when  time.Time
    slice []byte
}

// Buffers idle for longer than timeout are discarded. (The original used a
// full minute; it is shorter here so the demo exercises expiry quickly.)
const timeout = 500 * time.Millisecond

func makeRecycler() (get, give chan []byte) {
    get = make(chan []byte)
    give = make(chan []byte)
    go func() {
        timer := time.NewTimer(timeout) // allocated once; only Reset re-arms it
        var q queue
        for {
            if len(q) == 0 {
                q.add(makeBuffer())
            }
            select {
            case b := <-give:
                // A buffer has been returned: push it onto the stack.
                q.add(b)
            case get <- q.newest():
                // The most recently returned buffer was handed out: pop it.
                q.deleteNewest()
            case <-timer.C:
                // The timer is reset only here, never on get/give, so stale
                // buffers are reclaimed even under constant traffic.
                q.deleteOlderThan(time.Now().Add(-timeout))
                timer.Reset(timeout)
            }
        }
    }()
    return
}

// queue is a stack of recycled buffers, oldest first.
type queue []queued

// add pushes a buffer onto the top of the stack.
func (q *queue) add(b []byte) {
    *q = append(*q, queued{
        when:  time.Now(),
        slice: b,
    })
}

// deleteNewest pops the top of the stack, zeroing the slot so the queue
// no longer holds a reference to the buffer.
func (qp *queue) deleteNewest() {
    q := *qp
    q[len(q)-1] = queued{}
    *qp = q[0 : len(q)-1]
}

// newest returns the buffer on top of the stack.
func (q queue) newest() []byte {
    return q[len(q)-1].slice
}
// deleteOlderThan drops every buffer queued before t. Entries are in
// insertion order, so the stale ones form a prefix of the slice.
func (qp *queue) deleteOlderThan(t time.Time) {
    q := *qp
    inactiveCount := len(q)
    for i, e := range q {
        if e.when.After(t) {
            inactiveCount = i
            break
        }
    }
    frees += inactiveCount // count the buffers handed back to the GC
    // Copy all active elements to the start of the slice.
    copy(q, q[inactiveCount:])
    activeCount := len(q) - inactiveCount
    // Zero out the stale tail so the GC can reclaim the discarded buffers.
    for j := activeCount; j < len(q); j++ {
        q[j] = queued{}
    }
    *qp = q[0:activeCount]
}
func main() {
    pool := make([][]byte, 100)
    get, give := makeRecycler()
    var m runtime.MemStats
    // Watch how memory usage evolves as the pool is exercised.
    for x := 0; x < 10000; x++ {
        b := <-get
        i := rand.Intn(len(pool))
        if pool[i] != nil {
            give <- pool[i]
        }
        pool[i] = b
        // The working set shrinks after a while: return every buffer and
        // switch to a much smaller pool.
        if x > 1200 && len(pool) != 5 {
            for _, p := range pool {
                if p != nil { // skip slots that were never filled
                    give <- p
                }
            }
            pool = make([][]byte, 5)
        }
        time.Sleep(time.Millisecond)
        bytes := 0
        for i := 0; i < len(pool); i++ {
            if pool[i] != nil {
                bytes += len(pool[i])
            }
        }
        runtime.ReadMemStats(&m)
        fmt.Printf("%d,%d,%d,%d,%d,%d,%d\n", m.HeapSys, bytes, m.HeapAlloc,
            m.HeapIdle, m.HeapReleased, makes, frees)
    }
}

/* Expected Output:
One CSV line per iteration; an interesting use of runtime.MemStats to visualize
HeapSys, bytes (size of the live pool), HeapAlloc, HeapIdle, HeapReleased, makes, frees.
*/
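
Since each line is comma-separated, one simple way to capture a run for charting (assuming a Unix shell and that the fragment is saved as recycler.go; both are assumptions, not part of the fragment):

go run recycler.go > stats.csv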


