mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
lib/promscrape: use chunkedbuffer.Buffer instead of bytesutil.ByteBuffer for reading response body from scrape targets
This reduces memory usage when reading large response bodies, because the underlying buffer doesn't need to be re-allocated while a large response body is being read into it. Additionally, the response body is now decompressed under the processScrapedDataConcurrencyLimitCh limiter. This reduces CPU usage and RAM usage a bit when scraping thousands of targets.
This commit is contained in:
@@ -1,15 +1,40 @@
|
||||
package chunkedbuffer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
// chunkSize is the size in bytes of a single memory chunk used by Buffer.
const chunkSize = 4 * 1024
|
||||
|
||||
// Get returns Buffer from the pool.
|
||||
//
|
||||
// Return back the Buffer to the pool via Put() call when it is no longer needed.
|
||||
func Get() *Buffer {
|
||||
v := cbPool.Get()
|
||||
if v == nil {
|
||||
return &Buffer{}
|
||||
}
|
||||
return v.(*Buffer)
|
||||
}
|
||||
|
||||
// Put returns cb to the pool, so it could be re-used via Get() call.
|
||||
//
|
||||
// The cb cannot be used after Put() call.
|
||||
func Put(cb *Buffer) {
|
||||
cb.Reset()
|
||||
cbPool.Put(cb)
|
||||
}
|
||||
|
||||
// cbPool is the pool of Buffer objects re-used via Get() and Put().
var cbPool sync.Pool
|
||||
|
||||
// Buffer provides in-memory buffer optimized for storing big bytes volumes.
|
||||
//
|
||||
// It stores the data in chunks of fixed size. This reduces memory fragmentation
|
||||
@@ -89,12 +114,45 @@ func (cb *Buffer) MustReadAt(p []byte, off int64) {
|
||||
}
|
||||
}
|
||||
|
||||
// ReadFrom reads all the data from r and appends it to cb.
|
||||
func (cb *Buffer) ReadFrom(r io.Reader) (int64, error) {
|
||||
v := copyBufPool.Get()
|
||||
if v == nil {
|
||||
v = new([16 * 1024]byte)
|
||||
}
|
||||
b := (v.(*[16 * 1024]byte))[:]
|
||||
|
||||
bytesRead := int64(0)
|
||||
for {
|
||||
n, err := r.Read(b)
|
||||
cb.MustWrite(b[:n])
|
||||
bytesRead += int64(n)
|
||||
if err != nil {
|
||||
copyBufPool.Put(v)
|
||||
if errors.Is(err, io.EOF) {
|
||||
return bytesRead, nil
|
||||
}
|
||||
return bytesRead, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// copyBufPool is the pool of 16KiB scratch buffers used by Buffer.ReadFrom.
var copyBufPool sync.Pool
|
||||
|
||||
// WriteTo writes cb data to w.
|
||||
func (cb *Buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
if len(cb.chunks) == 0 {
|
||||
bLen := cb.Len()
|
||||
if bLen == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
switch t := w.(type) {
|
||||
case *bytesutil.ByteBuffer:
|
||||
t.Grow(bLen)
|
||||
case *bytes.Buffer:
|
||||
t.Grow(bLen)
|
||||
}
|
||||
|
||||
nTotal := 0
|
||||
|
||||
// Write all the chunks except the last one, which may be incomplete.
|
||||
@@ -123,6 +181,16 @@ func (cb *Buffer) WriteTo(w io.Writer) (int64, error) {
|
||||
return int64(nTotal), nil
|
||||
}
|
||||
|
||||
// MustWriteTo writes cb contents w.
|
||||
//
|
||||
// Use this function only if w cannot return errors. For example, if w is bytes.Buffer of bytesutil.ByteBuffer.
|
||||
// If w can return errors, then use WriteTo function instead.
|
||||
func (cb *Buffer) MustWriteTo(w io.Writer) {
|
||||
if _, err := cb.WriteTo(w); err != nil {
|
||||
logger.Panicf("BUG: unexpected error writing Buffer data to the provided writer: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Path returns cb path.
|
||||
func (cb *Buffer) Path() string {
|
||||
return fmt.Sprintf("Buffer/%p/mem", cb)
|
||||
@@ -185,8 +253,7 @@ func (r *reader) MustClose() {
|
||||
func getChunk() *[chunkSize]byte {
|
||||
v := chunkPool.Get()
|
||||
if v == nil {
|
||||
var chunk [chunkSize]byte
|
||||
return &chunk
|
||||
return new([chunkSize]byte)
|
||||
}
|
||||
return v.(*[chunkSize]byte)
|
||||
}
|
||||
@@ -8,7 +8,9 @@ import (
|
||||
)
|
||||
|
||||
func TestBuffer(t *testing.T) {
|
||||
var cb Buffer
|
||||
cb := Get()
|
||||
defer Put(cb)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
cb.Reset()
|
||||
|
||||
@@ -84,8 +86,8 @@ func TestBuffer(t *testing.T) {
|
||||
}
|
||||
|
||||
// Copy the data to another chunked buffer via WriteTo.
|
||||
var cb2 Buffer
|
||||
n, err = cb.WriteTo(&cb2)
|
||||
cb2 := Get()
|
||||
n, err = cb.WriteTo(cb2)
|
||||
if err != nil {
|
||||
t.Fatalf("error when writing data to another chunked buffer: %s", err)
|
||||
}
|
||||
@@ -111,6 +113,40 @@ func TestBuffer(t *testing.T) {
|
||||
|
||||
// Verify MustClose at chunked buffer
|
||||
cb2.MustClose()
|
||||
|
||||
Put(cb2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuffer_ReadFrom(t *testing.T) {
|
||||
cb := Get()
|
||||
defer Put(cb)
|
||||
|
||||
bb := bytes.NewBufferString("foo")
|
||||
n, err := cb.ReadFrom(bb)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != 3 {
|
||||
t.Fatalf("unexpected number of bytes written: %d; want 3", n)
|
||||
}
|
||||
|
||||
bb = bytes.NewBufferString("bar")
|
||||
n, err = cb.ReadFrom(bb)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != 3 {
|
||||
t.Fatalf("unexpected number of bytes written: %d; want 3", n)
|
||||
}
|
||||
|
||||
var bbResult bytes.Buffer
|
||||
cb.MustWriteTo(&bbResult)
|
||||
|
||||
result := bbResult.String()
|
||||
resultExpected := "foobar"
|
||||
if result != resultExpected {
|
||||
t.Fatalf("unexpected result; got %q; want %q", result, resultExpected)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -164,13 +200,7 @@ func TestBuffer_ReaderSingleChunk(t *testing.T) {
|
||||
func TestBuffer_WriteToZeroData(t *testing.T) {
|
||||
var cb Buffer
|
||||
var bb bytes.Buffer
|
||||
n, err := cb.WriteTo(&bb)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if n != 0 {
|
||||
t.Fatalf("unexpected data written from cb with len=%d", n)
|
||||
}
|
||||
cb.MustWriteTo(&bb)
|
||||
if bbLen := bb.Len(); bbLen != 0 {
|
||||
t.Fatalf("unexpected data written to bb with len=%d; data=%q", bbLen, bb.Bytes())
|
||||
}
|
||||
Reference in New Issue
Block a user