mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2026-05-17 08:36:55 +03:00
app/vmagent: allow disabling on-disk persistence (#5088)
* app/vmagent: allow disabling the on-disk queue. Previously, it wasn't possible to build a data processing pipeline with a chain of vmagents. In case the remoteWrite for the last vmagent in the chain wasn't accessible, it persisted data only while it had enough disk capacity. If the disk queue was full, it started to silently drop ingested metrics. The new flag allows disabling on-disk persistence and immediately returning an error if the remoteWrite is no longer accessible. It blocks any writes and notifies the client that data ingestion isn't possible. The main use case for this feature is using an external queue such as Kafka for data persistence. https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2110 * adds test, updates readme * apply review suggestions * update docs for vmagent * makes linter happy --------- Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
This commit is contained in:
@@ -22,6 +22,7 @@ type FastQueue struct {
|
||||
// or when MustClose is called.
|
||||
cond sync.Cond
|
||||
|
||||
isPQDisabled bool
|
||||
// pq is file-based queue
|
||||
pq *queue
|
||||
|
||||
@@ -42,11 +43,14 @@ type FastQueue struct {
|
||||
// if maxPendingBytes is 0, then the queue size is unlimited.
|
||||
// Otherwise its size is limited by maxPendingBytes. The oldest data is dropped when the queue
|
||||
// reaches maxPendingSize.
|
||||
func MustOpenFastQueue(path, name string, maxInmemoryBlocks int, maxPendingBytes int64) *FastQueue {
|
||||
// if isPQDisabled is set to true, all write requests that exceed in-memory buffer capacity'll be rejected with errQueueIsFull error
|
||||
// in-memory queue part can be stored on disk during gracefull shutdown.
|
||||
func MustOpenFastQueue(path, name string, maxInmemoryBlocks int, maxPendingBytes int64, isPQDisabled bool) *FastQueue {
|
||||
pq := mustOpen(path, name, maxPendingBytes)
|
||||
fq := &FastQueue{
|
||||
pq: pq,
|
||||
ch: make(chan *bytesutil.ByteBuffer, maxInmemoryBlocks),
|
||||
pq: pq,
|
||||
isPQDisabled: isPQDisabled,
|
||||
ch: make(chan *bytesutil.ByteBuffer, maxInmemoryBlocks),
|
||||
}
|
||||
fq.cond.L = &fq.mu
|
||||
fq.lastInmemoryBlockReadTime = fasttime.UnixTimestamp()
|
||||
@@ -61,6 +65,16 @@ func MustOpenFastQueue(path, name string, maxInmemoryBlocks int, maxPendingBytes
|
||||
return fq
|
||||
}
|
||||
|
||||
// IsWritesBlocked checks if data can be pushed into the queue
|
||||
func (fq *FastQueue) IsWritesBlocked() bool {
|
||||
if !fq.isPQDisabled {
|
||||
return false
|
||||
}
|
||||
fq.mu.Lock()
|
||||
defer fq.mu.Unlock()
|
||||
return len(fq.ch) == cap(fq.ch) || fq.pq.GetPendingBytes() > 0
|
||||
}
|
||||
|
||||
// UnblockAllReaders unblocks all the readers.
|
||||
func (fq *FastQueue) UnblockAllReaders() {
|
||||
fq.mu.Lock()
|
||||
@@ -92,7 +106,7 @@ func (fq *FastQueue) MustClose() {
|
||||
}
|
||||
|
||||
func (fq *FastQueue) flushInmemoryBlocksToFileIfNeededLocked() {
|
||||
if len(fq.ch) == 0 {
|
||||
if len(fq.ch) == 0 || fq.isPQDisabled {
|
||||
return
|
||||
}
|
||||
if fasttime.UnixTimestamp() < fq.lastInmemoryBlockReadTime+5 {
|
||||
@@ -118,6 +132,10 @@ func (fq *FastQueue) flushInmemoryBlocksToFileLocked() {
|
||||
func (fq *FastQueue) GetPendingBytes() uint64 {
|
||||
fq.mu.Lock()
|
||||
defer fq.mu.Unlock()
|
||||
return fq.getPendingBytesLocked()
|
||||
}
|
||||
|
||||
func (fq *FastQueue) getPendingBytesLocked() uint64 {
|
||||
|
||||
n := fq.pendingInmemoryBytes
|
||||
n += fq.pq.GetPendingBytes()
|
||||
@@ -132,26 +150,47 @@ func (fq *FastQueue) GetInmemoryQueueLen() int {
|
||||
return len(fq.ch)
|
||||
}
|
||||
|
||||
// MustWriteBlock writes block to fq.
|
||||
func (fq *FastQueue) MustWriteBlock(block []byte) {
|
||||
// MustWriteBlockIgnoreDisabledPQ writes block to fq, persists data on disk even if persistent disabled by flag.
|
||||
// it's needed to gracefully stop service and do not lose data if remote storage is not available.
|
||||
func (fq *FastQueue) MustWriteBlockIgnoreDisabledPQ(block []byte) {
|
||||
if !fq.writeBlock(block, true) {
|
||||
logger.Fatalf("BUG: MustWriteBlockIgnoreDisabledPQ must always write data even if persistence is disabled")
|
||||
}
|
||||
}
|
||||
|
||||
// WriteBlock writes block to fq.
|
||||
func (fq *FastQueue) WriteBlock(block []byte) bool {
|
||||
return fq.writeBlock(block, false)
|
||||
}
|
||||
|
||||
// WriteBlock writes block to fq.
|
||||
func (fq *FastQueue) writeBlock(block []byte, mustIgnoreDisabledPQ bool) bool {
|
||||
fq.mu.Lock()
|
||||
defer fq.mu.Unlock()
|
||||
|
||||
isPQWritesAllowed := !fq.isPQDisabled || mustIgnoreDisabledPQ
|
||||
|
||||
fq.flushInmemoryBlocksToFileIfNeededLocked()
|
||||
if n := fq.pq.GetPendingBytes(); n > 0 {
|
||||
if !isPQWritesAllowed {
|
||||
return false
|
||||
}
|
||||
// The file-based queue isn't drained yet. This means that in-memory queue cannot be used yet.
|
||||
// So put the block to file-based queue.
|
||||
if len(fq.ch) > 0 {
|
||||
logger.Panicf("BUG: the in-memory queue must be empty when the file-based queue is non-empty; it contains %d pending bytes", n)
|
||||
}
|
||||
fq.pq.MustWriteBlock(block)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if len(fq.ch) == cap(fq.ch) {
|
||||
// There is no space in the in-memory queue. Put the data to file-based queue.
|
||||
if !isPQWritesAllowed {
|
||||
return false
|
||||
}
|
||||
fq.flushInmemoryBlocksToFileLocked()
|
||||
fq.pq.MustWriteBlock(block)
|
||||
return
|
||||
return true
|
||||
}
|
||||
// There is enough space in the in-memory queue.
|
||||
bb := blockBufPool.Get()
|
||||
@@ -162,6 +201,7 @@ func (fq *FastQueue) MustWriteBlock(block []byte) {
|
||||
// Notify potentially blocked reader.
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/484 for the context.
|
||||
fq.cond.Signal()
|
||||
return true
|
||||
}
|
||||
|
||||
// MustReadBlock reads the next block from fq to dst and returns it.
|
||||
|
||||
@@ -11,7 +11,7 @@ func TestFastQueueOpenClose(_ *testing.T) {
|
||||
path := "fast-queue-open-close"
|
||||
mustDeleteDir(path)
|
||||
for i := 0; i < 10; i++ {
|
||||
fq := MustOpenFastQueue(path, "foobar", 100, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", 100, 0, true)
|
||||
fq.MustClose()
|
||||
}
|
||||
mustDeleteDir(path)
|
||||
@@ -22,14 +22,16 @@ func TestFastQueueWriteReadInmemory(t *testing.T) {
|
||||
mustDeleteDir(path)
|
||||
|
||||
capacity := 100
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0, true)
|
||||
if n := fq.GetInmemoryQueueLen(); n != 0 {
|
||||
t.Fatalf("unexpected non-zero inmemory queue size: %d", n)
|
||||
}
|
||||
var blocks []string
|
||||
for i := 0; i < capacity; i++ {
|
||||
block := fmt.Sprintf("block %d", i)
|
||||
fq.MustWriteBlock([]byte(block))
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if n := fq.GetInmemoryQueueLen(); n != capacity {
|
||||
@@ -53,14 +55,16 @@ func TestFastQueueWriteReadMixed(t *testing.T) {
|
||||
mustDeleteDir(path)
|
||||
|
||||
capacity := 100
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0, false)
|
||||
if n := fq.GetPendingBytes(); n != 0 {
|
||||
t.Fatalf("the number of pending bytes must be 0; got %d", n)
|
||||
}
|
||||
var blocks []string
|
||||
for i := 0; i < 2*capacity; i++ {
|
||||
block := fmt.Sprintf("block %d", i)
|
||||
fq.MustWriteBlock([]byte(block))
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("not expected WriteBlock fail")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if n := fq.GetPendingBytes(); n == 0 {
|
||||
@@ -87,17 +91,20 @@ func TestFastQueueWriteReadWithCloses(t *testing.T) {
|
||||
mustDeleteDir(path)
|
||||
|
||||
capacity := 100
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0, false)
|
||||
if n := fq.GetPendingBytes(); n != 0 {
|
||||
t.Fatalf("the number of pending bytes must be 0; got %d", n)
|
||||
}
|
||||
var blocks []string
|
||||
for i := 0; i < 2*capacity; i++ {
|
||||
block := fmt.Sprintf("block %d", i)
|
||||
fq.MustWriteBlock([]byte(block))
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
|
||||
blocks = append(blocks, block)
|
||||
fq.MustClose()
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0)
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0, false)
|
||||
}
|
||||
if n := fq.GetPendingBytes(); n == 0 {
|
||||
t.Fatalf("the number of pending bytes must be greater than 0")
|
||||
@@ -111,7 +118,7 @@ func TestFastQueueWriteReadWithCloses(t *testing.T) {
|
||||
t.Fatalf("unexpected block read; got %q; want %q", buf, block)
|
||||
}
|
||||
fq.MustClose()
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0)
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0, false)
|
||||
}
|
||||
if n := fq.GetPendingBytes(); n != 0 {
|
||||
t.Fatalf("the number of pending bytes must be 0; got %d", n)
|
||||
@@ -124,7 +131,7 @@ func TestFastQueueReadUnblockByClose(t *testing.T) {
|
||||
path := "fast-queue-read-unblock-by-close"
|
||||
mustDeleteDir(path)
|
||||
|
||||
fq := MustOpenFastQueue(path, "foorbar", 123, 0)
|
||||
fq := MustOpenFastQueue(path, "foorbar", 123, 0, false)
|
||||
resultCh := make(chan error)
|
||||
go func() {
|
||||
data, ok := fq.MustReadBlock(nil)
|
||||
@@ -154,7 +161,7 @@ func TestFastQueueReadUnblockByWrite(t *testing.T) {
|
||||
path := "fast-queue-read-unblock-by-write"
|
||||
mustDeleteDir(path)
|
||||
|
||||
fq := MustOpenFastQueue(path, "foobar", 13, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", 13, 0, false)
|
||||
block := "foodsafdsaf sdf"
|
||||
resultCh := make(chan error)
|
||||
go func() {
|
||||
@@ -169,7 +176,9 @@ func TestFastQueueReadUnblockByWrite(t *testing.T) {
|
||||
}
|
||||
resultCh <- nil
|
||||
}()
|
||||
fq.MustWriteBlock([]byte(block))
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
select {
|
||||
case err := <-resultCh:
|
||||
if err != nil {
|
||||
@@ -186,7 +195,7 @@ func TestFastQueueReadWriteConcurrent(t *testing.T) {
|
||||
path := "fast-queue-read-write-concurrent"
|
||||
mustDeleteDir(path)
|
||||
|
||||
fq := MustOpenFastQueue(path, "foobar", 5, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", 5, 0, false)
|
||||
|
||||
var blocks []string
|
||||
blocksMap := make(map[string]bool)
|
||||
@@ -226,7 +235,10 @@ func TestFastQueueReadWriteConcurrent(t *testing.T) {
|
||||
go func() {
|
||||
defer writersWG.Done()
|
||||
for block := range blocksCh {
|
||||
fq.MustWriteBlock([]byte(block))
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Errorf("unexpected false for WriteBlock")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -250,7 +262,7 @@ func TestFastQueueReadWriteConcurrent(t *testing.T) {
|
||||
readersWG.Wait()
|
||||
|
||||
// Collect the remaining data
|
||||
fq = MustOpenFastQueue(path, "foobar", 5, 0)
|
||||
fq = MustOpenFastQueue(path, "foobar", 5, 0, false)
|
||||
resultCh := make(chan error)
|
||||
go func() {
|
||||
for len(blocksMap) > 0 {
|
||||
@@ -278,3 +290,80 @@ func TestFastQueueReadWriteConcurrent(t *testing.T) {
|
||||
fq.MustClose()
|
||||
mustDeleteDir(path)
|
||||
}
|
||||
|
||||
func TestFastQueueWriteReadWithDisabledPQ(t *testing.T) {
|
||||
path := "fast-queue-write-read-inmemory-disabled-pq"
|
||||
mustDeleteDir(path)
|
||||
|
||||
capacity := 20
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0, true)
|
||||
if n := fq.GetInmemoryQueueLen(); n != 0 {
|
||||
t.Fatalf("unexpected non-zero inmemory queue size: %d", n)
|
||||
}
|
||||
var blocks []string
|
||||
for i := 0; i < capacity; i++ {
|
||||
block := fmt.Sprintf("block %d", i)
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if fq.WriteBlock([]byte("error-block")) {
|
||||
t.Fatalf("expect false due to full queue")
|
||||
}
|
||||
|
||||
fq.MustClose()
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0, true)
|
||||
for _, block := range blocks {
|
||||
buf, ok := fq.MustReadBlock(nil)
|
||||
if !ok {
|
||||
t.Fatalf("unexpected ok=false")
|
||||
}
|
||||
if string(buf) != block {
|
||||
t.Fatalf("unexpected block read; got %q; want %q", buf, block)
|
||||
}
|
||||
}
|
||||
fq.MustClose()
|
||||
mustDeleteDir(path)
|
||||
}
|
||||
|
||||
func TestFastQueueWriteReadWithIgnoreDisabledPQ(t *testing.T) {
|
||||
path := "fast-queue-write-read-inmemory-disabled-pq-force-write"
|
||||
mustDeleteDir(path)
|
||||
|
||||
capacity := 20
|
||||
fq := MustOpenFastQueue(path, "foobar", capacity, 0, true)
|
||||
if n := fq.GetInmemoryQueueLen(); n != 0 {
|
||||
t.Fatalf("unexpected non-zero inmemory queue size: %d", n)
|
||||
}
|
||||
var blocks []string
|
||||
for i := 0; i < capacity; i++ {
|
||||
block := fmt.Sprintf("block %d", i)
|
||||
if !fq.WriteBlock([]byte(block)) {
|
||||
t.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
if fq.WriteBlock([]byte("error-block")) {
|
||||
t.Fatalf("expect false due to full queue")
|
||||
}
|
||||
for i := 0; i < capacity; i++ {
|
||||
block := fmt.Sprintf("block %d-%d", i, i)
|
||||
fq.MustWriteBlockIgnoreDisabledPQ([]byte(block))
|
||||
blocks = append(blocks, block)
|
||||
}
|
||||
|
||||
fq.MustClose()
|
||||
fq = MustOpenFastQueue(path, "foobar", capacity, 0, true)
|
||||
for _, block := range blocks {
|
||||
buf, ok := fq.MustReadBlock(nil)
|
||||
if !ok {
|
||||
t.Fatalf("unexpected ok=false")
|
||||
}
|
||||
if string(buf) != block {
|
||||
t.Fatalf("unexpected block read; got %q; want %q", buf, block)
|
||||
}
|
||||
}
|
||||
fq.MustClose()
|
||||
mustDeleteDir(path)
|
||||
}
|
||||
|
||||
@@ -16,13 +16,13 @@ func BenchmarkFastQueueThroughputSerial(b *testing.B) {
|
||||
b.SetBytes(int64(blockSize) * iterationsCount)
|
||||
path := fmt.Sprintf("bench-fast-queue-throughput-serial-%d", blockSize)
|
||||
mustDeleteDir(path)
|
||||
fq := MustOpenFastQueue(path, "foobar", iterationsCount*2, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", iterationsCount*2, 0, false)
|
||||
defer func() {
|
||||
fq.MustClose()
|
||||
mustDeleteDir(path)
|
||||
}()
|
||||
for i := 0; i < b.N; i++ {
|
||||
writeReadIterationFastQueue(fq, block, iterationsCount)
|
||||
writeReadIterationFastQueue(b, fq, block, iterationsCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -37,23 +37,25 @@ func BenchmarkFastQueueThroughputConcurrent(b *testing.B) {
|
||||
b.SetBytes(int64(blockSize) * iterationsCount)
|
||||
path := fmt.Sprintf("bench-fast-queue-throughput-concurrent-%d", blockSize)
|
||||
mustDeleteDir(path)
|
||||
fq := MustOpenFastQueue(path, "foobar", iterationsCount*cgroup.AvailableCPUs()*2, 0)
|
||||
fq := MustOpenFastQueue(path, "foobar", iterationsCount*cgroup.AvailableCPUs()*2, 0, false)
|
||||
defer func() {
|
||||
fq.MustClose()
|
||||
mustDeleteDir(path)
|
||||
}()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
writeReadIterationFastQueue(fq, block, iterationsCount)
|
||||
writeReadIterationFastQueue(b, fq, block, iterationsCount)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func writeReadIterationFastQueue(fq *FastQueue, block []byte, iterationsCount int) {
|
||||
func writeReadIterationFastQueue(b *testing.B, fq *FastQueue, block []byte, iterationsCount int) {
|
||||
for i := 0; i < iterationsCount; i++ {
|
||||
fq.MustWriteBlock(block)
|
||||
if !fq.WriteBlock(block) {
|
||||
b.Fatalf("unexpected false for WriteBlock")
|
||||
}
|
||||
}
|
||||
var ok bool
|
||||
bb := bbPool.Get()
|
||||
|
||||
Reference in New Issue
Block a user