Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2026-05-17 16:59:40 +03:00)

Compare commits: 1 commit, 9074...debug/erro

| Author | SHA1 | Date |
|---|---|---|
|  | 005068ea5a |  |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 8 changes

@@ -5,10 +5,10 @@ body:
   - type: markdown
     attributes:
       value: |
-        Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade)
+        Before filling a bug report it would be great to [upgrade](https://docs.victoriametrics.com/#how-to-upgrade)
         to [the latest available release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
         and verify whether the bug is reproducible there.
-        It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/victoriametrics/troubleshooting/) first.
+        It's also recommended to read the [troubleshooting docs](https://docs.victoriametrics.com/troubleshooting/) first.
   - type: textarea
     id: describe-the-bug
     attributes:
@@ -64,8 +64,8 @@ body:
         * [Grafana dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176)

         See how to setup monitoring here:
-        * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#monitoring)
-        * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/#monitoring)
+        * [monitoring for single-node VictoriaMetrics](https://docs.victoriametrics.com/#monitoring)
+        * [monitoring for VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/#monitoring)
     validations:
       required: false
   - type: textarea
.github/ISSUE_TEMPLATE/question.yml (vendored): 6 changes

@@ -24,9 +24,9 @@ body:
       label: Troubleshooting docs
       description: I am familiar with the following troubleshooting docs
       options:
-        - label: General - https://docs.victoriametrics.com/victoriametrics/troubleshooting/
+        - label: General - https://docs.victoriametrics.com/troubleshooting/
          required: false
-        - label: vmagent - https://docs.victoriametrics.com/victoriametrics/vmagent/#troubleshooting
+        - label: vmagent - https://docs.victoriametrics.com/vmagent/#troubleshooting
          required: false
-        - label: vmalert - https://docs.victoriametrics.com/victoriametrics/vmalert/#troubleshooting
+        - label: vmalert - https://docs.victoriametrics.com/vmalert/#troubleshooting
          required: false
.github/pull_request_template.md (vendored): 2 changes

@@ -6,4 +6,4 @@ Please provide a brief description of the changes you made. Be as specific as po

 The following checks are **mandatory**:

-- [ ] My change adheres to [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/victoriametrics/contributing/#pull-request-checklist).
+- [ ] My change adheres [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
.github/workflows/docs.yaml (vendored): 57 changes

@@ -1,57 +0,0 @@
name: publish-docs
on:
  push:
    branches:
      - 'master'
    paths:
      - 'docs/**'
      - '.github/workflows/docs.yaml'
  workflow_dispatch: {}
permissions:
  contents: read # This is required for actions/checkout and to commit back image update
  deployments: write
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Code checkout
        uses: actions/checkout@v4
        with:
          path: __vm

      - name: Checkout private code
        uses: actions/checkout@v4
        with:
          repository: VictoriaMetrics/vmdocs
          token: ${{ secrets.VM_BOT_GH_TOKEN }}
          path: __vm-docs

      - name: Import GPG key
        uses: crazy-max/ghaction-import-gpg@v6
        id: import-gpg
        with:
          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
          git_user_signingkey: true
          git_commit_gpgsign: true
          git_config_global: true

      - name: Copy docs
        id: update
        run: |
          find docs -type d -maxdepth 1 -mindepth 1 -exec \
            sh -c 'rsync -zarvh --delete {}/ ../__vm-docs/content/$(basename {})/' \;
          echo "SHORT_SHA=$(git rev-parse --short $GITHUB_SHA)" >> $GITHUB_OUTPUT
        working-directory: __vm

      - name: Push to vmdocs
        run: |
          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
          if [[ -n $(git status --porcelain) ]]; then
            git add .
            git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.update.outputs.SHORT_SHA }}"
            git push
          fi
        working-directory: __vm-docs
.github/workflows/main.yml (vendored): 4 changes

@@ -85,12 +85,12 @@ jobs:
          restore-keys: go-artifacts-${{ runner.os }}-${{ matrix.scenario }}-

      - name: Run tests
-        run: GOGC=10 make ${{ matrix.scenario}}
+        run: make ${{ matrix.scenario}}

      - name: Publish coverage
        uses: codecov/codecov-action@v5
        with:
-          files: ./coverage.txt
+          file: ./coverage.txt

  integration-test:
    name: integration-test
.github/workflows/sync-docs.yml (vendored, new file): 51 changes

@@ -0,0 +1,51 @@
name: publish-docs
on:
  push:
    branches:
      - 'master'
    paths:
      - 'docs/**'
  workflow_dispatch: {}
permissions:
  contents: read # This is required for actions/checkout and to commit back image update
  deployments: write
jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - name: Code checkout
        uses: actions/checkout@v4
        with:
          path: main
      - name: Checkout private code
        uses: actions/checkout@v4
        with:
          repository: VictoriaMetrics/vmdocs
          token: ${{ secrets.VM_BOT_GH_TOKEN }}
          path: docs
      - name: Import GPG key
        uses: crazy-max/ghaction-import-gpg@v6
        with:
          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
          git_user_signingkey: true
          git_commit_gpgsign: true
          workdir: docs
      - name: Set short git commit SHA
        id: vars
        run: |
          calculatedSha=$(git rev-parse --short ${{ github.sha }})
          echo "short_sha=$calculatedSha" >> $GITHUB_OUTPUT
        working-directory: main
      - name: update code and commit
        run: |
          rm -rf content
          cp -r ../main/docs content
          make clean-after-copy
          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
          git add .
          git commit -S -m "sync docs with VictoriaMetrics/VictoriaMetrics commit: ${{ steps.vars.outputs.short_sha }}"
          git push
        working-directory: docs
.gitignore (vendored): 1 change

@@ -27,4 +27,3 @@ _site
 coverage.txt
 cspell.json
 *~
-deployment/docker/provisioning/plugins/

@@ -1 +1 @@
-The document has been moved [here](https://docs.victoriametrics.com/victoriametrics/contributing/).
+The document has been moved [here](https://docs.victoriametrics.com/contributing/).
LICENSE: 2 changes

@@ -175,7 +175,7 @@

   END OF TERMS AND CONDITIONS

-   Copyright 2019-2025 VictoriaMetrics, Inc.
+   Copyright 2019-2024 VictoriaMetrics, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
Makefile: 54 changes

@@ -5,6 +5,7 @@ MAKE_PARALLEL := $(MAKE) -j $(MAKE_CONCURRENCY)
 DATEINFO_TAG ?= $(shell date -u +'%Y%m%d-%H%M%S')
 BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
 	git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | openssl sha1 | cut -d' ' -f2 | cut -c 1-8)))
+LATEST_TAG ?= latest

 PKG_TAG ?= $(shell git tag -l --points-at HEAD)
 ifeq ($(PKG_TAG),)
@@ -17,7 +18,7 @@ TAR_OWNERSHIP ?= --owner=1000 --group=1000
 .PHONY: $(MAKECMDGOALS)

 include app/*/Makefile
-include codespell/Makefile
+include cspell/Makefile
 include docs/Makefile
 include deployment/*/Makefile
 include dashboards/Makefile
@@ -195,31 +196,12 @@ vmutils-crossbuild: \
 	vmutils-openbsd-amd64 \
 	vmutils-windows-amd64

-publish-latest:
-	PKG_TAG=$(TAG) APP_NAME=victoria-metrics $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmagent $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmalert-tool $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmauth $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmbackup $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmrestore $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG) APP_NAME=vmctl $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vminsert $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmselect $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-cluster APP_NAME=vmstorage $(MAKE) publish-via-docker-latest && \
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmgateway $(MAKE) publish-via-docker-latest
-	PKG_TAG=$(TAG)-enterprise APP_NAME=vmbackupmanager $(MAKE) publish-via-docker-latest
-
-publish-victoria-logs-latest:
-	PKG_TAG=$(TAG) APP_NAME=victoria-logs $(MAKE) publish-via-docker-latest
-	PKG_TAG=$(TAG) APP_NAME=vlogscli $(MAKE) publish-via-docker-latest
-
 publish-release:
 	rm -rf bin/*
-	git checkout $(TAG) && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-cluster && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-enterprise && $(MAKE) release && $(MAKE) publish && \
-	git checkout $(TAG)-enterprise-cluster && $(MAKE) release && $(MAKE) publish
+	git checkout $(TAG) && $(MAKE) release && LATEST_TAG=stable $(MAKE) publish && \
+	git checkout $(TAG)-cluster && $(MAKE) release && LATEST_TAG=cluster-stable $(MAKE) publish && \
+	git checkout $(TAG)-enterprise && $(MAKE) release && LATEST_TAG=enterprise-stable $(MAKE) publish && \
+	git checkout $(TAG)-enterprise-cluster && $(MAKE) release && LATEST_TAG=enterprise-cluster-stable $(MAKE) publish

 release:
 	$(MAKE_PARALLEL) \
@@ -522,7 +504,7 @@ fmt:
 	gofmt -l -w -s ./apptest

 vet:
-	GOEXPERIMENT=synctest go vet ./lib/...
+	go vet ./lib/...
 	go vet ./app/...
 	go vet ./apptest/...

@@ -531,35 +513,35 @@ check-all: fmt vet golangci-lint govulncheck
 clean-checkers: remove-golangci-lint remove-govulncheck

 test:
-	GOEXPERIMENT=synctest go test ./lib/... ./app/...
+	DISABLE_FSYNC_FOR_TESTING=1 go test ./lib/... ./app/...

 test-race:
-	GOEXPERIMENT=synctest go test -race ./lib/... ./app/...
+	DISABLE_FSYNC_FOR_TESTING=1 go test -race ./lib/... ./app/...

 test-pure:
-	GOEXPERIMENT=synctest CGO_ENABLED=0 go test ./lib/... ./app/...
+	DISABLE_FSYNC_FOR_TESTING=1 CGO_ENABLED=0 go test ./lib/... ./app/...

 test-full:
-	GOEXPERIMENT=synctest go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	DISABLE_FSYNC_FOR_TESTING=1 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

 test-full-386:
-	GOEXPERIMENT=synctest GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...
+	DISABLE_FSYNC_FOR_TESTING=1 GOARCH=386 go test -coverprofile=coverage.txt -covermode=atomic ./lib/... ./app/...

-integration-test: victoria-metrics vmagent vmalert vmauth vmctl
+integration-test: victoria-metrics vmagent vmalert vmauth
 	go test ./apptest/... -skip="^TestCluster.*"

 benchmark:
-	GOEXPERIMENT=synctest go test -bench=. ./lib/...
+	go test -bench=. ./lib/...
 	go test -bench=. ./app/...

 benchmark-pure:
-	GOEXPERIMENT=synctest CGO_ENABLED=0 go test -bench=. ./lib/...
+	CGO_ENABLED=0 go test -bench=. ./lib/...
 	CGO_ENABLED=0 go test -bench=. ./app/...

 vendor-update:
 	go get -u ./lib/...
 	go get -u ./app/...
-	go mod tidy -compat=1.24
+	go mod tidy -compat=1.23
 	go mod vendor

 app-local:
@@ -582,10 +564,10 @@ install-qtc:


 golangci-lint: install-golangci-lint
-	GOEXPERIMENT=synctest golangci-lint run
+	golangci-lint run

 install-golangci-lint:
-	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.64.7
+	which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.60.3

 remove-golangci-lint:
 	rm -rf `which golangci-lint`
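For context on the test-target churn above: the `GOEXPERIMENT=synctest` prefix on the newer side of the diff enables Go 1.24's experimental `testing/synctest` package, while the older side's `DISABLE_FSYNC_FOR_TESTING=1` appears to be a VictoriaMetrics-specific environment switch that skips fsync calls to speed up tests. A minimal, illustrative use of the experiment (not code from this repository):

```go
//go:build goexperiment.synctest

package example_test

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestFakeClock(t *testing.T) {
	// synctest.Run executes the function in a "bubble" with a fake clock:
	// time.Sleep returns as soon as every goroutine in the bubble is idle,
	// so time-dependent tests run instantly and deterministically.
	synctest.Run(func() {
		start := time.Now()
		time.Sleep(5 * time.Second) // no real 5-second wait
		if time.Since(start) != 5*time.Second {
			t.Fatalf("unexpected elapsed time: %s", time.Since(start))
		}
	})
}
```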
README.md: 62 changes

@@ -1,19 +1,17 @@
 # VictoriaMetrics

-
-
-
-
-
-
-
-
-
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
+[](https://hub.docker.com/r/victoriametrics/victoria-metrics)
+[](https://slack.victoriametrics.com/)
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
+[](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)
+[](https://github.com/VictoriaMetrics/VictoriaMetrics/actions)
+[](https://codecov.io/gh/VictoriaMetrics/VictoriaMetrics)

 <picture>
-  <source srcset="docs/victoriametrics/logo_white.webp" media="(prefers-color-scheme: dark)">
-  <source srcset="docs/victoriametrics/logo.webp" media="(prefers-color-scheme: light)">
-  <img src="docs/victoriametrics/logo.webp" width="300" alt="VictoriaMetrics logo">
+  <source srcset="docs/logo_white.webp" media="(prefers-color-scheme: dark)">
+  <source srcset="docs/logo.webp" media="(prefers-color-scheme: light)">
+  <img src="docs/logo.webp" width="300" alt="VictoriaMetrics logo">
 </picture>

 VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and managing time series data. It delivers high performance and reliability, making it an ideal choice for businesses of all sizes.
@@ -21,10 +19,10 @@ VictoriaMetrics is a fast, cost-saving, and scalable solution for monitoring and
 Here are some resources and information about VictoriaMetrics:

 - Documentation: [docs.victoriametrics.com](https://docs.victoriametrics.com)
-- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/victoriametrics/casestudies/).
-- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), docker images [Docker Hub](https://hub.docker.com/r/victoriametrics/victoria-metrics/) and [Quay](https://quay.io/repository/victoriametrics/victoria-metrics), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
-- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/victoriametrics/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/victoriametrics/enterprise/)
-- Changelog: [CHANGELOG](https://docs.victoriametrics.com/victoriametrics/changelog/), and [How to upgrade](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-upgrade-victoriametrics)
+- Case studies: [Grammarly, Roblox, Wix,...](https://docs.victoriametrics.com/casestudies/).
+- Available: [Binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest), [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Source code](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- Deployment types: [Single-node version](https://docs.victoriametrics.com/), [Cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/), and [Enterprise version](https://docs.victoriametrics.com/enterprise/)
+- Changelog: [CHANGELOG](https://docs.victoriametrics.com/changelog/), and [How to upgrade](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics)
 - Community: [Slack](https://slack.victoriametrics.com/), [X (Twitter)](https://x.com/VictoriaMetrics), [LinkedIn](https://www.linkedin.com/company/victoriametrics/), [YouTube](https://www.youtube.com/@VictoriaMetrics)

 Yes, we open-source both the single-node VictoriaMetrics and the cluster version.
@@ -35,22 +33,22 @@ VictoriaMetrics is optimized for timeseries data, even when old time series are

 * **Long-term storage for Prometheus** or as a drop-in replacement for Prometheus and Graphite in Grafana.
 * **Powerful stream aggregation**: Can be used as a StatsD alternative.
-* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/victoriametrics/enterprise/).
+* **Ideal for big data**: Works well with large amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://docs.victoriametrics.com/enterprise/).
 * **Query language**: Supports both PromQL and the more performant MetricsQL.
 * **Easy to setup**: No dependencies, single [small binary](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d), configuration through command-line flags, but the default is also fine-tuned; backup and restore with [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
 * **Global query view**: Multiple Prometheus instances or any other data sources may ingest data into VictoriaMetrics and queried via a single query.
 * **Various Protocols**: Support metric scraping, ingestion and backfilling in various protocol.
-  * [Prometheus exporters](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/victoriametrics/integrations/prometheus/), [Prometheus exposition format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-prometheus-exposition-format).
-  * [InfluxDB line protocol](https://docs.victoriametrics.com/victoriametrics/integrations/influxdb/) over HTTP, TCP and UDP.
-  * [Graphite plaintext protocol](https://docs.victoriametrics.com/victoriametrics/integrations/graphite/#ingesting) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
-  * [OpenTSDB put message](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-telnet).
-  * [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/victoriametrics/integrations/opentsdb/#sending-data-via-http).
-  * [JSON line format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-json-line-format).
-  * [Arbitrary CSV data](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-csv-data).
-  * [Native binary format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#how-to-import-data-in-native-format).
-  * [DataDog agent or DogStatsD](https://docs.victoriametrics.com/victoriametrics/integrations/datadog/).
-  * [NewRelic infrastructure agent](https://docs.victoriametrics.com/victoriametrics/integrations/newrelic/#sending-data-from-agent).
-  * [OpenTelemetry metrics format](https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#sending-data-via-opentelemetry).
+  * [Prometheus exporters](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter), [Prometheus remote write API](https://docs.victoriametrics.com/#prometheus-setup), [Prometheus exposition format](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format).
+  * [InfluxDB line protocol](https://docs.victoriametrics.com/#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) over HTTP, TCP and UDP.
+  * [Graphite plaintext protocol](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon).
+  * [OpenTSDB put message](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol).
+  * [HTTP OpenTSDB /api/put requests](https://docs.victoriametrics.com/#sending-opentsdb-data-via-http-apiput-requests).
+  * [JSON line format](https://docs.victoriametrics.com/#how-to-import-data-in-json-line-format).
+  * [Arbitrary CSV data](https://docs.victoriametrics.com/#how-to-import-csv-data).
+  * [Native binary format](https://docs.victoriametrics.com/#how-to-import-data-in-native-format).
+  * [DataDog agent or DogStatsD](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent).
+  * [NewRelic infrastructure agent](https://docs.victoriametrics.com/#how-to-send-data-from-newrelic-agent).
+  * [OpenTelemetry metrics format](https://docs.victoriametrics.com/#sending-data-via-opentelemetry).
 * **NFS-based storages**: Supports storing data on NFS-based storages such as Amazon EFS, Google Filestore.
 * And many other features such as metrics relabeling, cardinality limiter, etc.

@@ -62,9 +60,9 @@ In addition, the Enterprise version includes extra features:
 - **Backup automation**: Automates regular backup procedures.
 - **Multiple retentions**: Reducing storage costs by specifying different retentions for different datasets.
 - **Downsampling**: Reducing storage costs and increasing performance for queries over historical data.
-- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/victoriametrics/lts-releases/)).
+- **Stable releases** with long-term support lines ([LTS](https://docs.victoriametrics.com/lts-releases/)).
 - **Comprehensive support**: First-class consulting, feature requests and technical support provided by the core VictoriaMetrics dev team.
-- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/victoriametrics/enterprise/).
+- Many other features, which you can read about on [the Enterprise page](https://docs.victoriametrics.com/enterprise/).

 [Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics. Or you can request a free trial license [here](https://victoriametrics.com/products/enterprise/trial/), downloaded Enterprise binaries are available at [Github Releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).

@@ -77,7 +75,7 @@ Some good benchmarks VictoriaMetrics achieved:
 * **Minimal memory footprint**: handling millions of unique timeseries with [10x less RAM](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) than InfluxDB, up to [7x less RAM](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) than Prometheus, Thanos or Cortex.
 * **Highly scalable and performance** for [data ingestion](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and [querying](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4), [20x outperforms](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) InfluxDB and TimescaleDB.
 * **High data compression**: [70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) may be stored into limited storage than TimescaleDB, [7x less storage](https://valyala.medium.com/prometheus-vs-victoriametrics-benchmark-on-node-exporter-metrics-4ca29c75590f) space is required than Prometheus, Thanos or Cortex.
-* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/victoriametrics/casestudies/#grammarly) than Graphite according to the Grammarly case study.
+* **Reducing storage costs**: [10x more effective](https://docs.victoriametrics.com/casestudies/#grammarly) than Graphite according to the Grammarly case study.
 * **A single-node VictoriaMetrics** can replace medium-sized clusters built with competing solutions such as Thanos, M3DB, Cortex, InfluxDB or TimescaleDB. See [VictoriaMetrics vs Thanos](https://medium.com/@valyala/comparing-thanos-to-victoriametrics-cluster-b193bea1683), [Measuring vertical scalability](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae), [Remote write storage wars - PromCon 2019](https://promcon.io/2019-munich/talks/remote-write-storage-wars/).
 * **Optimized for storage**: [Works well with high-latency IO](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) and low IOPS (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc.).

@@ -93,7 +91,7 @@ Feel free asking any questions regarding VictoriaMetrics:
 * [Telegram-ru](https://t.me/VictoriaMetrics_ru1)
 * [Mastodon](https://mastodon.social/@victoriametrics/)

-If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/victoriametrics/contributing/).
+If you like VictoriaMetrics and want to contribute, then please [read these docs](https://docs.victoriametrics.com/contributing/).

 ## VictoriaMetrics Logo

@@ -6,9 +6,9 @@ The following versions of VictoriaMetrics receive regular security fixes:

 | Version | Supported |
 |---------|--------------------|
-| [latest release](https://docs.victoriametrics.com/victoriametrics/changelog/) | :white_check_mark: |
-| v1.102.x [LTS line](https://docs.victoriametrics.com/victoriametrics/lts-releases/) | :white_check_mark: |
-| v1.110.x [LTS line](https://docs.victoriametrics.com/victoriametrics/lts-releases/) | :white_check_mark: |
+| [latest release](https://docs.victoriametrics.com/changelog/) | :white_check_mark: |
+| v1.102.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
+| v1.97.x [LTS line](https://docs.victoriametrics.com/lts-releases/) | :white_check_mark: |
 | other releases | :x: |

 See [this page](https://victoriametrics.com/security/) for more details.
@@ -46,9 +46,7 @@ func main() {
 	vlselect.Init()
 	vlinsert.Init()

-	go httpserver.Serve(listenAddrs, requestHandler, httpserver.ServeOptions{
-		UseProxyProtocol: useProxyProtocol,
-	})
+	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
 	logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/victorialogs/", time.Since(startTime).Seconds())

 	pushmetrics.Init()
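The `httpserver.Serve` rewrite above recurs throughout this diff: the newer side collects options into an `httpserver.ServeOptions` struct, while the older side passes `useProxyProtocol` positionally. A standalone sketch of the two call shapes, with stub types and empty bodies that merely mirror the call sites visible in the diff (purely illustrative, not the library's actual API):

```go
package main

import "net/http"

// ServeOptions mirrors the options struct visible on the newer side of
// the diff; only the one field shown there is reproduced here.
type ServeOptions struct {
	UseProxyProtocol bool
}

// Newer shape: options travel in a struct, so adding a setting later
// does not break existing callers.
func serveNew(addrs []string, h http.HandlerFunc, opts ServeOptions) {}

// Older shape: each option is a positional argument, so every new
// option changes the signature at every call site.
func serveOld(addrs []string, useProxyProtocol bool, h http.HandlerFunc) {}

func main() {
	handler := func(w http.ResponseWriter, r *http.Request) {}
	addrs := []string{":8428"}

	serveNew(addrs, handler, ServeOptions{UseProxyProtocol: true})
	serveOld(addrs, true, handler)
}
```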
@@ -32,7 +32,7 @@ var (
 		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
 		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
 	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
-		"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
+		"equal to -dedup.minScrapeInterval > 0. See also -streamAggr.dedupInterval and https://docs.victoriametrics.com/#deduplication")
 	dryRun = flag.Bool("dryRun", false, "Whether to check config files without running VictoriaMetrics. The following config files are checked: "+
 		"-promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. "+
 		"This can be changed with -promscrape.config.strictParse=false command-line flag")
@@ -42,10 +42,6 @@ var (
 		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
 	maxIngestionRate = flag.Int("maxIngestionRate", 0, "The maximum number of samples vmsingle can receive per second. Data ingestion is paused when the limit is exceeded. "+
 		"By default there are no limits on samples ingestion rate.")
-	finalDedupScheduleInterval = flag.Duration("storage.finalDedupScheduleCheckInterval", time.Hour, "The interval for checking when final deduplication process should be started."+
-		"Storage unconditionally adds 25% jitter to the interval value on each check evaluation."+
-		" Changing the interval to the bigger values may delay downsampling, deduplication for historical data."+
-		" See also https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#deduplication")
 )

 func main() {
@@ -90,10 +86,6 @@ func main() {
 	startTime := time.Now()
 	storage.SetDedupInterval(*minScrapeInterval)
 	storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
-	if *finalDedupScheduleInterval < time.Hour {
-		logger.Fatalf("-dedup.finalDedupScheduleCheckInterval cannot be smaller than 1 hour; got %s", *finalDedupScheduleInterval)
-	}
-	storage.SetFinalDedupScheduleInterval(*finalDedupScheduleInterval)
 	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
 	vmselect.Init()
 	vminsertcommon.StartIngestionRateLimiter(*maxIngestionRate)
@@ -101,9 +93,7 @@ func main() {

 	startSelfScraper()

-	go httpserver.Serve(listenAddrs, requestHandler, httpserver.ServeOptions{
-		UseProxyProtocol: useProxyProtocol,
-	})
+	go httpserver.Serve(listenAddrs, useProxyProtocol, requestHandler)
 	logger.Infof("started VictoriaMetrics in %.3f seconds", time.Since(startTime).Seconds())

 	pushmetrics.Init()
app/victoria-metrics/main_test.go (new file): 619 changes

@@ -0,0 +1,619 @@
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

const (
	testFixturesDir            = "testdata"
	testStorageSuffix          = "vm-test-storage"
	testHTTPListenAddr         = ":7654"
	testStatsDListenAddr       = ":2003"
	testOpenTSDBListenAddr     = ":4242"
	testOpenTSDBHTTPListenAddr = ":4243"
	testLogLevel               = "INFO"
)

const (
	testReadHTTPPath           = "http://127.0.0.1" + testHTTPListenAddr
	testWriteHTTPPath          = "http://127.0.0.1" + testHTTPListenAddr + "/write"
	testOpenTSDBWriteHTTPPath  = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
	testPromWriteHTTPPath      = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
	testImportCSVWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/import/csv"

	testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
)

const (
	testStorageInitTimeout = 10 * time.Second
)

var (
	storagePath   string
	insertionTime = time.Now().UTC()
)

type test struct {
	Name                     string   `json:"name"`
	Data                     []string `json:"data"`
	InsertQuery              string   `json:"insert_query"`
	Query                    []string `json:"query"`
	ResultMetrics            []Metric `json:"result_metrics"`
	ResultSeries             Series   `json:"result_series"`
	ResultQuery              Query    `json:"result_query"`
	Issue                    string   `json:"issue"`
	ExpectedResultLinesCount int      `json:"expected_result_lines_count"`
}

type Metric struct {
	Metric     map[string]string `json:"metric"`
	Values     []float64         `json:"values"`
	Timestamps []int64           `json:"timestamps"`
}

func (r *Metric) UnmarshalJSON(b []byte) error {
	type plain Metric
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(r))
}

type Series struct {
	Status string              `json:"status"`
	Data   []map[string]string `json:"data"`
}

type Query struct {
	Status string `json:"status"`
	Data   struct {
		ResultType string          `json:"resultType"`
		Result     json.RawMessage `json:"result"`
	} `json:"data"`
}

const rtVector, rtMatrix = "vector", "matrix"

func (q *Query) metrics() ([]Metric, error) {
	switch q.Data.ResultType {
	case rtVector:
		var r QueryInstant
		if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
			return nil, err
		}
		return r.metrics()
	case rtMatrix:
		var r QueryRange
		if err := json.Unmarshal(q.Data.Result, &r.Result); err != nil {
			return nil, err
		}
		return r.metrics()
	default:
		return nil, fmt.Errorf("unknown result type %q", q.Data.ResultType)
	}
}

type QueryInstant struct {
	Result []struct {
		Labels map[string]string `json:"metric"`
		TV     [2]any            `json:"value"`
	} `json:"result"`
}

func (q QueryInstant) metrics() ([]Metric, error) {
	result := make([]Metric, len(q.Result))
	for i, res := range q.Result {
		f, err := strconv.ParseFloat(res.TV[1].(string), 64)
		if err != nil {
			return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err)
		}
		var m Metric
		m.Metric = res.Labels
		m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64)))
		m.Values = append(m.Values, f)
		result[i] = m
	}
	return result, nil
}

type QueryRange struct {
	Result []struct {
		Metric map[string]string `json:"metric"`
		Values [][]any           `json:"values"`
	} `json:"result"`
}

func (q QueryRange) metrics() ([]Metric, error) {
	var result []Metric
	for i, res := range q.Result {
		var m Metric
		for _, tv := range res.Values {
			f, err := strconv.ParseFloat(tv[1].(string), 64)
			if err != nil {
				return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, tv[1], err)
			}
			m.Values = append(m.Values, f)
			m.Timestamps = append(m.Timestamps, int64(tv[0].(float64)))
		}
		if len(m.Values) < 1 || len(m.Timestamps) < 1 {
			return nil, fmt.Errorf("metric %v contains no values", res)
		}
		m.Metric = q.Result[i].Metric
		result = append(result, m)
	}
	return result, nil
}

func (q *Query) UnmarshalJSON(b []byte) error {
	type plain Query
	return json.Unmarshal(testutil.PopulateTimeTpl(b, insertionTime), (*plain)(q))
}
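The `Query`/`QueryInstant`/`QueryRange` trio above exists because `/api/v1/query` returns a "vector" (one `[timestamp, value]` pair per series) while `/api/v1/query_range` returns a "matrix" (a list of pairs per series). A standalone sketch of the two payload shapes, with hand-written sample JSON (illustrative values, not captured from a real server):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payloads := map[string][]byte{
		// instant query: resultType "vector", a single value per series
		"vector": []byte(`{"status":"success","data":{"resultType":"vector",
			"result":[{"metric":{"__name__":"up"},"value":[1700000000,"1"]}]}}`),
		// range query: resultType "matrix", a list of values per series
		"matrix": []byte(`{"status":"success","data":{"resultType":"matrix",
			"result":[{"metric":{"__name__":"up"},"values":[[1700000000,"1"],[1700000060,"1"]]}]}}`),
	}
	for name, p := range payloads {
		var q struct {
			Data struct {
				ResultType string          `json:"resultType"`
				Result     json.RawMessage `json:"result"`
			} `json:"data"`
		}
		if err := json.Unmarshal(p, &q); err != nil {
			panic(err)
		}
		// Query.metrics() above switches on exactly this field to pick a decoder.
		fmt.Printf("%s -> resultType=%q\n", name, q.Data.ResultType)
	}
}
```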
func TestMain(m *testing.M) {
	setUp()
	code := m.Run()
	tearDown()
	os.Exit(code)
}

func setUp() {
	storagePath = filepath.Join(os.TempDir(), testStorageSuffix)
	processFlags()
	logger.Init()
	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
	vmselect.Init()
	vminsert.Init()
	go httpserver.Serve(*httpListenAddrs, useProxyProtocol, requestHandler)
	readyStorageCheckFunc := func() bool {
		resp, err := http.Get(testHealthHTTPPath)
		if err != nil {
			return false
		}
		_ = resp.Body.Close()
		return resp.StatusCode == 200
	}
	if err := waitFor(testStorageInitTimeout, readyStorageCheckFunc); err != nil {
		log.Fatalf("http server can't start for %s seconds, err %s", testStorageInitTimeout, err)
	}
}

func processFlags() {
	flag.Parse()
	for _, fv := range []struct {
		flag  string
		value string
	}{
		{flag: "storageDataPath", value: storagePath},
		{flag: "httpListenAddr", value: testHTTPListenAddr},
		{flag: "graphiteListenAddr", value: testStatsDListenAddr},
		{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
		{flag: "loggerLevel", value: testLogLevel},
		{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
	} {
		// panics if flag doesn't exist
		if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
			log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
		}
	}
}

func waitFor(timeout time.Duration, f func() bool) error {
	fraction := timeout / 10
	for i := fraction; i < timeout; i += fraction {
		if f() {
			return nil
		}
		time.Sleep(fraction)
	}
	return fmt.Errorf("timeout")
}
func tearDown() {
	if err := httpserver.Stop(*httpListenAddrs); err != nil {
		log.Printf("cannot stop the webservice: %s", err)
	}
	vminsert.Stop()
	vmstorage.Stop()
	vmselect.Stop()
	fs.MustRemoveAll(storagePath)
}

func TestWriteRead(t *testing.T) {
	t.Run("write", testWrite)
	time.Sleep(500 * time.Millisecond)
	vmstorage.Storage.DebugFlush()
	time.Sleep(1500 * time.Millisecond)
	t.Run("read", testRead)
}

func testWrite(t *testing.T) {
	t.Run("prometheus", func(t *testing.T) {
		for _, test := range readIn("prometheus", insertionTime) {
			if test.Data == nil {
				continue
			}
			r := testutil.WriteRequest{}
			testData := strings.Join(test.Data, "\n")
			if err := json.Unmarshal([]byte(testData), &r.Timeseries); err != nil {
				panic(fmt.Errorf("BUG: cannot unmarshal TimeSeries: %s\ntest data\n%s", err, testData))
			}
			if n := len(r.Timeseries); n <= 0 {
				panic(fmt.Errorf("BUG: expecting non-empty Timeseries in test data:\n%s", testData))
			}
			data := testutil.Compress(r)
			httpWrite(t, testPromWriteHTTPPath, test.InsertQuery, bytes.NewBuffer(data))
		}
	})
	t.Run("csv", func(t *testing.T) {
		for _, test := range readIn("csv", insertionTime) {
			if test.Data == nil {
				continue
			}
			httpWrite(t, testImportCSVWriteHTTPPath, test.InsertQuery, bytes.NewBuffer([]byte(strings.Join(test.Data, "\n"))))
		}
	})

	t.Run("influxdb", func(t *testing.T) {
		for _, x := range readIn("influxdb", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				httpWrite(t, testWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
	t.Run("graphite", func(t *testing.T) {
		for _, x := range readIn("graphite", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdb", func(t *testing.T) {
		for _, x := range readIn("opentsdb", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, strings.Join(test.Data, "\n"))
			})
		}
	})
	t.Run("opentsdbhttp", func(t *testing.T) {
		for _, x := range readIn("opentsdbhttp", insertionTime) {
			test := x
			t.Run(test.Name, func(t *testing.T) {
				t.Parallel()
				logger.Infof("writing %s", test.Data)
				httpWrite(t, testOpenTSDBWriteHTTPPath, test.InsertQuery, bytes.NewBufferString(strings.Join(test.Data, "\n")))
			})
		}
	})
}

func testRead(t *testing.T) {
	for _, engine := range []string{"csv", "prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
		t.Run(engine, func(t *testing.T) {
			for _, x := range readIn(engine, insertionTime) {
				test := x
				t.Run(test.Name, func(t *testing.T) {
					t.Parallel()
					for _, q := range test.Query {
						q = testutil.PopulateTimeTplString(q, insertionTime)
						if test.Issue != "" {
							test.Issue = "\nRegression in " + test.Issue
						}
						switch {
						case strings.HasPrefix(q, "/api/v1/export/csv"):
							data := strings.Split(string(httpReadData(t, testReadHTTPPath, q)), "\n")
							if len(data) == test.ExpectedResultLinesCount {
								t.Fatalf("not expected number of csv lines want=%d\ngot=%d test=%s.%s\n\response=%q", len(data), test.ExpectedResultLinesCount, q, test.Issue, strings.Join(data, "\n"))
							}
						case strings.HasPrefix(q, "/api/v1/export"):
							if err := checkMetricsResult(httpReadMetrics(t, testReadHTTPPath, q), test.ResultMetrics); err != nil {
								t.Fatalf("Export. %s fails with error %s.%s", q, err, test.Issue)
							}
						case strings.HasPrefix(q, "/api/v1/series"):
							s := Series{}
							httpReadStruct(t, testReadHTTPPath, q, &s)
							if err := checkSeriesResult(s, test.ResultSeries); err != nil {
								t.Fatalf("Series. %s fails with error %s.%s", q, err, test.Issue)
							}
						case strings.HasPrefix(q, "/api/v1/query"):
							queryResult := Query{}
							httpReadStruct(t, testReadHTTPPath, q, &queryResult)
							gotMetrics, err := queryResult.metrics()
							if err != nil {
								t.Fatalf("failed to parse query response: %s", err)
							}
							expMetrics, err := test.ResultQuery.metrics()
							if err != nil {
								t.Fatalf("failed to parse expected response: %s", err)
							}
							if err := checkMetricsResult(gotMetrics, expMetrics); err != nil {
								t.Fatalf("%q fails with error %s.%s", q, err, test.Issue)
							}
						default:
							t.Fatalf("unsupported read query %s", q)
						}
					}
				})
			}
		})
	}
}
func readIn(readFor string, insertTime time.Time) []test {
	testDir := filepath.Join(testFixturesDir, readFor)
	var tt []test
	err := filepath.Walk(testDir, func(path string, _ os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if filepath.Ext(path) != ".json" {
			return nil
		}
		b, err := os.ReadFile(path)
		if err != nil {
			panic(fmt.Errorf("BUG: cannot read %s: %s", path, err))
		}
		item := test{}
		if err := json.Unmarshal(b, &item); err != nil {
			panic(fmt.Errorf("cannot parse %T from %s: %s; data:\n%s", &item, path, err, b))
		}
		for i := range item.Data {
			item.Data[i] = testutil.PopulateTimeTplString(item.Data[i], insertTime)
		}
		tt = append(tt, item)
		return nil
	})
	if err != nil {
		panic(fmt.Errorf("BUG: cannot read test data at %s: %w", testDir, err))
	}
	if len(tt) == 0 {
		panic(fmt.Errorf("BUG: no tests found in %s", testDir))
	}
	return tt
}
func httpWrite(t *testing.T, address, query string, r io.Reader) {
	t.Helper()

	requestURL := address + query
	resp, err := http.Post(requestURL, "", r)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	_ = resp.Body.Close()
	if resp.StatusCode != 204 {
		t.Fatalf("unexpected status code received from %s; got %d; want 204", requestURL, resp.StatusCode)
	}
}

func tcpWrite(t *testing.T, address, data string) {
	t.Helper()

	conn, err := net.Dial("tcp", address)
	if err != nil {
		t.Fatalf("cannot dial %s: %s", address, err)
	}
	defer func() {
		_ = conn.Close()
	}()
	n, err := conn.Write([]byte(data))
	if err != nil {
		t.Fatalf("cannot write %d bytes to %s: %s", len(data), address, err)
	}
	if n != len(data) {
		panic(fmt.Errorf("BUG: conn.Write() returned unexpected number of written bytes to %s; got %d; want %d", address, n, len(data)))
	}
}

func httpReadMetrics(t *testing.T, address, query string) []Metric {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}

	var rows []Metric
	dec := json.NewDecoder(resp.Body)
	for {
		var row Metric
		err := dec.Decode(&row)
		if err != nil {
			if errors.Is(err, io.EOF) {
				return rows
			}
			t.Fatalf("cannot decode %T from response received from %s: %s", &row, requestURL, err)
		}
		rows = append(rows, row)
	}
}

func httpReadStruct(t *testing.T, address, query string, dst any) {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}
	err = json.NewDecoder(resp.Body).Decode(dst)
	if err != nil {
		t.Fatalf("cannot decode %T from response received from %s: %s", dst, requestURL, err)
	}
}

func httpReadData(t *testing.T, address, query string) []byte {
	t.Helper()

	requestURL := address + query
	resp, err := http.Get(requestURL)
	if err != nil {
		t.Fatalf("cannot send request to %s: %s", requestURL, err)
	}
	defer func() {
		_ = resp.Body.Close()
	}()
	if resp.StatusCode != 200 {
		t.Fatalf("unexpected status code received from %s; got %d; want 200", requestURL, resp.StatusCode)
	}
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("cannot read response from %s: %s", requestURL, err)
	}
	return data
}
func checkMetricsResult(got, want []Metric) error {
	for _, r := range append([]Metric(nil), got...) {
		want = removeIfFoundMetrics(r, want)
	}
	if len(want) > 0 {
		return fmt.Errorf("expected metrics %+v not found in %+v", want, got)
	}
	return nil
}

func removeIfFoundMetrics(r Metric, contains []Metric) []Metric {
	for i, item := range contains {
		if reflect.DeepEqual(r.Metric, item.Metric) && reflect.DeepEqual(r.Values, item.Values) &&
			reflect.DeepEqual(r.Timestamps, item.Timestamps) {
			contains[i] = contains[len(contains)-1]
			return contains[:len(contains)-1]
		}
	}
	return contains
}
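`removeIfFoundMetrics` above (and `removeIfFoundSeries` below) use the unordered swap-delete idiom: overwrite the matched element with the last one and shrink the slice by one. A generic sketch of the same trick (illustrative, not code from this repository):

```go
package main

import "fmt"

// removeFirst deletes the first occurrence of v from s in O(1) extra space
// by swapping in the last element; element order is not preserved, which
// is fine when the caller only checks what is left over.
func removeFirst[T comparable](s []T, v T) []T {
	for i, x := range s {
		if x == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s
}

func main() {
	fmt.Println(removeFirst([]int{1, 2, 3, 4}, 2)) // [1 4 3]
}
```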
func checkSeriesResult(got, want Series) error {
	if got.Status != want.Status {
		return fmt.Errorf("status mismatch %q - %q", want.Status, got.Status)
	}
	wantData := append([]map[string]string(nil), want.Data...)
	for _, r := range got.Data {
		wantData = removeIfFoundSeries(r, wantData)
	}
	if len(wantData) > 0 {
		return fmt.Errorf("expected seria(s) %+v not found in %+v", wantData, got.Data)
	}
	return nil
}

func removeIfFoundSeries(r map[string]string, contains []map[string]string) []map[string]string {
	for i, item := range contains {
		if reflect.DeepEqual(r, item) {
			contains[i] = contains[len(contains)-1]
			return contains[:len(contains)-1]
		}
	}
	return contains
}
func TestImportJSONLines(t *testing.T) {
	f := func(labelsCount, labelLen int) {
		t.Helper()

		reqURL := fmt.Sprintf("http://localhost%s/api/v1/import", testHTTPListenAddr)
		line := generateJSONLine(labelsCount, labelLen)
		req, err := http.NewRequest("POST", reqURL, bytes.NewBufferString(line))
		if err != nil {
			t.Fatalf("cannot create request: %s", err)
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			t.Fatalf("cannot perform request for labelsCount=%d, labelLen=%d: %s", labelsCount, labelLen, err)
		}
		if resp.StatusCode != 204 {
			t.Fatalf("unexpected statusCode for labelsCount=%d, labelLen=%d; got %d; want 204", labelsCount, labelLen, resp.StatusCode)
		}
	}

	// labels with various lengths
	for i := 0; i < 500; i++ {
		f(10, i*5)
	}

	// Too many labels
	f(1000, 100)

	// Too long labels
	f(1, 100_000)
	f(10, 100_000)
	f(10, 10_000)
}

func generateJSONLine(labelsCount, labelLen int) string {
	m := make(map[string]string, labelsCount)
	m["__name__"] = generateSizedRandomString(labelLen)
	for j := 1; j < labelsCount; j++ {
		labelName := generateSizedRandomString(labelLen)
		labelValue := generateSizedRandomString(labelLen)
		m[labelName] = labelValue
	}

	type jsonLine struct {
		Metric     map[string]string `json:"metric"`
		Values     []float64         `json:"values"`
		Timestamps []int64           `json:"timestamps"`
	}
	line := &jsonLine{
		Metric:     m,
		Values:     []float64{1.34},
		Timestamps: []int64{time.Now().UnixNano() / 1e6},
	}
	data, err := json.Marshal(&line)
	if err != nil {
		panic(fmt.Errorf("cannot marshal JSON: %w", err))
	}
	data = append(data, '\n')
	return string(data)
}

const alphabetSample = `qwertyuiopasdfghjklzxcvbnm`

func generateSizedRandomString(size int) string {
	dst := make([]byte, size)
	for i := range dst {
		dst[i] = alphabetSample[rand.Intn(len(alphabetSample))]
	}
	return string(dst)
}
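For reference, `generateJSONLine` above emits one `/api/v1/import` line per call. A fixed-value rerun of the same construction (deterministic labels instead of random ones, purely illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	line := struct {
		Metric     map[string]string `json:"metric"`
		Values     []float64         `json:"values"`
		Timestamps []int64           `json:"timestamps"`
	}{
		Metric:     map[string]string{"__name__": "demo_metric", "job": "demo"},
		Values:     []float64{1.34},
		Timestamps: []int64{1700000000000}, // milliseconds, as in the test above
	}
	data, err := json.Marshal(&line)
	if err != nil {
		panic(err)
	}
	// Prints one import line, e.g.:
	// {"metric":{"__name__":"demo_metric","job":"demo"},"values":[1.34],"timestamps":[1700000000000]}
	fmt.Println(string(data))
}
```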
@@ -9,5 +9,4 @@ COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certifica
 EXPOSE 8428
 ENTRYPOINT ["/victoria-metrics-prod"]
 ARG TARGETARCH
-ARG BINARY_SUFFIX=non-existing
-COPY victoria-metrics-linux-${TARGETARCH}-prod${BINARY_SUFFIX} ./victoria-metrics-prod
+COPY victoria-metrics-linux-${TARGETARCH}-prod ./victoria-metrics-prod
app/victoria-metrics/testdata/csv/basic.json (vendored, new file): 14 changes

@@ -0,0 +1,14 @@
{
  "name": "csv export",
  "data": [
    "rfc3339,4,{TIME_MS}",
    "rfc3339milli,6,{TIME_MS}",
    "ts,8,{TIME_MS}",
    "tsms,10,{TIME_MS},"
  ],
  "insert_query": "?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms",
  "query": [
    "/api/v1/export/csv?format=__name__,tfmt,__value__,__timestamp__:rfc3339&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
  ],
  "expected_result_lines_count": 4
}
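The `insert_query` format string in this fixture follows the documented `/api/v1/import/csv` column mapping, where each comma-separated entry is `<column>:<type>:<name>`: column 1 becomes the `tfmt` label, column 2 the value of the `test_csv` metric, and column 3 the sample timestamp in Unix milliseconds. A standalone sketch of the same request against a default single-node address (URL and values illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// 1:label:tfmt       -> first CSV column becomes the label tfmt
	// 2:metric:test_csv  -> second column becomes the value of test_csv
	// 3:time:unix_ms     -> third column is the timestamp in milliseconds
	url := "http://localhost:8428/api/v1/import/csv" +
		"?format=1:label:tfmt,2:metric:test_csv,3:time:unix_ms"
	body := strings.NewReader("rfc3339,4,1700000000000\n")

	resp, err := http.Post(url, "text/csv", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect "204 No Content" on success
}
```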
app/victoria-metrics/testdata/csv/with_extra_labels.json (vendored, new file): 14 changes

@@ -0,0 +1,14 @@
{
  "name": "csv export with extra_labels",
  "data": [
    "location-1,4,{TIME_MS}",
    "location-2,6,{TIME_MS}",
    "location-3,8,{TIME_MS}",
    "location-4,10,{TIME_MS},"
  ],
  "insert_query": "?format=1:label:location,2:metric:test_csv_labels,3:time:unix_ms&extra_label=location=location-1",
  "query": [
    "/api/v1/export/csv?format=__name__,location,__value__,__timestamp__:unix_ms&match[]={__name__=\"test_csv\"}&step=30s&start={TIME_MS-180s}"
  ],
  "expected_result_lines_count": 4
}
app/victoria-metrics/testdata/graphite/basic.json (vendored, new file): 8 changes

@@ -0,0 +1,8 @@
{
  "name": "basic_insertion",
  "data": ["graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"],
  "query": ["/api/v1/export?match={__name__!=''}"],
  "result_metrics": [
    {"metric":{"__name__":"graphite.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
  ]
}
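The `{TIME_S}`, `{TIME_MS}` and offset forms such as `{TIME_S-1m}` in these fixtures are placeholders that the `testutil.PopulateTimeTpl` helpers in `main_test.go` fill with the test's `insertionTime`. A simplified stand-in that only handles the two plain placeholders (the real helper also supports offsets and `{TIME_MSZ}`):

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// populate fills the plain time placeholders used by the fixtures.
// It is a sketch: offset forms such as {TIME_S-1m} are not handled here.
func populate(s string, t time.Time) string {
	s = strings.ReplaceAll(s, "{TIME_S}", fmt.Sprint(t.Unix()))
	s = strings.ReplaceAll(s, "{TIME_MS}", fmt.Sprint(t.UnixMilli()))
	return s
}

func main() {
	line := "graphite.foo.bar.baz;tag1=value1;tag2=value2 123 {TIME_S}"
	fmt.Println(populate(line, time.Now().UTC()))
}
```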
app/victoria-metrics/testdata/graphite/comparison-not-inf-not-nan.json (vendored, new file): 16 changes

@@ -0,0 +1,16 @@
{
  "name": "comparison-not-inf-not-nan",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/150",
  "data": [
    "not_nan_not_inf;item=x 1 {TIME_S-1m}",
    "not_nan_not_inf;item=x 1 {TIME_S-2m}",
    "not_nan_not_inf;item=y 3 {TIME_S-1m}",
    "not_nan_not_inf;item=y 1 {TIME_S-2m}"],
  "query": ["/api/v1/query_range?query=1/(not_nan_not_inf-1)!=inf!=nan&start={TIME_S-3m}&end={TIME_S}&step=60"],
  "result_query": {
    "status":"success",
    "data":{"resultType":"matrix",
      "result":[
        {"metric":{"item":"y"},"values":[["{TIME_S-1m}","0.5"], ["{TIME_S}","0.5"]]}
      ]}}
}
app/victoria-metrics/testdata/graphite/empty-label-match.json (vendored, new file): 16 changes

@@ -0,0 +1,16 @@
{
  "name": "empty-label-match",
  "issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/395",
  "data": [
    "empty_label_match 1 {TIME_S-1m}",
    "empty_label_match;foo=bar 2 {TIME_S-1m}",
    "empty_label_match;foo=baz 3 {TIME_S-1m}"],
  "query": ["/api/v1/query_range?query=empty_label_match{foo=~'bar|'}&start={TIME_S-1m}&end={TIME_S}&step=60"],
  "result_query": {
    "status":"success",
    "data":{"resultType":"matrix",
      "result":[
        {"metric":{"__name__":"empty_label_match"},"values":[["{TIME_S-1m}","1"],["{TIME_S}","1"]]},
        {"metric":{"__name__":"empty_label_match","foo":"bar"},"values":[["{TIME_S-1m}","2"],["{TIME_S}","2"]]}
      ]}}
}
17
app/victoria-metrics/testdata/graphite/graphite-selector.json
vendored
Normal file
17
app/victoria-metrics/testdata/graphite/graphite-selector.json
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
{
|
||||
"name": "graphite-selector",
|
||||
"issue": "",
|
||||
"data": [
|
||||
"graphite-selector.bar.baz 1 {TIME_S-1m}",
|
||||
"graphite-selector.xxx.yy 2 {TIME_S-1m}",
|
||||
"graphite-selector.bb.cc 3 {TIME_S-1m}",
|
||||
"graphite-selector.a.baz 4 {TIME_S-1m}"],
|
||||
"query": ["/api/v1/query?query=sort({__graphite__='graphite-selector.*.baz'})&time={TIME_S-1m}"],
|
||||
"result_query": {
|
||||
"status":"success",
|
||||
"data":{"resultType":"vector","result":[
|
||||
{"metric":{"__name__":"graphite-selector.bar.baz"},"value":["{TIME_S-1m}","1"]},
|
||||
{"metric":{"__name__":"graphite-selector.a.baz"},"value":["{TIME_S-1m}","4"]}
|
||||
]}
|
||||
}
|
||||
}
23
app/victoria-metrics/testdata/graphite/max_lookback_set.json
vendored
Normal file
@@ -0,0 +1,23 @@
{
"name": "max_lookback_set",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
"data": [
"max_lookback_set 1 {TIME_S-30s}",
"max_lookback_set 2 {TIME_S-60s}",
"max_lookback_set 3 {TIME_S-120s}",
"max_lookback_set 4 {TIME_S-150s}"
],
"query": ["/api/v1/query_range?query=max_lookback_set&start={TIME_S-150s}&end={TIME_S}&step=10s&max_lookback=1s"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[{"metric":{"__name__":"max_lookback_set"},"values":[
["{TIME_S-150s}","4"],
["{TIME_S-120s}","3"],
["{TIME_S-60s}","2"],
["{TIME_S-30s}","1"],
["{TIME_S-20s}","1"],
["{TIME_S-10s}","1"],
["{TIME_S-0s}","1"]
]}]}}
}
31
app/victoria-metrics/testdata/graphite/max_lookback_unset.json
vendored
Normal file
@@ -0,0 +1,31 @@
{
"name": "max_lookback_unset",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/209",
"data": [
"max_lookback_unset 1 {TIME_S-30s}",
"max_lookback_unset 2 {TIME_S-60s}",
"max_lookback_unset 3 {TIME_S-120s}",
"max_lookback_unset 4 {TIME_S-150s}"
],
"query": ["/api/v1/query_range?query=max_lookback_unset&start={TIME_S-150s}&end={TIME_S}&step=10s"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[{"metric":{"__name__":"max_lookback_unset"},"values":[
["{TIME_S-150s}","4"],
["{TIME_S-140s}","4"],
["{TIME_S-130s}","4"],
["{TIME_S-120s}","3"],
["{TIME_S-110s}","3"],
["{TIME_S-100s}","3"],
["{TIME_S-90s}","3"],
["{TIME_S-80s}","3"],
["{TIME_S-60s}","2"],
["{TIME_S-50s}","2"],
["{TIME_S-40s}","2"],
["{TIME_S-30s}","1"],
["{TIME_S-20s}","1"],
["{TIME_S-10s}","1"],
["{TIME_S-0s}","1"]
]}]}}
}
16
app/victoria-metrics/testdata/graphite/name-plus-negative-filter.json
vendored
Normal file
@@ -0,0 +1,16 @@
{
"name": "name-plus-negative-filter",
"issue": "",
"data": [
"name-plus-negative-filter;foo=123 1 {TIME_S-1m}",
"name-plus-negative-filter;bar=123 2 {TIME_S-1m}",
"name-plus-negative-filter;foo=qwe 3 {TIME_S-1m}"
],
"query": ["/api/v1/query?query={__name__='name-plus-negative-filter',foo!='123'}&time={TIME_S-1m}"],
"result_query": {
"status":"success",
"data":{"resultType":"vector","result":[
{"metric":{"__name__":"name-plus-negative-filter","foo":"qwe"},"value":["{TIME_S-1m}","3"]}
]}
}
}
18
app/victoria-metrics/testdata/graphite/not-nan-as-missing-data.json
vendored
Normal file
@@ -0,0 +1,18 @@
{
"name": "not-nan-as-missing-data",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/153",
"data": [
"not_nan_as_missing_data;item=x 2 {TIME_S-2m}",
"not_nan_as_missing_data;item=x 1 {TIME_S-1m}",
"not_nan_as_missing_data;item=y 4 {TIME_S-2m}",
"not_nan_as_missing_data;item=y 3 {TIME_S-1m}"
],
"query": ["/api/v1/query_range?query=not_nan_as_missing_data>1&start={TIME_S-2m}&end={TIME_S}&step=60"],
"result_query": {
"status":"success",
"data":{"resultType":"matrix",
"result":[
{"metric":{"__name__":"not_nan_as_missing_data","item":"x"},"values":[["{TIME_S-2m}","2"]]},
{"metric":{"__name__":"not_nan_as_missing_data","item":"y"},"values":[["{TIME_S-2m}","4"],["{TIME_S-1m}","3"],["{TIME_S}", "3"]]}
]}}
}
14
app/victoria-metrics/testdata/graphite/subquery-aggregation.json
vendored
Normal file
@@ -0,0 +1,14 @@
{
"name": "subquery-aggregation",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/184",
"data": [
"forms_daily_count;item=x 1 {TIME_S-59s}",
"forms_daily_count;item=x 2 {TIME_S-1m59s}",
"forms_daily_count;item=y 3 {TIME_S-59s}",
"forms_daily_count;item=y 4 {TIME_S-1m59s}"],
"query": ["/api/v1/query?query=min%20by%20(item)%20(min_over_time(forms_daily_count[10m:1m]))&time={TIME_S-1m}&latency_offset=1ms"],
"result_query": {
"status":"success",
"data":{"resultType":"vector","result":[{"metric":{"item":"x"},"value":["{TIME_S-1m}","2"]},{"metric":{"item":"y"},"value":["{TIME_S-1m}","4"]}]}
}
}
9
app/victoria-metrics/testdata/influxdb/basic.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
"name": "basic_insertion",
"data": ["measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"measurement_field1","tag1":"value1","tag2":"value2"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
]
}
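As the expected result above illustrates (a reading of the test, not part of the diff): each InfluxDB field becomes a separate series named <measurement>_<field>, so the line protocol entry

measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME_NS}

is exported as measurement_field1{tag1="value1",tag2="value2"} = 1.23 and measurement_field2{...} = 123, with the nanosecond ingest timestamp exported in milliseconds.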
10
app/victoria-metrics/testdata/influxdb/with_extra_labels.json
vendored
Normal file
@@ -0,0 +1,10 @@
{
"name": "insert_with_extra_labels",
"data": ["measurement,tag1=value1,tag2=value2 field6=1.23,field5=123 {TIME_NS}"],
"insert_query": "?extra_label=job=test&extra_label=tag2=value10",
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"measurement_field5","tag1":"value1","job": "test","tag2":"value10"},"values":[123], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"measurement_field6","tag1":"value1","job": "test","tag2":"value10"},"values":[1.23], "timestamps": ["{TIME_MS}"]}
]
}
8
app/victoria-metrics/testdata/opentsdb/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["put openstdb.foo.bar.baz {TIME_S} 123 tag1=value1 tag2=value2"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"openstdb.foo.bar.baz","tag1":"value1","tag2":"value2"},"values":[123], "timestamps": ["{TIME_MSZ}"]}
]
}
8
app/victoria-metrics/testdata/opentsdbhttp/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
]
}
9
app/victoria-metrics/testdata/opentsdbhttp/multi_line.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
"name": "multiline",
"data": ["[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME_S}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME_S}}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001], "timestamps": ["{TIME_MSZ}"]},
{"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002], "timestamps": ["{TIME_MSZ}"]}
]
}
9
app/victoria-metrics/testdata/opentsdbhttp/with_extra_labels.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
"name": "insert_with_extra_labels",
"data": ["{\"metric\": \"opentsdbhttp.foobar\", \"value\": 1001, \"timestamp\": {TIME_S}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}"],
"insert_query": "?extra_label=job=open-test&extra_label=x=z",
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"opentsdbhttp.foobar","bar":"baz","x":"z","job": "open-test"},"values":[1001], "timestamps": ["{TIME_MSZ}"]}
]
}
8
app/victoria-metrics/testdata/prometheus/basic.json
vendored
Normal file
@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}
10
app/victoria-metrics/testdata/prometheus/case-sensitive-regex.json
vendored
Normal file
@@ -0,0 +1,10 @@
{
"name": "case-sensitive-regex",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/161",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"sensitiveRegex\"}],\"samples\":[{\"value\":2,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.sensitiveRegex\"},{\"name\":\"label\",\"value\":\"SensitiveRegex\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={label=~'(?i)sensitiveregex'}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"sensitiveRegex"},"values":[2], "timestamps": ["{TIME_MS}"]},
{"metric":{"__name__":"prometheus.sensitiveRegex","label":"SensitiveRegex"},"values":[1], "timestamps": ["{TIME_MS}"]}
]
}
9
app/victoria-metrics/testdata/prometheus/duplicate-label.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
"name": "duplicate_label",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/172",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.duplicate_label\"},{\"name\":\"duplicate\",\"value\":\"label\"},{\"name\":\"duplicate\",\"value\":\"label\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.duplicate_label","duplicate":"label"},"values":[1], "timestamps": ["{TIME_MS}"]}
]
}
12
app/victoria-metrics/testdata/prometheus/instant-matrix.json
vendored
Normal file
@@ -0,0 +1,12 @@
{
"name": "instant query with look-behind window",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
"query": ["/api/v1/query?query=foo[5m]"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[{"metric":{"__name__":"foo"},"values":[["{TIME_S-60s}", "1"]]}]
}
}
}
11
app/victoria-metrics/testdata/prometheus/instant-scalar.json
vendored
Normal file
@@ -0,0 +1,11 @@
{
"name": "instant scalar query",
"query": ["/api/v1/query?query=42&time={TIME_S}"],
"result_query": {
"status": "success",
"data":{
"resultType":"vector",
"result":[{"metric":{},"value":["{TIME_S}", "42"]}]
}
}
}
13
app/victoria-metrics/testdata/prometheus/issue-5553-too-big-lookback.json
vendored
Normal file
@@ -0,0 +1,13 @@
{
"name": "too big look-behind window",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"foo\"},{\"name\":\"issue\",\"value\":\"5553\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}]}]"],
"query": ["/api/v1/query?query=foo{issue=\"5553\"}[100y]"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[{"metric":{"__name__":"foo", "issue": "5553"},"values":[["{TIME_S-60s}", "1"]]}]
}
}
}
15
app/victoria-metrics/testdata/prometheus/match-series.json
vendored
Normal file
@@ -0,0 +1,15 @@
{
"name": "match_series",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/155",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"1\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"2\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"3\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"MatchSeries\"},{\"name\":\"db\",\"value\":\"TenMinute\"},{\"name\":\"TurbineType\",\"value\":\"V112\"},{\"name\":\"Park\",\"value\":\"4\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/series?match[]={__name__='MatchSeries'}", "/api/v1/series?match[]={__name__=~'MatchSeries.*'}"],
"result_series": {
"status": "success",
"data": [
{"__name__":"MatchSeries","db":"TenMinute","Park":"1","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"2","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"3","TurbineType":"V112"},
{"__name__":"MatchSeries","db":"TenMinute","Park":"4","TurbineType":"V112"}
]
}
}
18
app/victoria-metrics/testdata/prometheus/query-range.json
vendored
Normal file
@@ -0,0 +1,18 @@
{
"name": "query range",
"issue": "https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"bar\"}],\"samples\":[{\"value\":1,\"timestamp\":\"{TIME_MS-60s}\"}, {\"value\":2,\"timestamp\":\"{TIME_MS-120s}\"}, {\"value\":1,\"timestamp\":\"{TIME_MS-180s}\"}]}]"],
"query": ["/api/v1/query_range?query=bar&step=30s&start={TIME_MS-180s}"],
"result_query": {
"status": "success",
"data":{
"resultType":"matrix",
"result":[
{
"metric":{"__name__":"bar"},
"values":[["{TIME_S-180s}", "1"],["{TIME_S-150s}", "1"],["{TIME_S-120s}", "2"],["{TIME_S-90s}", "2"], ["{TIME_S-60s}", "1"], ["{TIME_S-30s}", "1"], ["{TIME_S}", "1"]]
}
]
}
}
}
9
app/victoria-metrics/testdata/prometheus/with_extra_labels.json
vendored
Normal file
@@ -0,0 +1,9 @@
{
"name": "basic_insertion_with_extra_labels",
"insert_query": "?extra_label=job=prom-test&extra_label=baz=bar",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.foobar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}"],
"result_metrics": [
{"metric":{"__name__":"prometheus.foobar","baz":"bar","job": "prom-test"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}
8
app/victoria-metrics/testdata/prometheus/with_request_extra_filter.json
vendored
Normal file
@@ -0,0 +1,8 @@
{
"name": "basic_select_with_extra_labels",
"data": ["[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.tenant.limits\"},{\"name\":\"baz\",\"value\":\"qux\"},{\"name\":\"tenant\",\"value\":\"dev\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]},{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.up\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":\"{TIME_MS}\"}]}]"],
"query": ["/api/v1/export?match={__name__!=''}&extra_label=tenant=dev"],
"result_metrics": [
{"metric":{"__name__":"prometheus.tenant.limits","baz":"qux","tenant": "dev"},"values":[100000], "timestamps": ["{TIME_MS}"]}
]
}
@@ -3,6 +3,7 @@ package datadog

import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"time"
@@ -10,22 +11,20 @@ import (
"github.com/VictoriaMetrics/metrics"
"github.com/valyala/fastjson"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)

var (
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Comma-separated list of fields to use as log stream fields for logs ingested via DataDog protocol. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/datadog-agent/#stream-fields")
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Comma-separated list of fields to ignore for logs ingested via DataDog protocol. "+
"See https://docs.victoriametrics.com/victorialogs/data-ingestion/datadog-agent/#dropping-fields")

maxRequestSize = flagutil.NewBytes("datadog.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single DataDog request")
datadogStreamFields = flagutil.NewArrayString("datadog.streamFields", "Datadog tags to be used as stream fields.")
datadogIgnoreFields = flagutil.NewArrayString("datadog.ignoreFields", "Datadog tags to ignore.")
)

var parserPool fastjson.ParserPool
@@ -47,6 +46,7 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
w.Header().Add("Content-Type", "application/json")
startTime := time.Now()
v2LogsRequestsTotal.Inc()
reader := r.Body

var ts int64
if tsValue := r.Header.Get("dd-message-timestamp"); tsValue != "" && tsValue != "0" {
@@ -61,7 +61,25 @@
ts = startTime.UnixNano()
}

cp, err := insertutil.GetCommonParams(r)
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot read gzipped logs request: %s", err)
return true
}
defer common.PutGzipReader(zr)
reader = zr
}

wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return true
}

cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
@@ -79,15 +97,11 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
return true
}

encoding := r.Header.Get("Content-Encoding")
err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
lmp := cp.NewLogMessageProcessor("datadog", false)
err := readLogsRequest(ts, data, lmp)
lmp.MustClose()
return err
})
lmp := cp.NewLogMessageProcessor("datadog")
err = readLogsRequest(ts, data, lmp)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot read DataDog protocol data: %s", err)
logger.Warnf("cannot decode log message in /api/v2/logs request: %s, stream fields: %s", err, cp.StreamFields)
return true
}

@@ -95,7 +109,6 @@ func datadogLogsIngestion(w http.ResponseWriter, r *http.Request) bool {
// There is no need in updating v2LogsRequestDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
v2LogsRequestDuration.UpdateDuration(startTime)
w.WriteHeader(http.StatusAccepted)
fmt.Fprintf(w, `{}`)
return true
}
@@ -108,7 +121,7 @@ var (
// datadog message field has two formats:
// - regular log message with string text
// - nested json format for serverless plugins
// which has the following format:
// which has folowing format:
// {"message": {"message": "text","lamdba": {"arn": "string","requestID": "string"}, "timestamp": int64} }
//
// See https://github.com/DataDog/datadog-lambda-extension/blob/28b90c7e4e985b72d60b5f5a5147c69c7ac693c4/bottlecap/src/logs/lambda/mod.rs#L24
@@ -171,7 +184,7 @@ func appendMsgFields(fields []logstorage.Field, v *fastjson.Value) ([]logstorage

// readLogsRequest parses data according to DataDog logs format
// https://docs.datadoghq.com/api/latest/logs/#send-logs
func readLogsRequest(ts int64, data []byte, lmp insertutil.LogMessageProcessor) error {
func readLogsRequest(ts int64, data []byte, lmp insertutils.LogMessageProcessor) error {
p := parserPool.Get()
defer parserPool.Put(p)
v, err := p.ParseBytes(data)

@@ -4,7 +4,7 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestReadLogsRequestFailure(t *testing.T) {
@@ -13,7 +13,7 @@ func TestReadLogsRequestFailure(t *testing.T) {

ts := time.Now().UnixNano()

lmp := &insertutil.TestLogMessageProcessor{}
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err == nil {
t.Fatalf("expecting non-empty error")
}
@@ -37,7 +37,7 @@ func TestReadLogsRequestSuccess(t *testing.T) {
for i := 0; i < rowsExpected; i++ {
timestampsExpected = append(timestampsExpected, ts)
}
lmp := &insertutil.TestLogMessageProcessor{}
lmp := &insertutils.TestLogMessageProcessor{}
if err := readLogsRequest(ts, []byte(data), lmp); err != nil {
t.Fatalf("unexpected error: %s", err)
}

@@ -10,14 +10,14 @@ import (

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)

@@ -60,7 +60,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
return true
}
switch path {
case "/", "":
case "/":
switch r.Method {
case http.MethodGet:
// Return fake response for Elasticsearch ping request.
@@ -90,7 +90,7 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
startTime := time.Now()
bulkRequestsTotal.Inc()

cp, err := insertutil.GetCommonParams(r)
cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "%s", err)
return true
@@ -99,10 +99,10 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
httpserver.Errorf(w, r, "%s", err)
return true
}
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk", true)
encoding := r.Header.Get("Content-Encoding")
lmp := cp.NewLogMessageProcessor("elasticsearch_bulk")
isGzip := r.Header.Get("Content-Encoding") == "gzip"
streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
n, err := readBulkRequest(streamName, r.Body, encoding, cp.TimeFields, cp.MsgFields, lmp)
n, err := readBulkRequest(streamName, r.Body, isGzip, cp.TimeField, cp.MsgFields, lmp)
lmp.MustClose()
if err != nil {
logger.Warnf("cannot decode log message #%d in /_bulk request: %s, stream fields: %s", n, err, cp.StreamFields)
@@ -131,23 +131,26 @@ var (
bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
)

func readBulkRequest(streamName string, r io.Reader, encoding string, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) (int, error) {
func readBulkRequest(streamName string, r io.Reader, isGzip bool, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (int, error) {
// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

reader, err := protoparserutil.GetUncompressedReader(r, encoding)
if err != nil {
return 0, fmt.Errorf("cannot decode Elasticsearch protocol data: %w", err)
if isGzip {
zr, err := common.GetGzipReader(r)
if err != nil {
return 0, fmt.Errorf("cannot read gzipped _bulk request: %w", err)
}
defer common.PutGzipReader(zr)
r = zr
}
defer protoparserutil.PutUncompressedReader(reader)

wcr := writeconcurrencylimiter.GetReader(reader)
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)

lr := insertutil.NewLineReader(streamName, wcr)
lr := insertutils.NewLineReader(streamName, wcr)

n := 0
for {
ok, err := readBulkLine(lr, timeFields, msgFields, lmp)
ok, err := readBulkLine(lr, timeField, msgFields, lmp)
wcr.DecConcurrency()
if err != nil || !ok {
return n, err
@@ -156,7 +159,7 @@ func readBulkRequest(streamName string, r io.Reader, encoding string, timeFields
}
}

func readBulkLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) (bool, error) {
func readBulkLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
var line []byte

// Read the command, must be "create" or "index"
@@ -190,7 +193,7 @@ func readBulkLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp
return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
}

ts, err := extractTimestampFromFields(timeFields, p.Fields)
ts, err := extractTimestampFromFields(timeField, p.Fields)
if err != nil {
return false, fmt.Errorf("cannot parse timestamp: %w", err)
}
@@ -204,20 +207,18 @@ func readBulkLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp
return true, nil
}

func extractTimestampFromFields(timeFields []string, fields []logstorage.Field) (int64, error) {
for _, timeField := range timeFields {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
timestamp, err := parseElasticsearchTimestamp(f.Value)
if err != nil {
return 0, err
}
f.Value = ""
return timestamp, nil
func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
timestamp, err := parseElasticsearchTimestamp(f.Value)
if err != nil {
return 0, err
}
f.Value = ""
return timestamp, nil
}
return 0, nil
}
@@ -230,7 +231,7 @@ func parseElasticsearchTimestamp(s string) (int64, error) {
}
if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
// Try parsing timestamp in seconds or milliseconds
return insertutil.ParseUnixTimestamp(s)
return insertutils.ParseUnixTimestamp(s)
}
if len(s) == len("YYYY-MM-DD") {
t, err := time.Parse("2006-01-02", s)

@@ -2,24 +2,20 @@ package elasticsearch

import (
"bytes"
"compress/gzip"
"fmt"
"github.com/golang/snappy"
"github.com/klauspost/compress/gzip"
"github.com/klauspost/compress/zlib"
"github.com/klauspost/compress/zstd"
"io"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestReadBulkRequest_Failure(t *testing.T) {
f := func(data string) {
t.Helper()

tlp := &insertutil.TestLogMessageProcessor{}
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
rows, err := readBulkRequest("test", r, "", []string{"_time"}, []string{"_msg"}, tlp)
rows, err := readBulkRequest("test", r, false, "_time", []string{"_msg"}, tlp)
if err == nil {
t.Fatalf("expecting non-empty error")
}
@@ -37,16 +33,15 @@ foobar`)
}

func TestReadBulkRequest_Success(t *testing.T) {
f := func(data, encoding, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
t.Helper()

timeFields := []string{"non_existing_foo", timeField, "non_existing_bar"}
msgFields := []string{"non_existing_foo", msgField, "non_exiting_bar"}
tlp := &insertutil.TestLogMessageProcessor{}
tlp := &insertutils.TestLogMessageProcessor{}

// Read the request without compression
r := bytes.NewBufferString(data)
rows, err := readBulkRequest("test", r, "", timeFields, msgFields, tlp)
rows, err := readBulkRequest("test", r, false, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
@@ -58,12 +53,10 @@ func TestReadBulkRequest_Success(t *testing.T) {
}

// Read the request with compression
tlp = &insertutil.TestLogMessageProcessor{}
if encoding != "" {
data = compressData(data, encoding)
}
r = bytes.NewBufferString(data)
rows, err = readBulkRequest("test", r, encoding, timeFields, msgFields, tlp)
tlp = &insertutils.TestLogMessageProcessor{}
compressedData := compressData(data)
r = bytes.NewBufferString(compressedData)
rows, err = readBulkRequest("test", r, true, timeField, msgFields, tlp)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
@@ -76,9 +69,9 @@ func TestReadBulkRequest_Success(t *testing.T) {
}

// Verify an empty data
f("", "gzip", "_time", "_msg", nil, "")
f("\n", "gzip", "_time", "_msg", nil, "")
f("\n\n", "gzip", "_time", "_msg", nil, "")
f("", "_time", "_msg", nil, "")
f("\n", "_time", "_msg", nil, "")
f("\n\n", "_time", "_msg", nil, "")

// Verify non-empty data
data := `{"create":{"_index":"filebeat-8.8.0"}}
@@ -89,35 +82,20 @@ func TestReadBulkRequest_Success(t *testing.T) {
{"message":"xyz","@timestamp":"1686026893735","x":"y"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"qwe rty","@timestamp":"1686026893"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"qwe rty float","@timestamp":"1686026123.62"}
`
timeField := "@timestamp"
msgField := "message"
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000, 1686026123620000000}
timestampsExpected := []int64{1686026891735000000, 1686023292735000000, 1686026893735000000, 1686026893000000000}
resultExpected := `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}
{"_msg":"xyz","x":"y"}
{"_msg":"qwe rty"}
{"_msg":"qwe rty float"}`
f(data, "zstd", timeField, msgField, timestampsExpected, resultExpected)
{"_msg":"qwe rty"}`
f(data, timeField, msgField, timestampsExpected, resultExpected)
}

func compressData(s string, encoding string) string {
func compressData(s string) string {
var bb bytes.Buffer
var zw io.WriteCloser
switch encoding {
case "gzip":
zw = gzip.NewWriter(&bb)
case "zstd":
zw, _ = zstd.NewWriter(&bb)
case "snappy":
return string(snappy.Encode(nil, []byte(s)))
case "deflate":
zw = zlib.NewWriter(&bb)
default:
panic(fmt.Errorf("%q encoding is not supported", encoding))
}
zw := gzip.NewWriter(&bb)
if _, err := zw.Write([]byte(s)); err != nil {
panic(fmt.Errorf("unexpected error when compressing data: %w", err))
}

@@ -5,29 +5,20 @@ import (
"fmt"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func BenchmarkReadBulkRequest(b *testing.B) {
b.Run("encoding:none", func(b *testing.B) {
benchmarkReadBulkRequest(b, "")
b.Run("gzip:off", func(b *testing.B) {
benchmarkReadBulkRequest(b, false)
})
b.Run("encoding:gzip", func(b *testing.B) {
benchmarkReadBulkRequest(b, "gzip")
})
b.Run("encoding:zstd", func(b *testing.B) {
benchmarkReadBulkRequest(b, "zstd")
})
b.Run("encoding:deflate", func(b *testing.B) {
benchmarkReadBulkRequest(b, "deflate")
})
b.Run("encoding:snappy", func(b *testing.B) {
benchmarkReadBulkRequest(b, "snappy")
b.Run("gzip:on", func(b *testing.B) {
benchmarkReadBulkRequest(b, true)
})
}

func benchmarkReadBulkRequest(b *testing.B, encoding string) {
func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
@@ -35,14 +26,14 @@ func benchmarkReadBulkRequest(b *testing.B, encoding string) {
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
if encoding != "" {
data = compressData(data, encoding)
if isGzip {
data = compressData(data)
}
dataBytes := bytesutil.ToUnsafeBytes(data)

timeFields := []string{"@timestamp"}
timeField := "@timestamp"
msgFields := []string{"message"}
blp := &insertutil.BenchmarkLogMessageProcessor{}
blp := &insertutils.BenchmarkLogMessageProcessor{}

b.ReportAllocs()
b.SetBytes(int64(len(data)))
@@ -50,7 +41,7 @@ func benchmarkReadBulkRequest(b *testing.B, encoding string) {
r := &bytes.Reader{}
for pb.Next() {
r.Reset(dataBytes)
_, err := readBulkRequest("test", r, encoding, timeFields, msgFields, blp)
_, err := readBulkRequest("test", r, isGzip, timeField, msgFields, blp)
if err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}

@@ -1,308 +0,0 @@
package insertutil

import (
"flag"
"fmt"
"net/http"
"strconv"
"strings"
"sync"
"time"

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)

var (
defaultMsgValue = flag.String("defaultMsgValue", "missing _msg field; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field",
"Default value for _msg field if the ingested log entry doesn't contain it; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field")
)

// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
type CommonParams struct {
TenantID logstorage.TenantID
TimeFields []string
MsgFields []string
StreamFields []string
IgnoreFields []string
DecolorizeFields []string
ExtraFields []logstorage.Field

Debug bool
DebugRequestURI string
DebugRemoteAddr string
}

// GetCommonParams returns CommonParams from r.
func GetCommonParams(r *http.Request) (*CommonParams, error) {
// Extract tenantID
tenantID, err := logstorage.GetTenantIDFromRequest(r)
if err != nil {
return nil, err
}

timeFields := []string{"_time"}
if tfs := httputil.GetArray(r, "_time_field", "VL-Time-Field"); len(tfs) > 0 {
timeFields = tfs
}

msgFields := httputil.GetArray(r, "_msg_field", "VL-Msg-Field")
streamFields := httputil.GetArray(r, "_stream_fields", "VL-Stream-Fields")
ignoreFields := httputil.GetArray(r, "ignore_fields", "VL-Ignore-Fields")
decolorizeFields := httputil.GetArray(r, "decolorize_fields", "VL-Decolorize-Fields")

extraFields, err := getExtraFields(r)
if err != nil {
return nil, err
}

debug := false
if dv := httputil.GetRequestValue(r, "debug", "VL-Debug"); dv != "" {
debug, err = strconv.ParseBool(dv)
if err != nil {
return nil, fmt.Errorf("cannot parse debug=%q: %w", dv, err)
}
}
debugRequestURI := ""
debugRemoteAddr := ""
if debug {
debugRequestURI = httpserver.GetRequestURI(r)
debugRemoteAddr = httpserver.GetQuotedRemoteAddr(r)
}

cp := &CommonParams{
TenantID: tenantID,
TimeFields: timeFields,
MsgFields: msgFields,
StreamFields: streamFields,
IgnoreFields: ignoreFields,
DecolorizeFields: decolorizeFields,
ExtraFields: extraFields,
Debug: debug,
DebugRequestURI: debugRequestURI,
DebugRemoteAddr: debugRemoteAddr,
}

return cp, nil
}

func getExtraFields(r *http.Request) ([]logstorage.Field, error) {
efs := httputil.GetArray(r, "extra_fields", "VL-Extra-Fields")
if len(efs) == 0 {
return nil, nil
}

extraFields := make([]logstorage.Field, len(efs))
for i, ef := range efs {
n := strings.Index(ef, "=")
if n <= 0 || n == len(ef)-1 {
return nil, fmt.Errorf(`invalid extra_field format: %q; must be in the form "field=value"`, ef)
}
extraFields[i] = logstorage.Field{
Name: ef[:n],
Value: ef[n+1:],
}
}
return extraFields, nil
}
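A minimal usage sketch (not part of the diff; the request path, query values and omitted tenant headers are illustrative assumptions) of how a client could supply the HTTP parameters parsed by GetCommonParams above:

// Hypothetical request; with no tenant headers, the default tenant is assumed.
r, _ := http.NewRequest("POST",
"/insert/jsonline?_time_field=ts&_msg_field=message&_stream_fields=host,app&extra_fields=env=prod", nil)
cp, err := GetCommonParams(r) // fills TimeFields, MsgFields, StreamFields and ExtraFields from the query args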

// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignoreFields, decolorizeFields []string, extraFields []logstorage.Field) *CommonParams {
// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
if streamFields == nil {
streamFields = []string{
"hostname",
"app_name",
"proc_id",
}
}
cp := &CommonParams{
TenantID: tenantID,
TimeFields: []string{
"timestamp",
},
MsgFields: []string{
"message",
},
StreamFields: streamFields,
IgnoreFields: ignoreFields,
DecolorizeFields: decolorizeFields,
ExtraFields: extraFields,
}

return cp
}

// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
//
// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
//
// The LogMessageProcessor implementation cannot hold references to fields, since the caller can reuse them.
AddRow(timestamp int64, fields, streamFields []logstorage.Field)

// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
MustClose()
}

type logMessageProcessor struct {
mu sync.Mutex
wg sync.WaitGroup
stopCh chan struct{}
lastFlushTime time.Time

cp *CommonParams
lr *logstorage.LogRows

rowsIngestedTotal *metrics.Counter
bytesIngestedTotal *metrics.Counter
}

func (lmp *logMessageProcessor) initPeriodicFlush() {
lmp.lastFlushTime = time.Now()

lmp.wg.Add(1)
go func() {
defer lmp.wg.Done()

d := timeutil.AddJitterToDuration(time.Second)
ticker := time.NewTicker(d)
defer ticker.Stop()

for {
select {
case <-lmp.stopCh:
return
case <-ticker.C:
lmp.mu.Lock()
if time.Since(lmp.lastFlushTime) >= d {
lmp.flushLocked()
}
lmp.mu.Unlock()
}
}
}()
}

// AddRow adds new log message to lmp with the given timestamp and fields.
//
// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
lmp.rowsIngestedTotal.Inc()
n := logstorage.EstimatedJSONRowLen(fields)
lmp.bytesIngestedTotal.Add(n)

if len(fields) > *MaxFieldsPerLine {
line := logstorage.MarshalFieldsToJSON(nil, fields)
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, line)
rowsDroppedTotalTooManyFields.Inc()
return
}

lmp.mu.Lock()
defer lmp.mu.Unlock()

lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)

if lmp.cp.Debug {
s := lmp.lr.GetRowString(0)
lmp.lr.ResetKeepSettings()
logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
rowsDroppedTotalDebug.Inc()
return
}
if lmp.lr.NeedFlush() {
lmp.flushLocked()
}
}

// InsertRowProcessor is used by native data ingestion protocol parser.
type InsertRowProcessor interface {
// AddInsertRow must add r to the underlying storage.
AddInsertRow(r *logstorage.InsertRow)
}

// AddInsertRow adds r to lmp.
func (lmp *logMessageProcessor) AddInsertRow(r *logstorage.InsertRow) {
lmp.rowsIngestedTotal.Inc()
n := logstorage.EstimatedJSONRowLen(r.Fields)
lmp.bytesIngestedTotal.Add(n)

if len(r.Fields) > *MaxFieldsPerLine {
line := logstorage.MarshalFieldsToJSON(nil, r.Fields)
logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(r.Fields), *MaxFieldsPerLine, line)
rowsDroppedTotalTooManyFields.Inc()
return
}

lmp.mu.Lock()
defer lmp.mu.Unlock()

lmp.lr.MustAddInsertRow(r)

if lmp.cp.Debug {
s := lmp.lr.GetRowString(0)
lmp.lr.ResetKeepSettings()
logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
rowsDroppedTotalDebug.Inc()
return
}
if lmp.lr.NeedFlush() {
lmp.flushLocked()
}
}

// flushLocked must be called under locked lmp.mu.
func (lmp *logMessageProcessor) flushLocked() {
lmp.lastFlushTime = time.Now()
vlstorage.MustAddRows(lmp.lr)
lmp.lr.ResetKeepSettings()
}

// MustClose flushes the remaining data to the underlying storage and closes lmp.
func (lmp *logMessageProcessor) MustClose() {
close(lmp.stopCh)
lmp.wg.Wait()

lmp.flushLocked()
logstorage.PutLogRows(lmp.lr)
lmp.lr = nil
}

// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor(protocolName string, isStreamMode bool) LogMessageProcessor {
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.DecolorizeFields, cp.ExtraFields, *defaultMsgValue)
rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
lmp := &logMessageProcessor{
cp: cp,
lr: lr,

rowsIngestedTotal: rowsIngestedTotal,
bytesIngestedTotal: bytesIngestedTotal,

stopCh: make(chan struct{}),
}

if isStreamMode {
lmp.initPeriodicFlush()
}

return lmp
}

var (
rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
)
@@ -1,17 +0,0 @@
package insertutil

import (
"flag"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have")

// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain")
)
@@ -1,103 +0,0 @@
package insertutil

import (
"fmt"
"math"
"strconv"
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// ExtractTimestampFromFields extracts a timestamp in nanoseconds from the first field in fields whose name matches one of timeFields.
//
// The value for the corresponding timeFields is set to empty string after returning from the function,
// so it could be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with timeField name or if the timeField value is empty.
func ExtractTimestampFromFields(timeFields []string, fields []logstorage.Field) (int64, error) {
for _, timeField := range timeFields {
for i := range fields {
f := &fields[i]
if f.Name != timeField {
continue
}
nsecs, err := parseTimestamp(f.Value)
if err != nil {
return 0, fmt.Errorf("cannot parse timestamp from field %q: %s", f.Name, err)
}
f.Value = ""
if nsecs == 0 {
nsecs = time.Now().UnixNano()
}
return nsecs, nil
}
}
return time.Now().UnixNano(), nil
}

func parseTimestamp(s string) (int64, error) {
if s == "" || s == "0" {
return time.Now().UnixNano(), nil
}
if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {
return ParseUnixTimestamp(s)
}
nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
if !ok {
return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp %q", s)
}
return nsecs, nil
}

// ParseUnixTimestamp parses s as unix timestamp in seconds, milliseconds, microseconds or nanoseconds and returns the parsed timestamp in nanoseconds.
func ParseUnixTimestamp(s string) (int64, error) {
if strings.IndexByte(s, '.') >= 0 {
// Parse timestamp as floating-point value
f, err := strconv.ParseFloat(s, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
}
if f < (1<<31) && f >= (-1<<31) {
// The timestamp is in seconds.
return int64(f * 1e9), nil
}
if f < 1e3*(1<<31) && f >= 1e3*(-1<<31) {
// The timestamp is in milliseconds.
return int64(f * 1e6), nil
}
if f < 1e6*(1<<31) && f >= 1e6*(-1<<31) {
// The timestamp is in microseconds.
return int64(f * 1e3), nil
}
// The timestamp is in nanoseconds
if f > math.MaxInt64 {
return 0, fmt.Errorf("too big timestamp in nanoseconds: %v; mustn't exceed %v", f, int64(math.MaxInt64))
}
if f < math.MinInt64 {
return 0, fmt.Errorf("too small timestamp in nanoseconds: %v; must be bigger or equal to %v", f, int64(math.MinInt64))
}
return int64(f), nil
}

// Parse timestamp as integer
n, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
}
if n < (1<<31) && n >= (-1<<31) {
// The timestamp is in seconds.
return n * 1e9, nil
}
if n < 1e3*(1<<31) && n >= 1e3*(-1<<31) {
// The timestamp is in milliseconds.
return n * 1e6, nil
}
if n < 1e6*(1<<31) && n >= 1e6*(-1<<31) {
// The timestamp is in microseconds.
return n * 1e3, nil
}
// The timestamp is in nanoseconds
return n, nil
}
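A worked reading of the unit heuristic above (not part of the diff; the values are taken from the tests that follow): the magnitude of the number picks the unit, and everything is normalized to nanoseconds.

// ParseUnixTimestamp("1234567890")          -> 1234567890000000000 (interpreted as seconds)
// ParseUnixTimestamp("1234567890123")       -> 1234567890123000000 (milliseconds)
// ParseUnixTimestamp("1234567890123456")    -> 1234567890123456000 (microseconds)
// ParseUnixTimestamp("1234567890123456789") -> 1234567890123456789 (nanoseconds)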
@@ -1,158 +0,0 @@
package insertutil

import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func TestParseUnixTimestamp_Success(t *testing.T) {
	f := func(s string, timestampExpected int64) {
		t.Helper()

		timestamp, err := ParseUnixTimestamp(s)
		if err != nil {
			t.Fatalf("unexpected error in ParseUnixTimestamp(%q): %s", s, err)
		}
		if timestamp != timestampExpected {
			t.Fatalf("unexpected timestamp returned from ParseUnixTimestamp(%q); got %d; want %d", s, timestamp, timestampExpected)
		}
	}

	f("0", 0)

	// nanoseconds
	f("-1234567890123456789", -1234567890123456789)
	f("1234567890123456789", 1234567890123456789)

	// microseconds
	f("-1234567890123456", -1234567890123456000)
	f("1234567890123456", 1234567890123456000)
	f("1234567890123456.789", 1234567890123456768)

	// milliseconds
	f("-1234567890123", -1234567890123000000)
	f("1234567890123", 1234567890123000000)
	f("1234567890123.456", 1234567890123456000)

	// seconds
	f("-1234567890", -1234567890000000000)
	f("1234567890", 1234567890000000000)
	f("-1234567890.123456", -1234567890123456000)
}

func TestParseUnixTimestamp_Failure(t *testing.T) {
	f := func(s string) {
		t.Helper()

		_, err := ParseUnixTimestamp(s)
		if err == nil {
			t.Fatalf("expecting non-nil error in ParseUnixTimestamp(%q)", s)
		}
	}

	// non-numeric timestamp
	f("")
	f("foobar")
	f("foo.bar")

	// too big timestamp
	f("12345678901234567890")
	f("-12345678901234567890")
	f("12345678901234567890.235424")
	f("-12345678901234567890.235424")
}

func TestExtractTimestampFromFields_Success(t *testing.T) {
	f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
		t.Helper()

		nsecs, err := ExtractTimestampFromFields([]string{timeField}, fields)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if nsecs != nsecsExpected {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, nsecsExpected)
		}

		for _, f := range fields {
			if f.Name == timeField {
				if f.Value != "" {
					t.Fatalf("unexpected value for field %s; got %q; want %q", timeField, f.Value, "")
				}
			}
		}
	}

	// UTC time
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20Z"},
	}, 1718753840000000000)

	// Time with timezone
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
	}, 1718725040000000000)

	// SQL datetime format
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18 23:37:20.123-05:30"},
	}, 1718773640123000000)

	// Time with nanosecond precision
	f("time", []logstorage.Field{
		{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
		{Name: "foo", Value: "bar"},
	}, 1718773640123456789)

	// Unix timestamp in nanoseconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640123456789"},
	}, 1718773640123456789)

	// Unix timestamp in microseconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640123456"},
	}, 1718773640123456000)

	// Unix timestamp in milliseconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640123"},
	}, 1718773640123000000)

	// Unix timestamp in seconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640"},
	}, 1718773640000000000)
}

func TestExtractTimestampFromFields_Error(t *testing.T) {
	f := func(s string) {
		t.Helper()

		fields := []logstorage.Field{
			{Name: "time", Value: s},
		}
		nsecs, err := ExtractTimestampFromFields([]string{"time"}, fields)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}
		if nsecs != 0 {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, 0)
		}
	}

	// invalid time
	f("foobar")

	// incomplete time
	f("2024-06-18")
	f("2024-06-18T23:37")
}
262 app/vlinsert/insertutils/common_params.go Normal file
@@ -0,0 +1,262 @@
package insertutils

import (
	"flag"
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
)

var (
	defaultMsgValue = flag.String("defaultMsgValue", "missing _msg field; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field",
		"Default value for _msg field if the ingested log entry doesn't contain it; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#message-field")
)

// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/victorialogs/data-ingestion/#http-parameters
type CommonParams struct {
	TenantID     logstorage.TenantID
	TimeField    string
	MsgFields    []string
	StreamFields []string
	IgnoreFields []string
	ExtraFields  []logstorage.Field

	Debug           bool
	DebugRequestURI string
	DebugRemoteAddr string
}

// GetCommonParams returns CommonParams from r.
func GetCommonParams(r *http.Request) (*CommonParams, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, err
	}

	timeField := "_time"
	if tf := httputils.GetRequestValue(r, "_time_field", "VL-Time-Field"); tf != "" {
		timeField = tf
	}

	msgFields := httputils.GetArray(r, "_msg_field", "VL-Msg-Field")
	streamFields := httputils.GetArray(r, "_stream_fields", "VL-Stream-Fields")
	ignoreFields := httputils.GetArray(r, "ignore_fields", "VL-Ignore-Fields")

	extraFields, err := getExtraFields(r)
	if err != nil {
		return nil, err
	}

	debug := false
	if dv := httputils.GetRequestValue(r, "debug", "VL-Debug"); dv != "" {
		debug, err = strconv.ParseBool(dv)
		if err != nil {
			return nil, fmt.Errorf("cannot parse debug=%q: %w", dv, err)
		}
	}
	debugRequestURI := ""
	debugRemoteAddr := ""
	if debug {
		debugRequestURI = httpserver.GetRequestURI(r)
		debugRemoteAddr = httpserver.GetQuotedRemoteAddr(r)
	}

	cp := &CommonParams{
		TenantID:        tenantID,
		TimeField:       timeField,
		MsgFields:       msgFields,
		StreamFields:    streamFields,
		IgnoreFields:    ignoreFields,
		ExtraFields:     extraFields,
		Debug:           debug,
		DebugRequestURI: debugRequestURI,
		DebugRemoteAddr: debugRemoteAddr,
	}

	return cp, nil
}

func getExtraFields(r *http.Request) ([]logstorage.Field, error) {
	efs := httputils.GetArray(r, "extra_fields", "VL-Extra-Fields")
	if len(efs) == 0 {
		return nil, nil
	}

	extraFields := make([]logstorage.Field, len(efs))
	for i, ef := range efs {
		n := strings.Index(ef, "=")
		if n <= 0 || n == len(ef)-1 {
			return nil, fmt.Errorf(`invalid extra_field format: %q; must be in the form "field=value"`, ef)
		}
		extraFields[i] = logstorage.Field{
			Name:  ef[:n],
			Value: ef[n+1:],
		}
	}
	return extraFields, nil
}

// GetCommonParamsForSyslog returns common params needed for parsing syslog messages and storing them to the given tenantID.
func GetCommonParamsForSyslog(tenantID logstorage.TenantID, streamFields, ignoreFields []string, extraFields []logstorage.Field) *CommonParams {
	// See https://docs.victoriametrics.com/victorialogs/logsql/#unpack_syslog-pipe
	if streamFields == nil {
		streamFields = []string{
			"hostname",
			"app_name",
			"proc_id",
		}
	}
	cp := &CommonParams{
		TenantID:  tenantID,
		TimeField: "timestamp",
		MsgFields: []string{
			"message",
		},
		StreamFields: streamFields,
		IgnoreFields: ignoreFields,
		ExtraFields:  extraFields,
	}

	return cp
}

// LogMessageProcessor is an interface for log message processors.
type LogMessageProcessor interface {
	// AddRow must add row to the LogMessageProcessor with the given timestamp and fields.
	//
	// If streamFields is non-nil, then the given streamFields must be used as log stream fields instead of pre-configured fields.
	//
	// The LogMessageProcessor implementation cannot hold references to fields, since the caller can re-use them.
	AddRow(timestamp int64, fields, streamFields []logstorage.Field)

	// MustClose() must flush all the remaining fields and free up resources occupied by LogMessageProcessor.
	MustClose()
}

type logMessageProcessor struct {
	mu            sync.Mutex
	wg            sync.WaitGroup
	stopCh        chan struct{}
	lastFlushTime time.Time

	cp *CommonParams
	lr *logstorage.LogRows

	rowsIngestedTotal  *metrics.Counter
	bytesIngestedTotal *metrics.Counter
}

func (lmp *logMessageProcessor) initPeriodicFlush() {
	lmp.lastFlushTime = time.Now()

	lmp.wg.Add(1)
	go func() {
		defer lmp.wg.Done()

		d := timeutil.AddJitterToDuration(time.Second)
		ticker := time.NewTicker(d)
		defer ticker.Stop()

		for {
			select {
			case <-lmp.stopCh:
				return
			case <-ticker.C:
				lmp.mu.Lock()
				if time.Since(lmp.lastFlushTime) >= d {
					lmp.flushLocked()
				}
				lmp.mu.Unlock()
			}
		}
	}()
}

// AddRow adds new log message to lmp with the given timestamp and fields.
//
// If streamFields is non-nil, then it is used as log stream fields instead of the pre-configured stream fields.
func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []logstorage.Field) {
	lmp.mu.Lock()
	defer lmp.mu.Unlock()

	lmp.rowsIngestedTotal.Inc()
	n := logstorage.EstimatedJSONRowLen(fields)
	lmp.bytesIngestedTotal.Add(n)

	if len(fields) > *MaxFieldsPerLine {
		rf := logstorage.RowFormatter(fields)
		logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
		rowsDroppedTotalTooManyFields.Inc()
		return
	}

	lmp.lr.MustAdd(lmp.cp.TenantID, timestamp, fields, streamFields)
	if lmp.cp.Debug {
		s := lmp.lr.GetRowString(0)
		lmp.lr.ResetKeepSettings()
		logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` arg: %s", lmp.cp.DebugRemoteAddr, lmp.cp.DebugRequestURI, s)
		rowsDroppedTotalDebug.Inc()
		return
	}
	if lmp.lr.NeedFlush() {
		lmp.flushLocked()
	}
}

// flushLocked must be called under locked lmp.mu.
func (lmp *logMessageProcessor) flushLocked() {
	lmp.lastFlushTime = time.Now()
	vlstorage.MustAddRows(lmp.lr)
	lmp.lr.ResetKeepSettings()
}

// MustClose flushes the remaining data to the underlying storage and closes lmp.
func (lmp *logMessageProcessor) MustClose() {
	close(lmp.stopCh)
	lmp.wg.Wait()

	lmp.flushLocked()
	logstorage.PutLogRows(lmp.lr)
	lmp.lr = nil
}

// NewLogMessageProcessor returns new LogMessageProcessor for the given cp.
//
// MustClose() must be called on the returned LogMessageProcessor when it is no longer needed.
func (cp *CommonParams) NewLogMessageProcessor(protocolName string) LogMessageProcessor {
	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields, cp.ExtraFields, *defaultMsgValue)
	rowsIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_rows_ingested_total{type=%q}", protocolName))
	bytesIngestedTotal := metrics.GetOrCreateCounter(fmt.Sprintf("vl_bytes_ingested_total{type=%q}", protocolName))
	lmp := &logMessageProcessor{
		cp: cp,
		lr: lr,

		rowsIngestedTotal:  rowsIngestedTotal,
		bytesIngestedTotal: bytesIngestedTotal,

		stopCh: make(chan struct{}),
	}
	lmp.initPeriodicFlush()

	return lmp
}

var (
	rowsDroppedTotalDebug         = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
	rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)
)
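Each parameter above can arrive either as a query arg (extra_fields, debug, ...) or as the matching VL-* HTTP header. The name/value split in getExtraFields is strict at the edges: the first '=' separates name from value, and both sides must be non-empty. A small self-contained sketch of the same rule (splitExtraField is a hypothetical stand-in for illustration):

package main

import (
	"fmt"
	"strings"
)

// splitExtraField mirrors getExtraFields above: the first '=' splits the
// field name from its value; an empty name or empty value is rejected.
func splitExtraField(ef string) (name, value string, ok bool) {
	n := strings.Index(ef, "=")
	if n <= 0 || n == len(ef)-1 {
		return "", "", false
	}
	return ef[:n], ef[n+1:], true
}

func main() {
	for _, ef := range []string{"env=prod", "k=v=1", "=oops", "trailing="} {
		name, value, ok := splitExtraField(ef)
		fmt.Printf("%q -> name=%q value=%q ok=%v\n", ef, name, value, ok)
	}
}

Note that "k=v=1" keeps everything after the first '=' as the value, so values may themselves contain '=' characters.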
15 app/vlinsert/insertutils/flags.go Normal file
@@ -0,0 +1,15 @@
package insertutils

import (
	"flag"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
	// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")

	// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
	MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
)
@@ -1,4 +1,4 @@
package insertutil
package insertutils

import (
	"bytes"
@@ -15,8 +15,6 @@ import (
// LineReader reads newline-delimited lines from the underlying reader
type LineReader struct {
	// Line contains the next line read after the call to NextLine
	//
	// The Line contents is valid until the next call to NextLine.
	Line []byte

	// name is the LineReader name
@@ -28,9 +26,6 @@ type LineReader struct {
	// buf is a buffer for reading the next line
	buf []byte

	// bufOffset is the offset at buf to read the next line from
	bufOffset int

	// err is the last error when reading data from r
	err error

@@ -56,27 +51,26 @@ func NewLineReader(name string, r io.Reader) *LineReader {
// Check for Err in this case.
func (lr *LineReader) NextLine() bool {
	for {
		if lr.bufOffset >= len(lr.buf) {
		if len(lr.buf) == 0 {
			if lr.err != nil || lr.eofReached {
				return false
			}
			if !lr.readMoreData() {
				return false
			}
			if lr.bufOffset >= len(lr.buf) && lr.eofReached {
			if len(lr.buf) == 0 && lr.eofReached {
				return false
			}
		}

		buf := lr.buf[lr.bufOffset:]
		if n := bytes.IndexByte(buf, '\n'); n >= 0 {
			lr.Line = buf[:n]
			lr.bufOffset += n + 1
		if n := bytes.IndexByte(lr.buf, '\n'); n >= 0 {
			lr.Line = append(lr.Line[:0], lr.buf[:n]...)
			lr.buf = append(lr.buf[:0], lr.buf[n+1:]...)
			return true
		}
		if lr.eofReached {
			lr.Line = buf
			lr.bufOffset += len(buf)
			lr.Line = append(lr.Line[:0], lr.buf...)
			lr.buf = lr.buf[:0]
			return true
		}
		if !lr.readMoreData() {
@@ -94,11 +88,6 @@ func (lr *LineReader) Err() error {
}

func (lr *LineReader) readMoreData() bool {
	if lr.bufOffset > 0 {
		lr.buf = append(lr.buf[:0], lr.buf[lr.bufOffset:]...)
		lr.bufOffset = 0
	}

	bufLen := len(lr.buf)
	if bufLen >= MaxLineSizeBytes.IntN() {
		logger.Warnf("%s: the line length exceeds -insert.maxLineSizeBytes=%d; skipping it; line contents=%q", lr.name, MaxLineSizeBytes.IntN(), lr.buf)
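The two NextLine variants in this hunk manage the buffer differently: the offset-based version returns a Line that aliases buf, so the slice is only valid until the next call, while the copy-based version pays an extra copy per line to avoid that aliasing. A simplified sketch of the offset-based approach and its caveat (nextLine is a hypothetical helper, not the actual implementation):

package main

import (
	"bytes"
	"fmt"
)

// nextLine is a stripped-down, offset-based line scanner: the returned
// line aliases buf, so it must be consumed (or copied) before buf is
// refilled or mutated.
func nextLine(buf []byte, offset int) (line []byte, newOffset int, ok bool) {
	if offset >= len(buf) {
		return nil, offset, false
	}
	rest := buf[offset:]
	if n := bytes.IndexByte(rest, '\n'); n >= 0 {
		return rest[:n], offset + n + 1, true
	}
	// No trailing newline: return the remainder as the final line.
	return rest, len(buf), true
}

func main() {
	buf := []byte("foo\nbar\nbaz")
	offset := 0
	for {
		line, next, ok := nextLine(buf, offset)
		if !ok {
			break
		}
		fmt.Printf("%q\n", line)
		offset = next
	}
}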
@@ -1,4 +1,4 @@
package insertutil
package insertutils

import (
	"bytes"
@@ -1,4 +1,4 @@
package insertutil
package insertutils

import (
	"fmt"
69 app/vlinsert/insertutils/timestamp.go Normal file
@@ -0,0 +1,69 @@
package insertutils

import (
	"fmt"
	"math"
	"strconv"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// ExtractTimestampRFC3339NanoFromFields extracts RFC3339 timestamp in nanoseconds from the field with the name timeField at fields.
//
// The value for the timeField is set to empty string after returning from the function,
// so it could be ignored during data ingestion.
//
// The current timestamp is returned if fields do not contain a field with timeField name or if the timeField value is empty.
func ExtractTimestampRFC3339NanoFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {
			continue
		}
		nsecs, err := parseTimestamp(f.Value)
		if err != nil {
			return 0, fmt.Errorf("cannot parse timestamp from field %q: %s", timeField, err)
		}
		f.Value = ""
		if nsecs == 0 {
			nsecs = time.Now().UnixNano()
		}
		return nsecs, nil
	}
	return time.Now().UnixNano(), nil
}

func parseTimestamp(s string) (int64, error) {
	if s == "" || s == "0" {
		return time.Now().UnixNano(), nil
	}
	if len(s) <= len("YYYY") || s[len("YYYY")] != '-' {
		return ParseUnixTimestamp(s)
	}
	nsecs, ok := logstorage.TryParseTimestampRFC3339Nano(s)
	if !ok {
		return 0, fmt.Errorf("cannot unmarshal rfc3339 timestamp %q", s)
	}
	return nsecs, nil
}

// ParseUnixTimestamp parses s as unix timestamp in either seconds or milliseconds and returns the parsed timestamp in nanoseconds.
func ParseUnixTimestamp(s string) (int64, error) {
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("cannot parse unix timestamp from %q: %w", s, err)
	}
	if n < (1<<31) && n >= (-1<<31) {
		// The timestamp is in seconds. Convert it to milliseconds
		n *= 1e3
	}
	if n > int64(math.MaxInt64)/1e6 {
		return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
	}
	if n < int64(math.MinInt64)/1e6 {
		return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
	}
	n *= 1e6
	return n, nil
}
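Unlike the deleted insertutil helper, this version only distinguishes seconds from milliseconds before scaling everything to nanoseconds. A stripped-down sketch of that conversion path (toNanos is illustrative only; the overflow guards above are omitted here):

package main

import "fmt"

// toNanos follows ParseUnixTimestamp above: values below 2^31 are treated
// as seconds and promoted to milliseconds first, then everything is scaled
// from milliseconds to nanoseconds.
func toNanos(n int64) int64 {
	if n < (1<<31) && n >= (-1 << 31) {
		n *= 1e3 // seconds -> milliseconds
	}
	return n * 1e6 // milliseconds -> nanoseconds
}

func main() {
	fmt.Println(toNanos(1718773640))    // seconds: 1718773640000000000
	fmt.Println(toNanos(1718773640123)) // milliseconds: 1718773640123000000
}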
88 app/vlinsert/insertutils/timestamp_test.go Normal file
@@ -0,0 +1,88 @@
package insertutils

import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func TestExtractTimestampRFC3339NanoFromFields_Success(t *testing.T) {
	f := func(timeField string, fields []logstorage.Field, nsecsExpected int64) {
		t.Helper()

		nsecs, err := ExtractTimestampRFC3339NanoFromFields(timeField, fields)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if nsecs != nsecsExpected {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, nsecsExpected)
		}

		for _, f := range fields {
			if f.Name == timeField {
				if f.Value != "" {
					t.Fatalf("unexpected value for field %s; got %q; want %q", timeField, f.Value, "")
				}
			}
		}
	}

	// UTC time
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20Z"},
	}, 1718753840000000000)

	// Time with timezone
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18T23:37:20+08:00"},
	}, 1718725040000000000)

	// SQL datetime format
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "2024-06-18 23:37:20.123-05:30"},
	}, 1718773640123000000)

	// Time with nanosecond precision
	f("time", []logstorage.Field{
		{Name: "time", Value: "2024-06-18T23:37:20.123456789-05:30"},
		{Name: "foo", Value: "bar"},
	}, 1718773640123456789)

	// Unix timestamp in milliseconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640123"},
	}, 1718773640123000000)

	// Unix timestamp in seconds
	f("time", []logstorage.Field{
		{Name: "foo", Value: "bar"},
		{Name: "time", Value: "1718773640"},
	}, 1718773640000000000)
}

func TestExtractTimestampRFC3339NanoFromFields_Error(t *testing.T) {
	f := func(s string) {
		t.Helper()

		fields := []logstorage.Field{
			{Name: "time", Value: s},
		}
		nsecs, err := ExtractTimestampRFC3339NanoFromFields("time", fields)
		if err == nil {
			t.Fatalf("expecting non-nil error")
		}
		if nsecs != 0 {
			t.Fatalf("unexpected nsecs; got %d; want %d", nsecs, 0)
		}
	}

	f("foobar")

	// incomplete time
	f("2024-06-18")
	f("2024-06-18T23:37")
}
@@ -1,96 +0,0 @@
package internalinsert

import (
	"flag"
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netinsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
)

var (
	disableInsert  = flag.Bool("internalinsert.disable", false, "Whether to disable /internal/insert HTTP endpoint")
	maxRequestSize = flagutil.NewBytes("internalinsert.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single request, which can be accepted at /internal/insert HTTP endpoint")
)

// RequestHandler processes /internal/insert requests.
func RequestHandler(w http.ResponseWriter, r *http.Request) {
	if *disableInsert {
		httpserver.Errorf(w, r, "requests to /internal/insert are disabled with -internalinsert.disable command-line flag")
		return
	}

	startTime := time.Now()
	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return
	}
	version := r.FormValue("version")
	if version != netinsert.ProtocolVersion {
		httpserver.Errorf(w, r, "unsupported protocol version=%q; want %q", version, netinsert.ProtocolVersion)
		return
	}

	requestsTotal.Inc()

	cp, err := insertutil.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if err := vlstorage.CanWriteData(); err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	encoding := r.Header.Get("Content-Encoding")
	err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
		lmp := cp.NewLogMessageProcessor("internalinsert", false)
		irp := lmp.(insertutil.InsertRowProcessor)
		err := parseData(irp, data)
		lmp.MustClose()
		return err
	})
	if err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "cannot parse internal insert request: %s", err)
		return
	}

	requestDuration.UpdateDuration(startTime)
}

func parseData(irp insertutil.InsertRowProcessor, data []byte) error {
	r := logstorage.GetInsertRow()
	src := data
	i := 0
	for len(src) > 0 {
		tail, err := r.UnmarshalInplace(src)
		if err != nil {
			return fmt.Errorf("cannot parse row #%d: %s", i, err)
		}
		src = tail
		i++

		irp.AddInsertRow(r)
	}
	logstorage.PutInsertRow(r)

	return nil
}

var (
	requestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/internal/insert"}`)
	errorsTotal   = metrics.NewCounter(`vl_http_errors_total{path="/internal/insert"}`)

	requestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/internal/insert"}`)
)
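parseData drains a request body carrying back-to-back marshaled rows by repeatedly unmarshaling from the head of the slice until the tail is empty. The actual netinsert wire format is not shown in this diff; the sketch below only demonstrates the same consume-the-tail loop over a made-up length-prefixed record format:

package main

import (
	"encoding/binary"
	"fmt"
)

// unmarshalRecord is a stand-in for UnmarshalInplace: it reads one
// length-prefixed record from the head of src and returns the tail.
// The 4-byte little-endian prefix is invented for this sketch.
func unmarshalRecord(src []byte) (record, tail []byte, err error) {
	if len(src) < 4 {
		return nil, nil, fmt.Errorf("too short prefix: %d bytes", len(src))
	}
	n := binary.LittleEndian.Uint32(src)
	src = src[4:]
	if uint32(len(src)) < n {
		return nil, nil, fmt.Errorf("record truncated: want %d bytes; have %d", n, len(src))
	}
	return src[:n], src[n:], nil
}

func main() {
	// Two records: "foo" and "barbaz".
	data := []byte{3, 0, 0, 0, 'f', 'o', 'o', 6, 0, 0, 0, 'b', 'a', 'r', 'b', 'a', 'z'}
	i := 0
	for len(data) > 0 {
		rec, tail, err := unmarshalRecord(data)
		if err != nil {
			fmt.Printf("cannot parse record #%d: %s\n", i, err)
			return
		}
		fmt.Printf("record #%d: %q\n", i, rec)
		data = tail
		i++
	}
}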
@@ -5,6 +5,7 @@ import (
	"encoding/binary"
	"flag"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"slices"
@@ -12,37 +13,39 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

// See https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
const journaldEntryMaxNameLen = 64

var allowedJournaldEntryNameChars = regexp.MustCompile(`^[A-Z_][A-Z0-9_]*`)

var (
	journaldStreamFields = flagutil.NewArrayString("journald.streamFields", "Comma-separated list of fields to use as log stream fields for logs ingested over journald protocol. "+
		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#stream-fields")
	journaldIgnoreFields = flagutil.NewArrayString("journald.ignoreFields", "Comma-separated list of fields to ignore for logs ingested over journald protocol. "+
		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#dropping-fields")
	journaldTimeField = flag.String("journald.timeField", "__REALTIME_TIMESTAMP", "Field to use as a log timestamp for logs ingested via journald protocol. "+
		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#time-field")
	journaldTenantID = flag.String("journald.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint. "+
		"See https://docs.victoriametrics.com/victorialogs/data-ingestion/journald/#multitenancy")
	journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include journal entry fields, which with double underscores.")

	maxRequestSize = flagutil.NewBytes("journald.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single journald request")
const (
	journaldEntryMaxNameLen = 64
)

func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) {
	cp, err := insertutil.GetCommonParams(r)
var (
	bodyBufferPool                bytesutil.ByteBufferPool
	allowedJournaldEntryNameChars = regexp.MustCompile(`^[A-Z_][A-Z0-9_]+`)
)

var (
	journaldStreamFields = flagutil.NewArrayString("journald.streamFields", "Journal fields to be used as stream fields. "+
		"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
	journaldIgnoreFields = flagutil.NewArrayString("journald.ignoreFields", "Journal fields to ignore. "+
		"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
	journaldTimeField = flag.String("journald.timeField", "__REALTIME_TIMESTAMP", "Journal field to be used as time field. "+
		"See the list of allowed fields at https://www.freedesktop.org/software/systemd/man/latest/systemd.journal-fields.html.")
	journaldTenantID             = flag.String("journald.tenantID", "0:0", "TenantID for logs ingested via the Journald endpoint.")
	journaldIncludeEntryMetadata = flag.Bool("journald.includeEntryMetadata", false, "Include journal entry fields, which with double underscores.")
)

func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		return nil, err
	}
@@ -53,8 +56,8 @@ func getCommonParams(r *http.Request) (*insertutil.CommonParams, error) {
		}
		cp.TenantID = tenantID
	}
	if len(cp.TimeFields) == 0 {
		cp.TimeFields = []string{*journaldTimeField}
	if cp.TimeField != "" {
		cp.TimeField = *journaldTimeField
	}
	if len(cp.StreamFields) == 0 {
		cp.StreamFields = *journaldStreamFields
@@ -86,36 +89,45 @@ func handleJournald(r *http.Request, w http.ResponseWriter) {
	startTime := time.Now()
	requestsJournaldTotal.Inc()

	cp, err := getCommonParams(r)
	if err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}

	if err := vlstorage.CanWriteData(); err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	encoding := r.Header.Get("Content-Encoding")
	err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
		lmp := cp.NewLogMessageProcessor("journald", false)
		err := parseJournaldRequest(data, lmp, cp)
		lmp.MustClose()
		return err
	})
	reader := r.Body
	var err error

	wcr := writeconcurrencylimiter.GetReader(reader)
	data, err := io.ReadAll(wcr)
	if err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "cannot read journald protocol data: %s", err)
		httpserver.Errorf(w, r, "cannot read request body: %s", err)
		return
	}
	writeconcurrencylimiter.PutReader(wcr)
	bb := bodyBufferPool.Get()
	defer bodyBufferPool.Put(bb)
	if r.Header.Get("Content-Encoding") == "zstd" {
		bb.B, err = zstd.Decompress(bb.B[:0], data)
		if err != nil {
			httpserver.Errorf(w, r, "cannot decompress zstd-encoded request with length %d: %s", len(data), err)
			return
		}
		data = bb.B
	}
	cp, err := getCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
		return
	}

	// systemd starting release v258 will support compression, which starts working after negotiation: it expects supported compression
	// algorithms list in Accept-Encoding response header in a format "<algorithm_1>[:<priority_1>][;<algorithm_2>:<priority_2>]"
	// See https://github.com/systemd/systemd/pull/34822
	w.Header().Set("Accept-Encoding", "zstd")
	lmp := cp.NewLogMessageProcessor("journald")
	err = parseJournaldRequest(data, lmp, cp)
	lmp.MustClose()
	if err != nil {
		errorsTotal.Inc()
		httpserver.Errorf(w, r, "cannot parse Journald protobuf request: %s", err)
		return
	}

	// update requestJournaldDuration only for successfully parsed requests
	// There is no need in updating requestJournaldDuration for request errors,
@@ -131,7 +143,7 @@ var (
)

// See https://systemd.io/JOURNAL_EXPORT_FORMATS/#journal-export-format
func parseJournaldRequest(data []byte, lmp insertutil.LogMessageProcessor, cp *insertutil.CommonParams) error {
func parseJournaldRequest(data []byte, lmp insertutils.LogMessageProcessor, cp *insertutils.CommonParams) error {
	var fields []logstorage.Field
	var ts int64
	var size uint64
@@ -181,7 +193,7 @@ func parseJournaldRequest(data []byte, lmp insertutil.LogMessageProcessor, cp *i
	if err != nil {
		return fmt.Errorf("failed to extract binary field %q value size: %w", name, err)
	}
	// skip binary data size
	data = data[idx:]
	if size == 0 {
		return fmt.Errorf("unexpected zero binary data size decoded %d", size)
@@ -201,13 +213,14 @@ func parseJournaldRequest(data []byte, lmp insertutil.LogMessageProcessor, cp *i
		}
		data = data[1:]
	}
	// https://github.com/systemd/systemd/blob/main/src/libsystemd/sd-journal/journal-file.c#L1703
	if len(name) > journaldEntryMaxNameLen {
		return fmt.Errorf("journald entry name should not exceed %d symbols, got: %q", journaldEntryMaxNameLen, name)
	}
	if !allowedJournaldEntryNameChars.MatchString(name) {
		return fmt.Errorf("journald entry name should consist of `A-Z0-9_` characters and must start from non-digit symbol")
	}
	if slices.Contains(cp.TimeFields, name) {
	if name == cp.TimeField {
		n, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return fmt.Errorf("failed to parse Journald timestamp, %w", err)
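Per the journal export format referenced above, a field whose value contains newlines is serialized as the field name, a newline, a little-endian 64-bit payload size, the raw payload, and a trailing newline - exactly the MESSAGE\n\x13\x00... shape that appears in the test payload below. A small sketch of producing that encoding (appendBinaryField is illustrative, not repo API):

package main

import (
	"encoding/binary"
	"fmt"
)

// appendBinaryField encodes a journal-export-format binary field:
// field name, '\n', little-endian uint64 payload size, raw payload,
// and a trailing '\n'.
func appendBinaryField(dst []byte, name string, value []byte) []byte {
	dst = append(dst, name...)
	dst = append(dst, '\n')
	dst = binary.LittleEndian.AppendUint64(dst, uint64(len(value)))
	dst = append(dst, value...)
	dst = append(dst, '\n')
	return dst
}

func main() {
	entry := appendBinaryField(nil, "MESSAGE", []byte("foo\nbar"))
	fmt.Printf("%q\n", entry) // "MESSAGE\n\x07\x00\x00\x00\x00\x00\x00\x00foo\nbar\n"
}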
@@ -3,16 +3,16 @@ package journald
import (
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestPushJournaldOk(t *testing.T) {
	f := func(src string, timestampsExpected []int64, resultExpected string) {
		t.Helper()
		tlp := &insertutil.TestLogMessageProcessor{}
		cp := &insertutil.CommonParams{
			TimeFields: []string{"__REALTIME_TIMESTAMP"},
			MsgFields:  []string{"MESSAGE"},
		tlp := &insertutils.TestLogMessageProcessor{}
		cp := &insertutils.CommonParams{
			TimeField: "__REALTIME_TIMESTAMP",
			MsgFields: []string{"MESSAGE"},
		}
		if err := parseJournaldRequest([]byte(src), tlp, cp); err != nil {
			t.Fatalf("unexpected error: %s", err)
@@ -35,19 +35,19 @@ func TestPushJournaldOk(t *testing.T) {
	)

	// Parse binary data
	f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\nE=JobStateChanged\n__REALTIME_TIMESTAMP=1729698775704404\n__MONOTONIC_TIMESTAMP=206357648416\n__SEQNUM=7942\n__SEQNUM_ID=e0afe8412a6a49d2bfcf66aa7927b588\n_BOOT_ID=f778b6e2f7584a77b991a2366612a7b5\n_UID=0\n_GID=0\n_MACHINE_ID=a4a970370c30a925df02a13c67167847\n_HOSTNAME=ecd5e4555787\n_RUNTIME_SCOPE=system\n_TRANSPORT=journal\n_CAP_EFFECTIVE=1ffffffffff\n_SYSTEMD_CGROUP=/init.scope\n_SYSTEMD_UNIT=init.scope\n_SYSTEMD_SLICE=-.slice\nCODE_FILE=<stdin>\nCODE_LINE=1\nCODE_FUNC=<module>\nSYSLOG_IDENTIFIER=python3\n_COMM=python3\n_EXE=/usr/bin/python3.12\n_CMDLINE=python3\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasda\nasda\n_PID=2763\n_SOURCE_REALTIME_TIMESTAMP=1729698775704375\n\n",
	f("__CURSOR=s=e0afe8412a6a49d2bfcf66aa7927b588;i=1f06;b=f778b6e2f7584a77b991a2366612a7b5;m=300bdfd420;t=62526e1182354;x=930dc44b370963b7\n__REALTIME_TIMESTAMP=1729698775704404\n__MONOTONIC_TIMESTAMP=206357648416\n__SEQNUM=7942\n__SEQNUM_ID=e0afe8412a6a49d2bfcf66aa7927b588\n_BOOT_ID=f778b6e2f7584a77b991a2366612a7b5\n_UID=0\n_GID=0\n_MACHINE_ID=a4a970370c30a925df02a13c67167847\n_HOSTNAME=ecd5e4555787\n_RUNTIME_SCOPE=system\n_TRANSPORT=journal\n_CAP_EFFECTIVE=1ffffffffff\n_SYSTEMD_CGROUP=/init.scope\n_SYSTEMD_UNIT=init.scope\n_SYSTEMD_SLICE=-.slice\nCODE_FILE=<stdin>\nCODE_LINE=1\nCODE_FUNC=<module>\nSYSLOG_IDENTIFIER=python3\n_COMM=python3\n_EXE=/usr/bin/python3.12\n_CMDLINE=python3\nMESSAGE\n\x13\x00\x00\x00\x00\x00\x00\x00foo\nbar\n\n\nasda\nasda\n_PID=2763\n_SOURCE_REALTIME_TIMESTAMP=1729698775704375\n\n",
		[]int64{1729698775704404000},
		"{\"E\":\"JobStateChanged\",\"_BOOT_ID\":\"f778b6e2f7584a77b991a2366612a7b5\",\"_UID\":\"0\",\"_GID\":\"0\",\"_MACHINE_ID\":\"a4a970370c30a925df02a13c67167847\",\"_HOSTNAME\":\"ecd5e4555787\",\"_RUNTIME_SCOPE\":\"system\",\"_TRANSPORT\":\"journal\",\"_CAP_EFFECTIVE\":\"1ffffffffff\",\"_SYSTEMD_CGROUP\":\"/init.scope\",\"_SYSTEMD_UNIT\":\"init.scope\",\"_SYSTEMD_SLICE\":\"-.slice\",\"CODE_FILE\":\"\\u003cstdin>\",\"CODE_LINE\":\"1\",\"CODE_FUNC\":\"\\u003cmodule>\",\"SYSLOG_IDENTIFIER\":\"python3\",\"_COMM\":\"python3\",\"_EXE\":\"/usr/bin/python3.12\",\"_CMDLINE\":\"python3\",\"_msg\":\"foo\\nbar\\n\\n\\nasda\\nasda\",\"_PID\":\"2763\",\"_SOURCE_REALTIME_TIMESTAMP\":\"1729698775704375\"}",
		"{\"_BOOT_ID\":\"f778b6e2f7584a77b991a2366612a7b5\",\"_UID\":\"0\",\"_GID\":\"0\",\"_MACHINE_ID\":\"a4a970370c30a925df02a13c67167847\",\"_HOSTNAME\":\"ecd5e4555787\",\"_RUNTIME_SCOPE\":\"system\",\"_TRANSPORT\":\"journal\",\"_CAP_EFFECTIVE\":\"1ffffffffff\",\"_SYSTEMD_CGROUP\":\"/init.scope\",\"_SYSTEMD_UNIT\":\"init.scope\",\"_SYSTEMD_SLICE\":\"-.slice\",\"CODE_FILE\":\"\\u003cstdin>\",\"CODE_LINE\":\"1\",\"CODE_FUNC\":\"\\u003cmodule>\",\"SYSLOG_IDENTIFIER\":\"python3\",\"_COMM\":\"python3\",\"_EXE\":\"/usr/bin/python3.12\",\"_CMDLINE\":\"python3\",\"_msg\":\"foo\\nbar\\n\\n\\nasda\\nasda\",\"_PID\":\"2763\",\"_SOURCE_REALTIME_TIMESTAMP\":\"1729698775704375\"}",
	)
}

func TestPushJournald_Failure(t *testing.T) {
	f := func(data string) {
		t.Helper()
		tlp := &insertutil.TestLogMessageProcessor{}
		cp := &insertutil.CommonParams{
			TimeFields: []string{"__REALTIME_TIMESTAMP"},
			MsgFields:  []string{"MESSAGE"},
		tlp := &insertutils.TestLogMessageProcessor{}
		cp := &insertutils.CommonParams{
			TimeField: "__REALTIME_TIMESTAMP",
			MsgFields: []string{"MESSAGE"},
		}
		if err := parseJournaldRequest([]byte(data), tlp, cp); err == nil {
			t.Fatalf("expected non nil error")
@@ -6,12 +6,12 @@ import (
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)
@@ -28,7 +28,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {

	requestsTotal.Inc()

	cp, err := insertutil.GetCommonParams(r)
	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -38,59 +38,54 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) {
		return
	}

	encoding := r.Header.Get("Content-Encoding")
	reader, err := protoparserutil.GetUncompressedReader(r.Body, encoding)
	if err != nil {
		logger.Errorf("cannot decode jsonline request: %s", err)
		return
	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			logger.Errorf("cannot read gzipped jsonline request: %s", err)
			return
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}
	defer protoparserutil.PutUncompressedReader(reader)

	lmp := cp.NewLogMessageProcessor("jsonline", true)
	lmp := cp.NewLogMessageProcessor("jsonline")
	streamName := fmt.Sprintf("remoteAddr=%s, requestURI=%q", httpserver.GetQuotedRemoteAddr(r), r.RequestURI)
	err = processStreamInternal(streamName, reader, cp.TimeFields, cp.MsgFields, lmp)
	err = processStreamInternal(streamName, reader, cp.TimeField, cp.MsgFields, lmp)
	lmp.MustClose()
	if err != nil {
		httpserver.Errorf(w, r, "cannot process jsonline request; error: %s", err)
		return
	}

	requestDuration.UpdateDuration(startTime)
	if err != nil {
		logger.Errorf("jsonline: %s", err)
	} else {
		// update requestDuration only for successfully parsed requests.
		// There is no need in updating requestDuration for request errors,
		// since their timings are usually much smaller than the timing for successful request parsing.
		requestDuration.UpdateDuration(startTime)
	}
}

func processStreamInternal(streamName string, r io.Reader, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) error {
func processStreamInternal(streamName string, r io.Reader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) error {
	wcr := writeconcurrencylimiter.GetReader(r)
	defer writeconcurrencylimiter.PutReader(wcr)

	lr := insertutil.NewLineReader(streamName, wcr)
	lr := insertutils.NewLineReader(streamName, wcr)

	n := 0
	errors := 0
	var lastError error
	for {
		ok, err := readLine(lr, timeFields, msgFields, lmp)
		ok, err := readLine(lr, timeField, msgFields, lmp)
		wcr.DecConcurrency()
		if err != nil {
			lastError = err
			errors++
			logger.Warnf("jsonline: cannot read line #%d in /jsonline request: %s", n, err)
			errorsTotal.Inc()
			return fmt.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
		}
		if !ok {
			break
			return nil
		}
		n++
	}
	errorsTotal.Add(errors)

	if errors > 0 && n == errors {
		// Return an error if no logs were processed and there were errors
		return lastError
	}

	return nil
}

func readLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp insertutil.LogMessageProcessor) (bool, error) {
func readLine(lr *insertutils.LineReader, timeField string, msgFields []string, lmp insertutils.LogMessageProcessor) (bool, error) {
	var line []byte
	for len(line) == 0 {
		if !lr.NextLine() {
@@ -101,17 +96,16 @@ func readLine(lr *insertutil.LineReader, timeFields, msgFields []string, lmp ins
	}

	p := logstorage.GetJSONParser()
	defer logstorage.PutJSONParser(p)

	if err := p.ParseLogMessage(line); err != nil {
		return true, fmt.Errorf("%s; line contents: %q", err, line)
		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}
	ts, err := insertutil.ExtractTimestampFromFields(timeFields, p.Fields)
	ts, err := insertutils.ExtractTimestampRFC3339NanoFromFields(timeField, p.Fields)
	if err != nil {
		return true, fmt.Errorf("%s; line contents: %q", err, line)
		return false, fmt.Errorf("cannot get timestamp: %w", err)
	}
	logstorage.RenameField(p.Fields, msgFields, "_msg")
	lmp.AddRow(ts, p.Fields, nil)
	logstorage.PutJSONParser(p)

	return true, nil
}
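The jsonline endpoint consumes newline-delimited JSON, one log entry per line, with the timestamp taken from the configured time field either as RFC3339 or as a unix timestamp. A sketch of producing such a payload on the client side (buildJSONLinePayload is illustrative, not an API from this repo):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// buildJSONLinePayload marshals one JSON object per line, the wire format
// the jsonline handler above consumes line by line.
func buildJSONLinePayload(rows []map[string]string) ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf) // Encode appends a trailing '\n' per value
	for _, row := range rows {
		if err := enc.Encode(row); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

func main() {
	payload, err := buildJSONLinePayload([]map[string]string{
		{"time": "2023-06-06T04:48:11.735Z", "message": "foobar"},
		{"time": "1686026892735", "message": "baz"}, // unix timestamp in milliseconds
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(payload))
}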
@@ -2,21 +2,19 @@ package jsonline

import (
	"bytes"
	"strings"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestProcessStreamInternalSuccess(t *testing.T) {
func TestProcessStreamInternal_Success(t *testing.T) {
	f := func(data, timeField, msgField string, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		timeFields := []string{timeField}
		msgFields := []string{msgField}
		tlp := &insertutil.TestLogMessageProcessor{}
		tlp := &insertutils.TestLogMessageProcessor{}
		r := bytes.NewBufferString(data)
		if err := processStreamInternal("test", r, timeFields, msgFields, tlp); err != nil {
		if err := processStreamInternal("test", r, timeField, msgFields, tlp); err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

@@ -47,51 +45,22 @@ func TestProcessStreamInternalSuccess(t *testing.T) {
	resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","message":"foobar"}
{"message":"baz"}`
	f(data, timeField, msgField, timestampsExpected, resultExpected)

	// invalid lines among valid lines
	data = `
dsfodmasd

{"time":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
invalid line
{"time":"2023-06-06T04:48:12.735+01:00","message":"baz"}
asbsdf

`
	timeField = "time"
	msgField = "message"
	timestampsExpected = []int64{1686026891735000000, 1686023292735000000}
	resultExpected = `{"log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"_msg":"baz"}`
	f(data, timeField, msgField, timestampsExpected, resultExpected)
}

func TestProcessStreamInternalFailure(t *testing.T) {
func TestProcessStreamInternal_Failure(t *testing.T) {
	f := func(data string) {
		t.Helper()

		tlp := &insertutil.TestLogMessageProcessor{}
		r := strings.NewReader(data)
		if err := processStreamInternal("test", r, []string{"time"}, nil, tlp); err == nil {
			t.Fatalf("expected error, got nil")
		}

		if err := tlp.Verify(nil, ""); err != nil {
			t.Fatalf("unexpected error: %s", err)
		tlp := &insertutils.TestLogMessageProcessor{}
		r := bytes.NewBufferString(data)
		if err := processStreamInternal("test", r, "time", nil, tlp); err == nil {
			t.Fatalf("expecting non-nil error")
		}
	}

	// invalid json
	f("foobar")

	f(`foo
bar`)

	f(`
foo

`)

	// invalid timestamp field
	f(`{"time":"foobar"}`)
}
@@ -1,18 +1,12 @@
package loki

import (
	"flag"
	"fmt"
	"net/http"
	"strconv"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

var disableMessageParsing = flag.Bool("loki.disableMessageParsing", false, "Whether to disable automatic parsing of JSON-encoded log fields inside Loki log message into distinct log fields")

// RequestHandler processes Loki insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
	switch path {
@@ -41,17 +35,8 @@ func handleInsert(r *http.Request, w http.ResponseWriter) {
	}
}

type commonParams struct {
	cp *insertutil.CommonParams

	// Whether to parse JSON inside plaintext log message.
	//
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8486
	parseMessage bool
}

func getCommonParams(r *http.Request) (*commonParams, error) {
	cp, err := insertutil.GetCommonParams(r)
func getCommonParams(r *http.Request) (*insertutils.CommonParams, error) {
	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		return nil, err
	}
@@ -70,17 +55,5 @@ func getCommonParams(r *http.Request) (*commonParams, error) {

	}

	parseMessage := !*disableMessageParsing
	if rv := httputil.GetRequestValue(r, "disable_message_parsing", "VL-Loki-Disable-Message-Parsing"); rv != "" {
		bv, err := strconv.ParseBool(rv)
		if err != nil {
			return nil, fmt.Errorf("cannot parse disable_message_parsing=%q: %s", rv, err)
		}
		parseMessage = !bv
	}

	return &commonParams{
		cp:           cp,
		parseMessage: parseMessage,
	}, nil
	return cp, nil
}
@@ -2,28 +2,47 @@ package loki

import (
	"fmt"
	"io"
	"math"
	"net/http"
	"strconv"
	"time"

	"github.com/VictoriaMetrics/metrics"
	"github.com/valyala/fastjson"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
)

var maxRequestSize = flagutil.NewBytes("loki.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single Loki request")

var parserPool fastjson.ParserPool

func handleJSON(r *http.Request, w http.ResponseWriter) {
	startTime := time.Now()
	requestsJSONTotal.Inc()
	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
			return
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}

	wcr := writeconcurrencylimiter.GetReader(reader)
	data, err := io.ReadAll(wcr)
	writeconcurrencylimiter.PutReader(wcr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot read request body: %s", err)
		return
	}

	cp, err := getCommonParams(r)
	if err != nil {
@@ -34,17 +53,12 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	encoding := r.Header.Get("Content-Encoding")
	err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
		lmp := cp.cp.NewLogMessageProcessor("loki_json", false)
		useDefaultStreamFields := len(cp.cp.StreamFields) == 0
		err := parseJSONRequest(data, lmp, cp.cp.MsgFields, useDefaultStreamFields, cp.parseMessage)
		lmp.MustClose()
		return err
	})
	lmp := cp.NewLogMessageProcessor("loki_json")
	useDefaultStreamFields := len(cp.StreamFields) == 0
	err = parseJSONRequest(data, lmp, useDefaultStreamFields)
	lmp.MustClose()
	if err != nil {
		httpserver.Errorf(w, r, "cannot read Loki json data: %s", err)
		httpserver.Errorf(w, r, "cannot parse Loki json request: %s; data=%s", err, data)
		return
	}

@@ -52,9 +66,6 @@ func handleJSON(r *http.Request, w http.ResponseWriter) {
	// There is no need in updating requestJSONDuration for request errors,
	// since their timings are usually much smaller than the timing for successful request parsing.
	requestJSONDuration.UpdateDuration(startTime)

	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8505
	w.WriteHeader(http.StatusNoContent)
}

var (
@@ -62,10 +73,9 @@ var (
	requestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
)

func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields, parseMessage bool) error {
func parseJSONRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
	p := parserPool.Get()
	defer parserPool.Put(p)

	v, err := p.ParseBytes(data)
	if err != nil {
		return fmt.Errorf("cannot parse JSON request body: %w", err)
@@ -80,20 +90,11 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
		return fmt.Errorf("`streams` item in the parsed JSON must contain an array; got %q", streamsV)
	}

	fields := getFields()
	defer putFields(fields)

	var msgParser *logstorage.JSONParser
	if parseMessage {
		msgParser = logstorage.GetJSONParser()
		defer logstorage.PutJSONParser(msgParser)
	}

	currentTimestamp := time.Now().UnixNano()

	var commonFields []logstorage.Field
	for _, stream := range streams {
		// populate common labels from `stream` dict
		fields.fields = fields.fields[:0]
		commonFields = commonFields[:0]
		labelsV := stream.Get("stream")
		var labels *fastjson.Object
		if labelsV != nil {
@@ -109,7 +110,7 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
			err = fmt.Errorf("unexpected label value type for %q:%q; want string", k, v)
			return
		}
		fields.fields = append(fields.fields, logstorage.Field{
		commonFields = append(commonFields, logstorage.Field{
			Name:  bytesutil.ToUnsafeString(k),
			Value: bytesutil.ToUnsafeString(vStr),
		})
@@ -128,10 +129,8 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
			return fmt.Errorf("`values` item in the parsed JSON must contain an array; got %q", linesV)
		}

		commonFieldsLen := len(fields.fields)
		fields := commonFields
		for _, line := range lines {
			fields.fields = fields.fields[:commonFieldsLen]

			lineA, err := line.Array()
			if err != nil {
				return fmt.Errorf("unexpected contents of `values` item; want array; got %q", line)
@@ -153,6 +152,17 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
				ts = currentTimestamp
			}

			// parse log message
			msg, err := lineA[1].StringBytes()
			if err != nil {
				return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
			}

			fields = append(fields[:len(commonFields)], logstorage.Field{
				Name:  "_msg",
				Value: bytesutil.ToUnsafeString(msg),
			})

			// parse structured metadata - see https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs
			if len(lineA) > 2 {
				structuredMetadata, err := lineA[2].Object()
@@ -167,7 +177,7 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
					return
				}

				fields.fields = append(fields.fields, logstorage.Field{
				fields = append(fields, logstorage.Field{
					Name:  bytesutil.ToUnsafeString(k),
					Value: bytesutil.ToUnsafeString(vStr),
				})
@@ -176,51 +186,39 @@ func parseJSONRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields
				return fmt.Errorf("error when parsing `structuredMetadata` object: %w", err)
			}
		}

		// parse log message
		msg, err := lineA[1].StringBytes()
		if err != nil {
			return fmt.Errorf("unexpected log message type for %q; want string", lineA[1])
		}
		allowMsgRenaming := false
		fields.fields, allowMsgRenaming = addMsgField(fields.fields, msgParser, bytesutil.ToUnsafeString(msg))

		var streamFields []logstorage.Field
		if useDefaultStreamFields {
			streamFields = fields.fields[:commonFieldsLen]
			streamFields = commonFields
		}
		if allowMsgRenaming {
			logstorage.RenameField(fields.fields[commonFieldsLen:], msgFields, "_msg")
		}
		lmp.AddRow(ts, fields.fields, streamFields)
			lmp.AddRow(ts, fields, streamFields)
		}
	}

	return nil
}

func addMsgField(dst []logstorage.Field, msgParser *logstorage.JSONParser, msg string) ([]logstorage.Field, bool) {
	if msgParser == nil || len(msg) < 2 || msg[0] != '{' || msg[len(msg)-1] != '}' {
		return append(dst, logstorage.Field{
			Name:  "_msg",
			Value: msg,
		}), false
	}
	if msgParser != nil && len(msg) >= 2 && msg[0] == '{' && msg[len(msg)-1] == '}' {
		if err := msgParser.ParseLogMessage(bytesutil.ToUnsafeBytes(msg)); err == nil {
			return append(dst, msgParser.Fields...), true
		}
	}
	return append(dst, logstorage.Field{
		Name:  "_msg",
		Value: msg,
	}), false
}

func parseLokiTimestamp(s string) (int64, error) {
	if s == "" {
		// Special case - an empty timestamp must be substituted with the current time by the caller.
		return 0, nil
	}
	return insertutil.ParseUnixTimestamp(s)
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		// Fall back to parsing floating-point value
		f, err := strconv.ParseFloat(s, 64)
		if err != nil {
			return 0, err
		}
		if f > math.MaxInt64 {
			return 0, fmt.Errorf("too big timestamp in nanoseconds: %v; mustn't exceed %v", f, int64(math.MaxInt64))
		}
		if f < math.MinInt64 {
			return 0, fmt.Errorf("too small timestamp in nanoseconds: %v; must be bigger or equal to %v", f, int64(math.MinInt64))
		}
		n = int64(f)
	}
	if n < 0 {
		return 0, fmt.Errorf("too small timestamp in nanoseconds: %d; must be bigger than 0", n)
	}
	return n, nil
}
|
||||
|
||||
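A minimal, self-contained sketch of the timestamp handling shown in the hunk above (the pre-refactoring branch, before it was folded into `insertutil.ParseUnixTimestamp`): integers are taken as nanoseconds, and anything else falls back to floating-point parsing, which is how values such as `"147.78369e9"` in the tests below resolve. The function name here is illustrative, not from the repo.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
)

// parseNanos mirrors the integer-first, float-fallback parsing above.
func parseNanos(s string) (int64, error) {
	if n, err := strconv.ParseInt(s, 10, 64); err == nil {
		return n, nil
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	if f > math.MaxInt64 || f < math.MinInt64 {
		return 0, fmt.Errorf("timestamp out of int64 range: %v", f)
	}
	return int64(f), nil
}

func main() {
	n, _ := parseNanos("147.78369e9") // scientific notation takes the float path
	fmt.Println(n)                    // 147783690000
}
```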
@@ -3,15 +3,15 @@ package loki
import (
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestParseJSONRequest_Failure(t *testing.T) {
f := func(s string) {
t.Helper()

tlp := &insertutil.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, nil, false, false); err == nil {
tlp := &insertutils.TestLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, false); err == nil {
t.Fatalf("expecting non-nil error")
}
if err := tlp.Verify(nil, ""); err != nil {
@@ -63,9 +63,9 @@ func TestParseJSONRequest_Success(t *testing.T) {
f := func(s string, timestampsExpected []int64, resultExpected string) {
t.Helper()

tlp := &insertutil.TestLogMessageProcessor{}
tlp := &insertutils.TestLogMessageProcessor{}

if err := parseJSONRequest([]byte(s), tlp, nil, false, false); err != nil {
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
@@ -89,9 +89,9 @@ func TestParseJSONRequest_Success(t *testing.T) {
"label2": "value2"
},"values":[
["1577836800000000001", "foo bar"],
["1686026123.62", "abc"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1686026123620000000, 147783690000000000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)

@@ -122,48 +122,6 @@ func TestParseJSONRequest_Success(t *testing.T) {
{"x":"y","_msg":"yx"}`)

// values with metadata
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": "md_value"}]]}]}`, []int64{1577836800000000001}, `{"metadata_1":"md_value","_msg":"foo bar"}`)
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {"metadata_1": "md_value"}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar","metadata_1":"md_value"}`)
f(`{"streams":[{"values":[["1577836800000000001", "foo bar", {}]]}]}`, []int64{1577836800000000001}, `{"_msg":"foo bar"}`)
}

func TestParseJSONRequest_ParseMessage(t *testing.T) {
f := func(s string, msgFields []string, timestampsExpected []int64, resultExpected string) {
t.Helper()

tlp := &insertutil.TestLogMessageProcessor{}

if err := parseJSONRequest([]byte(s), tlp, msgFields, false, true); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}

f(`{
"streams": [
{
"stream": {
"foo": "bar",
"a": "b"
},
"values": [
["1577836800000000001", "{\"user_id\":\"123\"}"],
["1577836900005000002", "abc", {"trace_id":"pqw"}],
["1577836900005000003", "{def}"]
]
},
{
"stream": {
"x": "y"
},
"values": [
["1877836900005000004", "{\"trace_id\":\"111\",\"parent_id\":\"abc\"}"]
]
}
]
}`, []string{"a", "trace_id"}, []int64{1577836800000000001, 1577836900005000002, 1577836900005000003, 1877836900005000004}, `{"foo":"bar","a":"b","user_id":"123"}
{"foo":"bar","a":"b","trace_id":"pqw","_msg":"abc"}
{"foo":"bar","a":"b","_msg":"{def}"}
{"x":"y","_msg":"111","parent_id":"abc"}`)
}
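A hypothetical illustration of the parse-message behavior exercised by the test above: when a log line is itself a JSON object, its members become top-level fields instead of a single `_msg` value, while non-JSON lines such as `{def}` stay untouched. This sketch uses `encoding/json` rather than `logstorage.JSONParser`; the helper name is made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// explodeMsg returns the JSON members as fields when msg parses as an
// object, or a single _msg field otherwise; the bool reports which path won.
func explodeMsg(msg string) (map[string]string, bool) {
	if len(msg) < 2 || msg[0] != '{' || msg[len(msg)-1] != '}' {
		return map[string]string{"_msg": msg}, false
	}
	var m map[string]string
	if err := json.Unmarshal([]byte(msg), &m); err != nil {
		return map[string]string{"_msg": msg}, false
	}
	return m, true
}

func main() {
	fields, ok := explodeMsg(`{"user_id":"123"}`)
	fmt.Println(ok, fields) // true map[user_id:123]
	fields, ok = explodeMsg(`{def}`)
	fmt.Println(ok, fields) // false map[_msg:{def}]
}
```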
@@ -6,7 +6,7 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func BenchmarkParseJSONRequest(b *testing.B) {
@@ -22,13 +22,13 @@ func BenchmarkParseJSONRequest(b *testing.B) {
}

func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutil.BenchmarkLogMessageProcessor{}
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
data := getJSONBody(streams, rows, labels)
for pb.Next() {
if err := parseJSONRequest(data, blp, nil, false, true); err != nil {
if err := parseJSONRequest(data, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
@@ -2,27 +2,38 @@ package loki

import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
"github.com/golang/snappy"
)

var (
bytesBufPool bytesutil.ByteBufferPool
pushReqsPool sync.Pool
)

func handleProtobuf(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsProtobufTotal.Inc()
wcr := writeconcurrencylimiter.GetReader(r.Body)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}

cp, err := getCommonParams(r)
if err != nil {
@@ -33,22 +44,12 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
httpserver.Errorf(w, r, "%s", err)
return
}

encoding := r.Header.Get("Content-Encoding")
if encoding == "" {
// Loki protocol uses snappy compression by default.
// See https://grafana.com/docs/loki/latest/reference/loki-http-api/#ingest-logs
encoding = "snappy"
}
err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
lmp := cp.cp.NewLogMessageProcessor("loki_protobuf", false)
useDefaultStreamFields := len(cp.cp.StreamFields) == 0
err := parseProtobufRequest(data, lmp, cp.cp.MsgFields, useDefaultStreamFields, cp.parseMessage)
lmp.MustClose()
return err
})
lmp := cp.NewLogMessageProcessor("loki_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = parseProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot read Loki protobuf data: %s", err)
httpserver.Errorf(w, r, "cannot parse Loki protobuf request: %s", err)
return
}

@@ -56,9 +57,6 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
// There is no need in updating requestProtobufDuration for request errors,
// since their timings are usually much smaller than the timing for successful request parsing.
requestProtobufDuration.UpdateDuration(startTime)

// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8505
w.WriteHeader(http.StatusNoContent)
}

var (
@@ -66,11 +64,20 @@ var (
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
)

func parseProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields, parseMessage bool) error {
func parseProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
bb := bytesBufPool.Get()
defer bytesBufPool.Put(bb)

buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
if err != nil {
return fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
}
bb.B = buf

req := getPushRequest()
defer putPushRequest(req)

err := req.UnmarshalProtobuf(data)
err = req.UnmarshalProtobuf(bb.B)
if err != nil {
return fmt.Errorf("cannot parse request body: %w", err)
}
@@ -78,15 +85,8 @@ func parseProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFi
fields := getFields()
defer putFields(fields)

var msgParser *logstorage.JSONParser
if parseMessage {
msgParser = logstorage.GetJSONParser()
defer logstorage.PutJSONParser(msgParser)
}

streams := req.Streams
currentTimestamp := time.Now().UnixNano()

for i := range streams {
stream := &streams[i]
// st.Labels contains labels for the stream.
@@ -109,8 +109,10 @@ func parseProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFi
})
}

allowMsgRenaming := false
fields.fields, allowMsgRenaming = addMsgField(fields.fields, msgParser, e.Line)
fields.fields = append(fields.fields, logstorage.Field{
Name: "_msg",
Value: e.Line,
})

ts := e.Timestamp.UnixNano()
if ts == 0 {
@@ -121,9 +123,6 @@ func parseProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFi
if useDefaultStreamFields {
streamFields = fields.fields[:commonFieldsLen]
}
if allowMsgRenaming {
logstorage.RenameField(fields.fields[commonFieldsLen:], msgFields, "_msg")
}
lmp.AddRow(ts, fields.fields, streamFields)
}
}
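A round-trip sketch of the snappy block encoding that the hunks above decode with `snappy.Decode` (github.com/golang/snappy, block format, not the framed stream format). This is just a demonstration of the codec, not the repo's pooling logic.

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	body := []byte("compressed Loki push request body")

	// Encode into a fresh buffer; passing a preallocated dst (as the pooled
	// bb.B above does) avoids allocations on the hot path.
	encoded := snappy.Encode(nil, body)

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded) == string(body)) // true
}
```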
@@ -6,8 +6,9 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/golang/snappy"
)

type testLogMessageProcessor struct {
@@ -52,7 +53,7 @@ func TestParseProtobufRequest_Success(t *testing.T) {
t.Helper()

tlp := &testLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, nil, false, false); err != nil {
if err := parseJSONRequest([]byte(s), tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if len(tlp.pr.Streams) != len(timestampsExpected) {
@@ -60,9 +61,10 @@ func TestParseProtobufRequest_Success(t *testing.T) {
}

data := tlp.pr.MarshalProtobuf(nil)
encodedData := snappy.Encode(nil, data)

tlp2 := &insertutil.TestLogMessageProcessor{}
if err := parseProtobufRequest(data, tlp2, nil, false, false); err != nil {
tlp2 := &insertutils.TestLogMessageProcessor{}
if err := parseProtobufRequest(encodedData, tlp2, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
@@ -88,7 +90,7 @@ func TestParseProtobufRequest_Success(t *testing.T) {
["1577836800000000001", "foo bar"],
["1477836900005000002", "abc"],
["147.78369e9", "foobar"]
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000000000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
]}]}`, []int64{1577836800000000001, 1477836900005000002, 147783690000}, `{"label1":"value1","label2":"value2","_msg":"foo bar"}
{"label1":"value1","label2":"value2","_msg":"abc"}
{"label1":"value1","label2":"value2","_msg":"foobar"}`)

@@ -119,57 +121,6 @@ func TestParseProtobufRequest_Success(t *testing.T) {
{"x":"y","_msg":"yx"}`)
}

func TestParseProtobufRequest_ParseMessage(t *testing.T) {
f := func(s string, msgFields []string, timestampsExpected []int64, resultExpected string) {
t.Helper()

tlp := &testLogMessageProcessor{}
if err := parseJSONRequest([]byte(s), tlp, nil, false, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if len(tlp.pr.Streams) != len(timestampsExpected) {
t.Fatalf("unexpected number of streams; got %d; want %d", len(tlp.pr.Streams), len(timestampsExpected))
}

data := tlp.pr.MarshalProtobuf(nil)

tlp2 := &insertutil.TestLogMessageProcessor{}
if err := parseProtobufRequest(data, tlp2, msgFields, false, true); err != nil {
t.Fatalf("unexpected error: %s", err)
}
if err := tlp2.Verify(timestampsExpected, resultExpected); err != nil {
t.Fatal(err)
}
}

f(`{
"streams": [
{
"stream": {
"foo": "bar",
"a": "b"
},
"values": [
["1577836800000000001", "{\"user_id\":\"123\"}"],
["1577836900005000002", "abc", {"trace_id":"pqw"}],
["1577836900005000003", "{def}"]
]
},
{
"stream": {
"x": "y"
},
"values": [
["1877836900005000004", "{\"trace_id\":\"432\",\"parent_id\":\"qwerty\"}"]
]
}
]
}`, []string{"a", "trace_id"}, []int64{1577836800000000001, 1577836900005000002, 1577836900005000003, 1877836900005000004}, `{"foo":"bar","a":"b","user_id":"123"}
{"foo":"bar","a":"b","trace_id":"pqw","_msg":"abc"}
{"foo":"bar","a":"b","_msg":"{def}"}
{"x":"y","_msg":"432","parent_id":"qwerty"}`)
}

func TestParsePromLabels_Success(t *testing.T) {
f := func(s string) {
t.Helper()
@@ -6,7 +6,9 @@ import (
"fmt"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/golang/snappy"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

@@ -23,13 +25,13 @@ func BenchmarkParseProtobufRequest(b *testing.B) {
}

func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutil.BenchmarkLogMessageProcessor{}
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := parseProtobufRequest(body, blp, nil, false, true); err != nil {
if err := parseProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
@@ -76,5 +78,8 @@ func getProtobufBody(streamsCount, rowsCount, labelsCount int) []byte {
Streams: streams,
}

return pr.MarshalProtobuf(nil)
body := pr.MarshalProtobuf(nil)
encodedBody := snappy.Encode(nil, body)

return encodedBody
}
@@ -7,7 +7,6 @@ import (

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/datadog"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/internalinsert"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/journald"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/loki"
@@ -29,11 +28,6 @@ func Stop() {
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
path := r.URL.Path

if path == "/internal/insert" {
internalinsert.RequestHandler(w, r)
return true
}

if !strings.HasPrefix(path, "/insert/") {
// Skip requests, which do not start with /insert/, since these aren't our requests.
return false
@@ -52,9 +46,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
return true
}
switch {
case strings.HasPrefix(path, "/elasticsearch"):
// some clients may omit trailing slash
// see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8353
case strings.HasPrefix(path, "/elasticsearch/"):
path = strings.TrimPrefix(path, "/elasticsearch")
return elasticsearch.RequestHandler(path, w, r)
case strings.HasPrefix(path, "/loki/"):
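Why the hunk above drops the trailing slash from the `/elasticsearch/` prefix (see the referenced issue 8353): with the slash, a request to the bare `/elasticsearch` path falls through the switch entirely. A minimal sketch of the two behaviors:

```go
package main

import (
	"fmt"
	"strings"
)

// route mimics the prefix matching in RequestHandler above.
func route(path string) (string, bool) {
	if strings.HasPrefix(path, "/elasticsearch") {
		// Matches both "/elasticsearch" and "/elasticsearch/_bulk";
		// the stricter "/elasticsearch/" prefix would reject the former.
		return strings.TrimPrefix(path, "/elasticsearch"), true
	}
	return path, false
}

func main() {
	p, ok := route("/elasticsearch")
	fmt.Printf("%q %v\n", p, ok) // "" true
	p, ok = route("/elasticsearch/_bulk")
	fmt.Printf("%q %v\n", p, ok) // "/_bulk" true
}
```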
@@ -2,21 +2,21 @@ package opentelemetry

import (
"fmt"
"io"
"net/http"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

var maxRequestSize = flagutil.NewBytes("opentelemetry.maxRequestSize", 64*1024*1024, "The maximum size in bytes of a single OpenTelemetry request")

// RequestHandler processes Opentelemetry insert requests
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
switch path {
@@ -37,8 +37,26 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
func handleProtobuf(r *http.Request, w http.ResponseWriter) {
startTime := time.Now()
requestsProtobufTotal.Inc()
reader := r.Body
if r.Header.Get("Content-Encoding") == "gzip" {
zr, err := common.GetGzipReader(reader)
if err != nil {
httpserver.Errorf(w, r, "cannot initialize gzip reader: %s", err)
return
}
defer common.PutGzipReader(zr)
reader = zr
}

cp, err := insertutil.GetCommonParams(r)
wcr := writeconcurrencylimiter.GetReader(reader)
data, err := io.ReadAll(wcr)
writeconcurrencylimiter.PutReader(wcr)
if err != nil {
httpserver.Errorf(w, r, "cannot read request body: %s", err)
return
}

cp, err := insertutils.GetCommonParams(r)
if err != nil {
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
return
@@ -48,16 +66,12 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) {
return
}

encoding := r.Header.Get("Content-Encoding")
err = protoparserutil.ReadUncompressedData(r.Body, encoding, maxRequestSize, func(data []byte) error {
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf", false)
useDefaultStreamFields := len(cp.StreamFields) == 0
err := pushProtobufRequest(data, lmp, cp.MsgFields, useDefaultStreamFields)
lmp.MustClose()
return err
})
lmp := cp.NewLogMessageProcessor("opentelelemtry_protobuf")
useDefaultStreamFields := len(cp.StreamFields) == 0
err = pushProtobufRequest(data, lmp, useDefaultStreamFields)
lmp.MustClose()
if err != nil {
httpserver.Errorf(w, r, "cannot read OpenTelemetry protocol data: %s", err)
httpserver.Errorf(w, r, "cannot parse OpenTelemetry protobuf request: %s", err)
return
}

@@ -74,7 +88,7 @@ var (
requestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/opentelemetry/v1/logs",format="protobuf"}`)
)

func pushProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields bool) error {
func pushProtobufRequest(data []byte, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) error {
var req pb.ExportLogsServiceRequest
if err := req.UnmarshalProtobuf(data); err != nil {
errorsTotal.Inc()
@@ -83,41 +97,33 @@ func pushProtobufRequest(data []byte, lmp insertutil.LogMessageProcessor, msgFie

var commonFields []logstorage.Field
for _, rl := range req.ResourceLogs {
commonFields = commonFields[:0]
commonFields = appendKeyValues(commonFields, rl.Resource.Attributes, "")
attributes := rl.Resource.Attributes
commonFields = slicesutil.SetLength(commonFields, len(attributes))
for i, attr := range attributes {
commonFields[i].Name = attr.Key
commonFields[i].Value = attr.Value.FormatString()
}
commonFieldsLen := len(commonFields)
for _, sc := range rl.ScopeLogs {
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, msgFields, useDefaultStreamFields)
commonFields = pushFieldsFromScopeLogs(&sc, commonFields[:commonFieldsLen], lmp, useDefaultStreamFields)
}
}

return nil
}

func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutil.LogMessageProcessor, msgFields []string, useDefaultStreamFields bool) []logstorage.Field {
func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field, lmp insertutils.LogMessageProcessor, useDefaultStreamFields bool) []logstorage.Field {
fields := commonFields
for _, lr := range sc.LogRecords {
fields = fields[:len(commonFields)]
if lr.Body.KeyValueList != nil {
fields = appendKeyValues(fields, lr.Body.KeyValueList.Values, "")
logstorage.RenameField(fields[len(commonFields):], msgFields, "_msg")
} else {
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: lr.Body.FormatString(),
})
for _, attr := range lr.Attributes {
fields = append(fields, logstorage.Field{
Name: "_msg",
Value: lr.Body.FormatString(true),
})
}
fields = appendKeyValues(fields, lr.Attributes, "")
if len(lr.TraceID) > 0 {
fields = append(fields, logstorage.Field{
Name: "trace_id",
Value: lr.TraceID,
})
}
if len(lr.SpanID) > 0 {
fields = append(fields, logstorage.Field{
Name: "span_id",
Value: lr.SpanID,
Name: attr.Key,
Value: attr.Value.FormatString(),
})
}
fields = append(fields, logstorage.Field{
@@ -133,22 +139,3 @@ func pushFieldsFromScopeLogs(sc *pb.ScopeLogs, commonFields []logstorage.Field,
}
return fields
}

func appendKeyValues(fields []logstorage.Field, kvs []*pb.KeyValue, parentField string) []logstorage.Field {
for _, attr := range kvs {
fieldName := attr.Key
if parentField != "" {
fieldName = parentField + "." + fieldName
}

if attr.Value.KeyValueList != nil {
fields = appendKeyValues(fields, attr.Value.KeyValueList.Values, fieldName)
} else {
fields = append(fields, logstorage.Field{
Name: fieldName,
Value: attr.Value.FormatString(true),
})
}
}
return fields
}
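A self-contained sketch of the dotted-name flattening performed by `appendKeyValues` above; the `pb.KeyValue` types are replaced with a plain recursive map for illustration. The expected output matches the `error.caused_by.type`-style fields seen in the nested-fields test below.

```go
package main

import "fmt"

// flatten walks nested attribute maps and joins the key path with dots,
// mirroring the recursion over KeyValueList values above.
func flatten(dst map[string]string, prefix string, src map[string]any) {
	for k, v := range src {
		name := k
		if prefix != "" {
			name = prefix + "." + k
		}
		if child, ok := v.(map[string]any); ok {
			flatten(dst, name, child)
		} else {
			dst[name] = fmt.Sprint(v)
		}
	}
}

func main() {
	fields := map[string]string{}
	flatten(fields, "", map[string]any{
		"error": map[string]any{
			"type":      "document_parsing_exception",
			"caused_by": map[string]any{"type": "x_content_parse_exception"},
		},
	})
	fmt.Println(fields["error.caused_by.type"]) // x_content_parse_exception
}
```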
@@ -3,7 +3,7 @@ package opentelemetry
import (
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

@@ -15,8 +15,8 @@ func TestPushProtoOk(t *testing.T) {
}

pData := lr.MarshalProtobuf(nil)
tlp := &insertutil.TestLogMessageProcessor{}
if err := pushProtobufRequest(pData, tlp, nil, false); err != nil {
tlp := &insertutils.TestLogMessageProcessor{}
if err := pushProtobufRequest(pData, tlp, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}

@@ -24,7 +24,6 @@ func TestPushProtoOk(t *testing.T) {
t.Fatal(err)
}
}

// single line without resource attributes
f([]pb.ResourceLogs{
{
@@ -40,27 +39,6 @@ func TestPushProtoOk(t *testing.T) {
[]int64{1234},
`{"_msg":"log-line-message","severity":"Trace"}`,
)

// severities mapping
f([]pb.ResourceLogs{
{
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 13, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 24, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
},
},
},
},
},
[]int64{1234, 1234, 1234},
`{"_msg":"log-line-message","severity":"Trace"}
{"_msg":"log-line-message","severity":"Warn"}
{"_msg":"log-line-message","severity":"Fatal4"}`,
)

// multi-line with resource attributes
f([]pb.ResourceLogs{
{
@@ -80,7 +58,7 @@ func TestPushProtoOk(t *testing.T) {
{
LogRecords: []pb.LogRecord{
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1234, SeverityNumber: 1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 25, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1235, SeverityNumber: 21, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
{Attributes: []*pb.KeyValue{}, TimeUnixNano: 1236, SeverityNumber: -1, Body: pb.AnyValue{StringValue: ptrTo("log-line-message-msg-2")}},
},
},
@@ -88,9 +66,9 @@ func TestPushProtoOk(t *testing.T) {
},
},
[]int64{1234, 1235, 1236},
`{"logger":"context","instance_id":"10","node_taints.role":"dev","node_taints.cluster_load_percent":"0.55","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints.role":"dev","node_taints.cluster_load_percent":"0.55","_msg":"log-line-message-msg-2","severity":"Unspecified"}
{"logger":"context","instance_id":"10","node_taints.role":"dev","node_taints.cluster_load_percent":"0.55","_msg":"log-line-message-msg-2","severity":"Unspecified"}`,
`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Unspecified"}`,
)

// multi-scope with resource attributes and multi-line
@@ -128,79 +106,20 @@ func TestPushProtoOk(t *testing.T) {
{
LogRecords: []pb.LogRecord{
{TimeUnixNano: 2347, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-0")}},
{TraceID: "1234", SpanID: "45", ObservedTimeUnixNano: 2348, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-1")}},
{TraceID: "4bf92f3577b34da6a3ce929d0e0e4736", SpanID: "00f067aa0ba902b7", ObservedTimeUnixNano: 3333, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-2")}},
{ObservedTimeUnixNano: 2348, SeverityNumber: 12, Body: pb.AnyValue{StringValue: ptrTo("log-line-resource-scope-1-1-1")}},
},
},
},
},
},
[]int64{1234, 1235, 2345, 2346, 2347, 2348, 3333},
`{"logger":"context","instance_id":"10","node_taints.role":"dev","node_taints.cluster_load_percent":"0.55","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints.role":"dev","node_taints.cluster_load_percent":"0.55","_msg":"log-line-message-msg-2","severity":"Debug"}
[]int64{1234, 1235, 2345, 2346, 2347, 2348},
`{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message","severity":"Trace"}
{"logger":"context","instance_id":"10","node_taints":"[{\"Key\":\"role\",\"Value\":{\"StringValue\":\"dev\",\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":null,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}},{\"Key\":\"cluster_load_percent\",\"Value\":{\"StringValue\":null,\"BoolValue\":null,\"IntValue\":null,\"DoubleValue\":0.55,\"ArrayValue\":null,\"KeyValueList\":null,\"BytesValue\":null}}]","_msg":"log-line-message-msg-2","severity":"Debug"}
{"_msg":"log-line-resource-scope-1-0-0","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-0-1","severity":"Info2"}
{"_msg":"log-line-resource-scope-1-1-0","severity":"Info4"}
{"_msg":"log-line-resource-scope-1-1-1","trace_id":"1234","span_id":"45","severity":"Info4"}
{"_msg":"log-line-resource-scope-1-1-2","trace_id":"4bf92f3577b34da6a3ce929d0e0e4736","span_id":"00f067aa0ba902b7","severity":"Unspecified"}`,
{"_msg":"log-line-resource-scope-1-1-1","severity":"Info4"}`,
)

// nested fields
f([]pb.ResourceLogs{
{
ScopeLogs: []pb.ScopeLogs{
{
LogRecords: []pb.LogRecord{
{
TimeUnixNano: 1234,
Body: pb.AnyValue{StringValue: ptrTo("nested fields")},
Attributes: []*pb.KeyValue{
{Key: "error", Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{Values: []*pb.KeyValue{
{
Key: "type",
Value: &pb.AnyValue{StringValue: ptrTo("document_parsing_exception")},
},
{
Key: "reason",
Value: &pb.AnyValue{StringValue: ptrTo("failed to parse field [_msg] of type [text]")},
},
{
Key: "caused_by",
Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{Values: []*pb.KeyValue{
{
Key: "type",
Value: &pb.AnyValue{StringValue: ptrTo("x_content_parse_exception")},
},
{
Key: "reason",
Value: &pb.AnyValue{StringValue: ptrTo("unexpected end-of-input in VALUE_STRING")},
},
{
Key: "caused_by",
Value: &pb.AnyValue{KeyValueList: &pb.KeyValueList{Values: []*pb.KeyValue{
{
Key: "type",
Value: &pb.AnyValue{StringValue: ptrTo("json_e_o_f_exception")},
},
{
Key: "reason",
Value: &pb.AnyValue{StringValue: ptrTo("eof")},
},
}}},
},
}}},
},
}}}},
},
},
},
},
},
},
}, []int64{1234},
`{"_msg":"nested fields","error.type":"document_parsing_exception","error.reason":"failed to parse field [_msg] of type [text]",`+
`"error.caused_by.type":"x_content_parse_exception","error.caused_by.reason":"unexpected end-of-input in VALUE_STRING",`+
`"error.caused_by.caused_by.type":"json_e_o_f_exception","error.caused_by.caused_by.reason":"eof","severity":"Unspecified"}`)
}

func ptrTo[T any](s T) *T {
@@ -4,7 +4,7 @@ import (
"fmt"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

@@ -21,13 +21,13 @@ func BenchmarkParseProtobufRequest(b *testing.B) {
}

func benchmarkParseProtobufRequest(b *testing.B, streams, rows, labels int) {
blp := &insertutil.BenchmarkLogMessageProcessor{}
blp := &insertutils.BenchmarkLogMessageProcessor{}
b.ReportAllocs()
b.SetBytes(int64(streams * rows))
b.RunParallel(func(pb *testing.PB) {
body := getProtobufBody(streams, rows, labels)
for pb.Next() {
if err := pushProtobufRequest(body, blp, nil, false); err != nil {
if err := pushProtobufRequest(body, blp, false); err != nil {
panic(fmt.Errorf("unexpected error: %w", err))
}
}
@@ -16,7 +16,9 @@ import (
"sync/atomic"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/klauspost/compress/gzip"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
@@ -25,7 +27,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/protoparserutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/slicesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
@@ -45,11 +47,6 @@ var (
ignoreFieldsUDP = flagutil.NewArrayString("syslog.ignoreFields.udp", "Fields to ignore at logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#dropping-fields`)

decolorizeFieldsTCP = flagutil.NewArrayString("syslog.decolorizeFields.tcp", "Fields to remove ANSI color codes across logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#decolorizing-fields`)
decolorizeFieldsUDP = flagutil.NewArrayString("syslog.decolorizeFields.udp", "Fields to remove ANSI color codes across logs ingested via the corresponding -syslog.listenAddr.udp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#decolorizing-fields`)

extraFieldsTCP = flagutil.NewArrayString("syslog.extraFields.tcp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.tcp. "+
`See https://docs.victoriametrics.com/victorialogs/data-ingestion/syslog/#adding-extra-fields`)
extraFieldsUDP = flagutil.NewArrayString("syslog.extraFields.udp", "Fields to add to logs ingested via the corresponding -syslog.listenAddr.udp. "+
@@ -193,12 +190,6 @@ func runUDPListener(addr string, argIdx int) {
logger.Fatalf("cannot parse -syslog.ignoreFields.udp=%q for -syslog.listenAddr.udp=%q: %s", ignoreFieldsStr, addr, err)
}

decolorizeFieldsStr := decolorizeFieldsUDP.GetOptionalArg(argIdx)
decolorizeFields, err := parseFieldsList(decolorizeFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.decolorizeFields.udp=%q for -syslog.listenAddr.udp=%q: %s", decolorizeFieldsStr, addr, err)
}

extraFieldsStr := extraFieldsUDP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
@@ -207,7 +198,7 @@ func runUDPListener(addr string, argIdx int) {

doneCh := make(chan struct{})
go func() {
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, decolorizeFields, extraFields)
serveUDP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()

@@ -260,12 +251,6 @@ func runTCPListener(addr string, argIdx int) {
logger.Fatalf("cannot parse -syslog.ignoreFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", ignoreFieldsStr, addr, err)
}

decolorizeFieldsStr := decolorizeFieldsTCP.GetOptionalArg(argIdx)
decolorizeFields, err := parseFieldsList(decolorizeFieldsStr)
if err != nil {
logger.Fatalf("cannot parse -syslog.decolorizeFields.tcp=%q for -syslog.listenAddr.tcp=%q: %s", decolorizeFieldsStr, addr, err)
}

extraFieldsStr := extraFieldsTCP.GetOptionalArg(argIdx)
extraFields, err := parseExtraFields(extraFieldsStr)
if err != nil {
@@ -274,7 +259,7 @@ func runTCPListener(addr string, argIdx int) {

doneCh := make(chan struct{})
go func() {
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, decolorizeFields, extraFields)
serveTCP(ln, tenantID, compressMethod, useLocalTimestamp, streamFields, ignoreFields, extraFields)
close(doneCh)
}()

@@ -289,14 +274,14 @@ func runTCPListener(addr string, argIdx int) {

func checkCompressMethod(compressMethod, addr, protocol string) {
switch compressMethod {
case "", "none", "zstd", "gzip", "deflate":
case "", "none", "gzip", "deflate":
return
default:
logger.Fatalf("unsupported -syslog.compressMethod.%s=%q for -syslog.listenAddr.%s=%q; supported values: 'none', 'zstd', 'gzip', 'deflate'", protocol, compressMethod, protocol, addr)
logger.Fatalf("unsupported -syslog.compressMethod.%s=%q for -syslog.listenAddr.%s=%q; supported values: 'none', 'gzip', 'deflate'", protocol, compressMethod, protocol, addr)
}
}

func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, encoding string, useLocalTimestamp bool, streamFields, ignoreFields, decolorizeFields []string, extraFields []logstorage.Field) {
func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
gomaxprocs := cgroup.AvailableCPUs()
var wg sync.WaitGroup
localAddr := ln.LocalAddr()
@@ -304,7 +289,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, encoding string,
wg.Add(1)
go func() {
defer wg.Done()
cp := insertutil.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, decolorizeFields, extraFields)
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
var bb bytesutil.ByteBuffer
bb.B = bytesutil.ResizeNoCopyNoOverallocate(bb.B, 64*1024)
for {
@@ -329,7 +314,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, encoding string,
}
bb.B = bb.B[:n]
udpRequestsTotal.Inc()
if err := processStream("udp", bb.NewReader(), encoding, useLocalTimestamp, cp); err != nil {
if err := processStream("udp", bb.NewReader(), compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process UDP data from %s at %s: %s", remoteAddr, localAddr, err)
}
}
@@ -338,7 +323,7 @@ func serveUDP(ln net.PacketConn, tenantID logstorage.TenantID, encoding string,
wg.Wait()
}

func serveTCP(ln net.Listener, tenantID logstorage.TenantID, encoding string, useLocalTimestamp bool, streamFields, ignoreFields, decolorizeFields []string, extraFields []logstorage.Field) {
func serveTCP(ln net.Listener, tenantID logstorage.TenantID, compressMethod string, useLocalTimestamp bool, streamFields, ignoreFields []string, extraFields []logstorage.Field) {
var cm ingestserver.ConnsMap
cm.Init("syslog")

@@ -368,8 +353,8 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, encoding string, us

wg.Add(1)
go func() {
cp := insertutil.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, decolorizeFields, extraFields)
if err := processStream("tcp", c, encoding, useLocalTimestamp, cp); err != nil {
cp := insertutils.GetCommonParamsForSyslog(tenantID, streamFields, ignoreFields, extraFields)
if err := processStream("tcp", c, compressMethod, useLocalTimestamp, cp); err != nil {
logger.Errorf("syslog: cannot process TCP data at %q: %s", addr, err)
}

@@ -384,29 +369,52 @@ func serveTCP(ln net.Listener, tenantID logstorage.TenantID, encoding string, us
}

// processStream parses a stream of syslog messages from r and ingests them into vlstorage.
func processStream(protocol string, r io.Reader, encoding string, useLocalTimestamp bool, cp *insertutil.CommonParams) error {
func processStream(protocol string, r io.Reader, compressMethod string, useLocalTimestamp bool, cp *insertutils.CommonParams) error {
if err := vlstorage.CanWriteData(); err != nil {
return err
}

lmp := cp.NewLogMessageProcessor("syslog_"+protocol, true)
err := processStreamInternal(r, encoding, useLocalTimestamp, lmp)
lmp := cp.NewLogMessageProcessor("syslog_" + protocol)
err := processStreamInternal(r, compressMethod, useLocalTimestamp, lmp)
lmp.MustClose()

return err
}

func processStreamInternal(r io.Reader, encoding string, useLocalTimestamp bool, lmp insertutil.LogMessageProcessor) error {
reader, err := protoparserutil.GetUncompressedReader(r, encoding)
if err != nil {
return fmt.Errorf("cannot decode syslog data: %w", err)
func processStreamInternal(r io.Reader, compressMethod string, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
switch compressMethod {
case "", "none":
case "gzip":
zr, err := common.GetGzipReader(r)
if err != nil {
return fmt.Errorf("cannot read gzipped data: %w", err)
}
r = zr
case "deflate":
zr, err := common.GetZlibReader(r)
if err != nil {
return fmt.Errorf("cannot read deflated data: %w", err)
}
r = zr
default:
logger.Panicf("BUG: unsupported compressMethod=%q; supported values: none, gzip, deflate", compressMethod)
}
defer protoparserutil.PutUncompressedReader(reader)

return processUncompressedStream(reader, useLocalTimestamp, lmp)
err := processUncompressedStream(r, useLocalTimestamp, lmp)

switch compressMethod {
case "gzip":
zr := r.(*gzip.Reader)
common.PutGzipReader(zr)
case "deflate":
zr := r.(io.ReadCloser)
common.PutZlibReader(zr)
}

return err
}
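A sketch of the per-connection decompression choice made by `processStreamInternal` above, using the standard library instead of the pooled readers from lib/protoparser/common; the helper name and the sample syslog line are illustrative only.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"compress/zlib"
	"fmt"
	"io"
)

// uncompressedReader wraps r according to the negotiated compress method,
// mirroring the switch in processStreamInternal.
func uncompressedReader(r io.Reader, method string) (io.Reader, error) {
	switch method {
	case "", "none":
		return r, nil
	case "gzip":
		return gzip.NewReader(r)
	case "deflate":
		// "deflate" here means zlib-wrapped deflate.
		return zlib.NewReader(r)
	default:
		return nil, fmt.Errorf("unsupported compress method %q", method)
	}
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("<165>1 2024-01-01T00:00:00Z host app - - - hello"))
	zw.Close()

	zr, _ := uncompressedReader(&buf, "gzip")
	line, _ := io.ReadAll(zr)
	fmt.Println(string(line))
}
```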
func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertutil.LogMessageProcessor) error {
func processUncompressedStream(r io.Reader, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)

@@ -491,7 +499,7 @@ again:
slr.err = fmt.Errorf("cannot parse message length from %q: %w", msgLenStr, err)
return false
}
if maxMsgLen := insertutil.MaxLineSizeBytes.IntN(); msgLen > uint64(maxMsgLen) {
if maxMsgLen := insertutils.MaxLineSizeBytes.IntN(); msgLen > uint64(maxMsgLen) {
slr.err = fmt.Errorf("cannot read message longer than %d bytes; msgLen=%d", maxMsgLen, msgLen)
return false
}
@@ -543,7 +551,7 @@ func putSyslogLineReader(slr *syslogLineReader) {

var syslogLineReaderPool sync.Pool

func processLine(line []byte, currentYear int, timezone *time.Location, useLocalTimestamp bool, lmp insertutil.LogMessageProcessor) error {
func processLine(line []byte, currentYear int, timezone *time.Location, useLocalTimestamp bool, lmp insertutils.LogMessageProcessor) error {
p := logstorage.GetSyslogParser(currentYear, timezone)
lineStr := bytesutil.ToUnsafeString(line)
p.Parse(lineStr)
@@ -552,7 +560,7 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
if useLocalTimestamp {
ts = time.Now().UnixNano()
} else {
nsecs, err := insertutil.ExtractTimestampFromFields(timeFields, p.Fields)
nsecs, err := insertutils.ExtractTimestampRFC3339NanoFromFields("timestamp", p.Fields)
if err != nil {
return fmt.Errorf("cannot get timestamp from syslog line %q: %w", line, err)
}
@@ -565,7 +573,6 @@ func processLine(line []byte, currentYear int, timezone *time.Location, useLocal
return nil
}

var timeFields = []string{"timestamp"}
var msgFields = []string{"message"}

var (

@@ -6,7 +6,7 @@ import (
"testing"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutil"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
)

func TestSyslogLineReader_Success(t *testing.T) {
@@ -84,7 +84,7 @@ func TestProcessStreamInternal_Success(t *testing.T) {
globalTimezone = time.UTC
globalCurrentYear.Store(int64(currentYear))

tlp := &insertutil.TestLogMessageProcessor{}
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal(r, "", false, tlp); err != nil {
t.Fatalf("unexpected error: %s", err)
@@ -113,7 +113,7 @@ func TestProcessStreamInternal_Failure(t *testing.T) {
MustInit()
defer MustStop()

tlp := &insertutil.TestLogMessageProcessor{}
tlp := &insertutils.TestLogMessageProcessor{}
r := bytes.NewBufferString(data)
if err := processStreamInternal(r, "", false, tlp); err == nil {
t.Fatalf("expecting non-nil error")
@@ -154,7 +154,7 @@ func readNextJSONObject(d *json.Decoder) ([]logstorage.Field, error) {
}
value, ok := t.(string)
if !ok {
return nil, fmt.Errorf("unexpected token read for object value: %v; want string", t)
return nil, fmt.Errorf("unexpected token read for oject value: %v; want string", t)
}

fields = append(fields, logstorage.Field{
@@ -176,7 +176,7 @@ func writeCompactObject(w io.Writer, fields []logstorage.Field) error {
_, err := fmt.Fprintf(w, "%s\n", fields[0].Value)
return err
}
if len(fields) == 2 && (fields[0].Name == "_time" || fields[1].Name == "_time") {
if len(fields) == 2 && fields[0].Name == "_time" || fields[1].Name == "_time" {
// Write _time\tfieldValue as is
if fields[0].Name == "_time" {
_, err := fmt.Fprintf(w, "%s\t%s\n", fields[0].Value, fields[1].Value)
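The `writeCompactObject` hunk above is a precedence fix worth spelling out: in Go, `&&` binds tighter than `||`, so the unparenthesized condition checks the second name even when the length guard fails. A minimal demonstration with plain strings:

```go
package main

import "fmt"

func main() {
	n := 3
	a, b := "x", "_time"

	// Parsed as (n == 2 && a == "_time") || b == "_time":
	// the || escapes the length check entirely.
	buggy := n == 2 && a == "_time" || b == "_time"

	// The parenthesized form from the new side of the diff.
	fixed := n == 2 && (a == "_time" || b == "_time")

	fmt.Println(buggy, fixed) // true false
}
```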
@@ -17,7 +17,7 @@ func isTerminal() bool {
return isatty.IsTerminal(os.Stdout.Fd()) && isatty.IsTerminal(os.Stderr.Fd())
}

func readWithLess(r io.Reader, disableColors, wrapLongLines bool) error {
func readWithLess(r io.Reader, wrapLongLines bool) error {
if !isTerminal() {
// Just write everything to stdout if no terminal is available.
_, err := io.Copy(os.Stdout, r)
@@ -49,9 +49,6 @@ func readWithLess(r io.Reader, disableColors, wrapLongLines bool) error {
return fmt.Errorf("cannot find 'less' command: %w", err)
}
opts := []string{"less", "-F", "-X"}
if !disableColors {
opts = append(opts, "-R")
}
if !wrapLongLines {
opts = append(opts, "-S")
}

@@ -91,7 +91,6 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
}

outputMode := outputModeJSONMultiline
disableColors := true
wrapLongLines := false
s := ""
for {
@@ -101,7 +100,7 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
case io.EOF:
if s != "" {
// This is non-interactive query execution.
executeQuery(context.Background(), rl, s, outputMode, disableColors, wrapLongLines)
executeQuery(context.Background(), rl, s, outputMode, wrapLongLines)
}
return
case readline.ErrInterrupt:
@@ -177,24 +176,6 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {
s = ""
continue
}
if s == `\disable_colors` {
if !disableColors {
disableColors = true
fmt.Fprintf(rl, `disabled colors in compact output mode; enter \enable_colors for enabling it`+"\n")
}
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if s == `\enable_colors` {
if disableColors {
disableColors = false
fmt.Fprintf(rl, `enabled colors in compact output mode; type \disable_colors for disabling it`+"\n")
}
historyLines = pushToHistory(rl, historyLines, s)
s = ""
continue
}
if line != "" && !strings.HasSuffix(line, ";") {
// Assume the query is incomplete and allow the user finishing the query on the next line
s += "\n"
@@ -204,7 +185,7 @@ func runReadlineLoop(rl *readline.Instance, incompleteLine *string) {

// Execute the query
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
executeQuery(ctx, rl, s, outputMode, disableColors, wrapLongLines)
executeQuery(ctx, rl, s, outputMode, wrapLongLines)
cancel()

historyLines = pushToHistory(rl, historyLines, s)
@@ -289,18 +270,16 @@ func printCommandsHelp(w io.Writer) {
\h - show this help
\s - singleline json output mode
\m - multiline json output mode
\c - compact output mode
\c - compact output
\logfmt - logfmt output mode
\wrap_long_lines - toggles wrapping long lines
\enable_colors - enable ANSI colors in compact output mode
\disable_colors - disable ANSI colors in compact output mode
\tail <query> - live tail <query> results

See https://docs.victoriametrics.com/victorialogs/querying/vlogscli/ for more details
`)
}

func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode, disableColors, wrapLongLines bool) {
func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode outputMode, wrapLongLines bool) {
if strings.HasPrefix(qStr, `\tail `) {
tailQuery(ctx, output, qStr, outputMode)
return
@@ -314,7 +293,7 @@ func executeQuery(ctx context.Context, output io.Writer, qStr string, outputMode
_ = respBody.Close()
}()

if err := readWithLess(respBody, disableColors, wrapLongLines); err != nil {
if err := readWithLess(respBody, wrapLongLines); err != nil {
fmt.Fprintf(output, "error when reading query response: %s\n", err)
return
}
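A sketch of how a pager like the one in `readWithLess` can be wired up: pipe the response through `less` with `-F` (quit if it fits one screen), `-X` (keep screen contents), `-R` (pass ANSI colors through), and `-S` (chop long lines). The flag handling mirrors the hunk above; error handling is abbreviated and the function name is made up.

```go
package main

import (
	"io"
	"os"
	"os/exec"
	"strings"
)

// pageThroughLess streams r through the less pager.
func pageThroughLess(r io.Reader, disableColors, wrapLongLines bool) error {
	args := []string{"-F", "-X"}
	if !disableColors {
		args = append(args, "-R")
	}
	if !wrapLongLines {
		args = append(args, "-S")
	}
	cmd := exec.Command("less", args...)
	cmd.Stdin = r
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	_ = pageThroughLess(strings.NewReader("hello\n"), true, false)
}
```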
@@ -18,20 +18,20 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

var (
	addr    = flag.String("addr", "stdout", "HTTP address to push the generated logs to; if it is set to stdout, then logs are generated to stdout")
	workers = flag.Int("workers", 1, "The number of workers to use to push logs to -addr")

	start = newTimeFlag("start", "-1d", "Generated logs start from this time; see https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#timestamp-formats")
	end   = newTimeFlag("end", "0s", "Generated logs end at this time; see https://docs.victoriametrics.com/victoriametrics/single-server-victoriametrics/#timestamp-formats")
	start = newTimeFlag("start", "-1d", "Generated logs start from this time; see https://docs.victoriametrics.com/#timestamp-formats")
	end   = newTimeFlag("end", "0s", "Generated logs end at this time; see https://docs.victoriametrics.com/#timestamp-formats")
	activeStreams = flag.Int("activeStreams", 100, "The number of active log streams to generate; see https://docs.victoriametrics.com/victorialogs/keyconcepts/#stream-fields")
	totalStreams  = flag.Int("totalStreams", 0, "The number of total log streams; if -totalStreams > -activeStreams, then some active streams are substituted with new streams "+
		"during data generation")
	logsPerStream = flag.Int64("logsPerStream", 1_000, "The number of log entries to generate per each log stream. Log entries are evenly distributed between -start and -end")
	constFieldsPerLog = flag.Int("constFieldsPerLog", 3, "The number of fields with constant values to generate per each log entry; "+
	constFieldsPerLog = flag.Int("constFieldsPerLog", 3, "The number of fields with constaint values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	varFieldsPerLog = flag.Int("varFieldsPerLog", 1, "The number of fields with variable values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
@@ -45,8 +45,6 @@ var (
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	u64FieldsPerLog = flag.Int("u64FieldsPerLog", 1, "The number of fields with uint64 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	i64FieldsPerLog = flag.Int("i64FieldsPerLog", 1, "The number of fields with int64 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	floatFieldsPerLog = flag.Int("floatFieldsPerLog", 1, "The number of fields with float64 values to generate per each log entry; "+
		"see https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model")
	ipFieldsPerLog = flag.Int("ipFieldsPerLog", 1, "The number of fields with IPv4 values to generate per each log entry; "+
@@ -177,10 +175,7 @@ func generateAndPushLogs(cfg *workerConfig, workerID int) {
	sw := &statWriter{
		w: pw,
	}

	// The 1MB write buffer increases data ingestion performance by reducing the number of send() syscalls
	bw := bufio.NewWriterSize(sw, 1024*1024)
	bw := bufio.NewWriter(sw)
	doneCh := make(chan struct{})
	go func() {
		generateLogs(bw, workerID, cfg.activeStreams, cfg.totalStreams)
@@ -206,8 +201,6 @@ func generateAndPushLogs(cfg *workerConfig, workerID int) {
	if err != nil {
		logger.Fatalf("cannot perform request to %q: %s", cfg.url, err)
	}
	defer resp.Body.Close()

	if resp.StatusCode/100 != 2 {
		logger.Fatalf("unexpected status code got from %q: %d; want 2xx", cfg.url, err)
	}
@@ -261,9 +254,6 @@ func generateLogsAtTimestamp(bw *bufio.Writer, workerID int, ts int64, firstStre
	for j := 0; j < *u64FieldsPerLog; j++ {
		fmt.Fprintf(bw, `,"u64_%d":"%d"`, j, rand.Uint64())
	}
	for j := 0; j < *i64FieldsPerLog; j++ {
		fmt.Fprintf(bw, `,"i64_%d":"%d"`, j, int64(rand.Uint64()))
	}
	for j := 0; j < *floatFieldsPerLog; j++ {
		fmt.Fprintf(bw, `,"float_%d":"%v"`, j, math.Round(10_000*rand.Float64())/1000)
	}
@@ -311,7 +301,7 @@ type timeFlag struct {
}

func (tf *timeFlag) Set(s string) error {
	msec, err := timeutil.ParseTimeMsec(s)
	msec, err := promutils.ParseTimeMsec(s)
	if err != nil {
		return fmt.Errorf("cannot parse time from %q: %w", s, err)
	}
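The timeFlag hunk above swaps timeutil.ParseTimeMsec for promutils.ParseTimeMsec inside a flag.Value implementation. A minimal sketch of the flag.Value pattern this relies on; parseTimeMsec below is a hypothetical stand-in that only accepts Go duration offsets such as "-24h", whereas the real helpers also accept forms like "-1d" and absolute timestamps:

package main

import (
	"flag"
	"fmt"
	"time"
)

type timeFlag struct {
	s    string
	msec int64
}

func (tf *timeFlag) String() string { return tf.s }

func (tf *timeFlag) Set(s string) error {
	msec, err := parseTimeMsec(s)
	if err != nil {
		return fmt.Errorf("cannot parse time from %q: %w", s, err)
	}
	tf.s = s
	tf.msec = msec
	return nil
}

// parseTimeMsec is a hypothetical stand-in: it accepts only Go duration
// offsets relative to now, while the real helpers accept more formats.
func parseTimeMsec(s string) (int64, error) {
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	return time.Now().Add(d).UnixMilli(), nil
}

func newTimeFlag(name, defaultValue, description string) *timeFlag {
	var tf timeFlag
	if err := tf.Set(defaultValue); err != nil {
		panic(fmt.Sprintf("BUG: cannot parse default value %q for -%s: %s", defaultValue, name, err))
	}
	flag.Var(&tf, name, description)
	return &tf
}

func main() {
	start := newTimeFlag("start", "-24h", "start of the generated time range")
	flag.Parse()
	fmt.Println("start msec:", start.msec)
}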
@@ -1,324 +0,0 @@
package internalselect

import (
	"context"
	"flag"
	"fmt"
	"net/http"
	"strconv"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
)

var disableSelect = flag.Bool("internalselect.disable", false, "Whether to disable /internal/select/* HTTP endpoints")

// RequestHandler processes requests to /internal/select/*
func RequestHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	if *disableSelect {
		httpserver.Errorf(w, r, "requests to /internal/select/* are disabled with -internalselect.disable command-line flag")
		return
	}

	startTime := time.Now()

	path := r.URL.Path
	rh := requestHandlers[path]
	if rh == nil {
		httpserver.Errorf(w, r, "unsupported endpoint requested: %s", path)
		return
	}

	metrics.GetOrCreateCounter(fmt.Sprintf(`vl_http_requests_total{path=%q}`, path)).Inc()
	if err := rh(ctx, w, r); err != nil && !netutil.IsTrivialNetworkError(err) {
		metrics.GetOrCreateCounter(fmt.Sprintf(`vl_http_request_errors_total{path=%q}`, path)).Inc()
		httpserver.Errorf(w, r, "%s", err)
		// The return is skipped intentionally in order to track the duration of failed queries.
	}
	metrics.GetOrCreateSummary(fmt.Sprintf(`vl_http_request_duration_seconds{path=%q}`, path)).UpdateDuration(startTime)
}

var requestHandlers = map[string]func(ctx context.Context, w http.ResponseWriter, r *http.Request) error{
	"/internal/select/query":               processQueryRequest,
	"/internal/select/field_names":         processFieldNamesRequest,
	"/internal/select/field_values":        processFieldValuesRequest,
	"/internal/select/stream_field_names":  processStreamFieldNamesRequest,
	"/internal/select/stream_field_values": processStreamFieldValuesRequest,
	"/internal/select/streams":             processStreamsRequest,
	"/internal/select/stream_ids":          processStreamIDsRequest,
}

func processQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.QueryProtocolVersion)
	if err != nil {
		return err
	}

	w.Header().Set("Content-Type", "application/octet-stream")

	var wLock sync.Mutex
	var dataLenBuf []byte

	sendBuf := func(bb *bytesutil.ByteBuffer) error {
		if len(bb.B) == 0 {
			return nil
		}

		data := bb.B
		if !cp.DisableCompression {
			bufLen := len(bb.B)
			bb.B = zstd.CompressLevel(bb.B, bb.B, 1)
			data = bb.B[bufLen:]
		}

		wLock.Lock()
		dataLenBuf = encoding.MarshalUint64(dataLenBuf[:0], uint64(len(data)))
		_, err := w.Write(dataLenBuf)
		if err == nil {
			_, err = w.Write(data)
		}
		wLock.Unlock()

		// Reset the sent buf
		bb.Reset()

		return err
	}

	var bufs atomicutil.Slice[bytesutil.ByteBuffer]

	var errGlobalLock sync.Mutex
	var errGlobal error

	writeBlock := func(workerID uint, db *logstorage.DataBlock) {
		if errGlobal != nil {
			return
		}

		bb := bufs.Get(workerID)

		bb.B = db.Marshal(bb.B)

		if len(bb.B) < 1024*1024 {
			// Fast path - the bb is too small to be sent to the client yet.
			return
		}

		// Slow path - the bb must be sent to the client.
		if err := sendBuf(bb); err != nil {
			errGlobalLock.Lock()
			if errGlobal != nil {
				errGlobal = err
			}
			errGlobalLock.Unlock()
		}
	}

	if err := vlstorage.RunQuery(ctx, cp.TenantIDs, cp.Query, writeBlock); err != nil {
		return err
	}
	if errGlobal != nil {
		return errGlobal
	}

	// Send the remaining data
	for _, bb := range bufs.All() {
		if err := sendBuf(bb); err != nil {
			return err
		}
	}

	return nil
}

func processFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.FieldNamesProtocolVersion)
	if err != nil {
		return err
	}

	fieldNames, err := vlstorage.GetFieldNames(ctx, cp.TenantIDs, cp.Query)
	if err != nil {
		return fmt.Errorf("cannot obtain field names: %w", err)
	}

	return writeValuesWithHits(w, fieldNames, cp.DisableCompression)
}

func processFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.FieldValuesProtocolVersion)
	if err != nil {
		return err
	}

	fieldName := r.FormValue("field")

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	fieldValues, err := vlstorage.GetFieldValues(ctx, cp.TenantIDs, cp.Query, fieldName, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain field values: %w", err)
	}

	return writeValuesWithHits(w, fieldValues, cp.DisableCompression)
}

func processStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamFieldNamesProtocolVersion)
	if err != nil {
		return err
	}

	fieldNames, err := vlstorage.GetStreamFieldNames(ctx, cp.TenantIDs, cp.Query)
	if err != nil {
		return fmt.Errorf("cannot obtain stream field names: %w", err)
	}

	return writeValuesWithHits(w, fieldNames, cp.DisableCompression)
}

func processStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamFieldValuesProtocolVersion)
	if err != nil {
		return err
	}

	fieldName := r.FormValue("field")

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	fieldValues, err := vlstorage.GetStreamFieldValues(ctx, cp.TenantIDs, cp.Query, fieldName, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain stream field values: %w", err)
	}

	return writeValuesWithHits(w, fieldValues, cp.DisableCompression)
}

func processStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamsProtocolVersion)
	if err != nil {
		return err
	}

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	streams, err := vlstorage.GetStreams(ctx, cp.TenantIDs, cp.Query, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain streams: %w", err)
	}

	return writeValuesWithHits(w, streams, cp.DisableCompression)
}

func processStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	cp, err := getCommonParams(r, netselect.StreamIDsProtocolVersion)
	if err != nil {
		return err
	}

	limit, err := getInt64FromRequest(r, "limit")
	if err != nil {
		return err
	}

	streamIDs, err := vlstorage.GetStreamIDs(ctx, cp.TenantIDs, cp.Query, uint64(limit))
	if err != nil {
		return fmt.Errorf("cannot obtain streams: %w", err)
	}

	return writeValuesWithHits(w, streamIDs, cp.DisableCompression)
}

type commonParams struct {
	TenantIDs []logstorage.TenantID
	Query     *logstorage.Query

	DisableCompression bool
}

func getCommonParams(r *http.Request, expectedProtocolVersion string) (*commonParams, error) {
	version := r.FormValue("version")
	if version != expectedProtocolVersion {
		return nil, fmt.Errorf("unexpected version=%q; want %q", version, expectedProtocolVersion)
	}

	tenantIDsStr := r.FormValue("tenant_ids")
	tenantIDs, err := logstorage.UnmarshalTenantIDs([]byte(tenantIDsStr))
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal tenant_ids=%q: %w", tenantIDsStr, err)
	}

	timestamp, err := getInt64FromRequest(r, "timestamp")
	if err != nil {
		return nil, err
	}

	qStr := r.FormValue("query")
	q, err := logstorage.ParseQueryAtTimestamp(qStr, timestamp)
	if err != nil {
		return nil, fmt.Errorf("cannot unmarshal query=%q: %w", qStr, err)
	}

	s := r.FormValue("disable_compression")
	disableCompression, err := strconv.ParseBool(s)
	if err != nil {
		return nil, fmt.Errorf("cannot parse disable_compression=%q: %w", s, err)
	}

	cp := &commonParams{
		TenantIDs: tenantIDs,
		Query:     q,

		DisableCompression: disableCompression,
	}
	return cp, nil
}

func writeValuesWithHits(w http.ResponseWriter, vhs []logstorage.ValueWithHits, disableCompression bool) error {
	var b []byte
	for i := range vhs {
		b = vhs[i].Marshal(b)
	}

	if !disableCompression {
		b = zstd.CompressLevel(nil, b, 1)
	}

	w.Header().Set("Content-Type", "application/octet-stream")

	if _, err := w.Write(b); err != nil {
		return fmt.Errorf("cannot send response to the client: %w", err)
	}

	return nil
}

func getInt64FromRequest(r *http.Request, argName string) (int64, error) {
	s := r.FormValue(argName)
	n, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %s=%q: %w", argName, s, err)
	}
	return n, nil
}
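processQueryRequest in the deleted file above streams marshaled data blocks as a length prefix followed by the (optionally zstd-compressed) payload. A minimal sketch of a client-side reader for such a framing, assuming a big-endian 8-byte length prefix and the klauspost/compress zstd package; the real wire format is defined by lib/encoding and app/vlstorage/netselect, so treat this purely as an illustration:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

// readBlocks consumes frames of the form <8-byte length><payload> and invokes
// handle for every decoded payload. Compression handling mirrors the
// DisableCompression knob above.
func readBlocks(r io.Reader, compressed bool, handle func(block []byte) error) error {
	var dec *zstd.Decoder
	if compressed {
		d, err := zstd.NewReader(nil)
		if err != nil {
			return err
		}
		dec = d
		defer dec.Close()
	}
	var lenBuf [8]byte
	for {
		if _, err := io.ReadFull(r, lenBuf[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		n := binary.BigEndian.Uint64(lenBuf[:])
		data := make([]byte, n)
		if _, err := io.ReadFull(r, data); err != nil {
			return err
		}
		if compressed {
			plain, err := dec.DecodeAll(data, nil)
			if err != nil {
				return err
			}
			data = plain
		}
		if err := handle(data); err != nil {
			return err
		}
	}
}

func main() {
	// Build one uncompressed frame and read it back.
	var buf bytes.Buffer
	payload := []byte("hello")
	var lenBuf [8]byte
	binary.BigEndian.PutUint64(lenBuf[:], uint64(len(payload)))
	buf.Write(lenBuf[:])
	buf.Write(payload)
	_ = readBlocks(&buf, false, func(block []byte) error {
		fmt.Printf("block: %q\n", block)
		return nil
	})
}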
47 app/vlselect/logsql/buffered_writer.go Normal file
@@ -0,0 +1,47 @@
package logsql

import (
	"bufio"
	"io"
	"sync"
)

func getBufferedWriter(w io.Writer) *bufferedWriter {
	v := bufferedWriterPool.Get()
	if v == nil {
		return &bufferedWriter{
			bw: bufio.NewWriter(w),
		}
	}
	bw := v.(*bufferedWriter)
	bw.bw.Reset(w)
	return bw
}

func putBufferedWriter(bw *bufferedWriter) {
	bw.reset()
	bufferedWriterPool.Put(bw)
}

var bufferedWriterPool sync.Pool

type bufferedWriter struct {
	mu sync.Mutex
	bw *bufio.Writer
}

func (bw *bufferedWriter) reset() {
	// nothing to do
}

func (bw *bufferedWriter) WriteIgnoreErrors(p []byte) {
	bw.mu.Lock()
	_, _ = bw.bw.Write(p)
	bw.mu.Unlock()
}

func (bw *bufferedWriter) FlushIgnoreErrors() {
	bw.mu.Lock()
	_ = bw.bw.Flush()
	bw.mu.Unlock()
}
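A short usage sketch for the pooled writer added above; writeRows is a hypothetical caller in the same package. The pattern is: take a writer from the pool, stream data through it, then always flush and recycle it, typically via defer:

// Hypothetical caller in the same package as the pool above.
func writeRows(w io.Writer, rows [][]byte) {
	bw := getBufferedWriter(w)
	defer func() {
		bw.FlushIgnoreErrors() // push any buffered bytes to w
		putBufferedWriter(bw)  // recycle the writer for the next request
	}()
	for _, row := range rows {
		bw.WriteIgnoreErrors(row)
	}
}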
@@ -3,7 +3,6 @@ package logsql
import (
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"regexp"
@@ -12,20 +11,18 @@ import (
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/metrics"
	"github.com/valyala/fastjson"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/atomicutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// ProcessFacetsRequest handles /select/logsql/facets request.
@@ -38,38 +35,32 @@ func ProcessFacetsRequest(ctx context.Context, w http.ResponseWriter, r *http.Re
		return
	}

	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	maxValuesPerField, err := httputil.GetInt(r, "max_values_per_field")
	maxValuesPerField, err := httputils.GetInt(r, "max_values_per_field")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	maxValueLen, err := httputil.GetInt(r, "max_value_len")
	maxValueLen, err := httputils.GetInt(r, "max_value_len")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	keepConstFields := httputil.GetBool(r, "keep_const_fields")
	keepConstFields := httputils.GetBool(r, "keep_const_fields")

	// Pipes must be dropped, since it is expected facets are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	q.AddFacetsPipe(limit, maxValuesPerField, maxValueLen, keepConstFields)

	var mLock sync.Mutex
	m := make(map[string][]facetEntry)
	writeBlock := func(_ uint, db *logstorage.DataBlock) {
		rowsCount := db.RowsCount()
		if rowsCount == 0 {
	writeBlock := func(_ uint, _ []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}

		columns := db.Columns
		if len(columns) != 3 {
			logger.Panicf("BUG: expecting 3 columns; got %d columns", len(columns))
		}
@@ -125,7 +116,7 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
	if stepStr == "" {
		stepStr = "1d"
	}
	step, err := timeutil.ParseDuration(stepStr)
	step, err := promutils.ParseDuration(stepStr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse 'step' arg: %s", err)
		return
@@ -140,7 +131,7 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
	if offsetStr == "" {
		offsetStr = "0s"
	}
	offset, err := timeutil.ParseDuration(offsetStr)
	offset, err := promutils.ParseDuration(offsetStr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse 'offset' arg: %s", err)
		return
@@ -150,7 +141,7 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
	fields := r.Form["field"]

	// Obtain limit on the number of top fields entries.
	fieldsLimit, err := httputil.GetInt(r, "fields_limit")
	fieldsLimit, err := httputils.GetInt(r, "fields_limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -159,27 +150,23 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
		fieldsLimit = 0
	}

	// Pipes must be dropped, since it is expected hits are obtained
	// from the real logs stored in the database.
	// Prepare the query for hits count.
	q.DropAllPipes()

	q.AddCountByTimePipe(int64(step), int64(offset), fields)

	var mLock sync.Mutex
	m := make(map[string]*hitsSeries)
	writeBlock := func(_ uint, db *logstorage.DataBlock) {
		rowsCount := db.RowsCount()
		if rowsCount == 0 {
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}

		columns := db.Columns
		timestampValues := columns[0].Values
		hitsValues := columns[len(columns)-1].Values
		columns = columns[1 : len(columns)-1]

		bb := blockResultPool.Get()
		for i := 0; i < rowsCount; i++ {
		for i := range timestamps {
			timestampStr := strings.Clone(timestampValues[i])
			hitsStr := strings.Clone(hitsValues[i])
			hits, err := strconv.ParseUint(hitsStr, 10, 64)
@@ -218,8 +205,6 @@ func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Requ
	WriteHitsSeries(w, m)
}

var blockResultPool bytesutil.ByteBufferPool
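The interleaved writeBlock signatures above are the crux of this compare: the base branch hands callbacks a *logstorage.DataBlock, while the debug branch hands them timestamps plus columns directly. A hypothetical adapter between the two shapes, built only from the methods visible in this diff (db.GetTimestamps and db.Columns); it is an illustration, not code from either branch:

// adaptWriteBlock wraps a timestamps+columns callback so it can be passed
// where a *logstorage.DataBlock callback is expected (the same package
// context as the code above is assumed).
func adaptWriteBlock(f func(workerID uint, timestamps []int64, columns []logstorage.BlockColumn)) func(uint, *logstorage.DataBlock) {
	return func(workerID uint, db *logstorage.DataBlock) {
		timestamps, ok := db.GetTimestamps(nil)
		if !ok {
			// The block has no _time column; callers above treat this as an error.
			return
		}
		f(workerID, timestamps, db.Columns)
	}
}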
func getTopHitsSeries(m map[string]*hitsSeries, fieldsLimit int) map[string]*hitsSeries {
	if fieldsLimit <= 0 || fieldsLimit >= len(m) {
		return m
@@ -295,10 +280,6 @@ func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *htt
		return
	}

	// Pipes must be dropped, since it is expected field names are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain field names for the given query
	fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
	if err != nil {
@@ -329,7 +310,7 @@ func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *ht
	}

	// Parse limit query arg
	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -338,10 +319,6 @@ func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *ht
		limit = 0
	}

	// Pipes must be dropped, since it is expected field values are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain unique values for the given field
	values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {
@@ -364,10 +341,6 @@ func ProcessStreamFieldNamesRequest(ctx context.Context, w http.ResponseWriter,
		return
	}

	// Pipes must be dropped, since it is expected stream field names are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain stream field names for the given query
	names, err := vlstorage.GetStreamFieldNames(ctx, tenantIDs, q)
	if err != nil {
@@ -397,7 +370,7 @@ func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter,
	}

	// Parse limit query arg
	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -406,10 +379,6 @@ func ProcessStreamFieldValuesRequest(ctx context.Context, w http.ResponseWriter,
		limit = 0
	}

	// Pipes must be dropped, since it is expected stream field values are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain stream field values for the given query and the given fieldName
	values, err := vlstorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {
@@ -432,7 +401,7 @@ func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http
	}

	// Parse limit query arg
	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -441,10 +410,6 @@ func ProcessStreamIDsRequest(ctx context.Context, w http.ResponseWriter, r *http
		limit = 0
	}

	// Pipes must be dropped, since it is expected stream ids are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain streamIDs for the given query
	streamIDs, err := vlstorage.GetStreamIDs(ctx, tenantIDs, q, uint64(limit))
	if err != nil {
@@ -467,7 +432,7 @@ func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.R
	}

	// Parse limit query arg
	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -476,10 +441,6 @@ func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.R
		limit = 0
	}

	// Pipes must be dropped, since it is expected stream are obtained
	// from the real logs stored in the database.
	q.DropAllPipes()

	// Obtain streams for the given query
	streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
	if err != nil {
@@ -509,21 +470,21 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
		return
	}

	refreshIntervalMsecs, err := httputil.GetDuration(r, "refresh_interval", 1000)
	refreshIntervalMsecs, err := httputils.GetDuration(r, "refresh_interval", 1000)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	refreshInterval := time.Millisecond * time.Duration(refreshIntervalMsecs)

	startOffsetMsecs, err := httputil.GetDuration(r, "start_offset", 5*1000)
	startOffsetMsecs, err := httputils.GetDuration(r, "start_offset", 5*1000)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	startOffset := startOffsetMsecs * 1e6

	offsetMsecs, err := httputil.GetDuration(r, "offset", 1000)
	offsetMsecs, err := httputils.GetDuration(r, "offset", 1000)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
@@ -543,11 +504,6 @@ func ProcessLiveTailRequest(ctx context.Context, w http.ResponseWriter, r *http.
	if !ok {
		logger.Panicf("BUG: it is expected that http.ResponseWriter (%T) supports http.Flusher interface", w)
	}

	w.Header().Set("Content-Type", "application/x-ndjson")
	w.Header().Set("Access-Control-Allow-Origin", "*")
	flusher.Flush()

	qOrig := q
	for {
		q = qOrig.CloneWithTimeFilter(end, start, end)
@@ -610,8 +566,8 @@ func newTailProcessor(cancel func()) *tailProcessor {
	}
}

func (tp *tailProcessor) writeBlock(_ uint, db *logstorage.DataBlock) {
	if db.RowsCount() == 0 {
func (tp *tailProcessor) writeBlock(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
	if len(timestamps) == 0 {
		return
	}

@@ -623,8 +579,14 @@ func (tp *tailProcessor) writeBlock(_ uint, db *logstorage.DataBlock) {
	}

	// Make sure columns contain _time field, since it is needed for proper tail work.
	timestamps, ok := db.GetTimestamps(nil)
	if !ok {
	hasTime := false
	for _, c := range columns {
		if c.Name == "_time" {
			hasTime = true
			break
		}
	}
	if !hasTime {
		tp.err = fmt.Errorf("missing _time field")
		tp.cancel()
		return
@@ -633,8 +595,8 @@ func (tp *tailProcessor) writeBlock(_ uint, db *logstorage.DataBlock) {
	// Copy block rows to tp.perStreamRows
	for i, timestamp := range timestamps {
		streamID := ""
		fields := make([]logstorage.Field, len(db.Columns))
		for j, c := range db.Columns {
		fields := make([]logstorage.Field, len(columns))
		for j, c := range columns {
			name := strings.Clone(c.Name)
			value := strings.Clone(c.Values[i])

@@ -703,7 +665,7 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
	if stepStr == "" {
		stepStr = "1d"
	}
	step, err := timeutil.ParseDuration(stepStr)
	step, err := promutils.ParseDuration(stepStr)
	if err != nil {
		err = fmt.Errorf("cannot parse 'step' arg: %s", err)
		httpserver.SendPrometheusError(w, r, err)
@@ -726,25 +688,19 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
	m := make(map[string]*statsSeries)
	var mLock sync.Mutex

	writeBlock := func(_ uint, db *logstorage.DataBlock) {
		rowsCount := db.RowsCount()

		columns := db.Columns
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		clonedColumnNames := make([]string, len(columns))
		for i, c := range columns {
			clonedColumnNames[i] = strings.Clone(c.Name)
		}
		for i := 0; i < rowsCount; i++ {
			// Do not move q.GetTimestamp() outside writeBlock, since ts
			// must be initialized to query timestamp for every processed log row.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8312
			ts := q.GetTimestamp()
		for i := range timestamps {
			timestamp := q.GetTimestamp()
			labels := make([]logstorage.Field, 0, len(byFields))
			for j, c := range columns {
				if c.Name == "_time" {
					nsec, ok := logstorage.TryParseTimestampRFC3339Nano(c.Values[i])
					if ok {
						ts = nsec
						timestamp = nsec
						continue
					}
				}
@@ -765,7 +721,7 @@ func ProcessStatsQueryRangeRequest(ctx context.Context, w http.ResponseWriter, r
			dst = logstorage.MarshalFieldsToJSON(dst, labels)
			key := string(dst)
			p := statsPoint{
				Timestamp: ts,
				Timestamp: timestamp,
				Value:     strings.Clone(c.Values[i]),
			}

@@ -843,14 +799,12 @@ func ProcessStatsQueryRequest(ctx context.Context, w http.ResponseWriter, r *htt
	var rowsLock sync.Mutex

	timestamp := q.GetTimestamp()
	writeBlock := func(_ uint, db *logstorage.DataBlock) {
		rowsCount := db.RowsCount()
		columns := db.Columns
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		clonedColumnNames := make([]string, len(columns))
		for i, c := range columns {
			clonedColumnNames[i] = strings.Clone(c.Name)
		}
		for i := 0; i < rowsCount; i++ {
		for i := range timestamps {
			labels := make([]logstorage.Field, 0, len(byFields))
			for j, c := range columns {
				if slices.Contains(byFields, c.Name) {
@@ -906,27 +860,17 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
	}

	// Parse limit query arg
	limit, err := httputil.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	sw := &syncWriter{
		w: w,
	}

	var bwShards atomicutil.Slice[bufferedWriter]
	bwShards.Init = func(shard *bufferedWriter) {
		shard.sw = sw
	}
	bw := getBufferedWriter(w)
	defer func() {
		shards := bwShards.All()
		for _, shard := range shards {
			shard.FlushIgnoreErrors()
		}
		bw.FlushIgnoreErrors()
		putBufferedWriter(bw)
	}()

	w.Header().Set("Content-Type", "application/stream+json")

	if limit > 0 {
@@ -936,34 +880,32 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
			httpserver.Errorf(w, r, "%s", err)
			return
		}
		bw := bwShards.Get(0)
		bb := blockResultPool.Get()
		b := bb.B
		for i := range rows {
			bw.buf = logstorage.MarshalFieldsToJSON(bw.buf, rows[i].fields)
			bw.buf = append(bw.buf, '\n')
			if len(bw.buf) > 16*1024 {
				bw.FlushIgnoreErrors()
			}
			b = logstorage.MarshalFieldsToJSON(b[:0], rows[i].fields)
			b = append(b, '\n')
			bw.WriteIgnoreErrors(b)
		}
		bb.B = b
		blockResultPool.Put(bb)
		return
	}

	q.AddPipeLimit(uint64(limit))
	}

	writeBlock := func(workerID uint, db *logstorage.DataBlock) {
		rowsCount := db.RowsCount()
		if rowsCount == 0 {
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}
		columns := db.Columns

		bw := bwShards.Get(workerID)
		for i := 0; i < rowsCount; i++ {
			WriteJSONRow(bw, columns, i)
			if len(bw.buf) > 16*1024 {
				bw.FlushIgnoreErrors()
			}
		bb := blockResultPool.Get()
		for i := range timestamps {
			WriteJSONRow(bb, columns, i)
		}
		bw.WriteIgnoreErrors(bb.B)
		blockResultPool.Put(bb)
	}

	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
@@ -972,37 +914,14 @@ func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Req
	}
}

type syncWriter struct {
	mu sync.Mutex
	w  io.Writer
var blockResultPool bytesutil.ByteBufferPool

type row struct {
	timestamp int64
	fields    []logstorage.Field
}

func (sw *syncWriter) Write(p []byte) (int, error) {
	sw.mu.Lock()
	n, err := sw.w.Write(p)
	sw.mu.Unlock()
	return n, err
}

type bufferedWriter struct {
	buf []byte
	sw  *syncWriter
}

func (bw *bufferedWriter) Write(p []byte) (int, error) {
	bw.buf = append(bw.buf, p...)

	// Do not send bw.buf to bw.sw here, since the data at bw.buf may be incomplete (it must end with '\n')

	return len(p), nil
}

func (bw *bufferedWriter) FlushIgnoreErrors() {
	_, _ = bw.sw.Write(bw.buf)
	bw.buf = bw.buf[:0]
}
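Just above, the compare strips out the base branch's syncWriter/bufferedWriter sharding in favor of the pooled writer from buffered_writer.go. A standalone sketch of the sharded pattern being removed, with hypothetical names: each worker appends to a private buffer and only takes the shared lock once the buffer crosses the 16*1024-byte flush threshold seen in the code above.

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// shard buffers writes locally and only takes the shared lock on flush.
type shard struct {
	buf []byte
	mu  *sync.Mutex   // guards the shared destination
	dst *bytes.Buffer // stand-in for the shared syncWriter
}

func (s *shard) write(p []byte) {
	s.buf = append(s.buf, p...)
	if len(s.buf) > 16*1024 { // flush threshold from the code above
		s.flush()
	}
}

func (s *shard) flush() {
	if len(s.buf) == 0 {
		return
	}
	s.mu.Lock()
	s.dst.Write(s.buf)
	s.mu.Unlock()
	s.buf = s.buf[:0]
}

func main() {
	var mu sync.Mutex
	var dst bytes.Buffer
	shards := []*shard{{mu: &mu, dst: &dst}, {mu: &mu, dst: &dst}}
	var wg sync.WaitGroup
	for i, s := range shards {
		wg.Add(1)
		go func(i int, s *shard) {
			defer wg.Done()
			s.write([]byte(fmt.Sprintf("row from worker %d\n", i)))
			s.flush() // final flush of the remainder
		}(i, s)
	}
	wg.Wait()
	fmt.Print(dst.String())
}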
func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]logRow, error) {
func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
	limitUpper := 2 * limit
	q.AddPipeLimit(uint64(limitUpper))

@@ -1071,39 +990,28 @@ func getLastNQueryResults(ctx context.Context, tenantIDs []logstorage.TenantID,
	}
}

func getLastNRows(rows []logRow, limit int) []logRow {
	sortLogRows(rows)
func getLastNRows(rows []row, limit int) []row {
	sort.Slice(rows, func(i, j int) bool {
		return rows[i].timestamp < rows[j].timestamp
	})
	if len(rows) > limit {
		rows = rows[len(rows)-limit:]
	}
	return rows
}

func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]logRow, error) {
func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit int) ([]row, error) {
	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()

	var missingTimeColumn atomic.Bool
	var rows []logRow
	var rows []row
	var rowsLock sync.Mutex
	writeBlock := func(_ uint, db *logstorage.DataBlock) {
		if missingTimeColumn.Load() {
			return
		}

		columns := db.Columns
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		clonedColumnNames := make([]string, len(columns))
		for i, c := range columns {
			clonedColumnNames[i] = strings.Clone(c.Name)
		}

		timestamps, ok := db.GetTimestamps(nil)
		if !ok {
			missingTimeColumn.Store(true)
			cancel()
			return
		}

		for i, timestamp := range timestamps {
			fields := make([]logstorage.Field, len(columns))
			for j := range columns {
@@ -1113,7 +1021,7 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
			}

			rowsLock.Lock()
			rows = append(rows, logRow{
			rows = append(rows, row{
				timestamp: timestamp,
				fields:    fields,
			})
@@ -1124,13 +1032,11 @@ func getQueryResultsWithLimit(ctx context.Context, tenantIDs []logstorage.Tenant
			cancel()
		}
	}
	err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock)

	if missingTimeColumn.Load() {
		return nil, fmt.Errorf("missing _time column in the result for the query [%s]", q)
	if err := vlstorage.RunQuery(ctxWithCancel, tenantIDs, q, writeBlock); err != nil {
		return nil, err
	}

	return rows, err
	return rows, nil
}

func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
@@ -1189,22 +1095,20 @@ func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID,
	}

	// Parse optional extra_filters
	for _, extraFiltersStr := range r.Form["extra_filters"] {
		extraFilters, err := parseExtraFilters(extraFiltersStr)
		if err != nil {
			return nil, nil, err
		}
		q.AddExtraFilters(extraFilters)
	extraFiltersStr := r.FormValue("extra_filters")
	extraFilters, err := parseExtraFilters(extraFiltersStr)
	if err != nil {
		return nil, nil, err
	}
	q.AddExtraFilters(extraFilters)

	// Parse optional extra_stream_filters
	for _, extraStreamFiltersStr := range r.Form["extra_stream_filters"] {
		extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
		if err != nil {
			return nil, nil, err
		}
		q.AddExtraFilters(extraStreamFilters)
	extraStreamFiltersStr := r.FormValue("extra_stream_filters")
	extraStreamFilters, err := parseExtraStreamFilters(extraStreamFiltersStr)
	if err != nil {
		return nil, nil, err
	}
	q.AddExtraFilters(extraStreamFilters)

	return q, tenantIDs, nil
}
@@ -1215,7 +1119,7 @@ func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
		return 0, false, nil
	}
	currentTimestamp := time.Now().UnixNano()
	nsecs, err := timeutil.ParseTimeAt(s, currentTimestamp)
	nsecs, err := promutils.ParseTimeAt(s, currentTimestamp)
	if err != nil {
		return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
	}

@@ -28,7 +28,7 @@ func TestParseExtraFilters_Success(t *testing.T) {
	// LogsQL filter
	f(`foobar`, `foobar`)
	f(`foo:bar`, `foo:bar`)
	f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
	f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
}

func TestParseExtraFilters_Failure(t *testing.T) {
@@ -77,7 +77,7 @@ func TestParseExtraStreamFilters_Success(t *testing.T) {
	// LogsQL filter
	f(`foobar`, `foobar`)
	f(`foo:bar`, `foo:bar`)
	f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `{foo="bar",baz="z"} (foo:bar or foo:baz) error _time:5m`)
	f(`foo:(bar or baz) error _time:5m {"foo"=bar,baz="z"}`, `(foo:bar or foo:baz) error _time:5m {foo="bar",baz="z"}`)
}

func TestParseExtraStreamFilters_Failure(t *testing.T) {
@@ -9,11 +9,10 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect/internalselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect/logsql"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/metrics"
)
@@ -72,8 +71,7 @@ var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))
// RequestHandler handles select requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	path := r.URL.Path

	if !strings.HasPrefix(path, "/select/") && !strings.HasPrefix(path, "/internal/select/") {
	if !strings.HasPrefix(path, "/select/") {
		// Skip requests, which do not start with /select/, since these aren't our requests.
		return false
	}
@@ -100,75 +98,25 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		return true
	}

	ctx := r.Context()
	if path == "/select/logsql/tail" {
		logsqlTailRequests.Inc()
		// Process live tailing request without timeout, since it is OK to run live tailing requests for very long time.
		// Also do not apply concurrency limit to tail requests, since these limits are intended for non-tail requests.
		logsql.ProcessLiveTailRequest(ctx, w, r)
		return true
	}

	// Limit the number of concurrent queries, which can consume big amounts of CPU time.
	startTime := time.Now()
	ctx := r.Context()
	d := getMaxQueryDuration(r)
	ctxWithTimeout, cancel := context.WithTimeout(ctx, d)
	defer cancel()

	if !incRequestConcurrency(ctxWithTimeout, w, r) {
		return true
	}
	defer decRequestConcurrency()

	if strings.HasPrefix(path, "/internal/select/") {
		// Process internal request from vlselect without timeout (e.g. use ctx instead of ctxWithTimeout),
		// since the timeout must be controlled by the vlselect.
		internalselect.RequestHandler(ctx, w, r)
		return true
	}

	ok := processSelectRequest(ctxWithTimeout, w, r, path)
	if !ok {
		return false
	}

	logRequestErrorIfNeeded(ctxWithTimeout, w, r, startTime)
	return true
}

func logRequestErrorIfNeeded(ctx context.Context, w http.ResponseWriter, r *http.Request, startTime time.Time) {
	err := ctx.Err()
	switch err {
	case nil:
		// nothing to do
	case context.Canceled:
		// do not log canceled requests, since they are expected and legal.
	case context.DeadlineExceeded:
		err = &httpserver.ErrorWithStatusCode{
			Err: fmt.Errorf("the request couldn't be executed in %.3f seconds; possible solutions: "+
				"to increase -search.maxQueryDuration=%s; to pass bigger value to 'timeout' query arg", time.Since(startTime).Seconds(), maxQueryDuration),
			StatusCode: http.StatusServiceUnavailable,
		}
		httpserver.Errorf(w, r, "%s", err)
	default:
		httpserver.Errorf(w, r, "unexpected error: %s", err)
	}
}

func incRequestConcurrency(ctx context.Context, w http.ResponseWriter, r *http.Request) bool {
	startTime := time.Now()
	stopCh := ctx.Done()
	stopCh := ctxWithTimeout.Done()
	select {
	case concurrencyLimitCh <- struct{}{}:
		return true
		defer func() { <-concurrencyLimitCh }()
	default:
		// Sleep for a while until giving up. This should resolve short bursts in requests.
		concurrencyLimitReached.Inc()
		select {
		case concurrencyLimitCh <- struct{}{}:
			return true
			defer func() { <-concurrencyLimitCh }()
		case <-stopCh:
			switch ctx.Err() {
			switch ctxWithTimeout.Err() {
			case context.Canceled:
				remoteAddr := httpserver.GetQuotedRemoteAddr(r)
				requestURI := httpserver.GetRequestURI(r)
@@ -181,78 +129,97 @@ func incRequestConcurrency(ctx context.Context, w http.ResponseWriter, r *http.R
					"are executed. Possible solutions: to reduce query load; to add more compute resources to the server; "+
					"to increase -search.maxQueueDuration=%s; to increase -search.maxQueryDuration=%s; to increase -search.maxConcurrentRequests; "+
					"to pass bigger value to 'timeout' query arg",
					time.Since(startTime).Seconds(), *maxConcurrentRequests, maxQueueDuration, maxQueryDuration),
					d.Seconds(), *maxConcurrentRequests, maxQueueDuration, maxQueryDuration),
					StatusCode: http.StatusServiceUnavailable,
				}
				httpserver.Errorf(w, r, "%s", err)
			}
			return false
			return true
		}
	}
}

func decRequestConcurrency() {
	<-concurrencyLimitCh
	if path == "/select/logsql/tail" {
		logsqlTailRequests.Inc()
		// Process live tailing request without timeout (e.g. use ctx instead of ctxWithTimeout),
		// since it is OK to run live tailing requests for very long time.
		logsql.ProcessLiveTailRequest(ctx, w, r)
		return true
	}

	ok := processSelectRequest(ctxWithTimeout, w, r, path)
	if !ok {
		return false
	}

	err := ctxWithTimeout.Err()
	switch err {
	case nil:
		// nothing to do
	case context.Canceled:
		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
		requestURI := httpserver.GetRequestURI(r)
		logger.Infof("client has canceled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
			time.Since(startTime).Seconds(), remoteAddr, requestURI)
	case context.DeadlineExceeded:
		err = &httpserver.ErrorWithStatusCode{
			Err: fmt.Errorf("the request couldn't be executed in %.3f seconds; possible solutions: "+
				"to increase -search.maxQueryDuration=%s; to pass bigger value to 'timeout' query arg", d.Seconds(), maxQueryDuration),
			StatusCode: http.StatusServiceUnavailable,
		}
		httpserver.Errorf(w, r, "%s", err)
	default:
		httpserver.Errorf(w, r, "unexpected error: %s", err)
	}

	return true
}
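incRequestConcurrency above implements a semaphore with a bounded wait: a buffered channel holds the slots, a first select tries the fast path, and a second select waits until the request deadline before giving up. A standalone sketch of that pattern; the channel capacity and the error text here are simplified assumptions:

package main

import (
	"context"
	"fmt"
	"time"
)

var concurrencyLimitCh = make(chan struct{}, 16) // capacity = max concurrent requests

// acquireSlot mirrors the fast-path/slow-path select pair above.
func acquireSlot(ctx context.Context) error {
	select {
	case concurrencyLimitCh <- struct{}{}:
		return nil // fast path: a slot is free
	default:
	}
	// Slow path: wait for a slot until the context expires.
	select {
	case concurrencyLimitCh <- struct{}{}:
		return nil
	case <-ctx.Done():
		return fmt.Errorf("no free request slots: %w", ctx.Err())
	}
}

func releaseSlot() { <-concurrencyLimitCh }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := acquireSlot(ctx); err != nil {
		fmt.Println(err)
		return
	}
	defer releaseSlot()
	fmt.Println("slot acquired")
}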
func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, path string) bool {
	httpserver.EnableCORS(w, r)
	startTime := time.Now()
	switch path {
	case "/select/logsql/facets":
		logsqlFacetsRequests.Inc()
		logsql.ProcessFacetsRequest(ctx, w, r)
		logsqlFacetsDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/field_names":
		logsqlFieldNamesRequests.Inc()
		logsql.ProcessFieldNamesRequest(ctx, w, r)
		logsqlFieldNamesDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/field_values":
		logsqlFieldValuesRequests.Inc()
		logsql.ProcessFieldValuesRequest(ctx, w, r)
		logsqlFieldValuesDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/hits":
		logsqlHitsRequests.Inc()
		logsql.ProcessHitsRequest(ctx, w, r)
		logsqlHitsDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/query":
		logsqlQueryRequests.Inc()
		logsql.ProcessQueryRequest(ctx, w, r)
		logsqlQueryDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/stats_query":
		logsqlStatsQueryRequests.Inc()
		logsql.ProcessStatsQueryRequest(ctx, w, r)
		logsqlStatsQueryDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/stats_query_range":
		logsqlStatsQueryRangeRequests.Inc()
		logsql.ProcessStatsQueryRangeRequest(ctx, w, r)
		logsqlStatsQueryRangeDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/stream_field_names":
		logsqlStreamFieldNamesRequests.Inc()
		logsql.ProcessStreamFieldNamesRequest(ctx, w, r)
		logsqlStreamFieldNamesDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/stream_field_values":
		logsqlStreamFieldValuesRequests.Inc()
		logsql.ProcessStreamFieldValuesRequest(ctx, w, r)
		logsqlStreamFieldValuesDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/stream_ids":
		logsqlStreamIDsRequests.Inc()
		logsql.ProcessStreamIDsRequest(ctx, w, r)
		logsqlStreamIDsDuration.UpdateDuration(startTime)
		return true
	case "/select/logsql/streams":
		logsqlStreamsRequests.Inc()
		logsql.ProcessStreamsRequest(ctx, w, r)
		logsqlStreamsDuration.UpdateDuration(startTime)
		return true
	default:
		return false
@@ -261,7 +228,7 @@ func processSelectRequest(ctx context.Context, w http.ResponseWriter, r *http.Re

// getMaxQueryDuration returns the maximum duration for query from r.
func getMaxQueryDuration(r *http.Request) time.Duration {
	dms, err := httputil.GetDuration(r, "timeout", 0)
	dms, err := httputils.GetDuration(r, "timeout", 0)
	if err != nil {
		dms = 0
	}
@@ -273,39 +240,16 @@ func getMaxQueryDuration(r *http.Request) time.Duration {
}

var (
	logsqlFacetsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/facets"}`)
	logsqlFacetsDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/facets"}`)

	logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
	logsqlFieldNamesDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/field_names"}`)

	logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
	logsqlFieldValuesDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/field_values"}`)

	logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
	logsqlHitsDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/hits"}`)

	logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
	logsqlQueryDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/query"}`)

	logsqlStatsQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stats_query"}`)
	logsqlStatsQueryDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/stats_query"}`)

	logsqlStatsQueryRangeRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stats_query_range"}`)
	logsqlStatsQueryRangeDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/stats_query_range"}`)

	logsqlStreamFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_names"}`)
	logsqlStreamFieldNamesDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/stream_field_names"}`)

	logsqlFacetsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/facets"}`)
	logsqlFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_names"}`)
	logsqlFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/field_values"}`)
	logsqlHitsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/hits"}`)
	logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
	logsqlStatsQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stats_query"}`)
	logsqlStatsQueryRangeRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stats_query_range"}`)
	logsqlStreamFieldNamesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_names"}`)
	logsqlStreamFieldValuesRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_field_values"}`)
	logsqlStreamFieldValuesDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/stream_field_values"}`)

	logsqlStreamIDsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_ids"}`)
	logsqlStreamIDsDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/stream_ids"}`)

	logsqlStreamsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/streams"}`)
	logsqlStreamsDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/streams"}`)

	// no need to track duration for tail requests, as they usually take long time
	logsqlTailRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/tail"}`)
	logsqlStreamIDsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/stream_ids"}`)
	logsqlStreamsRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/streams"}`)
	logsqlTailRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/tail"}`)
)
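The var block above pairs a vl_http_requests_total counter with a vl_http_request_duration_seconds summary per endpoint on the base branch. A minimal sketch of that pairing with the VictoriaMetrics/metrics API used throughout this diff; the /select/logsql/foo path and handleFoo are hypothetical:

package main

import (
	"time"

	"github.com/VictoriaMetrics/metrics"
)

var (
	fooRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/foo"}`)
	fooDuration = metrics.NewSummary(`vl_http_request_duration_seconds{path="/select/logsql/foo"}`)
)

func handleFoo() {
	startTime := time.Now()
	fooRequests.Inc()
	// ... serve the request ...
	fooDuration.UpdateDuration(startTime) // records time.Since(startTime)
}

func main() {
	handleFoo()
}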
13 app/vlselect/vmui/asset-manifest.json Normal file
@@ -0,0 +1,13 @@
{
  "files": {
    "main.css": "./static/css/main.fa83344e.css",
    "main.js": "./static/js/main.8ad2bc1f.js",
    "static/js/685.f772060c.chunk.js": "./static/js/685.f772060c.chunk.js",
    "static/media/MetricsQL.md": "./static/media/MetricsQL.a00044c91d9781cf8557.md",
    "index.html": "./index.html"
  },
  "entrypoints": [
    "static/css/main.fa83344e.css",
    "static/js/main.8ad2bc1f.js"
  ]
}
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
.uplot,.uplot *,.uplot *:before,.uplot *:after{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji";line-height:1.5;width:min-content}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{position:relative;-webkit-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;position:relative;width:100%;height:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:#00000012;position:absolute;pointer-events:none}.u-cursor-x,.u-cursor-y{position:absolute;left:0;top:0;pointer-events:none;will-change:transform}.u-hz .u-cursor-x,.u-vt .u-cursor-y{height:100%;border-right:1px dashed #607D8B}.u-hz .u-cursor-y,.u-vt .u-cursor-x{width:100%;border-bottom:1px dashed #607D8B}.u-cursor-pt{position:absolute;top:0;left:0;border-radius:50%;border:0 solid;pointer-events:none;will-change:transform;background-clip:padding-box!important}.u-axis.u-off,.u-select.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-cursor-pt.u-off{display:none}
File diff suppressed because one or more lines are too long
@@ -1,57 +1 @@
|
||||
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8"/>
    <link rel="icon" href="./favicon.svg" />
    <link rel="apple-touch-icon" href="./favicon.svg" />
    <link rel="mask-icon" href="./favicon.svg" color="#000000">

    <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=5"/>
    <meta name="theme-color" content="#000000"/>
    <meta name="description" content="Explore your log data with VictoriaLogs UI"/>
    <!--
      manifest.json provides metadata used when your web app is installed on a
      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
    -->
    <link rel="manifest" href="./manifest.json"/>
    <!--
      Notice the use of %PUBLIC_URL% in the tags above.
      It will be replaced with the URL of the `public` folder during the build.
      Only files inside the `public` folder can be referenced from the HTML.

      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
      work correctly both with client-side routing and a non-root public URL.
      Learn how to configure a non-root public URL by running `npm run build`.
    -->
    <title>UI for VictoriaLogs</title>

    <meta name="twitter:card" content="summary">
    <meta name="twitter:title" content="UI for VictoriaLogs">
    <meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/">
    <meta name="twitter:description" content="Explore your log data with VictoriaLogs UI">
    <meta name="twitter:image" content="./preview.jpg">

    <meta property="og:type" content="website">
    <meta property="og:title" content="UI for VictoriaLogs">
    <meta property="og:url" content="https://victoriametrics.com/products/victorialogs/">
    <meta property="og:description" content="Explore your log data with VictoriaLogs UI">
    <script type="module" crossorigin src="./assets/index-BaRvaPfA.js"></script>
    <link rel="modulepreload" crossorigin href="./assets/vendor-D8IJGiEn.js">
    <link rel="stylesheet" crossorigin href="./assets/vendor-D1GxaB_c.css">
    <link rel="stylesheet" crossorigin href="./assets/index-C85_NB5q.css">
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!--
      This HTML file is a template.
      If you open it directly in the browser, you will see an empty page.

      You can add webfonts, meta tags, or analytics to this file.
      The build step will place the bundled scripts into the <body> tag.

      To begin the development, run `npm start` or `yarn start`.
      To create a production bundle, use `npm run build` or `yarn build`.
    -->
  </body>
</html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.svg"/><link rel="apple-touch-icon" href="./favicon.svg"/><link rel="mask-icon" href="./favicon.svg" color="#000000"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=5"/><meta name="theme-color" content="#000000"/><meta name="description" content="Explore your log data with VictoriaLogs UI"/><link rel="manifest" href="./manifest.json"/><title>UI for VictoriaLogs</title><meta name="twitter:card" content="summary"><meta name="twitter:title" content="UI for VictoriaLogs"><meta name="twitter:site" content="@https://victoriametrics.com/products/victorialogs/"><meta name="twitter:description" content="Explore your log data with VictoriaLogs UI"><meta name="twitter:image" content="./preview.jpg"><meta property="og:type" content="website"><meta property="og:title" content="UI for VictoriaLogs"><meta property="og:url" content="https://victoriametrics.com/products/victorialogs/"><meta property="og:description" content="Explore your log data with VictoriaLogs UI"><script defer="defer" src="./static/js/main.8ad2bc1f.js"></script><link href="./static/css/main.fa83344e.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
1
app/vlselect/vmui/static/css/main.fa83344e.css
Normal file
File diff suppressed because one or more lines are too long
1
app/vlselect/vmui/static/js/685.f772060c.chunk.js
Normal file
File diff suppressed because one or more lines are too long
2
app/vlselect/vmui/static/js/main.8ad2bc1f.js
Normal file
File diff suppressed because one or more lines are too long
38
app/vlselect/vmui/static/js/main.8ad2bc1f.js.LICENSE.txt
Normal file
@@ -0,0 +1,38 @@
/*!
	Copyright (c) 2018 Jed Watson.
	Licensed under the MIT License (MIT), see
	http://jedwatson.github.io/classnames
*/

/**
 * @remix-run/router v1.19.2
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */

/**
 * React Router DOM v6.26.2
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */

/**
 * React Router v6.26.2
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */
2352
app/vlselect/vmui/static/media/MetricsQL.a00044c91d9781cf8557.md
Normal file
File diff suppressed because it is too large
@@ -10,14 +10,11 @@ import (

    "github.com/VictoriaMetrics/metrics"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netinsert"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage/netselect"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

var (
@@ -41,59 +38,15 @@
    minFreeDiskSpaceBytes = flagutil.NewBytes("storage.minFreeDiskSpaceBytes", 10e6, "The minimum free disk space at -storageDataPath after which "+
        "the storage stops accepting new data")

    forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge . It overrides -httpAuth.* . "+
        "See https://docs.victoriametrics.com/victorialogs/#forced-merge")
    forceFlushAuthKey = flagutil.NewPassword("forceFlushAuthKey", "authKey, which must be passed in query string to /internal/force_flush . It overrides -httpAuth.* . "+
        "See https://docs.victoriametrics.com/victorialogs/#forced-flush")

    storageNodeAddrs = flagutil.NewArrayString("storageNode", "Comma-separated list of TCP addresses for storage nodes to route the ingested logs to and to send select queries to. "+
        "If the list is empty, then the ingested logs are stored and queried locally from -storageDataPath")
    insertConcurrency = flag.Int("insert.concurrency", 2, "The average number of concurrent data ingestion requests, which can be sent to every -storageNode")
    insertDisableCompression = flag.Bool("insert.disableCompression", false, "Whether to disable compression when sending the ingested data to -storageNode nodes. "+
        "Disabled compression reduces CPU usage at the cost of higher network usage")
    selectDisableCompression = flag.Bool("select.disableCompression", false, "Whether to disable compression for select query responses received from -storageNode nodes. "+
        "Disabled compression reduces CPU usage at the cost of higher network usage")

    storageNodeUsername = flagutil.NewArrayString("storageNode.username", "Optional basic auth username to use for the corresponding -storageNode")
    storageNodePassword = flagutil.NewArrayString("storageNode.password", "Optional basic auth password to use for the corresponding -storageNode")
    storageNodePasswordFile = flagutil.NewArrayString("storageNode.passwordFile", "Optional path to basic auth password to use for the corresponding -storageNode. "+
        "The file is re-read every second")
    storageNodeBearerToken = flagutil.NewArrayString("storageNode.bearerToken", "Optional bearer auth token to use for the corresponding -storageNode")
    storageNodeBearerTokenFile = flagutil.NewArrayString("storageNode.bearerTokenFile", "Optional path to bearer token file to use for the corresponding -storageNode. "+
        "The token is re-read from the file every second")

    storageNodeTLS = flagutil.NewArrayBool("storageNode.tls", "Whether to use TLS (HTTPS) protocol for communicating with the corresponding -storageNode. "+
        "By default communication is performed via HTTP")
    storageNodeTLSCAFile = flagutil.NewArrayString("storageNode.tlsCAFile", "Optional path to TLS CA file to use for verifying connections to the corresponding -storageNode. "+
        "By default, system CA is used")
    storageNodeTLSCertFile = flagutil.NewArrayString("storageNode.tlsCertFile", "Optional path to client-side TLS certificate file to use when connecting "+
        "to the corresponding -storageNode")
    storageNodeTLSKeyFile = flagutil.NewArrayString("storageNode.tlsKeyFile", "Optional path to client-side TLS certificate key to use when connecting to the corresponding -storageNode")
    storageNodeTLSServerName = flagutil.NewArrayString("storageNode.tlsServerName", "Optional TLS server name to use for connections to the corresponding -storageNode. "+
        "By default, the server name from -storageNode is used")
    storageNodeTLSInsecureSkipVerify = flagutil.NewArrayBool("storageNode.tlsInsecureSkipVerify", "Whether to skip tls verification when connecting to the corresponding -storageNode")
    forceMergeAuthKey = flagutil.NewPassword("forceMergeAuthKey", "authKey, which must be passed in query string to /internal/force_merge pages. It overrides -httpAuth.*")
)
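The storageNode.* flags above are positional arrays: the i-th value applies to the i-th -storageNode address, which is why the code below reads them via GetOptionalArg(argIdx). A minimal sketch of that lookup convention, assuming the usual fallback semantics (a single configured value is shared by all nodes); this is a simplified stand-in for illustration, not the flagutil implementation:

package main

import "fmt"

// getOptionalArg mirrors the per-node flag lookup convention used above:
// the i-th value when one is given per node, a single shared value as a
// fallback, and the empty string when the flag is unset.
func getOptionalArg(values []string, i int) string {
    switch {
    case i < len(values):
        return values[i]
    case len(values) == 1:
        return values[0]
    default:
        return ""
    }
}

func main() {
    usernames := []string{"alice"} // one -storageNode.username shared by all nodes
    for i := 0; i < 3; i++ {
        fmt.Printf("storageNode #%d -> username %q\n", i, getOptionalArg(usernames, i))
    }
}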
var localStorage *logstorage.Storage
var localStorageMetrics *metrics.Set

var netstorageInsert *netinsert.Storage
var netstorageSelect *netselect.Storage

// Init initializes vlstorage.
//
// Stop must be called when vlstorage is no longer needed
func Init() {
    if len(*storageNodeAddrs) == 0 {
        initLocalStorage()
    } else {
        initNetworkStorage()
    }
}

func initLocalStorage() {
    if localStorage != nil {
        logger.Panicf("BUG: initLocalStorage() has been already called")
    if strg != nil {
        logger.Panicf("BUG: Init() has been already called")
    }

    if retentionPeriod.Duration() < 24*time.Hour {
@@ -110,157 +63,60 @@ func initLocalStorage() {
    }
    logger.Infof("opening storage at -storageDataPath=%s", *storageDataPath)
    startTime := time.Now()
    localStorage = logstorage.MustOpenStorage(*storageDataPath, cfg)
    strg = logstorage.MustOpenStorage(*storageDataPath, cfg)

    var ss logstorage.StorageStats
    localStorage.UpdateStats(&ss)
    strg.UpdateStats(&ss)
    logger.Infof("successfully opened storage in %.3f seconds; smallParts: %d; bigParts: %d; smallPartBlocks: %d; bigPartBlocks: %d; smallPartRows: %d; bigPartRows: %d; "+
        "smallPartSize: %d bytes; bigPartSize: %d bytes",
        time.Since(startTime).Seconds(), ss.SmallParts, ss.BigParts, ss.SmallPartBlocks, ss.BigPartBlocks, ss.SmallPartRowsCount, ss.BigPartRowsCount,
        ss.CompressedSmallPartSize, ss.CompressedBigPartSize)

    // register local storage metrics
    localStorageMetrics = metrics.NewSet()
    localStorageMetrics.RegisterMetricsWriter(func(w io.Writer) {
        writeStorageMetrics(w, localStorage)
    // register storage metrics
    storageMetrics = metrics.NewSet()
    storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
        writeStorageMetrics(w, strg)
    })
    metrics.RegisterSet(localStorageMetrics)
}

func initNetworkStorage() {
    if netstorageInsert != nil || netstorageSelect != nil {
        logger.Panicf("BUG: initNetworkStorage() has been already called")
    }

    authCfgs := make([]*promauth.Config, len(*storageNodeAddrs))
    isTLSs := make([]bool, len(*storageNodeAddrs))
    for i := range authCfgs {
        authCfgs[i] = newAuthConfigForStorageNode(i)
        isTLSs[i] = storageNodeTLS.GetOptionalArg(i)
    }

    logger.Infof("starting insert service for nodes %s", *storageNodeAddrs)
    netstorageInsert = netinsert.NewStorage(*storageNodeAddrs, authCfgs, isTLSs, *insertConcurrency, *insertDisableCompression)

    logger.Infof("initializing select service for nodes %s", *storageNodeAddrs)
    netstorageSelect = netselect.NewStorage(*storageNodeAddrs, authCfgs, isTLSs, *selectDisableCompression)

    logger.Infof("initialized all the network services")
}

func newAuthConfigForStorageNode(argIdx int) *promauth.Config {
    username := storageNodeUsername.GetOptionalArg(argIdx)
    password := storageNodePassword.GetOptionalArg(argIdx)
    passwordFile := storageNodePasswordFile.GetOptionalArg(argIdx)
    var basicAuthCfg *promauth.BasicAuthConfig
    if username != "" || password != "" || passwordFile != "" {
        basicAuthCfg = &promauth.BasicAuthConfig{
            Username:     username,
            Password:     promauth.NewSecret(password),
            PasswordFile: passwordFile,
        }
    }

    token := storageNodeBearerToken.GetOptionalArg(argIdx)
    tokenFile := storageNodeBearerTokenFile.GetOptionalArg(argIdx)

    tlsCfg := &promauth.TLSConfig{
        CAFile:             storageNodeTLSCAFile.GetOptionalArg(argIdx),
        CertFile:           storageNodeTLSCertFile.GetOptionalArg(argIdx),
        KeyFile:            storageNodeTLSKeyFile.GetOptionalArg(argIdx),
        ServerName:         storageNodeTLSServerName.GetOptionalArg(argIdx),
        InsecureSkipVerify: storageNodeTLSInsecureSkipVerify.GetOptionalArg(argIdx),
    }

    opts := &promauth.Options{
        BasicAuth:       basicAuthCfg,
        BearerToken:     token,
        BearerTokenFile: tokenFile,
        TLSConfig:       tlsCfg,
    }
    ac, err := opts.NewConfig()
    if err != nil {
        logger.Panicf("FATAL: cannot populate auth config for storage node #%d: %s", argIdx, err)
    }

    return ac
    metrics.RegisterSet(storageMetrics)
}

// Stop stops vlstorage.
func Stop() {
    if localStorage != nil {
        metrics.UnregisterSet(localStorageMetrics, true)
        localStorageMetrics = nil
    metrics.UnregisterSet(storageMetrics, true)
    storageMetrics = nil

        localStorage.MustClose()
        localStorage = nil
    } else {
        netstorageInsert.MustStop()
        netstorageInsert = nil

        netstorageSelect.MustStop()
        netstorageSelect = nil
    }
    strg.MustClose()
    strg = nil
}

// RequestHandler is a storage request handler.
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
    path := r.URL.Path
    switch path {
    case "/internal/force_merge":
        return processForceMerge(w, r)
    case "/internal/force_flush":
        return processForceFlush(w, r)
    if path == "/internal/force_merge" {
        if !httpserver.CheckAuthFlag(w, r, forceMergeAuthKey) {
            return true
        }
        // Run force merge in background
        partitionNamePrefix := r.FormValue("partition_prefix")
        go func() {
            activeForceMerges.Inc()
            defer activeForceMerges.Dec()
            logger.Infof("forced merge for partition_prefix=%q has been started", partitionNamePrefix)
            startTime := time.Now()
            strg.MustForceMerge(partitionNamePrefix)
            logger.Infof("forced merge for partition_prefix=%q has been successfully finished in %.3f seconds", partitionNamePrefix, time.Since(startTime).Seconds())
        }()
        return true
    }
    return false
}

func processForceMerge(w http.ResponseWriter, r *http.Request) bool {
    if localStorage == nil {
        // Force merge isn't supported by non-local storage
        return false
    }
    var strg *logstorage.Storage
    var storageMetrics *metrics.Set

    if !httpserver.CheckAuthFlag(w, r, forceMergeAuthKey) {
        return true
    }

    // Run force merge in background
    partitionNamePrefix := r.FormValue("partition_prefix")
    go func() {
        activeForceMerges.Inc()
        defer activeForceMerges.Dec()
        logger.Infof("forced merge for partition_prefix=%q has been started", partitionNamePrefix)
        startTime := time.Now()
        localStorage.MustForceMerge(partitionNamePrefix)
        logger.Infof("forced merge for partition_prefix=%q has been successfully finished in %.3f seconds", partitionNamePrefix, time.Since(startTime).Seconds())
    }()
    return true
}

func processForceFlush(w http.ResponseWriter, r *http.Request) bool {
    if localStorage == nil {
        // Force flush isn't supported by non-local storage
        return false
    }

    if !httpserver.CheckAuthFlag(w, r, forceFlushAuthKey) {
        return true
    }

    logger.Infof("flushing storage to make pending data available for reading")
    localStorage.DebugFlush()
    return true
}
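Both the old and the new handler above guard /internal/force_merge with httpserver.CheckAuthFlag and run the merge in a background goroutine, so the HTTP call returns immediately. A client-side sketch for triggering it; the host:port (VictoriaLogs' usual 9428), the authKey query parameter name, and the SECRET value are assumptions inferred from the forceMergeAuthKey flag description:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Kick off a forced merge for partitions whose names start with "2024";
    // the handler above returns at once while the merge runs in background.
    u := "http://localhost:9428/internal/force_merge?authKey=SECRET&partition_prefix=2024"
    resp, err := http.Get(u)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, string(body))
}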

// CanWriteData returns non-nil error if it cannot write data to vlstorage
// CanWriteData returns non-nil error if it cannot write data to vlstorage.
func CanWriteData() error {
    if localStorage == nil {
        // The data can be always written in non-local mode.
        return nil
    }

    if localStorage.IsReadOnly() {
    if strg.IsReadOnly() {
        return &httpserver.ErrorWithStatusCode{
            Err: fmt.Errorf("cannot add rows into storage in read-only mode; the storage can be in read-only mode "+
                "because of lack of free disk space at -storageDataPath=%s", *storageDataPath),
@@ -274,77 +130,50 @@ func CanWriteData() error {
//
// It is advised to call CanWriteData() before calling MustAddRows()
func MustAddRows(lr *logstorage.LogRows) {
    if localStorage != nil {
        // Store lr in the local storage.
        localStorage.MustAddRows(lr)
    } else {
        // Store lr across the remote storage nodes.
        lr.ForEachRow(netstorageInsert.AddRow)
    }
    strg.MustAddRows(lr)
}

// RunQuery runs the given q and calls writeBlock for the returned data blocks
func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteDataBlockFunc) error {
    if localStorage != nil {
        return localStorage.RunQuery(ctx, tenantIDs, q, writeBlock)
    }
    return netstorageSelect.RunQuery(ctx, tenantIDs, q, writeBlock)
func RunQuery(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, writeBlock logstorage.WriteBlockFunc) error {
    return strg.RunQuery(ctx, tenantIDs, q, writeBlock)
}

// GetFieldNames executes q and returns field names seen in results.
func GetFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetFieldNames(ctx, tenantIDs, q)
    }
    return netstorageSelect.GetFieldNames(ctx, tenantIDs, q)
    return strg.GetFieldNames(ctx, tenantIDs, q)
}

// GetFieldValues executes q and returns unique values for the fieldName seen in results.
//
// If limit > 0, then up to limit unique values are returned.
func GetFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
    }
    return netstorageSelect.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
    return strg.GetFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreamFieldNames executes q and returns stream field names seen in results.
func GetStreamFieldNames(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetStreamFieldNames(ctx, tenantIDs, q)
    }
    return netstorageSelect.GetStreamFieldNames(ctx, tenantIDs, q)
    return strg.GetStreamFieldNames(ctx, tenantIDs, q)
}

// GetStreamFieldValues executes q and returns stream field values for the given fieldName seen in results.
//
// If limit > 0, then up to limit unique stream field values are returned.
func GetStreamFieldValues(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, fieldName string, limit uint64) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
    }
    return netstorageSelect.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
    return strg.GetStreamFieldValues(ctx, tenantIDs, q, fieldName, limit)
}

// GetStreams executes q and returns streams seen in query results.
//
// If limit > 0, then up to limit unique streams are returned.
func GetStreams(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetStreams(ctx, tenantIDs, q, limit)
    }
    return netstorageSelect.GetStreams(ctx, tenantIDs, q, limit)
    return strg.GetStreams(ctx, tenantIDs, q, limit)
}

// GetStreamIDs executes q and returns streamIDs seen in query results.
//
// If limit > 0, then up to limit unique streamIDs are returned.
func GetStreamIDs(ctx context.Context, tenantIDs []logstorage.TenantID, q *logstorage.Query, limit uint64) ([]logstorage.ValueWithHits, error) {
    if localStorage != nil {
        return localStorage.GetStreamIDs(ctx, tenantIDs, q, limit)
    }
    return netstorageSelect.GetStreamIDs(ctx, tenantIDs, q, limit)
    return strg.GetStreamIDs(ctx, tenantIDs, q, limit)
}

func writeStorageMetrics(w io.Writer, strg *logstorage.Storage) {
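After this change the package exposes a small local-only surface: Init/Stop for lifecycle, CanWriteData/MustAddRows for ingestion, and RunQuery plus the Get* helpers for reads. A sketch of the intended call order, assuming only the functions shown above (flag parsing and the actual ingestion path are elided):

package main

import (
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
)

func main() {
    // Open the storage at -storageDataPath (flags are parsed by the real app).
    vlstorage.Init()
    defer vlstorage.Stop()

    // Reject ingestion early while the storage is read-only, e.g. when free
    // disk space drops below -storage.minFreeDiskSpaceBytes.
    if err := vlstorage.CanWriteData(); err != nil {
        return
    }
    // A real ingestion handler would now build a *logstorage.LogRows batch
    // and pass it to vlstorage.MustAddRows(lr).
}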
@@ -1,369 +0,0 @@
package netinsert

import (
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "sync"
    "sync/atomic"
    "time"

    "github.com/valyala/fastrand"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/contextutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
)

// the maximum size of a single data block sent to a storage node.
const maxInsertBlockSize = 2 * 1024 * 1024

// ProtocolVersion is the version of the data ingestion protocol.
//
// It must be changed every time the data encoding at /internal/insert HTTP endpoint is changed.
const ProtocolVersion = "v1"

// Storage is a network storage for sending data to remote storage nodes in the cluster.
type Storage struct {
    sns []*storageNode

    disableCompression bool

    srt *streamRowsTracker

    pendingDataBuffers chan *bytesutil.ByteBuffer

    stopCh chan struct{}
    wg     sync.WaitGroup
}

type storageNode struct {
    // scheme is http or https scheme to communicate with addr
    scheme string

    // addr is TCP address of storage node to send the ingested data to
    addr string

    // s is a storage, which holds the given storageNode
    s *Storage

    // c is an http client used for sending data blocks to addr.
    c *http.Client

    // ac is auth config used for setting request headers such as Authorization and Host.
    ac *promauth.Config

    // pendingData contains pending data, which must be sent to the storage node at the addr.
    pendingDataMu        sync.Mutex
    pendingData          *bytesutil.ByteBuffer
    pendingDataLastFlush time.Time

    // the unix timestamp until the storageNode is disabled for data writing.
    disabledUntil atomic.Uint64
}

func newStorageNode(s *Storage, addr string, ac *promauth.Config, isTLS bool) *storageNode {
    tr := httputil.NewTransport(false, "vlinsert_backend")
    tr.TLSHandshakeTimeout = 20 * time.Second
    tr.DisableCompression = true

    scheme := "http"
    if isTLS {
        scheme = "https"
    }

    sn := &storageNode{
        scheme: scheme,
        addr:   addr,
        s:      s,
        c: &http.Client{
            Transport: ac.NewRoundTripper(tr),
        },
        ac: ac,

        pendingData: &bytesutil.ByteBuffer{},
    }

    s.wg.Add(1)
    go func() {
        defer s.wg.Done()
        sn.backgroundFlusher()
    }()

    return sn
}

func (sn *storageNode) backgroundFlusher() {
    t := time.NewTicker(time.Second)
    defer t.Stop()

    for {
        select {
        case <-sn.s.stopCh:
            return
        case <-t.C:
            sn.flushPendingData()
        }
    }
}

func (sn *storageNode) flushPendingData() {
    sn.pendingDataMu.Lock()
    if time.Since(sn.pendingDataLastFlush) < time.Second {
        // nothing to flush
        sn.pendingDataMu.Unlock()
        return
    }

    pendingData := sn.grabPendingDataForFlushLocked()
    sn.pendingDataMu.Unlock()

    sn.mustSendInsertRequest(pendingData)
}

func (sn *storageNode) addRow(r *logstorage.InsertRow) {
    bb := bbPool.Get()
    b := bb.B

    b = r.Marshal(b)

    if len(b) > maxInsertBlockSize {
        logger.Warnf("skipping too long log entry, since its length exceeds %d bytes; the actual log entry length is %d bytes; log entry contents: %s", maxInsertBlockSize, len(b), b)
        return
    }

    var pendingData *bytesutil.ByteBuffer
    sn.pendingDataMu.Lock()
    if sn.pendingData.Len()+len(b) > maxInsertBlockSize {
        pendingData = sn.grabPendingDataForFlushLocked()
    }
    sn.pendingData.MustWrite(b)
    sn.pendingDataMu.Unlock()

    bb.B = b
    bbPool.Put(bb)

    if pendingData != nil {
        sn.mustSendInsertRequest(pendingData)
    }
}

var bbPool bytesutil.ByteBufferPool
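grabPendingDataForFlushLocked (below) swaps the full buffer for a fresh one taken from pendingDataBuffers, and mustSendInsertRequest returns the buffer to the channel once the flush completes. Because the channel is pre-filled with a fixed number of buffers in NewStorage, a grab blocks while all buffers are in flight, which bounds memory use and back-pressures ingestion. A toy sketch of the same pattern, with bytes.Buffer standing in for bytesutil.ByteBuffer:

package main

import (
    "bytes"
    "fmt"
)

func main() {
    // Pre-fill a channel with all available buffers; the capacity plays the
    // role of concurrency*len(addrs) in NewStorage below.
    pool := make(chan *bytes.Buffer, 2)
    for i := 0; i < cap(pool); i++ {
        pool <- &bytes.Buffer{}
    }

    buf := <-pool // grab: blocks if every buffer is out being flushed
    buf.WriteString("pending log rows")
    fmt.Println(buf.Len(), "bytes staged for flush")

    buf.Reset() // return the buffer once the flush has completed
    pool <- buf
}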
func (sn *storageNode) grabPendingDataForFlushLocked() *bytesutil.ByteBuffer {
    sn.pendingDataLastFlush = time.Now()
    pendingData := sn.pendingData
    sn.pendingData = <-sn.s.pendingDataBuffers

    return pendingData
}

func (sn *storageNode) mustSendInsertRequest(pendingData *bytesutil.ByteBuffer) {
    defer func() {
        pendingData.Reset()
        sn.s.pendingDataBuffers <- pendingData
    }()

    err := sn.sendInsertRequest(pendingData)
    if err == nil {
        return
    }

    if !errors.Is(err, errTemporarilyDisabled) {
        logger.Warnf("%s; re-routing the data block to the remaining nodes", err)
    }
    for !sn.s.sendInsertRequestToAnyNode(pendingData) {
        logger.Errorf("cannot send pending data to all storage nodes, since all of them are unavailable; re-trying to send the data in a second")

        t := timerpool.Get(time.Second)
        select {
        case <-sn.s.stopCh:
            timerpool.Put(t)
            logger.Errorf("dropping %d bytes of data, since there are no available storage nodes", pendingData.Len())
            return
        case <-t.C:
            timerpool.Put(t)
        }
    }
}

func (sn *storageNode) sendInsertRequest(pendingData *bytesutil.ByteBuffer) error {
    dataLen := pendingData.Len()
    if dataLen == 0 {
        // Nothing to send.
        return nil
    }

    if sn.disabledUntil.Load() > fasttime.UnixTimestamp() {
        return errTemporarilyDisabled
    }

    ctx, cancel := contextutil.NewStopChanContext(sn.s.stopCh)
    defer cancel()

    var body io.Reader
    if !sn.s.disableCompression {
        bb := zstdBufPool.Get()
        defer zstdBufPool.Put(bb)

        bb.B = zstd.CompressLevel(bb.B[:0], pendingData.B, 1)
        body = bb.NewReader()
    } else {
        body = pendingData.NewReader()
    }

    reqURL := sn.getRequestURL("/internal/insert")
    req, err := http.NewRequestWithContext(ctx, "POST", reqURL, body)
    if err != nil {
        logger.Panicf("BUG: unexpected error when creating an http request: %s", err)
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    if !sn.s.disableCompression {
        req.Header.Set("Content-Encoding", "zstd")
    }
    if err := sn.ac.SetHeaders(req, true); err != nil {
        return fmt.Errorf("cannot set auth headers for %q: %w", reqURL, err)
    }

    resp, err := sn.c.Do(req)
    if err != nil {
        // Disable sn for data writing for 10 seconds.
        sn.disabledUntil.Store(fasttime.UnixTimestamp() + 10)

        return fmt.Errorf("cannot send data block with the length %d to %q: %s", pendingData.Len(), reqURL, err)
    }
    defer resp.Body.Close()

    if resp.StatusCode/100 == 2 {
        return nil
    }

    respBody, err := io.ReadAll(resp.Body)
    if err != nil {
        respBody = []byte(fmt.Sprintf("%s", err))
    }

    // Disable sn for data writing for 10 seconds.
    sn.disabledUntil.Store(fasttime.UnixTimestamp() + 10)

    return fmt.Errorf("unexpected status code returned when sending data block to %q: %d; want 2xx; response body: %q", reqURL, resp.StatusCode, respBody)
}

func (sn *storageNode) getRequestURL(path string) string {
    return fmt.Sprintf("%s://%s%s?version=%s", sn.scheme, sn.addr, path, url.QueryEscape(ProtocolVersion))
}

var zstdBufPool bytesutil.ByteBufferPool

// NewStorage returns new Storage for the given addrs with the given authCfgs.
//
// The concurrency is the average number of concurrent connections per every addr.
//
// If disableCompression is set, then the data is sent uncompressed to the remote storage.
//
// Call MustStop on the returned storage when it is no longer needed.
func NewStorage(addrs []string, authCfgs []*promauth.Config, isTLSs []bool, concurrency int, disableCompression bool) *Storage {
    pendingDataBuffers := make(chan *bytesutil.ByteBuffer, concurrency*len(addrs))
    for i := 0; i < cap(pendingDataBuffers); i++ {
        pendingDataBuffers <- &bytesutil.ByteBuffer{}
    }

    s := &Storage{
        disableCompression: disableCompression,
        pendingDataBuffers: pendingDataBuffers,
        stopCh:             make(chan struct{}),
    }

    sns := make([]*storageNode, len(addrs))
    for i, addr := range addrs {
        sns[i] = newStorageNode(s, addr, authCfgs[i], isTLSs[i])
    }
    s.sns = sns

    s.srt = newStreamRowsTracker(len(sns))

    return s
}

// MustStop stops the s.
func (s *Storage) MustStop() {
    close(s.stopCh)
    s.wg.Wait()
    s.sns = nil
}

// AddRow adds the given log row into s.
func (s *Storage) AddRow(streamHash uint64, r *logstorage.InsertRow) {
    idx := s.srt.getNodeIdx(streamHash)
    sn := s.sns[idx]
    sn.addRow(r)
}

func (s *Storage) sendInsertRequestToAnyNode(pendingData *bytesutil.ByteBuffer) bool {
    startIdx := int(fastrand.Uint32n(uint32(len(s.sns))))
    for i := range s.sns {
        idx := (startIdx + i) % len(s.sns)
        sn := s.sns[idx]
        err := sn.sendInsertRequest(pendingData)
        if err == nil {
            return true
        }
        if !errors.Is(err, errTemporarilyDisabled) {
            logger.Warnf("cannot send pending data to the storage node %q: %s; trying to send it to another storage node", sn.addr, err)
        }
    }
    return false
}

var errTemporarilyDisabled = fmt.Errorf("writing to the node is temporarily disabled")

type streamRowsTracker struct {
    mu sync.Mutex

    nodesCount    int64
    rowsPerStream map[uint64]uint64
}

func newStreamRowsTracker(nodesCount int) *streamRowsTracker {
    return &streamRowsTracker{
        nodesCount:    int64(nodesCount),
        rowsPerStream: make(map[uint64]uint64),
    }
}

func (srt *streamRowsTracker) getNodeIdx(streamHash uint64) uint64 {
    if srt.nodesCount == 1 {
        // Fast path for a single node.
        return 0
    }

    srt.mu.Lock()
    defer srt.mu.Unlock()

    streamRows := srt.rowsPerStream[streamHash] + 1
    srt.rowsPerStream[streamHash] = streamRows

    if streamRows <= 1000 {
        // Write the initial rows for the stream to a single storage node for better locality.
        // This should work great for log streams containing a small number of logs, since such streams
        // are distributed evenly among the available storage nodes thanks to their different streamHash values.
        return streamHash % uint64(srt.nodesCount)
    }

    // The log stream contains more than 1000 rows. Distribute them among storage nodes at random
    // in order to improve query performance over this stream (the data for the log stream
    // can be processed in parallel on all the storage nodes).
    //
    // The random distribution is preferred over round-robin distribution in order to avoid possible
    // dependency between the order of the ingested logs and the number of storage nodes,
    // which may lead to non-uniform distribution of logs among storage nodes.
    return uint64(fastrand.Uint32n(uint32(srt.nodesCount)))
}
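The placement policy encoded in getNodeIdx is: keep the first 1000 rows of a stream on one node picked by streamHash for locality, then scatter further rows at random so large streams can be queried in parallel across all nodes. A self-contained toy reproduction of that decision rule (the pickNode name and the demo harness are illustrative; the 1000-row threshold and the hash-then-random policy come from the code above):

package main

import (
    "fmt"
    "math/rand"
)

// pickNode mirrors streamRowsTracker.getNodeIdx: route the first rows of a
// stream to streamHash%nodes for locality, then scatter the rest randomly.
func pickNode(rowsSeen, streamHash uint64, nodes int) uint64 {
    if nodes == 1 {
        return 0
    }
    if rowsSeen <= 1000 {
        return streamHash % uint64(nodes)
    }
    return uint64(rand.Intn(nodes))
}

func main() {
    const nodes = 3
    rowsPerStream := map[uint64]uint64{}
    for _, streamHash := range []uint64{7, 7, 42} {
        rowsPerStream[streamHash]++
        fmt.Printf("stream %d, row %d -> node %d\n",
            streamHash, rowsPerStream[streamHash],
            pickNode(rowsPerStream[streamHash], streamHash, nodes))
    }
}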
Some files were not shown because too many files have changed in this diff