24
vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
generated
vendored
Normal file
24
vendor/github.com/prometheus/prometheus/pkg/exemplar/exemplar.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package exemplar
|
||||
|
||||
import "github.com/prometheus/prometheus/pkg/labels"
|
||||
|
||||
// Exemplar is additional information associated with a time series.
|
||||
type Exemplar struct {
|
||||
Labels labels.Labels
|
||||
Value float64
|
||||
HasTs bool
|
||||
Ts int64
|
||||
}
|
||||
297
vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go
generated
vendored
Normal file
297
vendor/github.com/prometheus/prometheus/pkg/rulefmt/rulefmt.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rulefmt
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/model"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/timestamp"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/template"
|
||||
)
|
||||
|
||||
// Error represents semantic errors on parsing rule groups.
|
||||
type Error struct {
|
||||
Group string
|
||||
Rule int
|
||||
RuleName string
|
||||
Err WrappedError
|
||||
}
|
||||
|
||||
// WrappedError wraps error with the yaml node which can be used to represent
|
||||
// the line and column numbers of the error.
|
||||
type WrappedError struct {
|
||||
err error
|
||||
node *yaml.Node
|
||||
nodeAlt *yaml.Node
|
||||
}
|
||||
|
||||
func (err *Error) Error() string {
|
||||
if err.Err.nodeAlt != nil {
|
||||
return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error()
|
||||
} else if err.Err.node != nil {
|
||||
return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error()
|
||||
}
|
||||
return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error()
|
||||
}
|
||||
|
||||
// RuleGroups is a set of rule groups that are typically exposed in a file.
|
||||
type RuleGroups struct {
|
||||
Groups []RuleGroup `yaml:"groups"`
|
||||
}
|
||||
|
||||
type ruleGroups struct {
|
||||
Groups []yaml.Node `yaml:"groups"`
|
||||
}
|
||||
|
||||
// Validate validates all rules in the rule groups.
|
||||
func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
|
||||
set := map[string]struct{}{}
|
||||
|
||||
for j, g := range g.Groups {
|
||||
if g.Name == "" {
|
||||
errs = append(errs, errors.Errorf("%d:%d: Groupname should not be empty", node.Groups[j].Line, node.Groups[j].Column))
|
||||
}
|
||||
|
||||
if _, ok := set[g.Name]; ok {
|
||||
errs = append(
|
||||
errs,
|
||||
errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
|
||||
)
|
||||
}
|
||||
|
||||
set[g.Name] = struct{}{}
|
||||
|
||||
for i, r := range g.Rules {
|
||||
for _, node := range r.Validate() {
|
||||
var ruleName yaml.Node
|
||||
if r.Alert.Value != "" {
|
||||
ruleName = r.Alert
|
||||
} else {
|
||||
ruleName = r.Record
|
||||
}
|
||||
errs = append(errs, &Error{
|
||||
Group: g.Name,
|
||||
Rule: i,
|
||||
RuleName: ruleName.Value,
|
||||
Err: node,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
|
||||
type RuleGroup struct {
|
||||
Name string `yaml:"name"`
|
||||
Interval model.Duration `yaml:"interval,omitempty"`
|
||||
Rules []RuleNode `yaml:"rules"`
|
||||
}
|
||||
|
||||
// Rule describes an alerting or recording rule.
|
||||
type Rule struct {
|
||||
Record string `yaml:"record,omitempty"`
|
||||
Alert string `yaml:"alert,omitempty"`
|
||||
Expr string `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules.
|
||||
type RuleNode struct {
|
||||
Record yaml.Node `yaml:"record,omitempty"`
|
||||
Alert yaml.Node `yaml:"alert,omitempty"`
|
||||
Expr yaml.Node `yaml:"expr"`
|
||||
For model.Duration `yaml:"for,omitempty"`
|
||||
Labels map[string]string `yaml:"labels,omitempty"`
|
||||
Annotations map[string]string `yaml:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
// Validate the rule and return a list of encountered errors.
|
||||
func (r *RuleNode) Validate() (nodes []WrappedError) {
|
||||
if r.Record.Value != "" && r.Alert.Value != "" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("only one of 'record' and 'alert' must be set"),
|
||||
node: &r.Record,
|
||||
nodeAlt: &r.Alert,
|
||||
})
|
||||
}
|
||||
if r.Record.Value == "" && r.Alert.Value == "" {
|
||||
if r.Record.Value == "0" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("one of 'record' or 'alert' must be set"),
|
||||
node: &r.Alert,
|
||||
})
|
||||
} else {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("one of 'record' or 'alert' must be set"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if r.Expr.Value == "" {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("field 'expr' must be set in rule"),
|
||||
node: &r.Expr,
|
||||
})
|
||||
} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Wrapf(err, "could not parse expression"),
|
||||
node: &r.Expr,
|
||||
})
|
||||
}
|
||||
if r.Record.Value != "" {
|
||||
if len(r.Annotations) > 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid field 'annotations' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if r.For != 0 {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid field 'for' in recording rule"),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid recording rule name: %s", r.Record.Value),
|
||||
node: &r.Record,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range r.Labels {
|
||||
if !model.LabelName(k).IsValid() {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid label name: %s", k),
|
||||
})
|
||||
}
|
||||
|
||||
if !model.LabelValue(v).IsValid() {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid label value: %s", v),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for k := range r.Annotations {
|
||||
if !model.LabelName(k).IsValid() {
|
||||
nodes = append(nodes, WrappedError{
|
||||
err: errors.Errorf("invalid annotation name: %s", k),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, err := range testTemplateParsing(r) {
|
||||
nodes = append(nodes, WrappedError{err: err})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// testTemplateParsing checks if the templates used in labels and annotations
|
||||
// of the alerting rules are parsed correctly.
|
||||
func testTemplateParsing(rl *RuleNode) (errs []error) {
|
||||
if rl.Alert.Value == "" {
|
||||
// Not an alerting rule.
|
||||
return errs
|
||||
}
|
||||
|
||||
// Trying to parse templates.
|
||||
tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0)
|
||||
defs := []string{
|
||||
"{{$labels := .Labels}}",
|
||||
"{{$externalLabels := .ExternalLabels}}",
|
||||
"{{$value := .Value}}",
|
||||
}
|
||||
parseTest := func(text string) error {
|
||||
tmpl := template.NewTemplateExpander(
|
||||
context.TODO(),
|
||||
strings.Join(append(defs, text), ""),
|
||||
"__alert_"+rl.Alert.Value,
|
||||
tmplData,
|
||||
model.Time(timestamp.FromTime(time.Now())),
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
return tmpl.ParseTest()
|
||||
}
|
||||
|
||||
// Parsing Labels.
|
||||
for k, val := range rl.Labels {
|
||||
err := parseTest(val)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "label %q", k))
|
||||
}
|
||||
}
|
||||
|
||||
// Parsing Annotations.
|
||||
for k, val := range rl.Annotations {
|
||||
err := parseTest(val)
|
||||
if err != nil {
|
||||
errs = append(errs, errors.Wrapf(err, "annotation %q", k))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// Parse parses and validates a set of rules.
|
||||
func Parse(content []byte) (*RuleGroups, []error) {
|
||||
var (
|
||||
groups RuleGroups
|
||||
node ruleGroups
|
||||
errs []error
|
||||
)
|
||||
|
||||
err := yaml.Unmarshal(content, &groups)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
err = yaml.Unmarshal(content, &node)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
return &groups, groups.Validate(node)
|
||||
}
|
||||
|
||||
// ParseFile reads and parses rules from a file.
|
||||
func ParseFile(file string) (*RuleGroups, []error) {
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, []error{errors.Wrap(err, file)}
|
||||
}
|
||||
rgs, errs := Parse(b)
|
||||
for i := range errs {
|
||||
errs[i] = errors.Wrap(errs[i], file)
|
||||
}
|
||||
return rgs, errs
|
||||
}
|
||||
6
vendor/github.com/prometheus/prometheus/pkg/textparse/README.md
generated
vendored
Normal file
6
vendor/github.com/prometheus/prometheus/pkg/textparse/README.md
generated
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
# Making changes to textparse lexers
|
||||
In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command:
|
||||
`golex -o=promlex.l.go promlex.l`
|
||||
|
||||
Note that you need golex installed:
|
||||
`go get -u modernc.org/golex`
|
||||
96
vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go
generated
vendored
Normal file
96
vendor/github.com/prometheus/prometheus/pkg/textparse/interface.go
generated
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"mime"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/exemplar"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
)
|
||||
|
||||
// Parser parses samples from a byte slice of samples in the official
|
||||
// Prometheus and OpenMetrics text exposition formats.
|
||||
type Parser interface {
|
||||
// Series returns the bytes of the series, the timestamp if set, and the value
|
||||
// of the current sample.
|
||||
Series() ([]byte, *int64, float64)
|
||||
|
||||
// Help returns the metric name and help text in the current entry.
|
||||
// Must only be called after Next returned a help entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
Help() ([]byte, []byte)
|
||||
|
||||
// Type returns the metric name and type in the current entry.
|
||||
// Must only be called after Next returned a type entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
Type() ([]byte, MetricType)
|
||||
|
||||
// Unit returns the metric name and unit in the current entry.
|
||||
// Must only be called after Next returned a unit entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
Unit() ([]byte, []byte)
|
||||
|
||||
// Comment returns the text of the current comment.
|
||||
// Must only be called after Next returned a comment entry.
|
||||
// The returned byte slice becomes invalid after the next call to Next.
|
||||
Comment() []byte
|
||||
|
||||
// Metric writes the labels of the current sample into the passed labels.
|
||||
// It returns the string from which the metric was parsed.
|
||||
Metric(l *labels.Labels) string
|
||||
|
||||
// Exemplar writes the exemplar of the current sample into the passed
|
||||
// exemplar. It returns if an exemplar exists or not.
|
||||
Exemplar(l *exemplar.Exemplar) bool
|
||||
|
||||
// Next advances the parser to the next sample. It returns false if no
|
||||
// more samples were read or an error occurred.
|
||||
Next() (Entry, error)
|
||||
}
|
||||
|
||||
// New returns a new parser of the byte slice.
|
||||
func New(b []byte, contentType string) Parser {
|
||||
mediaType, _, err := mime.ParseMediaType(contentType)
|
||||
if err == nil && mediaType == "application/openmetrics-text" {
|
||||
return NewOpenMetricsParser(b)
|
||||
}
|
||||
return NewPromParser(b)
|
||||
}
|
||||
|
||||
// Entry represents the type of a parsed entry.
|
||||
type Entry int
|
||||
|
||||
const (
|
||||
EntryInvalid Entry = -1
|
||||
EntryType Entry = 0
|
||||
EntryHelp Entry = 1
|
||||
EntrySeries Entry = 2
|
||||
EntryComment Entry = 3
|
||||
EntryUnit Entry = 4
|
||||
)
|
||||
|
||||
// MetricType represents metric type values.
|
||||
type MetricType string
|
||||
|
||||
const (
|
||||
MetricTypeCounter = "counter"
|
||||
MetricTypeGauge = "gauge"
|
||||
MetricTypeHistogram = "histogram"
|
||||
MetricTypeGaugeHistogram = "gaugehistogram"
|
||||
MetricTypeSummary = "summary"
|
||||
MetricTypeInfo = "info"
|
||||
MetricTypeStateset = "stateset"
|
||||
MetricTypeUnknown = "unknown"
|
||||
)
|
||||
80
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l
generated
vendored
Normal file
80
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
%{
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Lex is called by the parser generated by "go tool yacc" to obtain each
|
||||
// token. The method is opened before the matching rules block and closed at
|
||||
// the end of the file.
|
||||
func (l *openMetricsLexer) Lex() token {
|
||||
if l.i >= len(l.b) {
|
||||
return tEOF
|
||||
}
|
||||
c := l.b[l.i]
|
||||
l.start = l.i
|
||||
|
||||
%}
|
||||
|
||||
D [0-9]
|
||||
L [a-zA-Z_]
|
||||
M [a-zA-Z_:]
|
||||
C [^\n]
|
||||
S [ ]
|
||||
|
||||
%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp
|
||||
|
||||
%yyc c
|
||||
%yyn c = l.next()
|
||||
%yyt l.state
|
||||
|
||||
|
||||
%%
|
||||
|
||||
#{S} l.state = sComment
|
||||
<sComment>HELP{S} l.state = sMeta1; return tHelp
|
||||
<sComment>TYPE{S} l.state = sMeta1; return tType
|
||||
<sComment>UNIT{S} l.state = sMeta1; return tUnit
|
||||
<sComment>"EOF"\n? l.state = sInit; return tEOFWord
|
||||
<sMeta1>{M}({M}|{D})* l.state = sMeta2; return tMName
|
||||
<sMeta2>{S}{C}*\n l.state = sInit; return tText
|
||||
|
||||
{M}({M}|{D})* l.state = sValue; return tMName
|
||||
<sValue>\{ l.state = sLabels; return tBraceOpen
|
||||
<sLabels>{L}({L}|{D})* return tLName
|
||||
<sLabels>\} l.state = sValue; return tBraceClose
|
||||
<sLabels>= l.state = sLValue; return tEqual
|
||||
<sLabels>, return tComma
|
||||
<sLValue>\"(\\.|[^\\"\n])*\" l.state = sLabels; return tLValue
|
||||
<sValue>{S}[^ \n]+ l.state = sTimestamp; return tValue
|
||||
<sTimestamp>{S}[^ \n]+ return tTimestamp
|
||||
<sTimestamp>\n l.state = sInit; return tLinebreak
|
||||
<sTimestamp>{S}#{S}\{ l.state = sExemplar; return tComment
|
||||
|
||||
<sExemplar>{L}({L}|{D})* return tLName
|
||||
<sExemplar>\} l.state = sEValue; return tBraceClose
|
||||
<sExemplar>= l.state = sEValue; return tEqual
|
||||
<sEValue>\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue
|
||||
<sExemplar>, return tComma
|
||||
<sEValue>{S}[^ \n]+ l.state = sETimestamp; return tValue
|
||||
<sETimestamp>{S}[^ \n]+ return tTimestamp
|
||||
<sETimestamp>\n l.state = sInit; return tLinebreak
|
||||
|
||||
%%
|
||||
|
||||
return tInvalid
|
||||
}
|
||||
762
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go
generated
vendored
Normal file
762
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricslex.l.go
generated
vendored
Normal file
@@ -0,0 +1,762 @@
|
||||
// Code generated by golex. DO NOT EDIT.
|
||||
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Lex is called by the parser generated by "go tool yacc" to obtain each
|
||||
// token. The method is opened before the matching rules block and closed at
|
||||
// the end of the file.
|
||||
func (l *openMetricsLexer) Lex() token {
|
||||
if l.i >= len(l.b) {
|
||||
return tEOF
|
||||
}
|
||||
c := l.b[l.i]
|
||||
l.start = l.i
|
||||
|
||||
yystate0:
|
||||
|
||||
switch yyt := l.state; yyt {
|
||||
default:
|
||||
panic(fmt.Errorf(`invalid start condition %d`, yyt))
|
||||
case 0: // start condition: INITIAL
|
||||
goto yystart1
|
||||
case 1: // start condition: sComment
|
||||
goto yystart5
|
||||
case 2: // start condition: sMeta1
|
||||
goto yystart25
|
||||
case 3: // start condition: sMeta2
|
||||
goto yystart27
|
||||
case 4: // start condition: sLabels
|
||||
goto yystart30
|
||||
case 5: // start condition: sLValue
|
||||
goto yystart35
|
||||
case 6: // start condition: sValue
|
||||
goto yystart39
|
||||
case 7: // start condition: sTimestamp
|
||||
goto yystart43
|
||||
case 8: // start condition: sExemplar
|
||||
goto yystart50
|
||||
case 9: // start condition: sEValue
|
||||
goto yystart55
|
||||
case 10: // start condition: sETimestamp
|
||||
goto yystart61
|
||||
}
|
||||
|
||||
goto yystate0 // silence unused label error
|
||||
goto yystate1 // silence unused label error
|
||||
yystate1:
|
||||
c = l.next()
|
||||
yystart1:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '#':
|
||||
goto yystate2
|
||||
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate4
|
||||
}
|
||||
|
||||
yystate2:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate3
|
||||
}
|
||||
|
||||
yystate3:
|
||||
c = l.next()
|
||||
goto yyrule1
|
||||
|
||||
yystate4:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule8
|
||||
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate4
|
||||
}
|
||||
|
||||
goto yystate5 // silence unused label error
|
||||
yystate5:
|
||||
c = l.next()
|
||||
yystart5:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'E':
|
||||
goto yystate6
|
||||
case c == 'H':
|
||||
goto yystate10
|
||||
case c == 'T':
|
||||
goto yystate15
|
||||
case c == 'U':
|
||||
goto yystate20
|
||||
}
|
||||
|
||||
yystate6:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'O':
|
||||
goto yystate7
|
||||
}
|
||||
|
||||
yystate7:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'F':
|
||||
goto yystate8
|
||||
}
|
||||
|
||||
yystate8:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule5
|
||||
case c == '\n':
|
||||
goto yystate9
|
||||
}
|
||||
|
||||
yystate9:
|
||||
c = l.next()
|
||||
goto yyrule5
|
||||
|
||||
yystate10:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'E':
|
||||
goto yystate11
|
||||
}
|
||||
|
||||
yystate11:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'L':
|
||||
goto yystate12
|
||||
}
|
||||
|
||||
yystate12:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'P':
|
||||
goto yystate13
|
||||
}
|
||||
|
||||
yystate13:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate14
|
||||
}
|
||||
|
||||
yystate14:
|
||||
c = l.next()
|
||||
goto yyrule2
|
||||
|
||||
yystate15:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'Y':
|
||||
goto yystate16
|
||||
}
|
||||
|
||||
yystate16:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'P':
|
||||
goto yystate17
|
||||
}
|
||||
|
||||
yystate17:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'E':
|
||||
goto yystate18
|
||||
}
|
||||
|
||||
yystate18:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate19
|
||||
}
|
||||
|
||||
yystate19:
|
||||
c = l.next()
|
||||
goto yyrule3
|
||||
|
||||
yystate20:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'N':
|
||||
goto yystate21
|
||||
}
|
||||
|
||||
yystate21:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'I':
|
||||
goto yystate22
|
||||
}
|
||||
|
||||
yystate22:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'T':
|
||||
goto yystate23
|
||||
}
|
||||
|
||||
yystate23:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate24
|
||||
}
|
||||
|
||||
yystate24:
|
||||
c = l.next()
|
||||
goto yyrule4
|
||||
|
||||
goto yystate25 // silence unused label error
|
||||
yystate25:
|
||||
c = l.next()
|
||||
yystart25:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate26
|
||||
}
|
||||
|
||||
yystate26:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule6
|
||||
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate26
|
||||
}
|
||||
|
||||
goto yystate27 // silence unused label error
|
||||
yystate27:
|
||||
c = l.next()
|
||||
yystart27:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate28
|
||||
}
|
||||
|
||||
yystate28:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '\n':
|
||||
goto yystate29
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
|
||||
goto yystate28
|
||||
}
|
||||
|
||||
yystate29:
|
||||
c = l.next()
|
||||
goto yyrule7
|
||||
|
||||
goto yystate30 // silence unused label error
|
||||
yystate30:
|
||||
c = l.next()
|
||||
yystart30:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ',':
|
||||
goto yystate31
|
||||
case c == '=':
|
||||
goto yystate32
|
||||
case c == '}':
|
||||
goto yystate34
|
||||
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate33
|
||||
}
|
||||
|
||||
yystate31:
|
||||
c = l.next()
|
||||
goto yyrule13
|
||||
|
||||
yystate32:
|
||||
c = l.next()
|
||||
goto yyrule12
|
||||
|
||||
yystate33:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule10
|
||||
case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate33
|
||||
}
|
||||
|
||||
yystate34:
|
||||
c = l.next()
|
||||
goto yyrule11
|
||||
|
||||
goto yystate35 // silence unused label error
|
||||
yystate35:
|
||||
c = l.next()
|
||||
yystart35:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '"':
|
||||
goto yystate36
|
||||
}
|
||||
|
||||
yystate36:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '"':
|
||||
goto yystate37
|
||||
case c == '\\':
|
||||
goto yystate38
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
|
||||
goto yystate36
|
||||
}
|
||||
|
||||
yystate37:
|
||||
c = l.next()
|
||||
goto yyrule14
|
||||
|
||||
yystate38:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
|
||||
goto yystate36
|
||||
}
|
||||
|
||||
goto yystate39 // silence unused label error
|
||||
yystate39:
|
||||
c = l.next()
|
||||
yystart39:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate40
|
||||
case c == '{':
|
||||
goto yystate42
|
||||
}
|
||||
|
||||
yystate40:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate41
|
||||
}
|
||||
|
||||
yystate41:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule15
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate41
|
||||
}
|
||||
|
||||
yystate42:
|
||||
c = l.next()
|
||||
goto yyrule9
|
||||
|
||||
goto yystate43 // silence unused label error
|
||||
yystate43:
|
||||
c = l.next()
|
||||
yystart43:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate45
|
||||
case c == '\n':
|
||||
goto yystate44
|
||||
}
|
||||
|
||||
yystate44:
|
||||
c = l.next()
|
||||
goto yyrule17
|
||||
|
||||
yystate45:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '#':
|
||||
goto yystate47
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ':
|
||||
goto yystate46
|
||||
}
|
||||
|
||||
yystate46:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule16
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate46
|
||||
}
|
||||
|
||||
yystate47:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule16
|
||||
case c == ' ':
|
||||
goto yystate48
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate46
|
||||
}
|
||||
|
||||
yystate48:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '{':
|
||||
goto yystate49
|
||||
}
|
||||
|
||||
yystate49:
|
||||
c = l.next()
|
||||
goto yyrule18
|
||||
|
||||
goto yystate50 // silence unused label error
|
||||
yystate50:
|
||||
c = l.next()
|
||||
yystart50:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ',':
|
||||
goto yystate51
|
||||
case c == '=':
|
||||
goto yystate52
|
||||
case c == '}':
|
||||
goto yystate54
|
||||
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate53
|
||||
}
|
||||
|
||||
yystate51:
|
||||
c = l.next()
|
||||
goto yyrule23
|
||||
|
||||
yystate52:
|
||||
c = l.next()
|
||||
goto yyrule21
|
||||
|
||||
yystate53:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule19
|
||||
case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate53
|
||||
}
|
||||
|
||||
yystate54:
|
||||
c = l.next()
|
||||
goto yyrule20
|
||||
|
||||
goto yystate55 // silence unused label error
|
||||
yystate55:
|
||||
c = l.next()
|
||||
yystart55:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate56
|
||||
case c == '"':
|
||||
goto yystate58
|
||||
}
|
||||
|
||||
yystate56:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate57
|
||||
}
|
||||
|
||||
yystate57:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule24
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate57
|
||||
}
|
||||
|
||||
yystate58:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '"':
|
||||
goto yystate59
|
||||
case c == '\\':
|
||||
goto yystate60
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
|
||||
goto yystate58
|
||||
}
|
||||
|
||||
yystate59:
|
||||
c = l.next()
|
||||
goto yyrule22
|
||||
|
||||
yystate60:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
|
||||
goto yystate58
|
||||
}
|
||||
|
||||
goto yystate61 // silence unused label error
|
||||
yystate61:
|
||||
c = l.next()
|
||||
yystart61:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ' ':
|
||||
goto yystate63
|
||||
case c == '\n':
|
||||
goto yystate62
|
||||
}
|
||||
|
||||
yystate62:
|
||||
c = l.next()
|
||||
goto yyrule26
|
||||
|
||||
yystate63:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate64
|
||||
}
|
||||
|
||||
yystate64:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule25
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate64
|
||||
}
|
||||
|
||||
yyrule1: // #{S}
|
||||
{
|
||||
l.state = sComment
|
||||
goto yystate0
|
||||
}
|
||||
yyrule2: // HELP{S}
|
||||
{
|
||||
l.state = sMeta1
|
||||
return tHelp
|
||||
goto yystate0
|
||||
}
|
||||
yyrule3: // TYPE{S}
|
||||
{
|
||||
l.state = sMeta1
|
||||
return tType
|
||||
goto yystate0
|
||||
}
|
||||
yyrule4: // UNIT{S}
|
||||
{
|
||||
l.state = sMeta1
|
||||
return tUnit
|
||||
goto yystate0
|
||||
}
|
||||
yyrule5: // "EOF"\n?
|
||||
{
|
||||
l.state = sInit
|
||||
return tEOFWord
|
||||
goto yystate0
|
||||
}
|
||||
yyrule6: // {M}({M}|{D})*
|
||||
{
|
||||
l.state = sMeta2
|
||||
return tMName
|
||||
goto yystate0
|
||||
}
|
||||
yyrule7: // {S}{C}*\n
|
||||
{
|
||||
l.state = sInit
|
||||
return tText
|
||||
goto yystate0
|
||||
}
|
||||
yyrule8: // {M}({M}|{D})*
|
||||
{
|
||||
l.state = sValue
|
||||
return tMName
|
||||
goto yystate0
|
||||
}
|
||||
yyrule9: // \{
|
||||
{
|
||||
l.state = sLabels
|
||||
return tBraceOpen
|
||||
goto yystate0
|
||||
}
|
||||
yyrule10: // {L}({L}|{D})*
|
||||
{
|
||||
return tLName
|
||||
}
|
||||
yyrule11: // \}
|
||||
{
|
||||
l.state = sValue
|
||||
return tBraceClose
|
||||
goto yystate0
|
||||
}
|
||||
yyrule12: // =
|
||||
{
|
||||
l.state = sLValue
|
||||
return tEqual
|
||||
goto yystate0
|
||||
}
|
||||
yyrule13: // ,
|
||||
{
|
||||
return tComma
|
||||
}
|
||||
yyrule14: // \"(\\.|[^\\"\n])*\"
|
||||
{
|
||||
l.state = sLabels
|
||||
return tLValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule15: // {S}[^ \n]+
|
||||
{
|
||||
l.state = sTimestamp
|
||||
return tValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule16: // {S}[^ \n]+
|
||||
{
|
||||
return tTimestamp
|
||||
}
|
||||
yyrule17: // \n
|
||||
{
|
||||
l.state = sInit
|
||||
return tLinebreak
|
||||
goto yystate0
|
||||
}
|
||||
yyrule18: // {S}#{S}\{
|
||||
{
|
||||
l.state = sExemplar
|
||||
return tComment
|
||||
goto yystate0
|
||||
}
|
||||
yyrule19: // {L}({L}|{D})*
|
||||
{
|
||||
return tLName
|
||||
}
|
||||
yyrule20: // \}
|
||||
{
|
||||
l.state = sEValue
|
||||
return tBraceClose
|
||||
goto yystate0
|
||||
}
|
||||
yyrule21: // =
|
||||
{
|
||||
l.state = sEValue
|
||||
return tEqual
|
||||
goto yystate0
|
||||
}
|
||||
yyrule22: // \"(\\.|[^\\"\n])*\"
|
||||
{
|
||||
l.state = sExemplar
|
||||
return tLValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule23: // ,
|
||||
{
|
||||
return tComma
|
||||
}
|
||||
yyrule24: // {S}[^ \n]+
|
||||
{
|
||||
l.state = sETimestamp
|
||||
return tValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule25: // {S}[^ \n]+
|
||||
{
|
||||
return tTimestamp
|
||||
}
|
||||
yyrule26: // \n
|
||||
{
|
||||
l.state = sInit
|
||||
return tLinebreak
|
||||
goto yystate0
|
||||
}
|
||||
panic("unreachable")
|
||||
|
||||
goto yyabort // silence unused label error
|
||||
|
||||
yyabort: // no lexem recognized
|
||||
|
||||
return tInvalid
|
||||
}
|
||||
477
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go
generated
vendored
Normal file
477
vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go
generated
vendored
Normal file
@@ -0,0 +1,477 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate go get -u modernc.org/golex
|
||||
//go:generate golex -o=openmetricslex.l.go openmetricslex.l
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/exemplar"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/pkg/value"
|
||||
)
|
||||
|
||||
// allowedSuffixes lists the metric name suffixes for which exemplars are
// supported (see validateNameForExemplar).
var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")}
|
||||
|
||||
// openMetricsLexer holds the scanning state for the OpenMetrics text format.
type openMetricsLexer struct {
	b     []byte // input buffer to tokenize
	i     int    // current read position in b
	start int    // start position of the current token
	err   error  // first error encountered, if any
	state int    // current start condition of the generated lexer
}
|
||||
|
||||
// buf returns the buffer of the current token.
|
||||
func (l *openMetricsLexer) buf() []byte {
|
||||
return l.b[l.start:l.i]
|
||||
}
|
||||
|
||||
func (l *openMetricsLexer) cur() byte {
|
||||
if l.i < len(l.b) {
|
||||
return l.b[l.i]
|
||||
}
|
||||
return byte(' ')
|
||||
}
|
||||
|
||||
// next advances the openMetricsLexer to the next character.
|
||||
func (l *openMetricsLexer) next() byte {
|
||||
l.i++
|
||||
if l.i >= len(l.b) {
|
||||
l.err = io.EOF
|
||||
return byte(tEOF)
|
||||
}
|
||||
// Lex struggles with null bytes. If we are in a label value or help string, where
|
||||
// they are allowed, consume them here immediately.
|
||||
for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
|
||||
l.i++
|
||||
if l.i >= len(l.b) {
|
||||
l.err = io.EOF
|
||||
return byte(tEOF)
|
||||
}
|
||||
}
|
||||
return l.b[l.i]
|
||||
}
|
||||
|
||||
// Error records es as the lexer's error.
func (l *openMetricsLexer) Error(es string) {
	l.err = errors.New(es)
}
|
||||
|
||||
// OpenMetricsParser parses samples from a byte slice of samples in the official
// OpenMetrics text exposition format.
// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit
type OpenMetricsParser struct {
	l       *openMetricsLexer
	series  []byte     // raw bytes of the current series (name plus labels)
	text    []byte     // text payload of the current HELP/TYPE/UNIT/comment entry
	mtype   MetricType // metric type from the last TYPE entry
	val     float64    // value of the current sample
	ts      int64      // timestamp of the current sample, in milliseconds
	hasTS   bool       // whether the current sample carries a timestamp
	start   int        // lexer offset at which the current entry started
	offsets []int      // offsets into l.b: name end, then 4 ints per label (name start/end, value start/end)

	eOffsets      []int   // like offsets, but for the exemplar's labels
	exemplar      []byte  // raw bytes of the current exemplar's label set
	exemplarVal   float64 // value of the current exemplar
	exemplarTs    int64   // timestamp of the current exemplar, in milliseconds
	hasExemplarTs bool    // whether the current exemplar carries a timestamp
}
|
||||
|
||||
// NewOpenMetricsParser returns a new parser of the byte slice.
|
||||
func NewOpenMetricsParser(b []byte) Parser {
|
||||
return &OpenMetricsParser{l: &openMetricsLexer{b: b}}
|
||||
}
|
||||
|
||||
// Series returns the bytes of the series, the timestamp if set, and the value
|
||||
// of the current sample.
|
||||
func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
|
||||
if p.hasTS {
|
||||
ts := p.ts
|
||||
return p.series, &ts, p.val
|
||||
}
|
||||
return p.series, nil, p.val
|
||||
}
|
||||
|
||||
// Help returns the metric name and help text in the current entry.
|
||||
// Must only be called after Next returned a help entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
func (p *OpenMetricsParser) Help() ([]byte, []byte) {
|
||||
m := p.l.b[p.offsets[0]:p.offsets[1]]
|
||||
|
||||
// Replacer causes allocations. Replace only when necessary.
|
||||
if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
|
||||
// OpenMetrics always uses the Prometheus format label value escaping.
|
||||
return m, []byte(lvalReplacer.Replace(string(p.text)))
|
||||
}
|
||||
return m, p.text
|
||||
}
|
||||
|
||||
// Type returns the metric name and type in the current entry.
|
||||
// Must only be called after Next returned a type entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
func (p *OpenMetricsParser) Type() ([]byte, MetricType) {
|
||||
return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
|
||||
}
|
||||
|
||||
// Unit returns the metric name and unit in the current entry.
|
||||
// Must only be called after Next returned a unit entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
func (p *OpenMetricsParser) Unit() ([]byte, []byte) {
|
||||
// The Prometheus format does not have units.
|
||||
return p.l.b[p.offsets[0]:p.offsets[1]], p.text
|
||||
}
|
||||
|
||||
// Comment returns the text of the current comment.
// Must only be called after Next returned a comment entry.
// The returned byte slice becomes invalid after the next call to Next.
func (p *OpenMetricsParser) Comment() []byte {
	return p.text
}
|
||||
|
||||
// Metric writes the labels of the current sample into the passed labels.
|
||||
// It returns the string from which the metric was parsed.
|
||||
func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
|
||||
// Allocate the full immutable string immediately, so we just
|
||||
// have to create references on it below.
|
||||
s := string(p.series)
|
||||
|
||||
*l = append(*l, labels.Label{
|
||||
Name: labels.MetricName,
|
||||
Value: s[:p.offsets[0]-p.start],
|
||||
})
|
||||
|
||||
for i := 1; i < len(p.offsets); i += 4 {
|
||||
a := p.offsets[i] - p.start
|
||||
b := p.offsets[i+1] - p.start
|
||||
c := p.offsets[i+2] - p.start
|
||||
d := p.offsets[i+3] - p.start
|
||||
|
||||
// Replacer causes allocations. Replace only when necessary.
|
||||
if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
|
||||
*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
|
||||
continue
|
||||
}
|
||||
*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
|
||||
}
|
||||
|
||||
// Sort labels. We can skip the first entry since the metric name is
|
||||
// already at the right place.
|
||||
sort.Sort((*l)[1:])
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Exemplar writes the exemplar of the current sample into the passed
|
||||
// exemplar. It returns the whether an exemplar exists.
|
||||
func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
|
||||
if len(p.exemplar) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Allocate the full immutable string immediately, so we just
|
||||
// have to create references on it below.
|
||||
s := string(p.exemplar)
|
||||
|
||||
e.Value = p.exemplarVal
|
||||
if p.hasExemplarTs {
|
||||
e.HasTs = true
|
||||
e.Ts = p.exemplarTs
|
||||
}
|
||||
|
||||
for i := 0; i < len(p.eOffsets); i += 4 {
|
||||
a := p.eOffsets[i] - p.start
|
||||
b := p.eOffsets[i+1] - p.start
|
||||
c := p.eOffsets[i+2] - p.start
|
||||
d := p.eOffsets[i+3] - p.start
|
||||
|
||||
e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]})
|
||||
}
|
||||
|
||||
// Sort the labels.
|
||||
sort.Sort(e.Labels)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// nextToken returns the next token from the openMetricsLexer.
|
||||
func (p *OpenMetricsParser) nextToken() token {
|
||||
tok := p.l.Lex()
|
||||
return tok
|
||||
}
|
||||
|
||||
// Next advances the parser to the next entry. It returns EntryInvalid and
// io.EOF when the input has been fully consumed (after a valid "# EOF"),
// or EntryInvalid and a descriptive error when parsing fails.
func (p *OpenMetricsParser) Next() (Entry, error) {
	var err error

	// Reset all per-entry state; the offset/exemplar slices are reused
	// across calls to avoid reallocation.
	p.start = p.l.i
	p.offsets = p.offsets[:0]
	p.eOffsets = p.eOffsets[:0]
	p.exemplar = p.exemplar[:0]
	p.exemplarVal = 0
	p.hasExemplarTs = false

	switch t := p.nextToken(); t {
	case tEOFWord:
		// "# EOF" must be the very last token of the input.
		if t := p.nextToken(); t != tEOF {
			return EntryInvalid, errors.New("unexpected data after # EOF")
		}
		return EntryInvalid, io.EOF
	case tEOF:
		// OpenMetrics requires an explicit "# EOF" terminator.
		return EntryInvalid, errors.New("data does not end with # EOF")
	case tHelp, tType, tUnit:
		// Metadata entry: a metric name followed by free text.
		switch t := p.nextToken(); t {
		case tMName:
			p.offsets = append(p.offsets, p.l.start, p.l.i)
		default:
			return EntryInvalid, parseError("expected metric name after HELP", t)
		}
		switch t := p.nextToken(); t {
		case tText:
			// Strip the first and last byte of the buffered token.
			if len(p.l.buf()) > 1 {
				p.text = p.l.buf()[1 : len(p.l.buf())-1]
			} else {
				p.text = []byte{}
			}
		default:
			return EntryInvalid, parseError("expected text in HELP", t)
		}
		switch t {
		case tType:
			// Map the TYPE text onto the known metric types.
			switch s := yoloString(p.text); s {
			case "counter":
				p.mtype = MetricTypeCounter
			case "gauge":
				p.mtype = MetricTypeGauge
			case "histogram":
				p.mtype = MetricTypeHistogram
			case "gaugehistogram":
				p.mtype = MetricTypeGaugeHistogram
			case "summary":
				p.mtype = MetricTypeSummary
			case "info":
				p.mtype = MetricTypeInfo
			case "stateset":
				p.mtype = MetricTypeStateset
			case "unknown":
				p.mtype = MetricTypeUnknown
			default:
				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
			}
		case tHelp:
			if !utf8.Valid(p.text) {
				return EntryInvalid, errors.New("help text is not a valid utf8 string")
			}
		}
		switch t {
		case tHelp:
			return EntryHelp, nil
		case tType:
			return EntryType, nil
		case tUnit:
			// A non-empty unit must be a '_'-separated suffix of the
			// metric name.
			m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]])
			u := yoloString(p.text)
			if len(u) > 0 {
				if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
					return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m)
				}
			}
			return EntryUnit, nil
		}

	case tMName:
		// Sample entry: name, optional labels, value, then optional
		// timestamp and/or exemplar comment.
		p.offsets = append(p.offsets, p.l.i)
		p.series = p.l.b[p.start:p.l.i]

		t2 := p.nextToken()
		if t2 == tBraceOpen {
			offsets, err := p.parseLVals()
			if err != nil {
				return EntryInvalid, err
			}
			p.offsets = append(p.offsets, offsets...)
			// Extend the series bytes to cover the label set as well.
			p.series = p.l.b[p.start:p.l.i]
			t2 = p.nextToken()
		}
		p.val, err = p.getFloatValue(t2, "metric")
		if err != nil {
			return EntryInvalid, err
		}

		p.hasTS = false
		switch t2 := p.nextToken(); t2 {
		case tEOF:
			return EntryInvalid, errors.New("data does not end with # EOF")
		case tLinebreak:
			break
		case tComment:
			// A trailing comment may carry an exemplar.
			if err := p.parseComment(); err != nil {
				return EntryInvalid, err
			}
		case tTimestamp:
			p.hasTS = true
			var ts float64
			// A float is enough to hold what we need for millisecond resolution.
			if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
				return EntryInvalid, err
			}
			p.ts = int64(ts * 1000)
			switch t3 := p.nextToken(); t3 {
			case tLinebreak:
			case tComment:
				if err := p.parseComment(); err != nil {
					return EntryInvalid, err
				}
			default:
				return EntryInvalid, parseError("expected next entry after timestamp", t3)
			}
		default:
			return EntryInvalid, parseError("expected timestamp or # symbol", t2)
		}
		return EntrySeries, nil

	default:
		err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
	}
	return EntryInvalid, err
}
|
||||
|
||||
// parseComment parses an exemplar attached to the current sample via a
// trailing comment: its label set, value, and optional timestamp.
func (p *OpenMetricsParser) parseComment() error {
	// Validate the name of the metric. It must have _total or _bucket as
	// suffix for exemplars to be supported.
	if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil {
		return err
	}

	// Parse the labels.
	offsets, err := p.parseLVals()
	if err != nil {
		return err
	}
	p.eOffsets = append(p.eOffsets, offsets...)
	p.exemplar = p.l.b[p.start:p.l.i]

	// Get the value.
	p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels")
	if err != nil {
		return err
	}

	// Read the optional timestamp.
	p.hasExemplarTs = false
	switch t2 := p.nextToken(); t2 {
	case tEOF:
		return errors.New("data does not end with # EOF")
	case tLinebreak:
		break
	case tTimestamp:
		p.hasExemplarTs = true
		var ts float64
		// A float is enough to hold what we need for millisecond resolution.
		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
			return err
		}
		p.exemplarTs = int64(ts * 1000)
		// After an exemplar timestamp only a line break may follow.
		switch t3 := p.nextToken(); t3 {
		case tLinebreak:
		default:
			return parseError("expected next entry after exemplar timestamp", t3)
		}
	default:
		return parseError("expected timestamp or comment", t2)
	}
	return nil
}
|
||||
|
||||
// parseLVals parses a label set (the part between braces) and returns
// offsets into the lexer buffer: 4 ints per label (name start/end, then
// unquoted value start/end). It consumes up to and including the closing
// brace.
func (p *OpenMetricsParser) parseLVals() ([]int, error) {
	var offsets []int
	first := true
	for {
		t := p.nextToken()
		switch t {
		case tBraceClose:
			return offsets, nil
		case tComma:
			// A comma is only valid between labels, never before the first.
			if first {
				return nil, parseError("expected label name or left brace", t)
			}
			t = p.nextToken()
			if t != tLName {
				return nil, parseError("expected label name", t)
			}
		case tLName:
			// A bare label name is only valid as the first label.
			if !first {
				return nil, parseError("expected comma", t)
			}
		default:
			if first {
				return nil, parseError("expected label name or left brace", t)
			}
			return nil, parseError("expected comma or left brace", t)

		}
		first = false
		// t is now a label name.

		offsets = append(offsets, p.l.start, p.l.i)

		if t := p.nextToken(); t != tEqual {
			return nil, parseError("expected equal", t)
		}
		if t := p.nextToken(); t != tLValue {
			return nil, parseError("expected label value", t)
		}
		if !utf8.Valid(p.l.buf()) {
			return nil, errors.New("invalid UTF-8 label value")
		}

		// The openMetricsLexer ensures the value string is quoted. Strip first
		// and last character.
		offsets = append(offsets, p.l.start+1, p.l.i-1)
	}
}
|
||||
|
||||
func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
|
||||
if t != tValue {
|
||||
return 0, parseError(fmt.Sprintf("expected value after %v", after), t)
|
||||
}
|
||||
val, err := parseFloat(yoloString(p.l.buf()[1:]))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// Ensure canonical NaN value.
|
||||
if math.IsNaN(p.exemplarVal) {
|
||||
val = math.Float64frombits(value.NormalNaN)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error {
|
||||
for _, suffix := range allowedSuffixes {
|
||||
if bytes.HasSuffix(name, suffix) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("metric name %v does not support exemplars", string(name))
|
||||
}
|
||||
100
vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l
generated
vendored
Normal file
100
vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
%{
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Start conditions (states) of the lexer. sInit is the state at the
// beginning of a line; the others are entered as tokens are recognized.
const (
	sInit = iota
	sComment
	sMeta1
	sMeta2
	sLabels
	sLValue
	sValue
	sTimestamp
)
|
||||
|
||||
// Lex is called by the parser generated by "go tool yacc" to obtain each
|
||||
// token. The method is opened before the matching rules block and closed at
|
||||
// the end of the file.
|
||||
func (l *promlexer) Lex() token {
|
||||
if l.i >= len(l.b) {
|
||||
return tEOF
|
||||
}
|
||||
c := l.b[l.i]
|
||||
l.start = l.i
|
||||
|
||||
%}
|
||||
|
||||
D [0-9]
|
||||
L [a-zA-Z_]
|
||||
M [a-zA-Z_:]
|
||||
C [^\n]
|
||||
|
||||
%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp
|
||||
|
||||
%yyc c
|
||||
%yyn c = l.next()
|
||||
%yyt l.state
|
||||
|
||||
|
||||
%%
|
||||
|
||||
\0 return tEOF
|
||||
\n l.state = sInit; return tLinebreak
|
||||
<*>[ \t]+ return tWhitespace
|
||||
|
||||
#[ \t]+ l.state = sComment
|
||||
# return l.consumeComment()
|
||||
<sComment>HELP[\t ]+ l.state = sMeta1; return tHelp
|
||||
<sComment>TYPE[\t ]+ l.state = sMeta1; return tType
|
||||
<sMeta1>{M}({M}|{D})* l.state = sMeta2; return tMName
|
||||
<sMeta2>{C}* l.state = sInit; return tText
|
||||
|
||||
{M}({M}|{D})* l.state = sValue; return tMName
|
||||
<sValue>\{ l.state = sLabels; return tBraceOpen
|
||||
<sLabels>{L}({L}|{D})* return tLName
|
||||
<sLabels>\} l.state = sValue; return tBraceClose
|
||||
<sLabels>= l.state = sLValue; return tEqual
|
||||
<sLabels>, return tComma
|
||||
<sLValue>\"(\\.|[^\\"])*\" l.state = sLabels; return tLValue
|
||||
<sValue>[^{ \t\n]+ l.state = sTimestamp; return tValue
|
||||
<sTimestamp>{D}+ return tTimestamp
|
||||
<sTimestamp>\n l.state = sInit; return tLinebreak
|
||||
|
||||
%%
|
||||
// Workaround to gobble up comments that started with a HELP or TYPE
|
||||
// prefix. We just consume all characters until we reach a newline.
|
||||
// This saves us from adding disproportionate complexity to the parser.
|
||||
if l.state == sComment {
|
||||
return l.consumeComment()
|
||||
}
|
||||
return tInvalid
|
||||
}
|
||||
|
||||
// consumeComment reads until the end of the line (or input) and emits the
// whole run as a single comment token.
func (l *promlexer) consumeComment() token {
	for c := l.cur(); ; c = l.next() {
		switch c {
		case 0:
			// Input exhausted before a newline.
			return tEOF
		case '\n':
			l.state = sInit
			return tComment
		}
	}
}
|
||||
553
vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l.go
generated
vendored
Normal file
553
vendor/github.com/prometheus/prometheus/pkg/textparse/promlex.l.go
generated
vendored
Normal file
@@ -0,0 +1,553 @@
|
||||
// CAUTION: Generated file - DO NOT EDIT.
|
||||
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
sInit = iota
|
||||
sComment
|
||||
sMeta1
|
||||
sMeta2
|
||||
sLabels
|
||||
sLValue
|
||||
sValue
|
||||
sTimestamp
|
||||
sExemplar
|
||||
sEValue
|
||||
sETimestamp
|
||||
)
|
||||
|
||||
// Lex is called by the parser generated by "go tool yacc" to obtain each
|
||||
// token. The method is opened before the matching rules block and closed at
|
||||
// the end of the file.
|
||||
func (l *promlexer) Lex() token {
|
||||
if l.i >= len(l.b) {
|
||||
return tEOF
|
||||
}
|
||||
c := l.b[l.i]
|
||||
l.start = l.i
|
||||
|
||||
yystate0:
|
||||
|
||||
switch yyt := l.state; yyt {
|
||||
default:
|
||||
panic(errors.Errorf(`invalid start condition %d`, yyt))
|
||||
case 0: // start condition: INITIAL
|
||||
goto yystart1
|
||||
case 1: // start condition: sComment
|
||||
goto yystart8
|
||||
case 2: // start condition: sMeta1
|
||||
goto yystart19
|
||||
case 3: // start condition: sMeta2
|
||||
goto yystart21
|
||||
case 4: // start condition: sLabels
|
||||
goto yystart24
|
||||
case 5: // start condition: sLValue
|
||||
goto yystart29
|
||||
case 6: // start condition: sValue
|
||||
goto yystart33
|
||||
case 7: // start condition: sTimestamp
|
||||
goto yystart36
|
||||
}
|
||||
|
||||
goto yystate0 // silence unused label error
|
||||
goto yystate1 // silence unused label error
|
||||
yystate1:
|
||||
c = l.next()
|
||||
yystart1:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '#':
|
||||
goto yystate5
|
||||
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate7
|
||||
case c == '\n':
|
||||
goto yystate4
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
case c == '\x00':
|
||||
goto yystate2
|
||||
}
|
||||
|
||||
yystate2:
|
||||
c = l.next()
|
||||
goto yyrule1
|
||||
|
||||
yystate3:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule3
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
}
|
||||
|
||||
yystate4:
|
||||
c = l.next()
|
||||
goto yyrule2
|
||||
|
||||
yystate5:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule5
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate6
|
||||
}
|
||||
|
||||
yystate6:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule4
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate6
|
||||
}
|
||||
|
||||
yystate7:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule10
|
||||
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate7
|
||||
}
|
||||
|
||||
goto yystate8 // silence unused label error
|
||||
yystate8:
|
||||
c = l.next()
|
||||
yystart8:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'H':
|
||||
goto yystate9
|
||||
case c == 'T':
|
||||
goto yystate14
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
}
|
||||
|
||||
yystate9:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'E':
|
||||
goto yystate10
|
||||
}
|
||||
|
||||
yystate10:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'L':
|
||||
goto yystate11
|
||||
}
|
||||
|
||||
yystate11:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'P':
|
||||
goto yystate12
|
||||
}
|
||||
|
||||
yystate12:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate13
|
||||
}
|
||||
|
||||
yystate13:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule6
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate13
|
||||
}
|
||||
|
||||
yystate14:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'Y':
|
||||
goto yystate15
|
||||
}
|
||||
|
||||
yystate15:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'P':
|
||||
goto yystate16
|
||||
}
|
||||
|
||||
yystate16:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == 'E':
|
||||
goto yystate17
|
||||
}
|
||||
|
||||
yystate17:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate18
|
||||
}
|
||||
|
||||
yystate18:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule7
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate18
|
||||
}
|
||||
|
||||
goto yystate19 // silence unused label error
|
||||
yystate19:
|
||||
c = l.next()
|
||||
yystart19:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate20
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
}
|
||||
|
||||
yystate20:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule8
|
||||
case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate20
|
||||
}
|
||||
|
||||
goto yystate21 // silence unused label error
|
||||
yystate21:
|
||||
c = l.next()
|
||||
yystart21:
|
||||
switch {
|
||||
default:
|
||||
goto yyrule9
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate23
|
||||
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate22
|
||||
}
|
||||
|
||||
yystate22:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule9
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
|
||||
goto yystate22
|
||||
}
|
||||
|
||||
yystate23:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule3
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate23
|
||||
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
|
||||
goto yystate22
|
||||
}
|
||||
|
||||
goto yystate24 // silence unused label error
|
||||
yystate24:
|
||||
c = l.next()
|
||||
yystart24:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == ',':
|
||||
goto yystate25
|
||||
case c == '=':
|
||||
goto yystate26
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
case c == '}':
|
||||
goto yystate28
|
||||
case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate27
|
||||
}
|
||||
|
||||
yystate25:
|
||||
c = l.next()
|
||||
goto yyrule15
|
||||
|
||||
yystate26:
|
||||
c = l.next()
|
||||
goto yyrule14
|
||||
|
||||
yystate27:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule12
|
||||
case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
|
||||
goto yystate27
|
||||
}
|
||||
|
||||
yystate28:
|
||||
c = l.next()
|
||||
goto yyrule13
|
||||
|
||||
goto yystate29 // silence unused label error
|
||||
yystate29:
|
||||
c = l.next()
|
||||
yystart29:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '"':
|
||||
goto yystate30
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
}
|
||||
|
||||
yystate30:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '"':
|
||||
goto yystate31
|
||||
case c == '\\':
|
||||
goto yystate32
|
||||
case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
|
||||
goto yystate30
|
||||
}
|
||||
|
||||
yystate31:
|
||||
c = l.next()
|
||||
goto yyrule16
|
||||
|
||||
yystate32:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
|
||||
goto yystate30
|
||||
}
|
||||
|
||||
goto yystate33 // silence unused label error
|
||||
yystate33:
|
||||
c = l.next()
|
||||
yystart33:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
case c == '{':
|
||||
goto yystate35
|
||||
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
|
||||
goto yystate34
|
||||
}
|
||||
|
||||
yystate34:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule17
|
||||
case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
|
||||
goto yystate34
|
||||
}
|
||||
|
||||
yystate35:
|
||||
c = l.next()
|
||||
goto yyrule11
|
||||
|
||||
goto yystate36 // silence unused label error
|
||||
yystate36:
|
||||
c = l.next()
|
||||
yystart36:
|
||||
switch {
|
||||
default:
|
||||
goto yyabort
|
||||
case c == '\n':
|
||||
goto yystate37
|
||||
case c == '\t' || c == ' ':
|
||||
goto yystate3
|
||||
case c >= '0' && c <= '9':
|
||||
goto yystate38
|
||||
}
|
||||
|
||||
yystate37:
|
||||
c = l.next()
|
||||
goto yyrule19
|
||||
|
||||
yystate38:
|
||||
c = l.next()
|
||||
switch {
|
||||
default:
|
||||
goto yyrule18
|
||||
case c >= '0' && c <= '9':
|
||||
goto yystate38
|
||||
}
|
||||
|
||||
yyrule1: // \0
|
||||
{
|
||||
return tEOF
|
||||
}
|
||||
yyrule2: // \n
|
||||
{
|
||||
l.state = sInit
|
||||
return tLinebreak
|
||||
goto yystate0
|
||||
}
|
||||
yyrule3: // [ \t]+
|
||||
{
|
||||
return tWhitespace
|
||||
}
|
||||
yyrule4: // #[ \t]+
|
||||
{
|
||||
l.state = sComment
|
||||
goto yystate0
|
||||
}
|
||||
yyrule5: // #
|
||||
{
|
||||
return l.consumeComment()
|
||||
}
|
||||
yyrule6: // HELP[\t ]+
|
||||
{
|
||||
l.state = sMeta1
|
||||
return tHelp
|
||||
goto yystate0
|
||||
}
|
||||
yyrule7: // TYPE[\t ]+
|
||||
{
|
||||
l.state = sMeta1
|
||||
return tType
|
||||
goto yystate0
|
||||
}
|
||||
yyrule8: // {M}({M}|{D})*
|
||||
{
|
||||
l.state = sMeta2
|
||||
return tMName
|
||||
goto yystate0
|
||||
}
|
||||
yyrule9: // {C}*
|
||||
{
|
||||
l.state = sInit
|
||||
return tText
|
||||
goto yystate0
|
||||
}
|
||||
yyrule10: // {M}({M}|{D})*
|
||||
{
|
||||
l.state = sValue
|
||||
return tMName
|
||||
goto yystate0
|
||||
}
|
||||
yyrule11: // \{
|
||||
{
|
||||
l.state = sLabels
|
||||
return tBraceOpen
|
||||
goto yystate0
|
||||
}
|
||||
yyrule12: // {L}({L}|{D})*
|
||||
{
|
||||
return tLName
|
||||
}
|
||||
yyrule13: // \}
|
||||
{
|
||||
l.state = sValue
|
||||
return tBraceClose
|
||||
goto yystate0
|
||||
}
|
||||
yyrule14: // =
|
||||
{
|
||||
l.state = sLValue
|
||||
return tEqual
|
||||
goto yystate0
|
||||
}
|
||||
yyrule15: // ,
|
||||
{
|
||||
return tComma
|
||||
}
|
||||
yyrule16: // \"(\\.|[^\\"])*\"
|
||||
{
|
||||
l.state = sLabels
|
||||
return tLValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule17: // [^{ \t\n]+
|
||||
{
|
||||
l.state = sTimestamp
|
||||
return tValue
|
||||
goto yystate0
|
||||
}
|
||||
yyrule18: // {D}+
|
||||
{
|
||||
return tTimestamp
|
||||
}
|
||||
yyrule19: // \n
|
||||
{
|
||||
l.state = sInit
|
||||
return tLinebreak
|
||||
goto yystate0
|
||||
}
|
||||
panic("unreachable")
|
||||
|
||||
goto yyabort // silence unused label error
|
||||
|
||||
yyabort: // no lexem recognized
|
||||
// Workaround to gobble up comments that started with a HELP or TYPE
|
||||
// prefix. We just consume all characters until we reach a newline.
|
||||
// This saves us from adding disproportionate complexity to the parser.
|
||||
if l.state == sComment {
|
||||
return l.consumeComment()
|
||||
}
|
||||
return tInvalid
|
||||
}
|
||||
|
||||
// consumeComment reads until the end of the line (or input) and emits the
// whole run as a single comment token.
func (l *promlexer) consumeComment() token {
	for c := l.cur(); ; c = l.next() {
		switch c {
		case 0:
			// Input exhausted before a newline.
			return tEOF
		case '\n':
			l.state = sInit
			return tComment
		}
	}
}
|
||||
426
vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go
generated
vendored
Normal file
426
vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go
generated
vendored
Normal file
@@ -0,0 +1,426 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate go get -u modernc.org/golex
|
||||
//go:generate golex -o=promlex.l.go promlex.l
|
||||
|
||||
package textparse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/exemplar"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/pkg/value"
|
||||
)
|
||||
|
||||
// promlexer holds the state of the hand-written lexer for the Prometheus
// text exposition format; its Lex method is generated by golex.
type promlexer struct {
	b     []byte // input buffer; NewPromParser appends a trailing '\n'
	i     int    // index of the current character in b
	start int    // start index of the token currently being lexed
	err   error  // set to io.EOF (or a lexing error) when lexing stops
	state int    // current lexer state (sInit, sLabels, sLValue, ...)
}
|
||||
|
||||
// token identifies a lexical element of the Prometheus text format.
type token int

const (
	tInvalid   token = -1 // unrecognized input
	tEOF       token = 0  // end of input
	tLinebreak token = iota
	tWhitespace
	tHelp
	tType
	tUnit
	tEOFWord
	tText
	tComment
	tBlank
	tMName
	tBraceOpen
	tBraceClose
	tLName
	tLValue
	tComma
	tEqual
	tTimestamp
	tValue
)
|
||||
|
||||
func (t token) String() string {
|
||||
switch t {
|
||||
case tInvalid:
|
||||
return "INVALID"
|
||||
case tEOF:
|
||||
return "EOF"
|
||||
case tLinebreak:
|
||||
return "LINEBREAK"
|
||||
case tWhitespace:
|
||||
return "WHITESPACE"
|
||||
case tHelp:
|
||||
return "HELP"
|
||||
case tType:
|
||||
return "TYPE"
|
||||
case tUnit:
|
||||
return "UNIT"
|
||||
case tEOFWord:
|
||||
return "EOFWORD"
|
||||
case tText:
|
||||
return "TEXT"
|
||||
case tComment:
|
||||
return "COMMENT"
|
||||
case tBlank:
|
||||
return "BLANK"
|
||||
case tMName:
|
||||
return "MNAME"
|
||||
case tBraceOpen:
|
||||
return "BOPEN"
|
||||
case tBraceClose:
|
||||
return "BCLOSE"
|
||||
case tLName:
|
||||
return "LNAME"
|
||||
case tLValue:
|
||||
return "LVALUE"
|
||||
case tEqual:
|
||||
return "EQUAL"
|
||||
case tComma:
|
||||
return "COMMA"
|
||||
case tTimestamp:
|
||||
return "TIMESTAMP"
|
||||
case tValue:
|
||||
return "VALUE"
|
||||
}
|
||||
return fmt.Sprintf("<invalid: %d>", t)
|
||||
}
|
||||
|
||||
// buf returns the bytes of the token currently being lexed, i.e. the
// slice from the token start up to (excluding) the current position.
func (l *promlexer) buf() []byte {
	return l.b[l.start:l.i]
}
|
||||
|
||||
// cur returns the byte at the current position without advancing.
func (l *promlexer) cur() byte {
	return l.b[l.i]
}
|
||||
|
||||
// next advances the promlexer to the next character and returns it.
// At the end of the input it records io.EOF on l.err and returns 0.
func (l *promlexer) next() byte {
	l.i++
	if l.i >= len(l.b) {
		l.err = io.EOF
		return byte(tEOF) // tEOF == 0
	}
	// Lex struggles with null bytes. If we are in a label value or help string, where
	// they are allowed, consume them here immediately.
	// NOTE(review): this assumes the buffer does not end in a null byte while in
	// one of these states (NewPromParser appends '\n'); otherwise l.i could run
	// past the end of b — confirm against the lexer states.
	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
		l.i++
	}
	return l.b[l.i]
}
|
||||
|
||||
// Error implements the error hook expected by the golex-generated lexer,
// recording the message on the promlexer for later inspection.
func (l *promlexer) Error(es string) {
	l.err = errors.New(es)
}
|
||||
|
||||
// PromParser parses samples from a byte slice of samples in the official
// Prometheus text exposition format.
type PromParser struct {
	l       *promlexer
	series  []byte     // raw bytes of the current series (name plus label set)
	text    []byte     // help/comment text of the current entry
	mtype   MetricType // metric type from the most recent TYPE line
	val     float64    // value of the current sample
	ts      int64      // timestamp of the current sample; only valid if hasTS
	hasTS   bool       // whether the current sample carries a timestamp
	start   int        // offset into l.b where the current entry begins
	offsets []int      // offsets into l.b: metric name end, then label name/value bounds
}
|
||||
|
||||
// NewPromParser returns a new parser of the byte slice.
|
||||
func NewPromParser(b []byte) Parser {
|
||||
return &PromParser{l: &promlexer{b: append(b, '\n')}}
|
||||
}
|
||||
|
||||
// Series returns the bytes of the series, the timestamp if set, and the value
|
||||
// of the current sample.
|
||||
func (p *PromParser) Series() ([]byte, *int64, float64) {
|
||||
if p.hasTS {
|
||||
return p.series, &p.ts, p.val
|
||||
}
|
||||
return p.series, nil, p.val
|
||||
}
|
||||
|
||||
// Help returns the metric name and help text in the current entry.
|
||||
// Must only be called after Next returned a help entry.
|
||||
// The returned byte slices become invalid after the next call to Next.
|
||||
func (p *PromParser) Help() ([]byte, []byte) {
|
||||
m := p.l.b[p.offsets[0]:p.offsets[1]]
|
||||
|
||||
// Replacer causes allocations. Replace only when necessary.
|
||||
if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
|
||||
return m, []byte(helpReplacer.Replace(string(p.text)))
|
||||
}
|
||||
return m, p.text
|
||||
}
|
||||
|
||||
// Type returns the metric name and type in the current entry.
// Must only be called after Next returned a type entry.
// The returned byte slices become invalid after the next call to Next.
func (p *PromParser) Type() ([]byte, MetricType) {
	// offsets[0:2] bound the metric name recorded while parsing the TYPE line.
	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
}
|
||||
|
||||
// Unit returns the metric name and unit in the current entry.
// Must only be called after Next returned a unit entry.
// The returned byte slices become invalid after the next call to Next.
func (p *PromParser) Unit() ([]byte, []byte) {
	// The Prometheus format does not have units, so this is always empty.
	return nil, nil
}
|
||||
|
||||
// Comment returns the text of the current comment.
// Must only be called after Next returned a comment entry.
// The returned byte slice becomes invalid after the next call to Next.
func (p *PromParser) Comment() []byte {
	return p.text
}
|
||||
|
||||
// Metric writes the labels of the current sample into the passed labels.
// It returns the string from which the metric was parsed.
func (p *PromParser) Metric(l *labels.Labels) string {
	// Allocate the full immutable string immediately, so we just
	// have to create references on it below.
	s := string(p.series)

	// offsets[0] marks the end of the metric name; p.start is where the
	// series bytes begin, so the name is the prefix of s up to that point.
	*l = append(*l, labels.Label{
		Name:  labels.MetricName,
		Value: s[:p.offsets[0]-p.start],
	})

	// Each label occupies four consecutive offsets:
	// name start, name end, value start, value end (see parseLVals).
	for i := 1; i < len(p.offsets); i += 4 {
		a := p.offsets[i] - p.start
		b := p.offsets[i+1] - p.start
		c := p.offsets[i+2] - p.start
		d := p.offsets[i+3] - p.start

		// Replacer causes allocations. Replace only when necessary.
		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
			continue
		}
		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
	}

	// Sort labels to maintain the sorted labels invariant.
	sort.Sort(*l)

	return s
}
|
||||
|
||||
// Exemplar writes the exemplar of the current sample into the passed
// exemplar. It returns if an exemplar exists.
// The Prometheus text format has no exemplar syntax, so this always
// reports false.
func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
	return false
}
|
||||
|
||||
// nextToken returns the next token from the promlexer. It skips over tabs
|
||||
// and spaces.
|
||||
func (p *PromParser) nextToken() token {
|
||||
for {
|
||||
if tok := p.l.Lex(); tok != tWhitespace {
|
||||
return tok
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseError builds a parse failure message, appending the token that
// was actually encountered to the expectation description.
func parseError(exp string, got token) error {
	return errors.Errorf("%s, got %q", exp, got)
}
|
||||
|
||||
// Next advances the parser to the next sample. It returns false if no
|
||||
// more samples were read or an error occurred.
|
||||
func (p *PromParser) Next() (Entry, error) {
|
||||
var err error
|
||||
|
||||
p.start = p.l.i
|
||||
p.offsets = p.offsets[:0]
|
||||
|
||||
switch t := p.nextToken(); t {
|
||||
case tEOF:
|
||||
return EntryInvalid, io.EOF
|
||||
case tLinebreak:
|
||||
// Allow full blank lines.
|
||||
return p.Next()
|
||||
|
||||
case tHelp, tType:
|
||||
switch t := p.nextToken(); t {
|
||||
case tMName:
|
||||
p.offsets = append(p.offsets, p.l.start, p.l.i)
|
||||
default:
|
||||
return EntryInvalid, parseError("expected metric name after HELP", t)
|
||||
}
|
||||
switch t := p.nextToken(); t {
|
||||
case tText:
|
||||
if len(p.l.buf()) > 1 {
|
||||
p.text = p.l.buf()[1:]
|
||||
} else {
|
||||
p.text = []byte{}
|
||||
}
|
||||
default:
|
||||
return EntryInvalid, parseError("expected text in HELP", t)
|
||||
}
|
||||
switch t {
|
||||
case tType:
|
||||
switch s := yoloString(p.text); s {
|
||||
case "counter":
|
||||
p.mtype = MetricTypeCounter
|
||||
case "gauge":
|
||||
p.mtype = MetricTypeGauge
|
||||
case "histogram":
|
||||
p.mtype = MetricTypeHistogram
|
||||
case "summary":
|
||||
p.mtype = MetricTypeSummary
|
||||
case "untyped":
|
||||
p.mtype = MetricTypeUnknown
|
||||
default:
|
||||
return EntryInvalid, errors.Errorf("invalid metric type %q", s)
|
||||
}
|
||||
case tHelp:
|
||||
if !utf8.Valid(p.text) {
|
||||
return EntryInvalid, errors.Errorf("help text is not a valid utf8 string")
|
||||
}
|
||||
}
|
||||
if t := p.nextToken(); t != tLinebreak {
|
||||
return EntryInvalid, parseError("linebreak expected after metadata", t)
|
||||
}
|
||||
switch t {
|
||||
case tHelp:
|
||||
return EntryHelp, nil
|
||||
case tType:
|
||||
return EntryType, nil
|
||||
}
|
||||
case tComment:
|
||||
p.text = p.l.buf()
|
||||
if t := p.nextToken(); t != tLinebreak {
|
||||
return EntryInvalid, parseError("linebreak expected after comment", t)
|
||||
}
|
||||
return EntryComment, nil
|
||||
|
||||
case tMName:
|
||||
p.offsets = append(p.offsets, p.l.i)
|
||||
p.series = p.l.b[p.start:p.l.i]
|
||||
|
||||
t2 := p.nextToken()
|
||||
if t2 == tBraceOpen {
|
||||
if err := p.parseLVals(); err != nil {
|
||||
return EntryInvalid, err
|
||||
}
|
||||
p.series = p.l.b[p.start:p.l.i]
|
||||
t2 = p.nextToken()
|
||||
}
|
||||
if t2 != tValue {
|
||||
return EntryInvalid, parseError("expected value after metric", t)
|
||||
}
|
||||
if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
|
||||
return EntryInvalid, err
|
||||
}
|
||||
// Ensure canonical NaN value.
|
||||
if math.IsNaN(p.val) {
|
||||
p.val = math.Float64frombits(value.NormalNaN)
|
||||
}
|
||||
p.hasTS = false
|
||||
switch p.nextToken() {
|
||||
case tLinebreak:
|
||||
break
|
||||
case tTimestamp:
|
||||
p.hasTS = true
|
||||
if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
|
||||
return EntryInvalid, err
|
||||
}
|
||||
if t2 := p.nextToken(); t2 != tLinebreak {
|
||||
return EntryInvalid, parseError("expected next entry after timestamp", t)
|
||||
}
|
||||
default:
|
||||
return EntryInvalid, parseError("expected timestamp or new record", t)
|
||||
}
|
||||
return EntrySeries, nil
|
||||
|
||||
default:
|
||||
err = errors.Errorf("%q is not a valid start token", t)
|
||||
}
|
||||
return EntryInvalid, err
|
||||
}
|
||||
|
||||
// parseLVals consumes the label set between the braces of a series line,
// up to and including the closing brace. For every label it appends four
// offsets to p.offsets: name start, name end, value start, value end
// (with the surrounding quotes stripped) — the layout Metric relies on.
func (p *PromParser) parseLVals() error {
	t := p.nextToken()
	for {
		switch t {
		case tBraceClose:
			return nil
		case tLName:
			// Fall through to record the label below.
		default:
			return parseError("expected label name", t)
		}
		p.offsets = append(p.offsets, p.l.start, p.l.i)

		if t := p.nextToken(); t != tEqual {
			return parseError("expected equal", t)
		}
		if t := p.nextToken(); t != tLValue {
			return parseError("expected label value", t)
		}
		if !utf8.Valid(p.l.buf()) {
			return errors.Errorf("invalid UTF-8 label value")
		}

		// The promlexer ensures the value string is quoted. Strip first
		// and last character.
		p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)

		// Free trailing commas are allowed.
		if t = p.nextToken(); t == tComma {
			t = p.nextToken()
		}
	}
}
|
||||
|
||||
// lvalReplacer unescapes the sequences permitted in quoted label values.
var lvalReplacer = strings.NewReplacer(
	`\"`, "\"",
	`\\`, "\\",
	`\n`, "\n",
)

// helpReplacer unescapes the sequences permitted in HELP text
// (no quote escape, unlike label values).
var helpReplacer = strings.NewReplacer(
	`\\`, "\\",
	`\n`, "\n",
)
|
||||
|
||||
// yoloString converts a byte slice to a string without copying.
// The caller must not mutate b while the returned string is in use,
// since both share the same backing memory.
func yoloString(b []byte) string {
	return *((*string)(unsafe.Pointer(&b)))
}
|
||||
|
||||
// parseFloat wraps strconv.ParseFloat but rejects the hexadecimal-float
// and digit-separator syntax accepted since Go 1.13, keeping the set of
// accepted inputs identical to earlier Go releases.
func parseFloat(s string) (float64, error) {
	for _, c := range s {
		if c == 'p' || c == 'P' || c == '_' {
			return 0, fmt.Errorf("unsupported character in float")
		}
	}
	return strconv.ParseFloat(s, 64)
}
|
||||
411
vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.nometa.txt
generated
vendored
Normal file
411
vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.nometa.txt
generated
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
go_gc_duration_seconds{quantile="0"} 4.9351e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
|
||||
go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
|
||||
go_gc_duration_seconds{quantile="0.75"} 0.000106744
|
||||
go_gc_duration_seconds{quantile="1"} 0.002072195
|
||||
go_gc_duration_seconds_sum 0.012139815
|
||||
go_gc_duration_seconds_count 99
|
||||
go_goroutines 33
|
||||
go_memstats_alloc_bytes 1.7518624e+07
|
||||
go_memstats_alloc_bytes_total 8.3062296e+08
|
||||
go_memstats_buck_hash_sys_bytes 1.494637e+06
|
||||
go_memstats_frees_total 4.65658e+06
|
||||
go_memstats_gc_sys_bytes 1.107968e+06
|
||||
go_memstats_heap_alloc_bytes 1.7518624e+07
|
||||
go_memstats_heap_idle_bytes 6.668288e+06
|
||||
go_memstats_heap_inuse_bytes 1.8956288e+07
|
||||
go_memstats_heap_objects 72755
|
||||
go_memstats_heap_released_bytes_total 0
|
||||
go_memstats_heap_sys_bytes 2.5624576e+07
|
||||
go_memstats_last_gc_time_seconds 1.4843955586166437e+09
|
||||
go_memstats_lookups_total 2089
|
||||
go_memstats_mallocs_total 4.729335e+06
|
||||
go_memstats_mcache_inuse_bytes 9600
|
||||
go_memstats_mcache_sys_bytes 16384
|
||||
go_memstats_mspan_inuse_bytes 211520
|
||||
go_memstats_mspan_sys_bytes 245760
|
||||
go_memstats_next_gc_bytes 2.033527e+07
|
||||
go_memstats_other_sys_bytes 2.077323e+06
|
||||
go_memstats_stack_inuse_bytes 1.6384e+06
|
||||
go_memstats_stack_sys_bytes 1.6384e+06
|
||||
go_memstats_sys_bytes 3.2205048e+07
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="alerts"} 0
|
||||
http_request_duration_microseconds_count{handler="alerts"} 0
|
||||
http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="config"} 0
|
||||
http_request_duration_microseconds_count{handler="config"} 0
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="consoles"} 0
|
||||
http_request_duration_microseconds_count{handler="consoles"} 0
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="drop_series"} 0
|
||||
http_request_duration_microseconds_count{handler="drop_series"} 0
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="federate"} 0
|
||||
http_request_duration_microseconds_count{handler="federate"} 0
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="flags"} 0
|
||||
http_request_duration_microseconds_count{handler="flags"} 0
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
|
||||
http_request_duration_microseconds_sum{handler="graph"} 5803.93
|
||||
http_request_duration_microseconds_count{handler="graph"} 3
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="heap"} 0
|
||||
http_request_duration_microseconds_count{handler="heap"} 0
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
|
||||
http_request_duration_microseconds_sum{handler="label_values"} 3995.574
|
||||
http_request_duration_microseconds_count{handler="label_values"} 3
|
||||
http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="options"} 0
|
||||
http_request_duration_microseconds_count{handler="options"} 0
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
|
||||
http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
|
||||
http_request_duration_microseconds_count{handler="prometheus"} 462
|
||||
http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
|
||||
http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
|
||||
http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
|
||||
http_request_duration_microseconds_sum{handler="query"} 26074.11
|
||||
http_request_duration_microseconds_count{handler="query"} 6
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="query_range"} 0
|
||||
http_request_duration_microseconds_count{handler="query_range"} 0
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="rules"} 0
|
||||
http_request_duration_microseconds_count{handler="rules"} 0
|
||||
http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="series"} 0
|
||||
http_request_duration_microseconds_count{handler="series"} 0
|
||||
http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
|
||||
http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
|
||||
http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
|
||||
http_request_duration_microseconds_sum{handler="static"} 6458.621
|
||||
http_request_duration_microseconds_count{handler="static"} 3
|
||||
http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="status"} 0
|
||||
http_request_duration_microseconds_count{handler="status"} 0
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="targets"} 0
|
||||
http_request_duration_microseconds_count{handler="targets"} 0
|
||||
http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="version"} 0
|
||||
http_request_duration_microseconds_count{handler="version"} 0
|
||||
http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="alerts"} 0
|
||||
http_request_size_bytes_count{handler="alerts"} 0
|
||||
http_request_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="config"} 0
|
||||
http_request_size_bytes_count{handler="config"} 0
|
||||
http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="consoles"} 0
|
||||
http_request_size_bytes_count{handler="consoles"} 0
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="drop_series"} 0
|
||||
http_request_size_bytes_count{handler="drop_series"} 0
|
||||
http_request_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="federate"} 0
|
||||
http_request_size_bytes_count{handler="federate"} 0
|
||||
http_request_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="flags"} 0
|
||||
http_request_size_bytes_count{handler="flags"} 0
|
||||
http_request_size_bytes{handler="graph",quantile="0.5"} 367
|
||||
http_request_size_bytes{handler="graph",quantile="0.9"} 389
|
||||
http_request_size_bytes{handler="graph",quantile="0.99"} 389
|
||||
http_request_size_bytes_sum{handler="graph"} 1145
|
||||
http_request_size_bytes_count{handler="graph"} 3
|
||||
http_request_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="heap"} 0
|
||||
http_request_size_bytes_count{handler="heap"} 0
|
||||
http_request_size_bytes{handler="label_values",quantile="0.5"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.9"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.99"} 416
|
||||
http_request_size_bytes_sum{handler="label_values"} 1248
|
||||
http_request_size_bytes_count{handler="label_values"} 3
|
||||
http_request_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="options"} 0
|
||||
http_request_size_bytes_count{handler="options"} 0
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
|
||||
http_request_size_bytes_sum{handler="prometheus"} 109956
|
||||
http_request_size_bytes_count{handler="prometheus"} 462
|
||||
http_request_size_bytes{handler="query",quantile="0.5"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.9"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.99"} 531
|
||||
http_request_size_bytes_sum{handler="query"} 3186
|
||||
http_request_size_bytes_count{handler="query"} 6
|
||||
http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="query_range"} 0
|
||||
http_request_size_bytes_count{handler="query_range"} 0
|
||||
http_request_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="rules"} 0
|
||||
http_request_size_bytes_count{handler="rules"} 0
|
||||
http_request_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="series"} 0
|
||||
http_request_size_bytes_count{handler="series"} 0
|
||||
http_request_size_bytes{handler="static",quantile="0.5"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.9"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.99"} 379
|
||||
http_request_size_bytes_sum{handler="static"} 1137
|
||||
http_request_size_bytes_count{handler="static"} 3
|
||||
http_request_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="status"} 0
|
||||
http_request_size_bytes_count{handler="status"} 0
|
||||
http_request_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="targets"} 0
|
||||
http_request_size_bytes_count{handler="targets"} 0
|
||||
http_request_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="version"} 0
|
||||
http_request_size_bytes_count{handler="version"} 0
|
||||
http_requests_total{code="200",handler="graph",method="get"} 3
|
||||
http_requests_total{code="200",handler="label_values",method="get"} 3
|
||||
http_requests_total{code="200",handler="prometheus",method="get"} 462
|
||||
http_requests_total{code="200",handler="query",method="get"} 6
|
||||
http_requests_total{code="200",handler="static",method="get"} 3
|
||||
http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="alerts"} 0
|
||||
http_response_size_bytes_count{handler="alerts"} 0
|
||||
http_response_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="config"} 0
|
||||
http_response_size_bytes_count{handler="config"} 0
|
||||
http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="consoles"} 0
|
||||
http_response_size_bytes_count{handler="consoles"} 0
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="drop_series"} 0
|
||||
http_response_size_bytes_count{handler="drop_series"} 0
|
||||
http_response_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="federate"} 0
|
||||
http_response_size_bytes_count{handler="federate"} 0
|
||||
http_response_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="flags"} 0
|
||||
http_response_size_bytes_count{handler="flags"} 0
|
||||
http_response_size_bytes{handler="graph",quantile="0.5"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.9"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.99"} 3619
|
||||
http_response_size_bytes_sum{handler="graph"} 10857
|
||||
http_response_size_bytes_count{handler="graph"} 3
|
||||
http_response_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="heap"} 0
|
||||
http_response_size_bytes_count{handler="heap"} 0
|
||||
http_response_size_bytes{handler="label_values",quantile="0.5"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.9"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.99"} 642
|
||||
http_response_size_bytes_sum{handler="label_values"} 1926
|
||||
http_response_size_bytes_count{handler="label_values"} 3
|
||||
http_response_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="options"} 0
|
||||
http_response_size_bytes_count{handler="options"} 0
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
|
||||
http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
|
||||
http_response_size_bytes_count{handler="prometheus"} 462
|
||||
http_response_size_bytes{handler="query",quantile="0.5"} 776
|
||||
http_response_size_bytes{handler="query",quantile="0.9"} 781
|
||||
http_response_size_bytes{handler="query",quantile="0.99"} 781
|
||||
http_response_size_bytes_sum{handler="query"} 4656
|
||||
http_response_size_bytes_count{handler="query"} 6
|
||||
http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="query_range"} 0
|
||||
http_response_size_bytes_count{handler="query_range"} 0
|
||||
http_response_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="rules"} 0
|
||||
http_response_size_bytes_count{handler="rules"} 0
|
||||
http_response_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="series"} 0
|
||||
http_response_size_bytes_count{handler="series"} 0
|
||||
http_response_size_bytes{handler="static",quantile="0.5"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.9"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.99"} 6316
|
||||
http_response_size_bytes_sum{handler="static"} 18948
|
||||
http_response_size_bytes_count{handler="static"} 3
|
||||
http_response_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="status"} 0
|
||||
http_response_size_bytes_count{handler="status"} 0
|
||||
http_response_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="targets"} 0
|
||||
http_response_size_bytes_count{handler="targets"} 0
|
||||
http_response_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="version"} 0
|
||||
http_response_size_bytes_count{handler="version"} 0
|
||||
prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
|
||||
prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
|
||||
prometheus_config_last_reload_successful 1
|
||||
prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_count 1
|
||||
prometheus_evaluator_iterations_skipped_total 0
|
||||
prometheus_notifications_dropped_total 0
|
||||
prometheus_notifications_queue_capacity 10000
|
||||
prometheus_notifications_queue_length 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_azure_refresh_duration_seconds_count 0
|
||||
prometheus_sd_azure_refresh_failures_total 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_failures_total 0
|
||||
prometheus_sd_dns_lookup_failures_total 0
|
||||
prometheus_sd_dns_lookups_total 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds_count 0
|
||||
prometheus_sd_ec2_refresh_failures_total 0
|
||||
prometheus_sd_file_read_errors_total 0
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds_sum 0
|
||||
prometheus_sd_file_scan_duration_seconds_count 0
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
|
||||
prometheus_sd_gce_refresh_duration_sum 0
|
||||
prometheus_sd_gce_refresh_duration_count 0
|
||||
prometheus_sd_gce_refresh_failures_total 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds_count 0
|
||||
prometheus_sd_marathon_refresh_failures_total 0
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
|
||||
prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
|
||||
prometheus_target_interval_length_seconds_count{interval="50ms"} 685
|
||||
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
|
||||
prometheus_target_skipped_scrapes_total 0
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
|
||||
prometheus_treecache_watcher_goroutines 0
|
||||
prometheus_treecache_zookeeper_failures_total 0
|
||||
# EOF
|
||||
529
vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.txt
generated
vendored
Normal file
529
vendor/github.com/prometheus/prometheus/pkg/textparse/promtestdata.txt
generated
vendored
Normal file
@@ -0,0 +1,529 @@
|
||||
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
go_gc_duration_seconds{quantile="0"} 4.9351e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
|
||||
go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
|
||||
go_gc_duration_seconds{quantile="0.75"} 0.000106744
|
||||
go_gc_duration_seconds{quantile="1"} 0.002072195
|
||||
go_gc_duration_seconds_sum 0.012139815
|
||||
go_gc_duration_seconds_count 99
|
||||
# HELP go_goroutines Number of goroutines that currently exist.
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines 33
|
||||
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
|
||||
# TYPE go_memstats_alloc_bytes gauge
|
||||
go_memstats_alloc_bytes 1.7518624e+07
|
||||
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
|
||||
# TYPE go_memstats_alloc_bytes_total counter
|
||||
go_memstats_alloc_bytes_total 8.3062296e+08
|
||||
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
|
||||
# TYPE go_memstats_buck_hash_sys_bytes gauge
|
||||
go_memstats_buck_hash_sys_bytes 1.494637e+06
|
||||
# HELP go_memstats_frees_total Total number of frees.
|
||||
# TYPE go_memstats_frees_total counter
|
||||
go_memstats_frees_total 4.65658e+06
|
||||
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
|
||||
# TYPE go_memstats_gc_sys_bytes gauge
|
||||
go_memstats_gc_sys_bytes 1.107968e+06
|
||||
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
|
||||
# TYPE go_memstats_heap_alloc_bytes gauge
|
||||
go_memstats_heap_alloc_bytes 1.7518624e+07
|
||||
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
|
||||
# TYPE go_memstats_heap_idle_bytes gauge
|
||||
go_memstats_heap_idle_bytes 6.668288e+06
|
||||
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
|
||||
# TYPE go_memstats_heap_inuse_bytes gauge
|
||||
go_memstats_heap_inuse_bytes 1.8956288e+07
|
||||
# HELP go_memstats_heap_objects Number of allocated objects.
|
||||
# TYPE go_memstats_heap_objects gauge
|
||||
go_memstats_heap_objects 72755
|
||||
# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
|
||||
# TYPE go_memstats_heap_released_bytes_total counter
|
||||
go_memstats_heap_released_bytes_total 0
|
||||
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
|
||||
# TYPE go_memstats_heap_sys_bytes gauge
|
||||
go_memstats_heap_sys_bytes 2.5624576e+07
|
||||
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
|
||||
# TYPE go_memstats_last_gc_time_seconds gauge
|
||||
go_memstats_last_gc_time_seconds 1.4843955586166437e+09
|
||||
# HELP go_memstats_lookups_total Total number of pointer lookups.
|
||||
# TYPE go_memstats_lookups_total counter
|
||||
go_memstats_lookups_total 2089
|
||||
# HELP go_memstats_mallocs_total Total number of mallocs.
|
||||
# TYPE go_memstats_mallocs_total counter
|
||||
go_memstats_mallocs_total 4.729335e+06
|
||||
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
|
||||
# TYPE go_memstats_mcache_inuse_bytes gauge
|
||||
go_memstats_mcache_inuse_bytes 9600
|
||||
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
|
||||
# TYPE go_memstats_mcache_sys_bytes gauge
|
||||
go_memstats_mcache_sys_bytes 16384
|
||||
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
|
||||
# TYPE go_memstats_mspan_inuse_bytes gauge
|
||||
go_memstats_mspan_inuse_bytes 211520
|
||||
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
|
||||
# TYPE go_memstats_mspan_sys_bytes gauge
|
||||
go_memstats_mspan_sys_bytes 245760
|
||||
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
|
||||
# TYPE go_memstats_next_gc_bytes gauge
|
||||
go_memstats_next_gc_bytes 2.033527e+07
|
||||
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
|
||||
# TYPE go_memstats_other_sys_bytes gauge
|
||||
go_memstats_other_sys_bytes 2.077323e+06
|
||||
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
|
||||
# TYPE go_memstats_stack_inuse_bytes gauge
|
||||
go_memstats_stack_inuse_bytes 1.6384e+06
|
||||
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
|
||||
# TYPE go_memstats_stack_sys_bytes gauge
|
||||
go_memstats_stack_sys_bytes 1.6384e+06
|
||||
# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
|
||||
# TYPE go_memstats_sys_bytes gauge
|
||||
go_memstats_sys_bytes 3.2205048e+07
|
||||
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
|
||||
# TYPE http_request_duration_microseconds summary
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="alerts"} 0
|
||||
http_request_duration_microseconds_count{handler="alerts"} 0
|
||||
http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="config"} 0
|
||||
http_request_duration_microseconds_count{handler="config"} 0
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="consoles"} 0
|
||||
http_request_duration_microseconds_count{handler="consoles"} 0
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="drop_series"} 0
|
||||
http_request_duration_microseconds_count{handler="drop_series"} 0
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="federate"} 0
|
||||
http_request_duration_microseconds_count{handler="federate"} 0
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="flags"} 0
|
||||
http_request_duration_microseconds_count{handler="flags"} 0
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
|
||||
http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
|
||||
http_request_duration_microseconds_sum{handler="graph"} 5803.93
|
||||
http_request_duration_microseconds_count{handler="graph"} 3
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="heap"} 0
|
||||
http_request_duration_microseconds_count{handler="heap"} 0
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
|
||||
http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
|
||||
http_request_duration_microseconds_sum{handler="label_values"} 3995.574
|
||||
http_request_duration_microseconds_count{handler="label_values"} 3
|
||||
http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="options"} 0
|
||||
http_request_duration_microseconds_count{handler="options"} 0
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
|
||||
http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
|
||||
http_request_duration_microseconds_count{handler="prometheus"} 462
|
||||
http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
|
||||
http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
|
||||
http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
|
||||
http_request_duration_microseconds_sum{handler="query"} 26074.11
|
||||
http_request_duration_microseconds_count{handler="query"} 6
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="query_range"} 0
|
||||
http_request_duration_microseconds_count{handler="query_range"} 0
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="rules"} 0
|
||||
http_request_duration_microseconds_count{handler="rules"} 0
|
||||
http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="series"} 0
|
||||
http_request_duration_microseconds_count{handler="series"} 0
|
||||
http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
|
||||
http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
|
||||
http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
|
||||
http_request_duration_microseconds_sum{handler="static"} 6458.621
|
||||
http_request_duration_microseconds_count{handler="static"} 3
|
||||
http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="status"} 0
|
||||
http_request_duration_microseconds_count{handler="status"} 0
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="targets"} 0
|
||||
http_request_duration_microseconds_count{handler="targets"} 0
|
||||
http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
|
||||
http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
|
||||
http_request_duration_microseconds_sum{handler="version"} 0
|
||||
http_request_duration_microseconds_count{handler="version"} 0
|
||||
# HELP http_request_size_bytes The HTTP request sizes in bytes.
|
||||
# TYPE http_request_size_bytes summary
|
||||
http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="alerts"} 0
|
||||
http_request_size_bytes_count{handler="alerts"} 0
|
||||
http_request_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="config"} 0
|
||||
http_request_size_bytes_count{handler="config"} 0
|
||||
http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="consoles"} 0
|
||||
http_request_size_bytes_count{handler="consoles"} 0
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="drop_series"} 0
|
||||
http_request_size_bytes_count{handler="drop_series"} 0
|
||||
http_request_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="federate"} 0
|
||||
http_request_size_bytes_count{handler="federate"} 0
|
||||
http_request_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="flags"} 0
|
||||
http_request_size_bytes_count{handler="flags"} 0
|
||||
http_request_size_bytes{handler="graph",quantile="0.5"} 367
|
||||
http_request_size_bytes{handler="graph",quantile="0.9"} 389
|
||||
http_request_size_bytes{handler="graph",quantile="0.99"} 389
|
||||
http_request_size_bytes_sum{handler="graph"} 1145
|
||||
http_request_size_bytes_count{handler="graph"} 3
|
||||
http_request_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="heap"} 0
|
||||
http_request_size_bytes_count{handler="heap"} 0
|
||||
http_request_size_bytes{handler="label_values",quantile="0.5"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.9"} 416
|
||||
http_request_size_bytes{handler="label_values",quantile="0.99"} 416
|
||||
http_request_size_bytes_sum{handler="label_values"} 1248
|
||||
http_request_size_bytes_count{handler="label_values"} 3
|
||||
http_request_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="options"} 0
|
||||
http_request_size_bytes_count{handler="options"} 0
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
|
||||
http_request_size_bytes_sum{handler="prometheus"} 109956
|
||||
http_request_size_bytes_count{handler="prometheus"} 462
|
||||
http_request_size_bytes{handler="query",quantile="0.5"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.9"} 531
|
||||
http_request_size_bytes{handler="query",quantile="0.99"} 531
|
||||
http_request_size_bytes_sum{handler="query"} 3186
|
||||
http_request_size_bytes_count{handler="query"} 6
|
||||
http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="query_range"} 0
|
||||
http_request_size_bytes_count{handler="query_range"} 0
|
||||
http_request_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="rules"} 0
|
||||
http_request_size_bytes_count{handler="rules"} 0
|
||||
http_request_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="series"} 0
|
||||
http_request_size_bytes_count{handler="series"} 0
|
||||
http_request_size_bytes{handler="static",quantile="0.5"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.9"} 379
|
||||
http_request_size_bytes{handler="static",quantile="0.99"} 379
|
||||
http_request_size_bytes_sum{handler="static"} 1137
|
||||
http_request_size_bytes_count{handler="static"} 3
|
||||
http_request_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="status"} 0
|
||||
http_request_size_bytes_count{handler="status"} 0
|
||||
http_request_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="targets"} 0
|
||||
http_request_size_bytes_count{handler="targets"} 0
|
||||
http_request_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_request_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_request_size_bytes_sum{handler="version"} 0
|
||||
http_request_size_bytes_count{handler="version"} 0
|
||||
# HELP http_requests_total Total number of HTTP requests made.
|
||||
# TYPE http_requests_total counter
|
||||
http_requests_total{code="200",handler="graph",method="get"} 3
|
||||
http_requests_total{code="200",handler="label_values",method="get"} 3
|
||||
http_requests_total{code="200",handler="prometheus",method="get"} 462
|
||||
http_requests_total{code="200",handler="query",method="get"} 6
|
||||
http_requests_total{code="200",handler="static",method="get"} 3
|
||||
# HELP http_response_size_bytes The HTTP response sizes in bytes.
|
||||
# TYPE http_response_size_bytes summary
|
||||
http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="alerts"} 0
|
||||
http_response_size_bytes_count{handler="alerts"} 0
|
||||
http_response_size_bytes{handler="config",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="config",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="config"} 0
|
||||
http_response_size_bytes_count{handler="config"} 0
|
||||
http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="consoles"} 0
|
||||
http_response_size_bytes_count{handler="consoles"} 0
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="drop_series"} 0
|
||||
http_response_size_bytes_count{handler="drop_series"} 0
|
||||
http_response_size_bytes{handler="federate",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="federate",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="federate"} 0
|
||||
http_response_size_bytes_count{handler="federate"} 0
|
||||
http_response_size_bytes{handler="flags",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="flags",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="flags"} 0
|
||||
http_response_size_bytes_count{handler="flags"} 0
|
||||
http_response_size_bytes{handler="graph",quantile="0.5"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.9"} 3619
|
||||
http_response_size_bytes{handler="graph",quantile="0.99"} 3619
|
||||
http_response_size_bytes_sum{handler="graph"} 10857
|
||||
http_response_size_bytes_count{handler="graph"} 3
|
||||
http_response_size_bytes{handler="heap",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="heap",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="heap"} 0
|
||||
http_response_size_bytes_count{handler="heap"} 0
|
||||
http_response_size_bytes{handler="label_values",quantile="0.5"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.9"} 642
|
||||
http_response_size_bytes{handler="label_values",quantile="0.99"} 642
|
||||
http_response_size_bytes_sum{handler="label_values"} 1926
|
||||
http_response_size_bytes_count{handler="label_values"} 3
|
||||
http_response_size_bytes{handler="options",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="options",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="options"} 0
|
||||
http_response_size_bytes_count{handler="options"} 0
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
|
||||
http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
|
||||
http_response_size_bytes_count{handler="prometheus"} 462
|
||||
http_response_size_bytes{handler="query",quantile="0.5"} 776
|
||||
http_response_size_bytes{handler="query",quantile="0.9"} 781
|
||||
http_response_size_bytes{handler="query",quantile="0.99"} 781
|
||||
http_response_size_bytes_sum{handler="query"} 4656
|
||||
http_response_size_bytes_count{handler="query"} 6
|
||||
http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="query_range"} 0
|
||||
http_response_size_bytes_count{handler="query_range"} 0
|
||||
http_response_size_bytes{handler="rules",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="rules",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="rules"} 0
|
||||
http_response_size_bytes_count{handler="rules"} 0
|
||||
http_response_size_bytes{handler="series",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="series",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="series"} 0
|
||||
http_response_size_bytes_count{handler="series"} 0
|
||||
http_response_size_bytes{handler="static",quantile="0.5"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.9"} 6316
|
||||
http_response_size_bytes{handler="static",quantile="0.99"} 6316
|
||||
http_response_size_bytes_sum{handler="static"} 18948
|
||||
http_response_size_bytes_count{handler="static"} 3
|
||||
http_response_size_bytes{handler="status",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="status",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="status"} 0
|
||||
http_response_size_bytes_count{handler="status"} 0
|
||||
http_response_size_bytes{handler="targets",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="targets",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="targets"} 0
|
||||
http_response_size_bytes_count{handler="targets"} 0
|
||||
http_response_size_bytes{handler="version",quantile="0.5"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.9"} NaN
|
||||
http_response_size_bytes{handler="version",quantile="0.99"} NaN
|
||||
http_response_size_bytes_sum{handler="version"} 0
|
||||
http_response_size_bytes_count{handler="version"} 0
|
||||
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
|
||||
# TYPE prometheus_build_info gauge
|
||||
prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
|
||||
# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
|
||||
# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
|
||||
prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
|
||||
# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
|
||||
# TYPE prometheus_config_last_reload_successful gauge
|
||||
prometheus_config_last_reload_successful 1
|
||||
# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations.
|
||||
# TYPE prometheus_evaluator_duration_seconds summary
|
||||
prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
|
||||
prometheus_evaluator_duration_seconds_count 1
|
||||
# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage.
|
||||
# TYPE prometheus_evaluator_iterations_skipped_total counter
|
||||
prometheus_evaluator_iterations_skipped_total 0
|
||||
# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration.
|
||||
# TYPE prometheus_notifications_dropped_total counter
|
||||
prometheus_notifications_dropped_total 0
|
||||
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
|
||||
# TYPE prometheus_notifications_queue_capacity gauge
|
||||
prometheus_notifications_queue_capacity 10000
|
||||
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
|
||||
# TYPE prometheus_notifications_queue_length gauge
|
||||
prometheus_notifications_queue_length 0
|
||||
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
|
||||
# TYPE prometheus_rule_evaluation_failures_total counter
|
||||
prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
|
||||
prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
|
||||
# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_azure_refresh_duration_seconds summary
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_azure_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_azure_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures.
|
||||
# TYPE prometheus_sd_azure_refresh_failures_total counter
|
||||
prometheus_sd_azure_refresh_failures_total 0
|
||||
# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
|
||||
# TYPE prometheus_sd_consul_rpc_duration_seconds summary
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
|
||||
prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
|
||||
prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
|
||||
# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
|
||||
# TYPE prometheus_sd_consul_rpc_failures_total counter
|
||||
prometheus_sd_consul_rpc_failures_total 0
|
||||
# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
|
||||
# TYPE prometheus_sd_dns_lookup_failures_total counter
|
||||
prometheus_sd_dns_lookup_failures_total 0
|
||||
# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
|
||||
# TYPE prometheus_sd_dns_lookups_total counter
|
||||
prometheus_sd_dns_lookups_total 0
|
||||
# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_ec2_refresh_duration_seconds summary
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_ec2_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_ec2_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures.
|
||||
# TYPE prometheus_sd_ec2_refresh_failures_total counter
|
||||
prometheus_sd_ec2_refresh_failures_total 0
|
||||
# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
|
||||
# TYPE prometheus_sd_file_read_errors_total counter
|
||||
prometheus_sd_file_read_errors_total 0
|
||||
# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
|
||||
# TYPE prometheus_sd_file_scan_duration_seconds summary
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_file_scan_duration_seconds_sum 0
|
||||
prometheus_sd_file_scan_duration_seconds_count 0
|
||||
# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_gce_refresh_duration summary
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
|
||||
prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
|
||||
prometheus_sd_gce_refresh_duration_sum 0
|
||||
prometheus_sd_gce_refresh_duration_count 0
|
||||
# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures.
|
||||
# TYPE prometheus_sd_gce_refresh_failures_total counter
|
||||
prometheus_sd_gce_refresh_failures_total 0
|
||||
# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
|
||||
# TYPE prometheus_sd_kubernetes_events_total counter
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
|
||||
prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
|
||||
# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds.
|
||||
# TYPE prometheus_sd_marathon_refresh_duration_seconds summary
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
|
||||
prometheus_sd_marathon_refresh_duration_seconds_sum 0
|
||||
prometheus_sd_marathon_refresh_duration_seconds_count 0
|
||||
# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures.
|
||||
# TYPE prometheus_sd_marathon_refresh_failures_total counter
|
||||
prometheus_sd_marathon_refresh_failures_total 0
|
||||
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
|
||||
# TYPE prometheus_target_interval_length_seconds summary
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
|
||||
prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
|
||||
prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
|
||||
prometheus_target_interval_length_seconds_count{interval="50ms"} 685
|
||||
# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
|
||||
# TYPE prometheus_target_scrape_pool_sync_total counter
|
||||
prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
|
||||
# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled.
|
||||
# TYPE prometheus_target_skipped_scrapes_total counter
|
||||
prometheus_target_skipped_scrapes_total 0
|
||||
# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
|
||||
# TYPE prometheus_target_sync_length_seconds summary
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
|
||||
prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
|
||||
# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
|
||||
# TYPE prometheus_treecache_watcher_goroutines gauge
|
||||
prometheus_treecache_watcher_goroutines 0
|
||||
# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
|
||||
# TYPE prometheus_treecache_zookeeper_failures_total counter
|
||||
prometheus_treecache_zookeeper_failures_total 0
|
||||
# EOF
|
||||
26
vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go
generated
vendored
Normal file
26
vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package timestamp
|
||||
|
||||
import "time"
|
||||
|
||||
// FromTime returns a new millisecond timestamp from a time.
|
||||
func FromTime(t time.Time) int64 {
|
||||
return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
|
||||
}
|
||||
|
||||
// Time returns a new time.Time object from a millisecond timestamp.
|
||||
func Time(ts int64) time.Time {
|
||||
return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC()
|
||||
}
|
||||
2119
vendor/github.com/prometheus/prometheus/promql/engine.go
generated
vendored
Normal file
2119
vendor/github.com/prometheus/prometheus/promql/engine.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1013
vendor/github.com/prometheus/prometheus/promql/functions.go
generated
vendored
Normal file
1013
vendor/github.com/prometheus/prometheus/promql/functions.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
103
vendor/github.com/prometheus/prometheus/promql/fuzz.go
generated
vendored
Normal file
103
vendor/github.com/prometheus/prometheus/promql/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Only build when go-fuzz is in use
|
||||
// +build gofuzz
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/textparse"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
)
|
||||
|
||||
// PromQL parser fuzzing instrumentation for use with
|
||||
// https://github.com/dvyukov/go-fuzz.
|
||||
//
|
||||
// Fuzz each parser by building appropriately instrumented parser, ex.
|
||||
// FuzzParseMetric and execute it with it's
|
||||
//
|
||||
// go-fuzz-build -func FuzzParseMetric -o FuzzParseMetric.zip github.com/prometheus/prometheus/promql
|
||||
//
|
||||
// And then run the tests with the appropriate inputs
|
||||
//
|
||||
// go-fuzz -bin FuzzParseMetric.zip -workdir fuzz-data/ParseMetric
|
||||
//
|
||||
// Further input samples should go in the folders fuzz-data/ParseMetric/corpus.
|
||||
//
|
||||
// Repeat for FuzzParseOpenMetric, FuzzParseMetricSelector and FuzzParseExpr.
|
||||
|
||||
// Tuning which value is returned from Fuzz*-functions has a strong influence
|
||||
// on how quick the fuzzer converges on "interesting" cases. At least try
|
||||
// switching between fuzzMeh (= included in corpus, but not a priority) and
|
||||
// fuzzDiscard (=don't use this input for re-building later inputs) when
|
||||
// experimenting.
|
||||
const (
	// fuzzInteresting marks an input as a priority for building further
	// corpus entries.
	fuzzInteresting = 1
	// fuzzMeh keeps the input in the corpus without prioritizing it.
	fuzzMeh = 0
	// fuzzDiscard tells go-fuzz not to reuse this input at all.
	fuzzDiscard = -1
)
|
||||
|
||||
func fuzzParseMetricWithContentType(in []byte, contentType string) int {
|
||||
p := textparse.New(in, contentType)
|
||||
var err error
|
||||
for {
|
||||
_, err = p.Next()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err == io.EOF {
|
||||
err = nil
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return fuzzInteresting
|
||||
}
|
||||
|
||||
return fuzzMeh
|
||||
}
|
||||
|
||||
// Fuzz the metric parser.
|
||||
//
|
||||
// Note that this is not the parser for the text-based exposition-format; that
|
||||
// lives in github.com/prometheus/client_golang/text.
|
||||
func FuzzParseMetric(in []byte) int {
|
||||
return fuzzParseMetricWithContentType(in, "")
|
||||
}
|
||||
|
||||
func FuzzParseOpenMetric(in []byte) int {
|
||||
return fuzzParseMetricWithContentType(in, "application/openmetrics-text")
|
||||
}
|
||||
|
||||
// Fuzz the metric selector parser.
|
||||
func FuzzParseMetricSelector(in []byte) int {
|
||||
_, err := parser.ParseMetricSelector(string(in))
|
||||
if err == nil {
|
||||
return fuzzInteresting
|
||||
}
|
||||
|
||||
return fuzzMeh
|
||||
}
|
||||
|
||||
// Fuzz the expression parser.
|
||||
func FuzzParseExpr(in []byte) int {
|
||||
_, err := parser.ParseExpr(string(in))
|
||||
if err == nil {
|
||||
return fuzzInteresting
|
||||
}
|
||||
|
||||
return fuzzMeh
|
||||
}
|
||||
204
vendor/github.com/prometheus/prometheus/promql/quantile.go
generated
vendored
Normal file
204
vendor/github.com/prometheus/prometheus/promql/quantile.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
)
|
||||
|
||||
// Helpers to calculate quantiles.
|
||||
|
||||
// excludedLabels are the labels to exclude from signature calculation for
// quantiles: the metric name and the bucket label, since both differ
// between the bucket series of a single histogram.
var excludedLabels = []string{
	labels.MetricName,
	labels.BucketLabel,
}
|
||||
|
||||
// bucket is one histogram bucket: its inclusive upper bound together with
// the cumulative observation count at that bound.
type bucket struct {
	upperBound float64
	count      float64
}
|
||||
|
||||
// buckets implements sort.Interface, ordering buckets by ascending
// upperBound.
type buckets []bucket

func (b buckets) Len() int           { return len(b) }
func (b buckets) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b buckets) Less(i, j int) bool { return b[i].upperBound < b[j].upperBound }
|
||||
|
||||
// metricWithBuckets pairs a metric's label set with the histogram buckets
// collected for it.
type metricWithBuckets struct {
	metric  labels.Labels
	buckets buckets
}
|
||||
|
||||
// bucketQuantile calculates the quantile 'q' based on the given buckets. The
// buckets will be sorted by upperBound by this function (i.e. no sorting
// needed before calling this function). The quantile value is interpolated
// assuming a linear distribution within a bucket. However, if the quantile
// falls into the highest bucket, the upper bound of the 2nd highest bucket is
// returned. A natural lower bound of 0 is assumed if the upper bound of the
// lowest bucket is greater than 0. In that case, interpolation in the lowest
// bucket happens linearly between 0 and the upper bound of the lowest bucket.
// However, if the lowest bucket has an upper bound less than or equal to 0,
// this upper bound is returned if the quantile falls into the lowest bucket.
//
// There are a number of special cases (once we have a way to report errors
// happening during evaluations of AST functions, we should report those
// explicitly):
//
// If 'buckets' has fewer than 2 elements, NaN is returned.
//
// If the highest bucket is not +Inf, NaN is returned.
//
// If q<0, -Inf is returned.
//
// If q>1, +Inf is returned.
func bucketQuantile(q float64, buckets buckets) float64 {
	if q < 0 {
		return math.Inf(-1)
	}
	if q > 1 {
		return math.Inf(+1)
	}
	sort.Sort(buckets)
	// A well-formed histogram always ends in the +Inf bucket.
	if !math.IsInf(buckets[len(buckets)-1].upperBound, +1) {
		return math.NaN()
	}

	// Merge duplicate bounds and repair any non-monotonic counts before
	// searching, so the binary search below is well-defined.
	buckets = coalesceBuckets(buckets)
	ensureMonotonic(buckets)

	if len(buckets) < 2 {
		return math.NaN()
	}

	// rank is the (fractional) number of observations at or below the
	// desired quantile.
	rank := q * buckets[len(buckets)-1].count
	// Find the first bucket whose cumulative count reaches rank
	// (excluding the final +Inf bucket from the search range).
	b := sort.Search(len(buckets)-1, func(i int) bool { return buckets[i].count >= rank })

	// Quantile lies in the +Inf bucket: report the 2nd highest bound.
	if b == len(buckets)-1 {
		return buckets[len(buckets)-2].upperBound
	}
	// Lowest bucket with a non-positive bound: no natural lower bound to
	// interpolate from, so return the bound itself.
	if b == 0 && buckets[0].upperBound <= 0 {
		return buckets[0].upperBound
	}
	var (
		bucketStart float64 // assumed lower bound of the matched bucket
		bucketEnd   = buckets[b].upperBound
		count       = buckets[b].count
	)
	if b > 0 {
		bucketStart = buckets[b-1].upperBound
		// Convert cumulative values into within-bucket values.
		count -= buckets[b-1].count
		rank -= buckets[b-1].count
	}
	// Linear interpolation inside the matched bucket.
	return bucketStart + (bucketEnd-bucketStart)*(rank/count)
}
|
||||
|
||||
// coalesceBuckets merges buckets with the same upper bound.
|
||||
//
|
||||
// The input buckets must be sorted.
|
||||
func coalesceBuckets(buckets buckets) buckets {
|
||||
last := buckets[0]
|
||||
i := 0
|
||||
for _, b := range buckets[1:] {
|
||||
if b.upperBound == last.upperBound {
|
||||
last.count += b.count
|
||||
} else {
|
||||
buckets[i] = last
|
||||
last = b
|
||||
i++
|
||||
}
|
||||
}
|
||||
buckets[i] = last
|
||||
return buckets[:i+1]
|
||||
}
|
||||
|
||||
// The assumption that bucket counts increase monotonically with increasing
|
||||
// upperBound may be violated during:
|
||||
//
|
||||
// * Recording rule evaluation of histogram_quantile, especially when rate()
|
||||
// has been applied to the underlying bucket timeseries.
|
||||
// * Evaluation of histogram_quantile computed over federated bucket
|
||||
// timeseries, especially when rate() has been applied.
|
||||
//
|
||||
// This is because scraped data is not made available to rule evaluation or
|
||||
// federation atomically, so some buckets are computed with data from the
|
||||
// most recent scrapes, but the other buckets are missing data from the most
|
||||
// recent scrape.
|
||||
//
|
||||
// Monotonicity is usually guaranteed because if a bucket with upper bound
|
||||
// u1 has count c1, then any bucket with a higher upper bound u > u1 must
|
||||
// have counted all c1 observations and perhaps more, so that c >= c1.
|
||||
//
|
||||
// Randomly interspersed partial sampling breaks that guarantee, and rate()
|
||||
// exacerbates it. Specifically, suppose bucket le=1000 has a count of 10 from
|
||||
// 4 samples but the bucket with le=2000 has a count of 7 from 3 samples. The
|
||||
// monotonicity is broken. It is exacerbated by rate() because under normal
|
||||
// operation, cumulative counting of buckets will cause the bucket counts to
|
||||
// diverge such that small differences from missing samples are not a problem.
|
||||
// rate() removes this divergence.)
|
||||
//
|
||||
// bucketQuantile depends on that monotonicity to do a binary search for the
|
||||
// bucket with the φ-quantile count, so breaking the monotonicity
|
||||
// guarantee causes bucketQuantile() to return undefined (nonsense) results.
|
||||
//
|
||||
// As a somewhat hacky solution until ingestion is atomic per scrape, we
|
||||
// calculate the "envelope" of the histogram buckets, essentially removing
|
||||
// any decreases in the count between successive buckets.
|
||||
|
||||
func ensureMonotonic(buckets buckets) {
|
||||
max := buckets[0].count
|
||||
for i := range buckets[1:] {
|
||||
switch {
|
||||
case buckets[i].count > max:
|
||||
max = buckets[i].count
|
||||
case buckets[i].count < max:
|
||||
buckets[i].count = max
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// quantile calculates the given quantile of a vector of samples.
|
||||
//
|
||||
// The Vector will be sorted.
|
||||
// If 'values' has zero elements, NaN is returned.
|
||||
// If q<0, -Inf is returned.
|
||||
// If q>1, +Inf is returned.
|
||||
func quantile(q float64, values vectorByValueHeap) float64 {
|
||||
if len(values) == 0 {
|
||||
return math.NaN()
|
||||
}
|
||||
if q < 0 {
|
||||
return math.Inf(-1)
|
||||
}
|
||||
if q > 1 {
|
||||
return math.Inf(+1)
|
||||
}
|
||||
sort.Sort(values)
|
||||
|
||||
n := float64(len(values))
|
||||
// When the quantile lies between two samples,
|
||||
// we use a weighted average of the two samples.
|
||||
rank := q * (n - 1)
|
||||
|
||||
lowerIndex := math.Max(0, math.Floor(rank))
|
||||
upperIndex := math.Min(n-1, lowerIndex+1)
|
||||
|
||||
weight := rank - math.Floor(rank)
|
||||
return values[int(lowerIndex)].V*(1-weight) + values[int(upperIndex)].V*weight
|
||||
}
|
||||
197
vendor/github.com/prometheus/prometheus/promql/query_logger.go
generated
vendored
Normal file
197
vendor/github.com/prometheus/prometheus/promql/query_logger.go
generated
vendored
Normal file
@@ -0,0 +1,197 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/edsrzf/mmap-go"
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
)
|
||||
|
||||
// ActiveQueryTracker logs in-flight queries into an mmap-ed file so that,
// after a crash, the queries that never finished can be recovered and
// reported on the next start-up.
type ActiveQueryTracker struct {
	mmapedFile    []byte   // mmap-ed backing file; one fixed-size slot per query
	getNextIndex  chan int // pool of free byte offsets (slots) in mmapedFile
	logger        log.Logger
	maxConcurrent int // number of slots, i.e. maximum queries tracked at once
}
|
||||
|
||||
// Entry is a single record in the active-query log: the query text plus the
// UNIX timestamp (in seconds) at which it was inserted.
type Entry struct {
	Query     string `json:"query"`
	Timestamp int64  `json:"timestamp_sec"`
}
|
||||
|
||||
const (
	// entrySize is the fixed number of bytes reserved per query slot in the
	// mmap-ed file; queries are trimmed so their JSON entry fits the slot.
	entrySize int = 1000
)
|
||||
|
||||
// parseBrokenJSON turns the raw contents of the active-query file into a
// valid JSON array string. The file holds a "[" followed by comma-terminated
// entries padded with NUL bytes, so the padding is stripped and the final
// separator is replaced by a closing bracket. The boolean reports whether
// any queries were present.
func parseBrokenJSON(brokenJSON []byte) (bool, string) {
	queries := strings.ReplaceAll(string(brokenJSON), "\x00", "")
	if queries != "" {
		// Swap the trailing separator for the closing bracket.
		queries = queries[:len(queries)-1] + "]"
	}

	// Conditional because of implementation detail: len() = 1 implies file consisted of a single char: '['.
	if len(queries) <= 1 {
		return false, "[]"
	}
	return true, queries
}
|
||||
|
||||
func logUnfinishedQueries(filename string, filesize int, logger log.Logger) {
|
||||
if _, err := os.Stat(filename); err == nil {
|
||||
fd, err := os.Open(filename)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to open query log file", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
brokenJSON := make([]byte, filesize)
|
||||
_, err = fd.Read(brokenJSON)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to read query log file", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
queriesExist, queries := parseBrokenJSON(brokenJSON)
|
||||
if !queriesExist {
|
||||
return
|
||||
}
|
||||
level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries)
|
||||
}
|
||||
}
|
||||
|
||||
func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) {
|
||||
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = file.Truncate(int64(filesize))
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0)
|
||||
if err != nil {
|
||||
level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fileAsBytes, err
|
||||
}
|
||||
|
||||
// NewActiveQueryTracker sets up an ActiveQueryTracker backed by the file
// "queries.active" under localStoragePath, first reporting any queries left
// unfinished by a previous run. It panics if the mmap-ed log cannot be
// created.
func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker {
	err := os.MkdirAll(localStoragePath, 0777)
	if err != nil {
		// Non-fatal: the OpenFile below will surface the real failure.
		level.Error(logger).Log("msg", "Failed to create directory for logging active queries")
	}

	// One byte for the leading "[" plus one fixed-size slot per query.
	filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize
	logUnfinishedQueries(filename, filesize, logger)

	fileAsBytes, err := getMMapedFile(filename, filesize, logger)
	if err != nil {
		panic("Unable to create mmap-ed active query log")
	}

	// The file always starts as an (unterminated) JSON array.
	copy(fileAsBytes, "[")
	activeQueryTracker := ActiveQueryTracker{
		mmapedFile:    fileAsBytes,
		getNextIndex:  make(chan int, maxConcurrent),
		logger:        logger,
		maxConcurrent: maxConcurrent,
	}

	// Pre-fill the free-slot pool with every slot offset.
	activeQueryTracker.generateIndices(maxConcurrent)

	return &activeQueryTracker
}
|
||||
|
||||
// trimStringByBytes truncates str to at most size bytes without splitting a
// UTF-8 rune: the cut point is moved left until it lands on a rune start.
func trimStringByBytes(str string, size int) string {
	raw := []byte(str)
	if size >= len(raw) {
		return string(raw)
	}
	// Back up until the cut falls on the first byte of a rune.
	for !utf8.RuneStart(raw[size]) {
		size--
	}
	return string(raw[:size])
}
|
||||
|
||||
// _newJSONEntry serializes an Entry{query, timestamp} to JSON. On marshaling
// failure it logs the query and returns an empty slice rather than an error,
// so callers can write the (possibly empty) result unconditionally.
func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte {
	entry := Entry{query, timestamp}
	jsonEntry, err := json.Marshal(entry)

	if err != nil {
		level.Error(logger).Log("msg", "Cannot create json of query", "query", query)
		return []byte{}
	}

	return jsonEntry
}
|
||||
|
||||
// newJSONEntry builds the JSON log entry for query, trimming the query text so
// that the whole entry plus one trailing separator byte fits within entrySize.
func newJSONEntry(query string, logger log.Logger) []byte {
	timestamp := time.Now().Unix()
	// Size of an entry with an empty query, i.e. the fixed JSON envelope.
	minEntryJSON := _newJSONEntry("", timestamp, logger)

	// Leave room for the envelope and one separator byte.
	query = trimStringByBytes(query, entrySize-(len(minEntryJSON)+1))
	jsonEntry := _newJSONEntry(query, timestamp, logger)

	return jsonEntry
}
|
||||
|
||||
func (tracker ActiveQueryTracker) generateIndices(maxConcurrent int) {
|
||||
for i := 0; i < maxConcurrent; i++ {
|
||||
tracker.getNextIndex <- 1 + (i * entrySize)
|
||||
}
|
||||
}
|
||||
|
||||
// GetMaxConcurrent returns the number of queries the tracker can record at once.
func (tracker ActiveQueryTracker) GetMaxConcurrent() int {
	return tracker.maxConcurrent
}
|
||||
|
||||
func (tracker ActiveQueryTracker) Delete(insertIndex int) {
|
||||
copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize))
|
||||
tracker.getNextIndex <- insertIndex
|
||||
}
|
||||
|
||||
// Insert writes the query (with its start timestamp) into a free entry slot of
// the mmap-ed log and returns the slot's offset, which the caller must later
// pass to Delete. It blocks until a slot is free or ctx is canceled.
func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) {
	select {
	case i := <-tracker.getNextIndex:
		fileBytes := tracker.mmapedFile
		entry := newJSONEntry(query, tracker.logger)
		start, end := i, i+entrySize

		copy(fileBytes[start:], entry)
		// Terminate the slot with "," so the file remains close to a valid
		// JSON array for the restart-time reader.
		copy(fileBytes[end-1:], ",")
		return i, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}
|
||||
701
vendor/github.com/prometheus/prometheus/promql/test.go
generated
vendored
Normal file
701
vendor/github.com/prometheus/prometheus/promql/test.go
generated
vendored
Normal file
@@ -0,0 +1,701 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/util/teststorage"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
minNormal = math.Float64frombits(0x0010000000000000) // The smallest positive normal value of type float64.
|
||||
|
||||
patSpace = regexp.MustCompile("[\t ]+")
|
||||
patLoad = regexp.MustCompile(`^load\s+(.+?)$`)
|
||||
patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`)
|
||||
)
|
||||
|
||||
const (
|
||||
epsilon = 0.000001 // Relative error allowed for sample values.
|
||||
)
|
||||
|
||||
var testStartTime = time.Unix(0, 0).UTC()
|
||||
|
||||
// Test is a sequence of read and write commands that are run
|
||||
// against a test storage.
|
||||
type Test struct {
|
||||
testutil.T
|
||||
|
||||
cmds []testCommand
|
||||
|
||||
storage *teststorage.TestStorage
|
||||
|
||||
queryEngine *Engine
|
||||
context context.Context
|
||||
cancelCtx context.CancelFunc
|
||||
}
|
||||
|
||||
// NewTest returns an initialized empty Test.
|
||||
func NewTest(t testutil.T, input string) (*Test, error) {
|
||||
test := &Test{
|
||||
T: t,
|
||||
cmds: []testCommand{},
|
||||
}
|
||||
err := test.parse(input)
|
||||
test.clear()
|
||||
|
||||
return test, err
|
||||
}
|
||||
|
||||
func newTestFromFile(t testutil.T, filename string) (*Test, error) {
|
||||
content, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTest(t, string(content))
|
||||
}
|
||||
|
||||
// QueryEngine returns the test's query engine.
|
||||
func (t *Test) QueryEngine() *Engine {
|
||||
return t.queryEngine
|
||||
}
|
||||
|
||||
// Queryable allows querying the test data.
|
||||
func (t *Test) Queryable() storage.Queryable {
|
||||
return t.storage
|
||||
}
|
||||
|
||||
// Context returns the test's context.
|
||||
func (t *Test) Context() context.Context {
|
||||
return t.context
|
||||
}
|
||||
|
||||
// Storage returns the test's storage.
|
||||
func (t *Test) Storage() storage.Storage {
|
||||
return t.storage
|
||||
}
|
||||
|
||||
// TSDB returns test's TSDB.
|
||||
func (t *Test) TSDB() *tsdb.DB {
|
||||
return t.storage.DB
|
||||
}
|
||||
|
||||
// raise wraps a formatted error in a parser.ParseErr carrying the offending
// line number of the test script.
func raise(line int, format string, v ...interface{}) error {
	return &parser.ParseErr{
		LineOffset: line,
		Err:        errors.Errorf(format, v...),
	}
}
|
||||
|
||||
// parseLoad parses a "load <step>" command starting at lines[i], consuming the
// series definitions that follow until a blank line. It returns the index of
// the last line consumed together with the resulting loadCmd.
func parseLoad(lines []string, i int) (int, *loadCmd, error) {
	if !patLoad.MatchString(lines[i]) {
		return i, nil, raise(i, "invalid load command. (load <step:duration>)")
	}
	parts := patLoad.FindStringSubmatch(lines[i])

	gap, err := model.ParseDuration(parts[1])
	if err != nil {
		return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
	}
	cmd := newLoadCmd(time.Duration(gap))
	for i+1 < len(lines) {
		i++
		defLine := lines[i]
		if len(defLine) == 0 {
			// Blank line terminates the load block; rewind so the caller
			// re-examines this position.
			i--
			break
		}
		metric, vals, err := parser.ParseSeriesDesc(defLine)
		if err != nil {
			if perr, ok := err.(*parser.ParseErr); ok {
				perr.LineOffset = i
			}
			return i, nil, err
		}
		cmd.set(metric, vals...)
	}
	return i, cmd, nil
}
|
||||
|
||||
func (t *Test) parseEval(lines []string, i int) (int, *evalCmd, error) {
|
||||
if !patEvalInstant.MatchString(lines[i]) {
|
||||
return i, nil, raise(i, "invalid evaluation command. (eval[_fail|_ordered] instant [at <offset:duration>] <query>")
|
||||
}
|
||||
parts := patEvalInstant.FindStringSubmatch(lines[i])
|
||||
var (
|
||||
mod = parts[1]
|
||||
at = parts[2]
|
||||
expr = parts[3]
|
||||
)
|
||||
_, err := parser.ParseExpr(expr)
|
||||
if err != nil {
|
||||
if perr, ok := err.(*parser.ParseErr); ok {
|
||||
perr.LineOffset = i
|
||||
posOffset := parser.Pos(strings.Index(lines[i], expr))
|
||||
perr.PositionRange.Start += posOffset
|
||||
perr.PositionRange.End += posOffset
|
||||
perr.Query = lines[i]
|
||||
}
|
||||
return i, nil, err
|
||||
}
|
||||
|
||||
offset, err := model.ParseDuration(at)
|
||||
if err != nil {
|
||||
return i, nil, raise(i, "invalid step definition %q: %s", parts[1], err)
|
||||
}
|
||||
ts := testStartTime.Add(time.Duration(offset))
|
||||
|
||||
cmd := newEvalCmd(expr, ts, i+1)
|
||||
switch mod {
|
||||
case "ordered":
|
||||
cmd.ordered = true
|
||||
case "fail":
|
||||
cmd.fail = true
|
||||
}
|
||||
|
||||
for j := 1; i+1 < len(lines); j++ {
|
||||
i++
|
||||
defLine := lines[i]
|
||||
if len(defLine) == 0 {
|
||||
i--
|
||||
break
|
||||
}
|
||||
if f, err := parseNumber(defLine); err == nil {
|
||||
cmd.expect(0, nil, parser.SequenceValue{Value: f})
|
||||
break
|
||||
}
|
||||
metric, vals, err := parser.ParseSeriesDesc(defLine)
|
||||
if err != nil {
|
||||
if perr, ok := err.(*parser.ParseErr); ok {
|
||||
perr.LineOffset = i
|
||||
}
|
||||
return i, nil, err
|
||||
}
|
||||
|
||||
// Currently, we are not expecting any matrices.
|
||||
if len(vals) > 1 {
|
||||
return i, nil, raise(i, "expecting multiple values in instant evaluation not allowed")
|
||||
}
|
||||
cmd.expect(j, metric, vals...)
|
||||
}
|
||||
return i, cmd, nil
|
||||
}
|
||||
|
||||
// getLines splits input into lines, trims surrounding whitespace from each,
// and blanks out comment lines (those whose trimmed text starts with '#').
func getLines(input string) []string {
	lines := strings.Split(input, "\n")
	for i := range lines {
		trimmed := strings.TrimSpace(lines[i])
		if strings.HasPrefix(trimmed, "#") {
			trimmed = ""
		}
		lines[i] = trimmed
	}
	return lines
}
|
||||
|
||||
// parse the given command sequence and appends it to the test.
func (t *Test) parse(input string) error {
	lines := getLines(input)
	var err error
	// Scan for steps line by line.
	for i := 0; i < len(lines); i++ {
		l := lines[i]
		if len(l) == 0 {
			continue
		}
		var cmd testCommand

		// Dispatch on the first whitespace-separated token of the line.
		switch c := strings.ToLower(patSpace.Split(l, 2)[0]); {
		case c == "clear":
			cmd = &clearCmd{}
		case c == "load":
			// parseLoad/parseEval advance i past the lines they consume.
			i, cmd, err = parseLoad(lines, i)
		case strings.HasPrefix(c, "eval"):
			i, cmd, err = t.parseEval(lines, i)
		default:
			return raise(i, "invalid command %q", l)
		}
		if err != nil {
			return err
		}
		t.cmds = append(t.cmds, cmd)
	}
	return nil
}
|
||||
|
||||
// testCommand is an interface that ensures that only the package internal
|
||||
// types can be a valid command for a test.
|
||||
type testCommand interface {
|
||||
testCmd()
|
||||
}
|
||||
|
||||
func (*clearCmd) testCmd() {}
|
||||
func (*loadCmd) testCmd() {}
|
||||
func (*evalCmd) testCmd() {}
|
||||
|
||||
// loadCmd is a command that loads sequences of sample values for specific
|
||||
// metrics into the storage.
|
||||
type loadCmd struct {
|
||||
gap time.Duration
|
||||
metrics map[uint64]labels.Labels
|
||||
defs map[uint64][]Point
|
||||
}
|
||||
|
||||
func newLoadCmd(gap time.Duration) *loadCmd {
|
||||
return &loadCmd{
|
||||
gap: gap,
|
||||
metrics: map[uint64]labels.Labels{},
|
||||
defs: map[uint64][]Point{},
|
||||
}
|
||||
}
|
||||
|
||||
func (cmd loadCmd) String() string {
|
||||
return "load"
|
||||
}
|
||||
|
||||
// set a sequence of sample values for the given metric.
|
||||
func (cmd *loadCmd) set(m labels.Labels, vals ...parser.SequenceValue) {
|
||||
h := m.Hash()
|
||||
|
||||
samples := make([]Point, 0, len(vals))
|
||||
ts := testStartTime
|
||||
for _, v := range vals {
|
||||
if !v.Omitted {
|
||||
samples = append(samples, Point{
|
||||
T: ts.UnixNano() / int64(time.Millisecond/time.Nanosecond),
|
||||
V: v.Value,
|
||||
})
|
||||
}
|
||||
ts = ts.Add(cmd.gap)
|
||||
}
|
||||
cmd.defs[h] = samples
|
||||
cmd.metrics[h] = m
|
||||
}
|
||||
|
||||
// append the defined time series to the storage.
|
||||
func (cmd *loadCmd) append(a storage.Appender) error {
|
||||
for h, smpls := range cmd.defs {
|
||||
m := cmd.metrics[h]
|
||||
|
||||
for _, s := range smpls {
|
||||
if _, err := a.Add(m, s.T, s.V); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// evalCmd is a command that evaluates an expression for the given time (range)
|
||||
// and expects a specific result.
|
||||
type evalCmd struct {
|
||||
expr string
|
||||
start time.Time
|
||||
line int
|
||||
|
||||
fail, ordered bool
|
||||
|
||||
metrics map[uint64]labels.Labels
|
||||
expected map[uint64]entry
|
||||
}
|
||||
|
||||
type entry struct {
|
||||
pos int
|
||||
vals []parser.SequenceValue
|
||||
}
|
||||
|
||||
func (e entry) String() string {
|
||||
return fmt.Sprintf("%d: %s", e.pos, e.vals)
|
||||
}
|
||||
|
||||
func newEvalCmd(expr string, start time.Time, line int) *evalCmd {
|
||||
return &evalCmd{
|
||||
expr: expr,
|
||||
start: start,
|
||||
line: line,
|
||||
|
||||
metrics: map[uint64]labels.Labels{},
|
||||
expected: map[uint64]entry{},
|
||||
}
|
||||
}
|
||||
|
||||
func (ev *evalCmd) String() string {
|
||||
return "eval"
|
||||
}
|
||||
|
||||
// expect adds a new metric with a sequence of values to the set of expected
|
||||
// results for the query.
|
||||
func (ev *evalCmd) expect(pos int, m labels.Labels, vals ...parser.SequenceValue) {
|
||||
if m == nil {
|
||||
ev.expected[0] = entry{pos: pos, vals: vals}
|
||||
return
|
||||
}
|
||||
h := m.Hash()
|
||||
ev.metrics[h] = m
|
||||
ev.expected[h] = entry{pos: pos, vals: vals}
|
||||
}
|
||||
|
||||
// compareResult compares the result value with the defined expectation.
|
||||
func (ev *evalCmd) compareResult(result parser.Value) error {
|
||||
switch val := result.(type) {
|
||||
case Matrix:
|
||||
return errors.New("received range result on instant evaluation")
|
||||
|
||||
case Vector:
|
||||
seen := map[uint64]bool{}
|
||||
for pos, v := range val {
|
||||
fp := v.Metric.Hash()
|
||||
if _, ok := ev.metrics[fp]; !ok {
|
||||
return errors.Errorf("unexpected metric %s in result", v.Metric)
|
||||
}
|
||||
exp := ev.expected[fp]
|
||||
if ev.ordered && exp.pos != pos+1 {
|
||||
return errors.Errorf("expected metric %s with %v at position %d but was at %d", v.Metric, exp.vals, exp.pos, pos+1)
|
||||
}
|
||||
if !almostEqual(exp.vals[0].Value, v.V) {
|
||||
return errors.Errorf("expected %v for %s but got %v", exp.vals[0].Value, v.Metric, v.V)
|
||||
}
|
||||
|
||||
seen[fp] = true
|
||||
}
|
||||
for fp, expVals := range ev.expected {
|
||||
if !seen[fp] {
|
||||
fmt.Println("vector result", len(val), ev.expr)
|
||||
for _, ss := range val {
|
||||
fmt.Println(" ", ss.Metric, ss.Point)
|
||||
}
|
||||
return errors.Errorf("expected metric %s with %v not found", ev.metrics[fp], expVals)
|
||||
}
|
||||
}
|
||||
|
||||
case Scalar:
|
||||
if !almostEqual(ev.expected[0].vals[0].Value, val.V) {
|
||||
return errors.Errorf("expected Scalar %v but got %v", val.V, ev.expected[0].vals[0].Value)
|
||||
}
|
||||
|
||||
default:
|
||||
panic(errors.Errorf("promql.Test.compareResult: unexpected result type %T", result))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// clearCmd is a command that wipes the test's storage state.
|
||||
type clearCmd struct{}
|
||||
|
||||
func (cmd clearCmd) String() string {
|
||||
return "clear"
|
||||
}
|
||||
|
||||
// Run executes the command sequence of the test. Until the maximum error number
|
||||
// is reached, evaluation errors do not terminate execution.
|
||||
func (t *Test) Run() error {
|
||||
for _, cmd := range t.cmds {
|
||||
// TODO(fabxc): aggregate command errors, yield diffs for result
|
||||
// comparison errors.
|
||||
if err := t.exec(cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// exec processes a single step of the test.
|
||||
func (t *Test) exec(tc testCommand) error {
|
||||
switch cmd := tc.(type) {
|
||||
case *clearCmd:
|
||||
t.clear()
|
||||
|
||||
case *loadCmd:
|
||||
app := t.storage.Appender()
|
||||
if err := cmd.append(app); err != nil {
|
||||
app.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if err := app.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *evalCmd:
|
||||
q, err := t.QueryEngine().NewInstantQuery(t.storage, cmd.expr, cmd.start)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer q.Close()
|
||||
res := q.Exec(t.context)
|
||||
if res.Err != nil {
|
||||
if cmd.fail {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", cmd.expr, cmd.line)
|
||||
}
|
||||
if res.Err == nil && cmd.fail {
|
||||
return errors.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line)
|
||||
}
|
||||
|
||||
err = cmd.compareResult(res.Value)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in %s %s", cmd, cmd.expr)
|
||||
}
|
||||
|
||||
// Check query returns same result in range mode,
|
||||
// by checking against the middle step.
|
||||
q, err = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rangeRes := q.Exec(t.context)
|
||||
if rangeRes.Err != nil {
|
||||
return errors.Wrapf(rangeRes.Err, "error evaluating query %q (line %d) in range mode", cmd.expr, cmd.line)
|
||||
}
|
||||
defer q.Close()
|
||||
if cmd.ordered {
|
||||
// Ordering isn't defined for range queries.
|
||||
return nil
|
||||
}
|
||||
mat := rangeRes.Value.(Matrix)
|
||||
vec := make(Vector, 0, len(mat))
|
||||
for _, series := range mat {
|
||||
for _, point := range series.Points {
|
||||
if point.T == timeMilliseconds(cmd.start) {
|
||||
vec = append(vec, Sample{Metric: series.Metric, Point: point})
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := res.Value.(Scalar); ok {
|
||||
err = cmd.compareResult(Scalar{V: vec[0].Point.V})
|
||||
} else {
|
||||
err = cmd.compareResult(vec)
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error in %s %s (line %d) rande mode", cmd, cmd.expr, cmd.line)
|
||||
}
|
||||
|
||||
default:
|
||||
panic("promql.Test.exec: unknown test command type")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// clear the current test storage of all inserted samples.
|
||||
func (t *Test) clear() {
|
||||
if t.storage != nil {
|
||||
if err := t.storage.Close(); err != nil {
|
||||
t.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
}
|
||||
if t.cancelCtx != nil {
|
||||
t.cancelCtx()
|
||||
}
|
||||
t.storage = teststorage.New(t)
|
||||
|
||||
opts := EngineOpts{
|
||||
Logger: nil,
|
||||
Reg: nil,
|
||||
MaxSamples: 10000,
|
||||
Timeout: 100 * time.Second,
|
||||
}
|
||||
|
||||
t.queryEngine = NewEngine(opts)
|
||||
t.context, t.cancelCtx = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// Close closes resources associated with the Test.
|
||||
func (t *Test) Close() {
|
||||
t.cancelCtx()
|
||||
|
||||
if err := t.storage.Close(); err != nil {
|
||||
t.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// almostEqual returns true if the two values only differ by a small relative
// error in their sample value, or if both are NaN. (The comment previously
// referred to this function by the wrong name, "samplesAlmostEqual".)
func almostEqual(a, b float64) bool {
	// NaN has no equality but for testing we still want to know whether both values
	// are NaN.
	if math.IsNaN(a) && math.IsNaN(b) {
		return true
	}

	// Cf. http://floating-point-gui.de/errors/comparison/
	if a == b {
		return true
	}

	diff := math.Abs(a - b)

	// Near zero, or when the difference is subnormal, compare absolutely
	// instead of relatively.
	if a == 0 || b == 0 || diff < minNormal {
		return diff < epsilon*minNormal
	}
	return diff/(math.Abs(a)+math.Abs(b)) < epsilon
}
|
||||
|
||||
func parseNumber(s string) (float64, error) {
|
||||
n, err := strconv.ParseInt(s, 0, 64)
|
||||
f := float64(n)
|
||||
if err != nil {
|
||||
f, err = strconv.ParseFloat(s, 64)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "error parsing number")
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// LazyLoader lazily loads samples into storage.
|
||||
// This is specifically implemented for unit testing of rules.
|
||||
type LazyLoader struct {
|
||||
testutil.T
|
||||
|
||||
loadCmd *loadCmd
|
||||
|
||||
storage storage.Storage
|
||||
|
||||
queryEngine *Engine
|
||||
context context.Context
|
||||
cancelCtx context.CancelFunc
|
||||
}
|
||||
|
||||
// NewLazyLoader returns an initialized empty LazyLoader.
|
||||
func NewLazyLoader(t testutil.T, input string) (*LazyLoader, error) {
|
||||
ll := &LazyLoader{
|
||||
T: t,
|
||||
}
|
||||
err := ll.parse(input)
|
||||
ll.clear()
|
||||
return ll, err
|
||||
}
|
||||
|
||||
// parse the given load command.
|
||||
func (ll *LazyLoader) parse(input string) error {
|
||||
lines := getLines(input)
|
||||
// Accepts only 'load' command.
|
||||
for i := 0; i < len(lines); i++ {
|
||||
l := lines[i]
|
||||
if len(l) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(patSpace.Split(l, 2)[0]) == "load" {
|
||||
_, cmd, err := parseLoad(lines, i)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ll.loadCmd = cmd
|
||||
return nil
|
||||
}
|
||||
|
||||
return raise(i, "invalid command %q", l)
|
||||
}
|
||||
return errors.New("no \"load\" command found")
|
||||
}
|
||||
|
||||
// clear the current test storage of all inserted samples.
|
||||
func (ll *LazyLoader) clear() {
|
||||
if ll.storage != nil {
|
||||
if err := ll.storage.Close(); err != nil {
|
||||
ll.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
}
|
||||
if ll.cancelCtx != nil {
|
||||
ll.cancelCtx()
|
||||
}
|
||||
ll.storage = teststorage.New(ll)
|
||||
|
||||
opts := EngineOpts{
|
||||
Logger: nil,
|
||||
Reg: nil,
|
||||
MaxSamples: 10000,
|
||||
Timeout: 100 * time.Second,
|
||||
}
|
||||
|
||||
ll.queryEngine = NewEngine(opts)
|
||||
ll.context, ll.cancelCtx = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// appendTill appends the defined time series to the storage till the given timestamp (in milliseconds).
|
||||
func (ll *LazyLoader) appendTill(ts int64) error {
|
||||
app := ll.storage.Appender()
|
||||
for h, smpls := range ll.loadCmd.defs {
|
||||
m := ll.loadCmd.metrics[h]
|
||||
for i, s := range smpls {
|
||||
if s.T > ts {
|
||||
// Removing the already added samples.
|
||||
ll.loadCmd.defs[h] = smpls[i:]
|
||||
break
|
||||
}
|
||||
if _, err := app.Add(m, s.T, s.V); err != nil {
|
||||
return err
|
||||
}
|
||||
if i == len(smpls)-1 {
|
||||
ll.loadCmd.defs[h] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return app.Commit()
|
||||
}
|
||||
|
||||
// WithSamplesTill loads the samples till given timestamp and executes the given function.
|
||||
func (ll *LazyLoader) WithSamplesTill(ts time.Time, fn func(error)) {
|
||||
tsMilli := ts.Sub(time.Unix(0, 0).UTC()) / time.Millisecond
|
||||
fn(ll.appendTill(int64(tsMilli)))
|
||||
}
|
||||
|
||||
// QueryEngine returns the LazyLoader's query engine.
|
||||
func (ll *LazyLoader) QueryEngine() *Engine {
|
||||
return ll.queryEngine
|
||||
}
|
||||
|
||||
// Queryable allows querying the LazyLoader's data.
|
||||
// Note: only the samples till the max timestamp used
|
||||
// in `WithSamplesTill` can be queried.
|
||||
func (ll *LazyLoader) Queryable() storage.Queryable {
|
||||
return ll.storage
|
||||
}
|
||||
|
||||
// Context returns the LazyLoader's context.
|
||||
func (ll *LazyLoader) Context() context.Context {
|
||||
return ll.context
|
||||
}
|
||||
|
||||
// Storage returns the LazyLoader's storage.
|
||||
func (ll *LazyLoader) Storage() storage.Storage {
|
||||
return ll.storage
|
||||
}
|
||||
|
||||
// Close closes resources associated with the LazyLoader.
|
||||
func (ll *LazyLoader) Close() {
|
||||
ll.cancelCtx()
|
||||
|
||||
if err := ll.storage.Close(); err != nil {
|
||||
ll.T.Fatalf("closing test storage: %s", err)
|
||||
}
|
||||
}
|
||||
306
vendor/github.com/prometheus/prometheus/promql/value.go
generated
vendored
Normal file
306
vendor/github.com/prometheus/prometheus/promql/value.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package promql
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/storage"
|
||||
)
|
||||
|
||||
func (Matrix) Type() parser.ValueType { return parser.ValueTypeMatrix }
|
||||
func (Vector) Type() parser.ValueType { return parser.ValueTypeVector }
|
||||
func (Scalar) Type() parser.ValueType { return parser.ValueTypeScalar }
|
||||
func (String) Type() parser.ValueType { return parser.ValueTypeString }
|
||||
|
||||
// String represents a string value.
|
||||
type String struct {
|
||||
T int64
|
||||
V string
|
||||
}
|
||||
|
||||
func (s String) String() string {
|
||||
return s.V
|
||||
}
|
||||
|
||||
func (s String) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal([...]interface{}{float64(s.T) / 1000, s.V})
|
||||
}
|
||||
|
||||
// Scalar is a data point that's explicitly not associated with a metric.
|
||||
type Scalar struct {
|
||||
T int64
|
||||
V float64
|
||||
}
|
||||
|
||||
func (s Scalar) String() string {
|
||||
v := strconv.FormatFloat(s.V, 'f', -1, 64)
|
||||
return fmt.Sprintf("scalar: %v @[%v]", v, s.T)
|
||||
}
|
||||
|
||||
func (s Scalar) MarshalJSON() ([]byte, error) {
|
||||
v := strconv.FormatFloat(s.V, 'f', -1, 64)
|
||||
return json.Marshal([...]interface{}{float64(s.T) / 1000, v})
|
||||
}
|
||||
|
||||
// Series is a stream of data points belonging to a metric.
|
||||
type Series struct {
|
||||
Metric labels.Labels `json:"metric"`
|
||||
Points []Point `json:"values"`
|
||||
}
|
||||
|
||||
func (s Series) String() string {
|
||||
vals := make([]string, len(s.Points))
|
||||
for i, v := range s.Points {
|
||||
vals[i] = v.String()
|
||||
}
|
||||
return fmt.Sprintf("%s =>\n%s", s.Metric, strings.Join(vals, "\n"))
|
||||
}
|
||||
|
||||
// Point represents a single data point for a given timestamp.
|
||||
type Point struct {
|
||||
T int64
|
||||
V float64
|
||||
}
|
||||
|
||||
func (p Point) String() string {
|
||||
v := strconv.FormatFloat(p.V, 'f', -1, 64)
|
||||
return fmt.Sprintf("%v @[%v]", v, p.T)
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (p Point) MarshalJSON() ([]byte, error) {
|
||||
v := strconv.FormatFloat(p.V, 'f', -1, 64)
|
||||
return json.Marshal([...]interface{}{float64(p.T) / 1000, v})
|
||||
}
|
||||
|
||||
// Sample is a single sample belonging to a metric.
|
||||
type Sample struct {
|
||||
Point
|
||||
|
||||
Metric labels.Labels
|
||||
}
|
||||
|
||||
func (s Sample) String() string {
|
||||
return fmt.Sprintf("%s => %s", s.Metric, s.Point)
|
||||
}
|
||||
|
||||
func (s Sample) MarshalJSON() ([]byte, error) {
|
||||
v := struct {
|
||||
M labels.Labels `json:"metric"`
|
||||
V Point `json:"value"`
|
||||
}{
|
||||
M: s.Metric,
|
||||
V: s.Point,
|
||||
}
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
// Vector is basically only an alias for model.Samples, but the
|
||||
// contract is that in a Vector, all Samples have the same timestamp.
|
||||
type Vector []Sample
|
||||
|
||||
func (vec Vector) String() string {
|
||||
entries := make([]string, len(vec))
|
||||
for i, s := range vec {
|
||||
entries[i] = s.String()
|
||||
}
|
||||
return strings.Join(entries, "\n")
|
||||
}
|
||||
|
||||
// ContainsSameLabelset checks if a vector has samples with the same labelset.
// Such a behavior is semantically undefined.
// https://github.com/prometheus/prometheus/issues/4562
func (vec Vector) ContainsSameLabelset() bool {
	// Track seen label hashes; encountering one twice means two samples
	// share a labelset.
	l := make(map[uint64]struct{}, len(vec))
	for _, s := range vec {
		hash := s.Metric.Hash()
		if _, ok := l[hash]; ok {
			return true
		}
		l[hash] = struct{}{}
	}
	return false
}
|
||||
|
||||
// Matrix is a slice of Series that implements sort.Interface and
|
||||
// has a String method.
|
||||
type Matrix []Series
|
||||
|
||||
func (m Matrix) String() string {
|
||||
// TODO(fabxc): sort, or can we rely on order from the querier?
|
||||
strs := make([]string, len(m))
|
||||
|
||||
for i, ss := range m {
|
||||
strs[i] = ss.String()
|
||||
}
|
||||
|
||||
return strings.Join(strs, "\n")
|
||||
}
|
||||
|
||||
// TotalSamples returns the total number of samples in the series within a matrix.
|
||||
func (m Matrix) TotalSamples() int {
|
||||
numSamples := 0
|
||||
for _, series := range m {
|
||||
numSamples += len(series.Points)
|
||||
}
|
||||
return numSamples
|
||||
}
|
||||
|
||||
func (m Matrix) Len() int { return len(m) }
|
||||
func (m Matrix) Less(i, j int) bool { return labels.Compare(m[i].Metric, m[j].Metric) < 0 }
|
||||
func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
|
||||
|
||||
// ContainsSameLabelset checks if a matrix has samples with the same labelset
|
||||
// Such a behavior is semantically undefined
|
||||
// https://github.com/prometheus/prometheus/issues/4562
|
||||
func (m Matrix) ContainsSameLabelset() bool {
|
||||
l := make(map[uint64]struct{}, len(m))
|
||||
for _, ss := range m {
|
||||
hash := ss.Metric.Hash()
|
||||
if _, ok := l[hash]; ok {
|
||||
return true
|
||||
}
|
||||
l[hash] = struct{}{}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Result holds the resulting value of an execution or an error
|
||||
// if any occurred.
|
||||
type Result struct {
|
||||
Err error
|
||||
Value parser.Value
|
||||
Warnings storage.Warnings
|
||||
}
|
||||
|
||||
// Vector returns a Vector if the result value is one. An error is returned if
|
||||
// the result was an error or the result value is not a Vector.
|
||||
func (r *Result) Vector() (Vector, error) {
|
||||
if r.Err != nil {
|
||||
return nil, r.Err
|
||||
}
|
||||
v, ok := r.Value.(Vector)
|
||||
if !ok {
|
||||
return nil, errors.New("query result is not a Vector")
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Matrix returns a Matrix. An error is returned if
|
||||
// the result was an error or the result value is not a Matrix.
|
||||
func (r *Result) Matrix() (Matrix, error) {
|
||||
if r.Err != nil {
|
||||
return nil, r.Err
|
||||
}
|
||||
v, ok := r.Value.(Matrix)
|
||||
if !ok {
|
||||
return nil, errors.New("query result is not a range Vector")
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// Scalar returns a Scalar value. An error is returned if
|
||||
// the result was an error or the result value is not a Scalar.
|
||||
func (r *Result) Scalar() (Scalar, error) {
|
||||
if r.Err != nil {
|
||||
return Scalar{}, r.Err
|
||||
}
|
||||
v, ok := r.Value.(Scalar)
|
||||
if !ok {
|
||||
return Scalar{}, errors.New("query result is not a Scalar")
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (r *Result) String() string {
|
||||
if r.Err != nil {
|
||||
return r.Err.Error()
|
||||
}
|
||||
if r.Value == nil {
|
||||
return ""
|
||||
}
|
||||
return r.Value.String()
|
||||
}
|
||||
|
||||
// StorageSeries simulates promql.Series as storage.Series.
|
||||
type StorageSeries struct {
|
||||
series Series
|
||||
}
|
||||
|
||||
// NewStorageSeries returns a StorageSeries from a Series.
|
||||
func NewStorageSeries(series Series) *StorageSeries {
|
||||
return &StorageSeries{
|
||||
series: series,
|
||||
}
|
||||
}
|
||||
|
||||
func (ss *StorageSeries) Labels() labels.Labels {
|
||||
return ss.series.Metric
|
||||
}
|
||||
|
||||
// Iterator returns a new iterator of the data of the series.
|
||||
func (ss *StorageSeries) Iterator() chunkenc.Iterator {
|
||||
return newStorageSeriesIterator(ss.series)
|
||||
}
|
||||
|
||||
type storageSeriesIterator struct {
|
||||
points []Point
|
||||
curr int
|
||||
}
|
||||
|
||||
func newStorageSeriesIterator(series Series) *storageSeriesIterator {
|
||||
return &storageSeriesIterator{
|
||||
points: series.Points,
|
||||
curr: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func (ssi *storageSeriesIterator) Seek(t int64) bool {
|
||||
i := ssi.curr
|
||||
if i < 0 {
|
||||
i = 0
|
||||
}
|
||||
for ; i < len(ssi.points); i++ {
|
||||
if ssi.points[i].T >= t {
|
||||
ssi.curr = i
|
||||
return true
|
||||
}
|
||||
}
|
||||
ssi.curr = len(ssi.points) - 1
|
||||
return false
|
||||
}
|
||||
|
||||
func (ssi *storageSeriesIterator) At() (t int64, v float64) {
|
||||
p := ssi.points[ssi.curr]
|
||||
return p.T, p.V
|
||||
}
|
||||
|
||||
func (ssi *storageSeriesIterator) Next() bool {
|
||||
ssi.curr++
|
||||
return ssi.curr < len(ssi.points)
|
||||
}
|
||||
|
||||
// Err implements chunkenc.Iterator. In-memory iteration cannot fail,
// so the error is always nil.
func (ssi *storageSeriesIterator) Err() error {
	return nil
}
|
||||
550
vendor/github.com/prometheus/prometheus/rules/alerting.go
generated
vendored
Normal file
550
vendor/github.com/prometheus/prometheus/rules/alerting.go
generated
vendored
Normal file
@@ -0,0 +1,550 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
html_template "html/template"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/common/model"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/pkg/rulefmt"
|
||||
"github.com/prometheus/prometheus/pkg/timestamp"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/template"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
||||
const (
	// alertMetricName is the metric name for synthetic alert timeseries.
	alertMetricName = "ALERTS"
	// alertForStateMetricName is the metric name for 'for' state of alert.
	alertForStateMetricName = "ALERTS_FOR_STATE"

	// alertNameLabel is the label name indicating the name of an alert.
	alertNameLabel = "alertname"
	// alertStateLabel is the label name indicating the state of an alert.
	alertStateLabel = "alertstate"
)
|
||||
|
||||
// AlertState denotes the state of an active alert.
type AlertState int

const (
	// StateInactive is the state of an alert that is neither firing nor pending.
	StateInactive AlertState = iota
	// StatePending is the state of an alert that has been active for less than
	// the configured threshold duration.
	StatePending
	// StateFiring is the state of an alert that has been active for longer than
	// the configured threshold duration.
	StateFiring
)
|
||||
|
||||
func (s AlertState) String() string {
|
||||
switch s {
|
||||
case StateInactive:
|
||||
return "inactive"
|
||||
case StatePending:
|
||||
return "pending"
|
||||
case StateFiring:
|
||||
return "firing"
|
||||
}
|
||||
panic(errors.Errorf("unknown alert state: %s", s.String()))
|
||||
}
|
||||
|
||||
// Alert is the user-level representation of a single instance of an alerting rule.
type Alert struct {
	State AlertState

	Labels      labels.Labels
	Annotations labels.Labels

	// The value at the last evaluation of the alerting expression.
	Value float64
	// The interval during which the condition of this alert held true.
	// ResolvedAt will be 0 to indicate a still active alert.
	ActiveAt time.Time
	// FiredAt is when the alert transitioned from pending to firing.
	FiredAt    time.Time
	ResolvedAt time.Time
	// LastSentAt is when the alert was last sent to the Alertmanager.
	LastSentAt time.Time
	// ValidUntil is how long the last sent notification should be
	// considered valid by the Alertmanager.
	ValidUntil time.Time
}
|
||||
|
||||
func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
|
||||
if a.State == StatePending {
|
||||
return false
|
||||
}
|
||||
|
||||
// if an alert has been resolved since the last send, resend it
|
||||
if a.ResolvedAt.After(a.LastSentAt) {
|
||||
return true
|
||||
}
|
||||
|
||||
return a.LastSentAt.Add(resendDelay).Before(ts)
|
||||
}
|
||||
|
||||
// An AlertingRule generates alerts from its vector expression.
type AlertingRule struct {
	// The name of the alert.
	name string
	// The vector expression from which to generate alerts.
	vector parser.Expr
	// The duration for which a labelset needs to persist in the expression
	// output vector before an alert transitions from Pending to Firing state.
	holdDuration time.Duration
	// Extra labels to attach to the resulting alert sample vectors.
	labels labels.Labels
	// Non-identifying key/value pairs.
	annotations labels.Labels
	// External labels from the global config.
	externalLabels map[string]string
	// true if old state has been restored. We start persisting samples for ALERT_FOR_STATE
	// only after the restoration.
	restored bool
	// Protects the below.
	mtx sync.Mutex
	// Time in seconds taken to evaluate rule.
	evaluationDuration time.Duration
	// Timestamp of last evaluation of rule.
	evaluationTimestamp time.Time
	// The health of the alerting rule.
	health RuleHealth
	// The last error seen by the alerting rule.
	lastError error
	// A map of alerts which are currently active (Pending or Firing), keyed by
	// the fingerprint of the labelset they correspond to.
	active map[uint64]*Alert

	// logger receives warnings from template expansion during Eval.
	logger log.Logger
}
|
||||
|
||||
// NewAlertingRule constructs a new AlertingRule.
|
||||
func NewAlertingRule(
|
||||
name string, vec parser.Expr, hold time.Duration,
|
||||
labels, annotations, externalLabels labels.Labels,
|
||||
restored bool, logger log.Logger,
|
||||
) *AlertingRule {
|
||||
el := make(map[string]string, len(externalLabels))
|
||||
for _, lbl := range externalLabels {
|
||||
el[lbl.Name] = lbl.Value
|
||||
}
|
||||
|
||||
return &AlertingRule{
|
||||
name: name,
|
||||
vector: vec,
|
||||
holdDuration: hold,
|
||||
labels: labels,
|
||||
annotations: annotations,
|
||||
externalLabels: el,
|
||||
health: HealthUnknown,
|
||||
active: map[uint64]*Alert{},
|
||||
logger: logger,
|
||||
restored: restored,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the name of the alerting rule.
|
||||
func (r *AlertingRule) Name() string {
|
||||
return r.name
|
||||
}
|
||||
|
||||
// SetLastError sets the current error seen by the alerting rule.
|
||||
func (r *AlertingRule) SetLastError(err error) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.lastError = err
|
||||
}
|
||||
|
||||
// LastError returns the last error seen by the alerting rule.
|
||||
func (r *AlertingRule) LastError() error {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.lastError
|
||||
}
|
||||
|
||||
// SetHealth sets the current health of the alerting rule.
|
||||
func (r *AlertingRule) SetHealth(health RuleHealth) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.health = health
|
||||
}
|
||||
|
||||
// Health returns the current health of the alerting rule.
|
||||
func (r *AlertingRule) Health() RuleHealth {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.health
|
||||
}
|
||||
|
||||
// Query returns the query expression of the alerting rule.
|
||||
func (r *AlertingRule) Query() parser.Expr {
|
||||
return r.vector
|
||||
}
|
||||
|
||||
// HoldDuration returns the hold duration of the alerting rule.
|
||||
func (r *AlertingRule) HoldDuration() time.Duration {
|
||||
return r.holdDuration
|
||||
}
|
||||
|
||||
// Labels returns the labels of the alerting rule.
|
||||
func (r *AlertingRule) Labels() labels.Labels {
|
||||
return r.labels
|
||||
}
|
||||
|
||||
// Annotations returns the annotations of the alerting rule.
|
||||
func (r *AlertingRule) Annotations() labels.Labels {
|
||||
return r.annotations
|
||||
}
|
||||
|
||||
func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
|
||||
lb := labels.NewBuilder(r.labels)
|
||||
|
||||
for _, l := range alert.Labels {
|
||||
lb.Set(l.Name, l.Value)
|
||||
}
|
||||
|
||||
lb.Set(labels.MetricName, alertMetricName)
|
||||
lb.Set(labels.AlertName, r.name)
|
||||
lb.Set(alertStateLabel, alert.State.String())
|
||||
|
||||
s := promql.Sample{
|
||||
Metric: lb.Labels(),
|
||||
Point: promql.Point{T: timestamp.FromTime(ts), V: 1},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// forStateSample returns the sample for ALERTS_FOR_STATE.
|
||||
func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
|
||||
lb := labels.NewBuilder(r.labels)
|
||||
|
||||
for _, l := range alert.Labels {
|
||||
lb.Set(l.Name, l.Value)
|
||||
}
|
||||
|
||||
lb.Set(labels.MetricName, alertForStateMetricName)
|
||||
lb.Set(labels.AlertName, r.name)
|
||||
|
||||
s := promql.Sample{
|
||||
Metric: lb.Labels(),
|
||||
Point: promql.Point{T: timestamp.FromTime(ts), V: v},
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
|
||||
func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.evaluationDuration = dur
|
||||
}
|
||||
|
||||
// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule.
|
||||
func (r *AlertingRule) GetEvaluationDuration() time.Duration {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.evaluationDuration
|
||||
}
|
||||
|
||||
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
|
||||
func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.evaluationTimestamp = ts
|
||||
}
|
||||
|
||||
// GetEvaluationTimestamp returns the time the evaluation took place.
|
||||
func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.evaluationTimestamp
|
||||
}
|
||||
|
||||
// SetRestored updates the restoration state of the alerting rule.
// NOTE(review): unlike the other setters this does not take mtx;
// presumably it is only called before evaluation starts — confirm at callers.
func (r *AlertingRule) SetRestored(restored bool) {
	r.restored = restored
}
|
||||
|
||||
// resolvedRetention is the duration for which a resolved alert instance
// is kept in memory state and consequently repeatedly sent to the AlertManager.
const resolvedRetention = 15 * time.Minute
|
||||
|
||||
// Eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
//
// It returns the ALERTS / ALERTS_FOR_STATE samples to persist (only once
// state restoration has happened), or an error if the query or template
// expansion of labels produced conflicting label sets.
func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, externalURL *url.URL) (promql.Vector, error) {
	res, err := query(ctx, r.vector.String(), ts)
	if err != nil {
		r.SetHealth(HealthBad)
		r.SetLastError(err)
		return nil, err
	}

	r.mtx.Lock()
	defer r.mtx.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFPs := map[uint64]struct{}{}

	var vec promql.Vector
	var alerts = make(map[uint64]*Alert, len(res))
	for _, smpl := range res {
		// Provide the alert information to the template.
		l := make(map[string]string, len(smpl.Metric))
		for _, lbl := range smpl.Metric {
			l[lbl.Name] = lbl.Value
		}

		tmplData := template.AlertTemplateData(l, r.externalLabels, smpl.V)
		// Inject some convenience variables that are easier to remember for users
		// who are not used to Go's templating system.
		defs := []string{
			"{{$labels := .Labels}}",
			"{{$externalLabels := .ExternalLabels}}",
			"{{$value := .Value}}",
		}

		// expand renders a label/annotation template; on failure it
		// substitutes an error placeholder and logs a warning rather
		// than failing the whole evaluation.
		expand := func(text string) string {
			tmpl := template.NewTemplateExpander(
				ctx,
				strings.Join(append(defs, text), ""),
				"__alert_"+r.Name(),
				tmplData,
				model.Time(timestamp.FromTime(ts)),
				template.QueryFunc(query),
				externalURL,
			)
			result, err := tmpl.Expand()
			if err != nil {
				result = fmt.Sprintf("<error expanding template: %s>", err)
				level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData)
			}
			return result
		}

		lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName)

		for _, l := range r.labels {
			lb.Set(l.Name, expand(l.Value))
		}
		lb.Set(labels.AlertName, r.Name())

		annotations := make(labels.Labels, 0, len(r.annotations))
		for _, a := range r.annotations {
			annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
		}

		lbs := lb.Labels()
		h := lbs.Hash()
		resultFPs[h] = struct{}{}

		// Two result samples collapsing to the same label set after
		// applying rule labels is an error: alerts would be ambiguous.
		if _, ok := alerts[h]; ok {
			err = fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
			// We have already acquired the lock above hence using SetHealth and
			// SetLastError will deadlock.
			r.health = HealthBad
			r.lastError = err
			return nil, err
		}

		alerts[h] = &Alert{
			Labels:      lbs,
			Annotations: annotations,
			ActiveAt:    ts,
			State:       StatePending,
			Value:       smpl.V,
		}
	}

	for h, a := range alerts {
		// Check whether we already have alerting state for the identifying label set.
		// Update the last value and annotations if so, create a new alert entry otherwise.
		if alert, ok := r.active[h]; ok && alert.State != StateInactive {
			alert.Value = a.Value
			alert.Annotations = a.Annotations
			continue
		}

		r.active[h] = a
	}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, a := range r.active {
		if _, ok := resultFPs[fp]; !ok {
			// If the alert was previously firing, keep it around for a given
			// retention time so it is reported as resolved to the AlertManager.
			if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
				delete(r.active, fp)
			}
			if a.State != StateInactive {
				a.State = StateInactive
				a.ResolvedAt = ts
			}
			continue
		}

		// Promote pending alerts that have been active past the hold duration.
		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
			a.State = StateFiring
			a.FiredAt = ts
		}

		// Only emit ALERTS/ALERTS_FOR_STATE samples once restoration is done.
		if r.restored {
			vec = append(vec, r.sample(a, ts))
			vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix())))
		}
	}

	// We have already acquired the lock above hence using SetHealth and
	// SetLastError will deadlock.
	r.health = HealthGood
	r.lastError = err
	return vec, nil
}
|
||||
|
||||
// State returns the maximum state of alert instances for this rule.
|
||||
// StateFiring > StatePending > StateInactive
|
||||
func (r *AlertingRule) State() AlertState {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
maxState := StateInactive
|
||||
for _, a := range r.active {
|
||||
if a.State > maxState {
|
||||
maxState = a.State
|
||||
}
|
||||
}
|
||||
return maxState
|
||||
}
|
||||
|
||||
// ActiveAlerts returns a slice of active alerts.
|
||||
func (r *AlertingRule) ActiveAlerts() []*Alert {
|
||||
var res []*Alert
|
||||
for _, a := range r.currentAlerts() {
|
||||
if a.ResolvedAt.IsZero() {
|
||||
res = append(res, a)
|
||||
}
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// currentAlerts returns all instances of alerts for this rule. This may include
|
||||
// inactive alerts that were previously firing.
|
||||
func (r *AlertingRule) currentAlerts() []*Alert {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
alerts := make([]*Alert, 0, len(r.active))
|
||||
|
||||
for _, a := range r.active {
|
||||
anew := *a
|
||||
alerts = append(alerts, &anew)
|
||||
}
|
||||
return alerts
|
||||
}
|
||||
|
||||
// ForEachActiveAlert runs the given function on each alert.
|
||||
// This should be used when you want to use the actual alerts from the AlertingRule
|
||||
// and not on its copy.
|
||||
// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'.
|
||||
func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
for _, a := range r.active {
|
||||
f(a)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
|
||||
alerts := []*Alert{}
|
||||
r.ForEachActiveAlert(func(alert *Alert) {
|
||||
if alert.needsSending(ts, resendDelay) {
|
||||
alert.LastSentAt = ts
|
||||
// Allow for a couple Eval or Alertmanager send failures
|
||||
delta := resendDelay
|
||||
if interval > resendDelay {
|
||||
delta = interval
|
||||
}
|
||||
alert.ValidUntil = ts.Add(3 * delta)
|
||||
anew := *alert
|
||||
alerts = append(alerts, &anew)
|
||||
}
|
||||
})
|
||||
notifyFunc(ctx, r.vector.String(), alerts...)
|
||||
}
|
||||
|
||||
func (r *AlertingRule) String() string {
|
||||
ar := rulefmt.Rule{
|
||||
Alert: r.name,
|
||||
Expr: r.vector.String(),
|
||||
For: model.Duration(r.holdDuration),
|
||||
Labels: r.labels.Map(),
|
||||
Annotations: r.annotations.Map(),
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(ar)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
|
||||
}
|
||||
|
||||
return string(byt)
|
||||
}
|
||||
|
||||
// HTMLSnippet returns an HTML snippet representing this alerting rule. The
|
||||
// resulting snippet is expected to be presented in a <pre> element, so that
|
||||
// line breaks and other returned whitespace is respected.
|
||||
func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
|
||||
alertMetric := model.Metric{
|
||||
model.MetricNameLabel: alertMetricName,
|
||||
alertNameLabel: model.LabelValue(r.name),
|
||||
}
|
||||
|
||||
labelsMap := make(map[string]string, len(r.labels))
|
||||
for _, l := range r.labels {
|
||||
labelsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
|
||||
}
|
||||
|
||||
annotationsMap := make(map[string]string, len(r.annotations))
|
||||
for _, l := range r.annotations {
|
||||
annotationsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
|
||||
}
|
||||
|
||||
ar := rulefmt.Rule{
|
||||
Alert: fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
|
||||
Expr: fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
|
||||
For: model.Duration(r.holdDuration),
|
||||
Labels: labelsMap,
|
||||
Annotations: annotationsMap,
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(ar)
|
||||
if err != nil {
|
||||
return html_template.HTML(fmt.Sprintf("error marshaling alerting rule: %q", html_template.HTMLEscapeString(err.Error())))
|
||||
}
|
||||
return html_template.HTML(byt)
|
||||
}
|
||||
1079
vendor/github.com/prometheus/prometheus/rules/manager.go
generated
vendored
Normal file
1079
vendor/github.com/prometheus/prometheus/rules/manager.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
203
vendor/github.com/prometheus/prometheus/rules/recording.go
generated
vendored
Normal file
203
vendor/github.com/prometheus/prometheus/rules/recording.go
generated
vendored
Normal file
@@ -0,0 +1,203 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rules
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/pkg/rulefmt"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
||||
// A RecordingRule records its vector expression into new timeseries.
type RecordingRule struct {
	name   string
	vector parser.Expr
	// labels are added to every output sample of the rule.
	labels labels.Labels
	// Protects the below.
	mtx sync.Mutex
	// The health of the recording rule.
	health RuleHealth
	// Timestamp of last evaluation of the recording rule.
	evaluationTimestamp time.Time
	// The last error seen by the recording rule.
	lastError error
	// Duration of how long it took to evaluate the recording rule.
	evaluationDuration time.Duration
}
|
||||
|
||||
// NewRecordingRule returns a new recording rule.
|
||||
func NewRecordingRule(name string, vector parser.Expr, lset labels.Labels) *RecordingRule {
|
||||
return &RecordingRule{
|
||||
name: name,
|
||||
vector: vector,
|
||||
health: HealthUnknown,
|
||||
labels: lset,
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the rule name.
|
||||
func (rule *RecordingRule) Name() string {
|
||||
return rule.name
|
||||
}
|
||||
|
||||
// Query returns the rule query expression.
|
||||
func (rule *RecordingRule) Query() parser.Expr {
|
||||
return rule.vector
|
||||
}
|
||||
|
||||
// Labels returns the rule labels.
|
||||
func (rule *RecordingRule) Labels() labels.Labels {
|
||||
return rule.labels
|
||||
}
|
||||
|
||||
// Eval evaluates the rule and then overrides the metric names and labels accordingly.
//
// The returned vector carries the query results renamed to the rule name,
// with the rule's labels applied on top. An error is returned (and health
// set to bad) if the query fails or the relabeling produces duplicate
// label sets.
func (rule *RecordingRule) Eval(ctx context.Context, ts time.Time, query QueryFunc, _ *url.URL) (promql.Vector, error) {
	vector, err := query(ctx, rule.vector.String(), ts)
	if err != nil {
		rule.SetHealth(HealthBad)
		rule.SetLastError(err)
		return nil, err
	}
	// Override the metric name and labels.
	for i := range vector {
		sample := &vector[i]

		lb := labels.NewBuilder(sample.Metric)

		lb.Set(labels.MetricName, rule.name)

		// Rule labels win over labels coming from the query result.
		for _, l := range rule.labels {
			lb.Set(l.Name, l.Value)
		}

		sample.Metric = lb.Labels()
	}

	// Check that the rule does not produce identical metrics after applying
	// labels.
	if vector.ContainsSameLabelset() {
		err = fmt.Errorf("vector contains metrics with the same labelset after applying rule labels")
		rule.SetHealth(HealthBad)
		rule.SetLastError(err)
		return nil, err
	}

	rule.SetHealth(HealthGood)
	rule.SetLastError(err)
	return vector, nil
}
|
||||
|
||||
func (rule *RecordingRule) String() string {
|
||||
r := rulefmt.Rule{
|
||||
Record: rule.name,
|
||||
Expr: rule.vector.String(),
|
||||
Labels: rule.labels.Map(),
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(r)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error marshaling recording rule: %q", err.Error())
|
||||
}
|
||||
|
||||
return string(byt)
|
||||
}
|
||||
|
||||
// SetEvaluationDuration updates evaluationDuration to the time in seconds it took to evaluate the rule on its last evaluation.
|
||||
func (rule *RecordingRule) SetEvaluationDuration(dur time.Duration) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.evaluationDuration = dur
|
||||
}
|
||||
|
||||
// SetLastError sets the current error seen by the recording rule.
|
||||
func (rule *RecordingRule) SetLastError(err error) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.lastError = err
|
||||
}
|
||||
|
||||
// LastError returns the last error seen by the recording rule.
|
||||
func (rule *RecordingRule) LastError() error {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.lastError
|
||||
}
|
||||
|
||||
// SetHealth sets the current health of the recording rule.
|
||||
func (rule *RecordingRule) SetHealth(health RuleHealth) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.health = health
|
||||
}
|
||||
|
||||
// Health returns the current health of the recording rule.
|
||||
func (rule *RecordingRule) Health() RuleHealth {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.health
|
||||
}
|
||||
|
||||
// GetEvaluationDuration returns the time in seconds it took to evaluate the recording rule.
|
||||
func (rule *RecordingRule) GetEvaluationDuration() time.Duration {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.evaluationDuration
|
||||
}
|
||||
|
||||
// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
|
||||
func (rule *RecordingRule) SetEvaluationTimestamp(ts time.Time) {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
rule.evaluationTimestamp = ts
|
||||
}
|
||||
|
||||
// GetEvaluationTimestamp returns the time the evaluation took place.
|
||||
func (rule *RecordingRule) GetEvaluationTimestamp() time.Time {
|
||||
rule.mtx.Lock()
|
||||
defer rule.mtx.Unlock()
|
||||
return rule.evaluationTimestamp
|
||||
}
|
||||
|
||||
// HTMLSnippet returns an HTML snippet representing this rule.
|
||||
func (rule *RecordingRule) HTMLSnippet(pathPrefix string) template.HTML {
|
||||
ruleExpr := rule.vector.String()
|
||||
labels := make(map[string]string, len(rule.labels))
|
||||
for _, l := range rule.labels {
|
||||
labels[l.Name] = template.HTMLEscapeString(l.Value)
|
||||
}
|
||||
|
||||
r := rulefmt.Rule{
|
||||
Record: fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(rule.name), rule.name),
|
||||
Expr: fmt.Sprintf(`<a href="%s">%s</a>`, pathPrefix+strutil.TableLinkForExpression(ruleExpr), template.HTMLEscapeString(ruleExpr)),
|
||||
Labels: labels,
|
||||
}
|
||||
|
||||
byt, err := yaml.Marshal(r)
|
||||
if err != nil {
|
||||
return template.HTML(fmt.Sprintf("error marshaling recording rule: %q", template.HTMLEscapeString(err.Error())))
|
||||
}
|
||||
|
||||
return template.HTML(byt)
|
||||
}
|
||||
364
vendor/github.com/prometheus/prometheus/template/template.go
generated
vendored
Normal file
364
vendor/github.com/prometheus/prometheus/template/template.go
generated
vendored
Normal file
@@ -0,0 +1,364 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
html_template "html/template"
|
||||
text_template "text/template"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/prometheus/promql"
|
||||
"github.com/prometheus/prometheus/util/strutil"
|
||||
)
|
||||
|
||||
var (
	// templateTextExpansionFailures counts template text expansions that
	// returned an error.
	templateTextExpansionFailures = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_template_text_expansion_failures_total",
		Help: "The total number of template text expansion failures.",
	})
	// templateTextExpansionTotal counts all template text expansions,
	// successful or not.
	templateTextExpansionTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_template_text_expansions_total",
		Help: "The total number of template text expansions.",
	})
)
|
||||
|
||||
// init registers the template expansion metrics with the default
// Prometheus registry (panics on duplicate registration).
func init() {
	prometheus.MustRegister(templateTextExpansionFailures)
	prometheus.MustRegister(templateTextExpansionTotal)
}
|
||||
|
||||
// A version of vector that's easier to use from templates.
type sample struct {
	Labels map[string]string
	Value  float64
}

// queryResult is the template-facing form of a query's output vector.
type queryResult []*sample
|
||||
|
||||
// queryResultByLabelSorter implements sort.Interface, ordering a
// queryResult by the value of a single label name ("by").
type queryResultByLabelSorter struct {
	results queryResult
	by      string
}
|
||||
|
||||
// Len returns the number of results being sorted (sort.Interface).
func (q queryResultByLabelSorter) Len() int {
	return len(q.results)
}
|
||||
|
||||
func (q queryResultByLabelSorter) Less(i, j int) bool {
|
||||
return q.results[i].Labels[q.by] < q.results[j].Labels[q.by]
|
||||
}
|
||||
|
||||
func (q queryResultByLabelSorter) Swap(i, j int) {
|
||||
q.results[i], q.results[j] = q.results[j], q.results[i]
|
||||
}
|
||||
|
||||
// QueryFunc executes a PromQL query at the given time.
type QueryFunc func(context.Context, string, time.Time) (promql.Vector, error)
|
||||
|
||||
func query(ctx context.Context, q string, ts time.Time, queryFn QueryFunc) (queryResult, error) {
|
||||
vector, err := queryFn(ctx, q, ts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// promql.Vector is hard to work with in templates, so convert to
|
||||
// base data types.
|
||||
// TODO(fabxc): probably not true anymore after type rework.
|
||||
var result = make(queryResult, len(vector))
|
||||
for n, v := range vector {
|
||||
s := sample{
|
||||
Value: v.V,
|
||||
Labels: v.Metric.Map(),
|
||||
}
|
||||
result[n] = &s
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Expander executes templates in text or HTML mode with a common set of Prometheus template functions.
type Expander struct {
	// text is the raw template source to expand.
	text string
	// name identifies the template in error messages.
	name string
	// data is the dot (".") value supplied to the template.
	data interface{}
	// funcMap holds the Prometheus-specific template functions.
	funcMap text_template.FuncMap
}
|
||||
|
||||
// NewTemplateExpander returns a template expander ready to use.
// The returned Expander's function map closes over ctx, timestamp, queryFunc
// and externalURL, so queries issued from templates run against those values.
func NewTemplateExpander(
	ctx context.Context,
	text string,
	name string,
	data interface{},
	timestamp model.Time,
	queryFunc QueryFunc,
	externalURL *url.URL,
) *Expander {
	return &Expander{
		text: text,
		name: name,
		data: data,
		funcMap: text_template.FuncMap{
			// query runs a PromQL expression at the expander's timestamp.
			"query": func(q string) (queryResult, error) {
				return query(ctx, q, timestamp.Time(), queryFunc)
			},
			// first returns the first sample of a result vector or an error
			// for an empty vector.
			"first": func(v queryResult) (*sample, error) {
				if len(v) > 0 {
					return v[0], nil
				}
				return nil, errors.New("first() called on vector with no elements")
			},
			"label": func(label string, s *sample) string {
				return s.Labels[label]
			},
			"value": func(s *sample) float64 {
				return s.Value
			},
			"strvalue": func(s *sample) string {
				return s.Labels["__value__"]
			},
			// args packs positional arguments into a map keyed arg0, arg1, …
			// so they can be passed to sub-templates.
			"args": func(args ...interface{}) map[string]interface{} {
				result := make(map[string]interface{})
				for i, a := range args {
					result[fmt.Sprintf("arg%d", i)] = a
				}
				return result
			},
			// reReplaceAll compiles pattern on every call and panics on an
			// invalid pattern (MustCompile); the panic is recovered in Expand.
			"reReplaceAll": func(pattern, repl, text string) string {
				re := regexp.MustCompile(pattern)
				return re.ReplaceAllString(text, repl)
			},
			"safeHtml": func(text string) html_template.HTML {
				return html_template.HTML(text)
			},
			"match":     regexp.MatchString,
			"title":     strings.Title,
			"toUpper":   strings.ToUpper,
			"toLower":   strings.ToLower,
			"graphLink": strutil.GraphLinkForExpression,
			"tableLink": strutil.TableLinkForExpression,
			// sortByLabel sorts the vector in place (stable) by a label value.
			"sortByLabel": func(label string, v queryResult) queryResult {
				sorter := queryResultByLabelSorter{v[:], label}
				sort.Stable(sorter)
				return v
			},
			// humanize renders v with an SI prefix and 4 significant digits.
			"humanize": func(v float64) string {
				if v == 0 || math.IsNaN(v) || math.IsInf(v, 0) {
					return fmt.Sprintf("%.4g", v)
				}
				if math.Abs(v) >= 1 {
					prefix := ""
					for _, p := range []string{"k", "M", "G", "T", "P", "E", "Z", "Y"} {
						if math.Abs(v) < 1000 {
							break
						}
						prefix = p
						v /= 1000
					}
					return fmt.Sprintf("%.4g%s", v, prefix)
				}
				prefix := ""
				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
					if math.Abs(v) >= 1 {
						break
					}
					prefix = p
					v *= 1000
				}
				return fmt.Sprintf("%.4g%s", v, prefix)
			},
			// humanize1024 is like humanize but with binary (1024-based) prefixes.
			"humanize1024": func(v float64) string {
				if math.Abs(v) <= 1 || math.IsNaN(v) || math.IsInf(v, 0) {
					return fmt.Sprintf("%.4g", v)
				}
				prefix := ""
				for _, p := range []string{"ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"} {
					if math.Abs(v) < 1024 {
						break
					}
					prefix = p
					v /= 1024
				}
				return fmt.Sprintf("%.4g%s", v, prefix)
			},
			// humanizeDuration renders a duration in seconds as d/h/m/s parts,
			// or with an SI prefix for sub-second magnitudes.
			"humanizeDuration": func(v float64) string {
				if math.IsNaN(v) || math.IsInf(v, 0) {
					return fmt.Sprintf("%.4g", v)
				}
				if v == 0 {
					return fmt.Sprintf("%.4gs", v)
				}
				if math.Abs(v) >= 1 {
					sign := ""
					if v < 0 {
						sign = "-"
						v = -v
					}
					seconds := int64(v) % 60
					minutes := (int64(v) / 60) % 60
					hours := (int64(v) / 60 / 60) % 24
					days := int64(v) / 60 / 60 / 24
					// For days to minutes, we display seconds as an integer.
					if days != 0 {
						return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds)
					}
					if hours != 0 {
						return fmt.Sprintf("%s%dh %dm %ds", sign, hours, minutes, seconds)
					}
					if minutes != 0 {
						return fmt.Sprintf("%s%dm %ds", sign, minutes, seconds)
					}
					// For seconds, we display 4 significant digits.
					return fmt.Sprintf("%s%.4gs", sign, v)
				}
				prefix := ""
				for _, p := range []string{"m", "u", "n", "p", "f", "a", "z", "y"} {
					if math.Abs(v) >= 1 {
						break
					}
					prefix = p
					v *= 1000
				}
				return fmt.Sprintf("%.4g%ss", v, prefix)
			},
			"humanizePercentage": func(v float64) string {
				return fmt.Sprintf("%.4g%%", v*100)
			},
			// humanizeTimestamp interprets v as Unix seconds and prints it in UTC.
			"humanizeTimestamp": func(v float64) string {
				if math.IsNaN(v) || math.IsInf(v, 0) {
					return fmt.Sprintf("%.4g", v)
				}
				t := model.TimeFromUnixNano(int64(v * 1e9)).Time().UTC()
				return fmt.Sprint(t)
			},
			"pathPrefix": func() string {
				return externalURL.Path
			},
			"externalURL": func() string {
				return externalURL.String()
			},
		},
	}
}
|
||||
|
||||
// AlertTemplateData returns the interface to be used in expanding the template.
// It wraps the alert's labels, the configured external labels and the alert
// expression's value into an anonymous struct with the field names templates
// reference (.Labels, .ExternalLabels, .Value).
func AlertTemplateData(labels map[string]string, externalLabels map[string]string, value float64) interface{} {
	return struct {
		Labels         map[string]string
		ExternalLabels map[string]string
		Value          float64
	}{
		Labels:         labels,
		ExternalLabels: externalLabels,
		Value:          value,
	}
}
|
||||
|
||||
// Funcs adds the functions in fm to the Expander's function map.
// Existing functions will be overwritten in case of conflict.
// The value receiver is sufficient because funcMap is a map and the
// mutation is visible through any copy of the Expander.
func (te Expander) Funcs(fm text_template.FuncMap) {
	for k, v := range fm {
		te.funcMap[k] = v
	}
}
|
||||
|
||||
// Expand expands a template in text (non-HTML) mode.
|
||||
func (te Expander) Expand() (result string, resultErr error) {
|
||||
// It'd better to have no alert description than to kill the whole process
|
||||
// if there's a bug in the template.
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
resultErr, ok = r.(error)
|
||||
if !ok {
|
||||
resultErr = errors.Errorf("panic expanding template %v: %v", te.name, r)
|
||||
}
|
||||
}
|
||||
if resultErr != nil {
|
||||
templateTextExpansionFailures.Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
templateTextExpansionTotal.Inc()
|
||||
|
||||
tmpl, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error parsing template %v", te.name)
|
||||
}
|
||||
var buffer bytes.Buffer
|
||||
err = tmpl.Execute(&buffer, te.data)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error executing template %v", te.name)
|
||||
}
|
||||
return buffer.String(), nil
|
||||
}
|
||||
|
||||
// ExpandHTML expands a template with HTML escaping, with templates read from the given files.
func (te Expander) ExpandHTML(templateFiles []string) (result string, resultErr error) {
	// Recover from template panics so a buggy template cannot crash the process.
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			resultErr, ok = r.(error)
			if !ok {
				resultErr = errors.Errorf("panic expanding template %s: %v", te.name, r)
			}
		}
	}()

	tmpl := html_template.New(te.name).Funcs(html_template.FuncMap(te.funcMap))
	tmpl.Option("missingkey=zero")
	// "tmpl" lets a template execute another named template and splice in its
	// output unescaped. The closure captures the tmpl variable, which is
	// reassigned by Parse below, so it also sees later-parsed templates.
	tmpl.Funcs(html_template.FuncMap{
		"tmpl": func(name string, data interface{}) (html_template.HTML, error) {
			var buffer bytes.Buffer
			err := tmpl.ExecuteTemplate(&buffer, name, data)
			return html_template.HTML(buffer.String()), err
		},
	})
	tmpl, err := tmpl.Parse(te.text)
	if err != nil {
		return "", errors.Wrapf(err, "error parsing template %v", te.name)
	}
	if len(templateFiles) > 0 {
		_, err = tmpl.ParseFiles(templateFiles...)
		if err != nil {
			return "", errors.Wrapf(err, "error parsing template files for %v", te.name)
		}
	}
	var buffer bytes.Buffer
	err = tmpl.Execute(&buffer, te.data)
	if err != nil {
		return "", errors.Wrapf(err, "error executing template %v", te.name)
	}
	return buffer.String(), nil
}
|
||||
|
||||
// ParseTest parses the templates and returns the error if any.
|
||||
func (te Expander) ParseTest() error {
|
||||
_, err := text_template.New(te.name).Funcs(te.funcMap).Option("missingkey=zero").Parse(te.text)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
1
vendor/github.com/prometheus/prometheus/tsdb/.gitignore
generated
vendored
Normal file
1
vendor/github.com/prometheus/prometheus/tsdb/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
benchout/
|
||||
108
vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
generated
vendored
Normal file
108
vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
## master / unreleased
|
||||
|
||||
## 0.10.0
|
||||
|
||||
- [FEATURE] Added `DBReadOnly` to allow opening a database in read only mode.
|
||||
- `DBReadOnly.Blocks()` exposes a slice of `BlockReader`s.
|
||||
- `BlockReader` interface - removed MinTime/MaxTime methods and now exposes the full block meta via `Meta()`.
|
||||
- [FEATURE] `chunkenc.Chunk.Iterator` method now takes a `chunkenc.Iterator` interface as an argument for reuse.
|
||||
|
||||
## 0.9.1
|
||||
|
||||
- [CHANGE] LiveReader metrics are now injected rather than global.
|
||||
|
||||
## 0.9.0
|
||||
|
||||
- [FEATURE] Provide option to compress WAL records using Snappy. [#609](https://github.com/prometheus/tsdb/pull/609)
|
||||
- [BUGFIX] Re-calculate block size when calling `block.Delete`.
|
||||
- [BUGFIX] Re-encode all head chunks at compaction that are open (being appended to) or outside the Maxt block range. This avoids writing out corrupt data. It happens when snapshotting with the head included.
|
||||
- [BUGFIX] Improved handling of multiple refs for the same series in WAL reading.
|
||||
- [BUGFIX] `prometheus_tsdb_compactions_failed_total` is now incremented on any compaction failure.
|
||||
- [CHANGE] The meta file `BlockStats` no longer holds size information. This is now dynamically calculated and kept in memory. It also includes the meta file size which was not included before.
|
||||
- [CHANGE] Create new clean segment when starting the WAL.
|
||||
- [CHANGE] Renamed metric from `prometheus_tsdb_wal_reader_corruption_errors` to `prometheus_tsdb_wal_reader_corruption_errors_total`.
|
||||
- [ENHANCEMENT] Improved atomicity of .tmp block replacement during compaction for usual case.
|
||||
- [ENHANCEMENT] Improved postings intersection matching.
|
||||
- [ENHANCEMENT] Reduced disk usage for WAL for small setups.
|
||||
- [ENHANCEMENT] Optimize queries using regexp for set lookups.
|
||||
|
||||
|
||||
## 0.8.0
|
||||
|
||||
- [BUGFIX] Calling `Close` more than once on a querier returns an error instead of a panic.
|
||||
- [BUGFIX] Don't panic and recover nicely when running out of disk space.
|
||||
- [BUGFIX] Correctly handle empty labels.
|
||||
- [BUGFIX] Don't crash on an unknown tombstone ref.
|
||||
- [ENHANCEMENT] Re-add FromData function to create a chunk from bytes. It is used by Cortex and Thanos.
|
||||
- [ENHANCEMENT] Simplify mergedPostings.Seek.
|
||||
- [FEATURE] Added `currentSegment` metric for the current WAL segment it is being written to.
|
||||
|
||||
## 0.7.1
|
||||
|
||||
- [ENHANCEMENT] Reduce memory usage in mergedPostings.Seek
|
||||
|
||||
## 0.7.0
|
||||
|
||||
- [CHANGE] tsdb now requires golang 1.12 or higher.
|
||||
- [REMOVED] `chunks.NewReader` is removed as it wasn't used anywhere.
|
||||
- [REMOVED] `FromData` is considered unused so was removed.
|
||||
- [FEATURE] Added option WALSegmentSize -1 to disable the WAL.
|
||||
- [BUGFIX] Bugfix in selectOverlappingDirs. Only return the first overlapping blocks.
|
||||
- [BUGFIX] Fsync the meta file to persist it on disk to avoid data loss in case of a host crash.
|
||||
- [BUGFIX] Fix fd and vm_area leak on error path in chunks.NewDirReader.
|
||||
- [BUGFIX] Fix fd and vm_area leak on error path in index.NewFileReader.
|
||||
- [BUGFIX] Force persisting the tombstone file to avoid data loss in case of a host crash.
|
||||
- [BUGFIX] Keep series that are still in WAL in checkpoints.
|
||||
- [ENHANCEMENT] Fast path for EmptyPostings cases in Merge, Intersect and Without.
|
||||
- [ENHANCEMENT] Be smarter in how we look at matchers.
|
||||
- [ENHANCEMENT] PostListings and NotMatcher now public.
|
||||
|
||||
## 0.6.1
|
||||
|
||||
- [BUGFIX] Update `last` after appending a non-overlapping chunk in `chunks.MergeOverlappingChunks`. [#539](https://github.com/prometheus/tsdb/pull/539)
|
||||
|
||||
## 0.6.0
|
||||
|
||||
- [CHANGE] `AllowOverlappingBlock` is now `AllowOverlappingBlocks`.
|
||||
|
||||
## 0.5.0
|
||||
|
||||
- [FEATURE] Time-overlapping blocks are now allowed. [#370](https://github.com/prometheus/tsdb/pull/370)
|
||||
- Disabled by default and can be enabled via `AllowOverlappingBlock` option.
|
||||
- Added `MergeChunks` function in `chunkenc/xor.go` to merge 2 time-overlapping chunks.
|
||||
- Added `MergeOverlappingChunks` function in `chunks/chunks.go` to merge multiple time-overlapping Chunk Metas.
|
||||
- Added `MinTime` and `MaxTime` method for `BlockReader`.
|
||||
- [FEATURE] New `dump` command to tsdb tool to dump all samples.
|
||||
- [FEATURE] New `encoding` package for common binary encoding/decoding helpers.
|
||||
- Added to remove some code duplication.
|
||||
- [ENHANCEMENT] When closing the db any running compaction will be cancelled so it doesn't block.
|
||||
- `NewLeveledCompactor` takes a context.
|
||||
- [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`.
|
||||
- [BUGFIX] Improved Postings Merge performance. Fixes a regression from the previous release.
|
||||
- [BUGFIX] LiveReader can get into an infinite loop on corrupt WALs.
|
||||
|
||||
## 0.4.0
|
||||
|
||||
- [CHANGE] New `WALSegmentSize` option to override the `DefaultOptions.WALSegmentSize`. Added to allow using smaller wal files. For example using tmpfs on a RPI to minimise the SD card wear out from the constant WAL writes. As part of this change the `DefaultOptions.WALSegmentSize` constant was also exposed.
|
||||
- [CHANGE] Empty blocks are not written during compaction [#374](https://github.com/prometheus/tsdb/pull/374)
|
||||
- [FEATURE] Size-based retention through `Options.MaxBytes`. As part of this change:
|
||||
- Added new metrics - `prometheus_tsdb_storage_blocks_bytes_total`, `prometheus_tsdb_size_retentions_total`, `prometheus_tsdb_time_retentions_total`
|
||||
- New public interface `SizeReader: Size() int64`
|
||||
- `OpenBlock` signature changed to take a logger.
|
||||
- [REMOVED] `PrefixMatcher` is considered unused so was removed.
|
||||
- [CLEANUP] `Options.WALFlushInterval` is removed as it wasn't used anywhere.
|
||||
- [FEATURE] Add new `LiveReader` to WAL package. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read.
|
||||
|
||||
## 0.3.1
|
||||
|
||||
- [BUGFIX] Fixed most windows test and some actual bugs for unclosed file readers.
|
||||
|
||||
## 0.3.0
|
||||
|
||||
- [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path.
|
||||
- [CHANGE] `NewSegmentsRangeReader()` can now read over multiple wal ranges by using the new `SegmentRange{}` struct.
|
||||
- [CHANGE] `CorruptionErr{}` now also exposes the Segment `Dir` which is added when displaying any errors.
|
||||
- [CHANGE] `Head.Init()` is changed to `Head.Init(minValidTime int64)`
|
||||
- [CHANGE] `SymbolTable()` renamed to `SymbolTableSize()` to make the name consistent with the `Block{ symbolTableSize uint64 }` field.
|
||||
- [CHANGE] `wal.Reader{}` now exposes `Segment()` for the current segment being read and `Offset()` for the current offset.
|
||||
- [FEATURE] tsdbutil analyze subcommand to find churn, high cardinality, etc.
|
||||
13
vendor/github.com/prometheus/prometheus/tsdb/README.md
generated
vendored
Normal file
13
vendor/github.com/prometheus/prometheus/tsdb/README.md
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# TSDB
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/prometheus/tsdb)
|
||||
|
||||
This repository contains the Prometheus storage layer that is used in its 2.x releases.
|
||||
|
||||
A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/).
|
||||
|
||||
Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf).
|
||||
|
||||
Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/).
|
||||
|
||||
See also the [format documentation](docs/format/README.md).
|
||||
627
vendor/github.com/prometheus/prometheus/tsdb/block.go
generated
vendored
Normal file
627
vendor/github.com/prometheus/prometheus/tsdb/block.go
generated
vendored
Normal file
@@ -0,0 +1,627 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/oklog/ulid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/tsdb/chunkenc"
|
||||
"github.com/prometheus/prometheus/tsdb/chunks"
|
||||
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||
"github.com/prometheus/prometheus/tsdb/index"
|
||||
"github.com/prometheus/prometheus/tsdb/tombstones"
|
||||
)
|
||||
|
||||
// IndexWriter serializes the index for a block of series data.
// The methods must be called in the order they are specified in.
type IndexWriter interface {
	// AddSymbol registers a string symbol that is encountered in series
	// and other indices. Symbols must be added in sorted order.
	AddSymbol(sym string) error

	// AddSeries populates the index writer with a series and its offsets
	// of chunks that the index can reference.
	// Implementations may require series to be inserted in increasing order by
	// their labels.
	// The reference numbers are used to resolve entries in postings lists that
	// are added later.
	AddSeries(ref uint64, l labels.Labels, chunks ...chunks.Meta) error

	// Close writes any finalization and closes the resources associated with
	// the underlying writer.
	Close() error
}
|
||||
|
||||
// IndexReader provides reading access of serialized index data.
type IndexReader interface {
	// Symbols return an iterator over sorted string symbols that may occur in
	// series' labels and indices. It is not safe to use the returned strings
	// beyond the lifetime of the index reader.
	Symbols() index.StringIter

	// LabelValues returns sorted possible label values.
	LabelValues(name string) ([]string, error)

	// Postings returns the postings list iterator for the label pairs.
	// The Postings here contain the offsets to the series inside the index.
	// Found IDs are not strictly required to point to a valid Series, e.g.
	// during background garbage collections. Input values must be sorted.
	Postings(name string, values ...string) (index.Postings, error)

	// SortedPostings returns a postings list that is reordered to be sorted
	// by the label set of the underlying series.
	SortedPostings(index.Postings) index.Postings

	// Series populates the given labels and chunk metas for the series identified
	// by the reference.
	// Returns storage.ErrNotFound if the ref does not resolve to a known series.
	Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error

	// LabelNames returns all the unique label names present in the index in sorted order.
	LabelNames() ([]string, error)

	// Close releases the underlying resources of the reader.
	Close() error
}
|
||||
|
||||
// ChunkWriter serializes a time block of chunked series data.
type ChunkWriter interface {
	// WriteChunks writes several chunks. The Chunk field of the ChunkMetas
	// must be populated.
	// After returning successfully, the Ref fields in the ChunkMetas
	// are set and can be used to retrieve the chunks from the written data.
	WriteChunks(chunks ...chunks.Meta) error

	// Close writes any required finalization and closes the resources
	// associated with the underlying writer.
	Close() error
}
|
||||
|
||||
// ChunkReader provides reading access of serialized time series data.
type ChunkReader interface {
	// Chunk returns the series data chunk with the given reference.
	Chunk(ref uint64) (chunkenc.Chunk, error)

	// Close releases all underlying resources of the reader.
	Close() error
}
|
||||
|
||||
// BlockReader provides reading access to a data block.
type BlockReader interface {
	// Index returns an IndexReader over the block's data.
	Index() (IndexReader, error)

	// Chunks returns a ChunkReader over the block's data.
	Chunks() (ChunkReader, error)

	// Tombstones returns a tombstones.Reader over the block's deleted data.
	Tombstones() (tombstones.Reader, error)

	// Meta provides meta information about the block reader.
	Meta() BlockMeta
}
|
||||
|
||||
// BlockMeta provides meta information about a block.
// It is persisted as the block's meta.json file (see readMetaFile/writeMetaFile).
type BlockMeta struct {
	// Unique identifier for the block and its contents. Changes on compaction.
	ULID ulid.ULID `json:"ulid"`

	// MinTime and MaxTime specify the time range all samples
	// in the block are in.
	MinTime int64 `json:"minTime"`
	MaxTime int64 `json:"maxTime"`

	// Stats about the contents of the block.
	Stats BlockStats `json:"stats,omitempty"`

	// Information on compactions the block was created from.
	Compaction BlockMetaCompaction `json:"compaction"`

	// Version of the index format.
	Version int `json:"version"`
}
|
||||
|
||||
// BlockStats contains stats about contents of a block.
type BlockStats struct {
	NumSamples    uint64 `json:"numSamples,omitempty"`    // number of samples in the block
	NumSeries     uint64 `json:"numSeries,omitempty"`     // number of series in the block
	NumChunks     uint64 `json:"numChunks,omitempty"`     // number of chunks in the block
	NumTombstones uint64 `json:"numTombstones,omitempty"` // number of tombstones in the block
}
|
||||
|
||||
// BlockDesc describes a block by ULID and time range.
type BlockDesc struct {
	ULID    ulid.ULID `json:"ulid"`
	MinTime int64     `json:"minTime"`
	MaxTime int64     `json:"maxTime"`
}
|
||||
|
||||
// BlockMetaCompaction holds information about compactions a block went through.
type BlockMetaCompaction struct {
	// Maximum number of compaction cycles any source block has
	// gone through.
	Level int `json:"level"`
	// ULIDs of all source head blocks that went into the block.
	Sources []ulid.ULID `json:"sources,omitempty"`
	// Indicates that during compaction it resulted in a block without any samples
	// so it should be deleted on the next reload.
	Deletable bool `json:"deletable,omitempty"`
	// Short descriptions of the direct blocks that were used to create
	// this block.
	Parents []BlockDesc `json:"parents,omitempty"`
	// Failed is set (and persisted) when a compaction involving this block
	// did not complete successfully; see setCompactionFailed.
	Failed bool `json:"failed,omitempty"`
}
|
||||
|
||||
// On-disk artifact names within a block directory and the supported
// meta.json format version.
const indexFilename = "index"
const metaFilename = "meta.json"
const metaVersion1 = 1
|
||||
|
||||
// chunkDir returns the directory holding a block's chunk segment files.
func chunkDir(dir string) string { return filepath.Join(dir, "chunks") }
|
||||
|
||||
func readMetaFile(dir string) (*BlockMeta, int64, error) {
|
||||
b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
var m BlockMeta
|
||||
|
||||
if err := json.Unmarshal(b, &m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if m.Version != metaVersion1 {
|
||||
return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version)
|
||||
}
|
||||
|
||||
return &m, int64(len(b)), nil
|
||||
}
|
||||
|
||||
func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) {
|
||||
meta.Version = metaVersion1
|
||||
|
||||
// Make any changes to the file appear atomic.
|
||||
path := filepath.Join(dir, metaFilename)
|
||||
tmp := path + ".tmp"
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmp); err != nil {
|
||||
level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
f, err := os.Create(tmp)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
jsonMeta, err := json.MarshalIndent(meta, "", "\t")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var merr tsdb_errors.MultiError
|
||||
n, err := f.Write(jsonMeta)
|
||||
if err != nil {
|
||||
merr.Add(err)
|
||||
merr.Add(f.Close())
|
||||
return 0, merr.Err()
|
||||
}
|
||||
|
||||
// Force the kernel to persist the file on disk to avoid data loss if the host crashes.
|
||||
if err := f.Sync(); err != nil {
|
||||
merr.Add(err)
|
||||
merr.Add(f.Close())
|
||||
return 0, merr.Err()
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int64(n), fileutil.Replace(tmp, path)
|
||||
}
|
||||
|
||||
// Block represents a directory of time series data covering a continuous time range.
type Block struct {
	mtx            sync.RWMutex
	closing        bool           // set by Close; rejects new readers via startRead
	pendingReaders sync.WaitGroup // readers opened via startRead and not yet closed

	dir  string    // block directory on disk
	meta BlockMeta // parsed meta.json contents

	// Symbol Table Size in bytes.
	// We maintain this variable to avoid recalculation every time.
	symbolTableSize uint64

	chunkr     ChunkReader
	indexr     IndexReader
	tombstones tombstones.Reader

	logger log.Logger

	// Sizes of the block's on-disk artifacts, summed by Size().
	numBytesChunks    int64
	numBytesIndex     int64
	numBytesTombstone int64
	numBytesMeta      int64
}
|
||||
|
||||
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
// to instantiate chunk structs.
// On any error, all readers opened so far are closed before returning.
func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	// Collect opened readers so the deferred cleanup can close them if a
	// later step fails (err is the named return).
	var closers []io.Closer
	defer func() {
		if err != nil {
			var merr tsdb_errors.MultiError
			merr.Add(err)
			merr.Add(closeAll(closers))
			err = merr.Err()
		}
	}()
	meta, sizeMeta, err := readMetaFile(dir)
	if err != nil {
		return nil, err
	}

	cr, err := chunks.NewDirReader(chunkDir(dir), pool)
	if err != nil {
		return nil, err
	}
	closers = append(closers, cr)

	ir, err := index.NewFileReader(filepath.Join(dir, indexFilename))
	if err != nil {
		return nil, err
	}
	closers = append(closers, ir)

	tr, sizeTomb, err := tombstones.ReadTombstones(dir)
	if err != nil {
		return nil, err
	}
	closers = append(closers, tr)

	pb = &Block{
		dir:               dir,
		meta:              *meta,
		chunkr:            cr,
		indexr:            ir,
		tombstones:        tr,
		symbolTableSize:   ir.SymbolTableSize(),
		logger:            logger,
		numBytesChunks:    cr.Size(),
		numBytesIndex:     ir.Size(),
		numBytesTombstone: sizeTomb,
		numBytesMeta:      sizeMeta,
	}
	return pb, nil
}
|
||||
|
||||
// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
func (pb *Block) Close() error {
	// Mark the block as closing so startRead rejects new readers, then wait
	// for all outstanding readers before releasing resources.
	pb.mtx.Lock()
	pb.closing = true
	pb.mtx.Unlock()

	pb.pendingReaders.Wait()

	// Close all readers, collecting every error rather than stopping at the first.
	var merr tsdb_errors.MultiError

	merr.Add(pb.chunkr.Close())
	merr.Add(pb.indexr.Close())
	merr.Add(pb.tombstones.Close())

	return merr.Err()
}
|
||||
|
||||
// String returns the block's ULID as its string representation.
func (pb *Block) String() string {
	return pb.meta.ULID.String()
}
|
||||
|
||||
// Dir returns the directory of the block.
func (pb *Block) Dir() string { return pb.dir }
|
||||
|
||||
// Meta returns meta information about the block.
func (pb *Block) Meta() BlockMeta { return pb.meta }
|
||||
|
||||
// MinTime returns the min time of the meta.
func (pb *Block) MinTime() int64 { return pb.meta.MinTime }
|
||||
|
||||
// MaxTime returns the max time of the meta.
func (pb *Block) MaxTime() int64 { return pb.meta.MaxTime }
|
||||
|
||||
// Size returns the number of bytes that the block takes up on disk,
// summed over chunks, index, tombstones and the meta file.
func (pb *Block) Size() int64 {
	return pb.numBytesChunks + pb.numBytesIndex + pb.numBytesTombstone + pb.numBytesMeta
}
|
||||
|
||||
// ErrClosing is returned when a block is in the process of being closed
// and no new readers may be opened on it.
var ErrClosing = errors.New("block is closing")
|
||||
|
||||
// startRead registers a pending reader on the block, or returns ErrClosing
// if the block is being closed. The registration is released via
// pendingReaders.Done in the wrapped readers' Close methods.
func (pb *Block) startRead() error {
	pb.mtx.RLock()
	defer pb.mtx.RUnlock()

	if pb.closing {
		return ErrClosing
	}
	pb.pendingReaders.Add(1)
	return nil
}
|
||||
|
||||
// Index returns a new IndexReader against the block data.
|
||||
func (pb *Block) Index() (IndexReader, error) {
|
||||
if err := pb.startRead(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blockIndexReader{ir: pb.indexr, b: pb}, nil
|
||||
}
|
||||
|
||||
// Chunks returns a new ChunkReader against the block data.
// It fails with ErrClosing when the block is being closed.
func (pb *Block) Chunks() (ChunkReader, error) {
	if err := pb.startRead(); err != nil {
		return nil, err
	}
	return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
}
|
||||
|
||||
// Tombstones returns a new TombstoneReader against the block data.
// It fails with ErrClosing when the block is being closed.
func (pb *Block) Tombstones() (tombstones.Reader, error) {
	if err := pb.startRead(); err != nil {
		return nil, err
	}
	return blockTombstoneReader{Reader: pb.tombstones, b: pb}, nil
}
|
||||
|
||||
// GetSymbolTableSize returns the Symbol Table Size in the index of this block.
// The value is cached at open time (see OpenBlock).
func (pb *Block) GetSymbolTableSize() uint64 {
	return pb.symbolTableSize
}
|
||||
|
||||
func (pb *Block) setCompactionFailed() error {
|
||||
pb.meta.Compaction.Failed = true
|
||||
n, err := writeMetaFile(pb.logger, pb.dir, &pb.meta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb.numBytesMeta = n
|
||||
return nil
|
||||
}
|
||||
|
||||
// blockIndexReader wraps an IndexReader, tying its lifetime to the block's
// pending-reader accounting and annotating errors with the block's ULID.
type blockIndexReader struct {
	ir IndexReader
	b  *Block
}
|
||||
|
||||
// Symbols delegates to the wrapped IndexReader.
func (r blockIndexReader) Symbols() index.StringIter {
	return r.ir.Symbols()
}
|
||||
|
||||
// LabelValues delegates to the wrapped IndexReader, annotating any error
// with the block's ULID. errors.Wrapf returns nil for a nil error.
func (r blockIndexReader) LabelValues(name string) ([]string, error) {
	st, err := r.ir.LabelValues(name)
	return st, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
}
|
||||
|
||||
func (r blockIndexReader) Postings(name string, values ...string) (index.Postings, error) {
|
||||
p, err := r.ir.Postings(name, values...)
|
||||
if err != nil {
|
||||
return p, errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (r blockIndexReader) SortedPostings(p index.Postings) index.Postings {
|
||||
return r.ir.SortedPostings(p)
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error {
|
||||
if err := r.ir.Series(ref, lset, chks); err != nil {
|
||||
return errors.Wrapf(err, "block: %s", r.b.Meta().ULID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r blockIndexReader) LabelNames() ([]string, error) {
|
||||
return r.b.LabelNames()
|
||||
}
|
||||
|
||||
func (r blockIndexReader) Close() error {
|
||||
r.b.pendingReaders.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
// blockTombstoneReader embeds a tombstones.Reader and releases the block's
// read reference on Close.
type blockTombstoneReader struct {
	tombstones.Reader
	b *Block
}

func (r blockTombstoneReader) Close() error {
	r.b.pendingReaders.Done()
	return nil
}

// blockChunkReader embeds a ChunkReader and releases the block's read
// reference on Close.
type blockChunkReader struct {
	ChunkReader
	b *Block
}

func (r blockChunkReader) Close() error {
	r.b.pendingReaders.Done()
	return nil
}
|
||||
|
||||
// Delete matching series between mint and maxt in the block.
// Deletion only records tombstones; chunk data is not rewritten until
// compaction or CleanTombstones.
func (pb *Block) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
	pb.mtx.Lock()
	defer pb.mtx.Unlock()

	if pb.closing {
		return ErrClosing
	}

	p, err := PostingsForMatchers(pb.indexr, ms...)
	if err != nil {
		return errors.Wrap(err, "select series")
	}

	ir := pb.indexr

	// Choose only valid postings which have chunks in the time-range.
	stones := tombstones.NewMemTombstones()

	var lset labels.Labels
	var chks []chunks.Meta

Outer:
	for p.Next() {
		err := ir.Series(p.At(), &lset, &chks)
		if err != nil {
			return err
		}

		for _, chk := range chks {
			if chk.OverlapsClosedInterval(mint, maxt) {
				// Delete only until the current values and not beyond.
				tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
				stones.AddInterval(p.At(), tombstones.Interval{Mint: tmin, Maxt: tmax})
				// One interval per series is enough; move to the next series.
				continue Outer
			}
		}
	}

	if p.Err() != nil {
		return p.Err()
	}

	// Merge the pre-existing tombstones into the newly built set.
	err = pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error {
		for _, iv := range ivs {
			stones.AddInterval(id, iv)
		}
		return nil
	})
	if err != nil {
		return err
	}
	pb.tombstones = stones
	pb.meta.Stats.NumTombstones = pb.tombstones.Total()

	// Persist tombstones and the updated meta, tracking their on-disk sizes.
	n, err := tombstones.WriteFile(pb.logger, pb.dir, pb.tombstones)
	if err != nil {
		return err
	}
	pb.numBytesTombstone = n
	n, err = writeMetaFile(pb.logger, pb.dir, &pb.meta)
	if err != nil {
		return err
	}
	pb.numBytesMeta = n
	return nil
}
|
||||
|
||||
// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones).
// If there was a rewrite, then it returns the ULID of the new block written, else nil.
func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) {
	numStones := 0

	if err := pb.tombstones.Iter(func(id uint64, ivs tombstones.Intervals) error {
		numStones += len(ivs)
		return nil
	}); err != nil {
		// This should never happen, as the iteration function only returns nil.
		panic(err)
	}
	if numStones == 0 {
		// No tombstones: nothing to clean, no rewrite performed.
		return nil, nil
	}

	// Rewriting through the compactor drops the data covered by tombstones.
	meta := pb.Meta()
	uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta)
	if err != nil {
		return nil, err
	}
	return &uid, nil
}
|
||||
|
||||
// Snapshot creates snapshot of the block into dir.
// All files are hardlinked rather than copied, so dir must be on the same
// filesystem as the block.
func (pb *Block) Snapshot(dir string) error {
	blockDir := filepath.Join(dir, pb.meta.ULID.String())
	if err := os.MkdirAll(blockDir, 0777); err != nil {
		return errors.Wrap(err, "create snapshot block dir")
	}

	chunksDir := chunkDir(blockDir)
	if err := os.MkdirAll(chunksDir, 0777); err != nil {
		return errors.Wrap(err, "create snapshot chunk dir")
	}

	// Hardlink meta, index and tombstones
	for _, fname := range []string{
		metaFilename,
		indexFilename,
		tombstones.TombstonesFilename,
	} {
		if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
			return errors.Wrapf(err, "create snapshot %s", fname)
		}
	}

	// Hardlink the chunks
	curChunkDir := chunkDir(pb.dir)
	files, err := ioutil.ReadDir(curChunkDir)
	if err != nil {
		return errors.Wrap(err, "ReadDir the current chunk dir")
	}

	for _, f := range files {
		err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
		if err != nil {
			return errors.Wrap(err, "hardlink a chunk")
		}
	}

	return nil
}
|
||||
|
||||
// OverlapsClosedInterval returns true if the block overlaps [mint, maxt].
func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool {
	// The block itself is a half-open interval
	// [pb.meta.MinTime, pb.meta.MaxTime), hence <= on one side and < on the other.
	return pb.meta.MinTime <= maxt && mint < pb.meta.MaxTime
}

// LabelNames returns all the unique label names present in the Block in sorted order.
func (pb *Block) LabelNames() ([]string, error) {
	return pb.indexr.LabelNames()
}
|
||||
|
||||
// clampInterval restricts the interval [a, b] to lie within [mint, maxt]
// and returns the clamped bounds.
func clampInterval(a, b, mint, maxt int64) (int64, int64) {
	lo, hi := a, b
	if lo < mint {
		lo = mint
	}
	if hi > maxt {
		hi = maxt
	}
	return lo, hi
}
|
||||
1047
vendor/github.com/prometheus/prometheus/tsdb/compact.go
generated
vendored
Normal file
1047
vendor/github.com/prometheus/prometheus/tsdb/compact.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1465
vendor/github.com/prometheus/prometheus/tsdb/db.go
generated
vendored
Normal file
1465
vendor/github.com/prometheus/prometheus/tsdb/db.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
19
vendor/github.com/prometheus/prometheus/tsdb/goversion/goversion.go
generated
vendored
Normal file
19
vendor/github.com/prometheus/prometheus/tsdb/goversion/goversion.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.12

// Package goversion enforces the go version supported by the tsdb module.
package goversion

// _SoftwareRequiresGOVERSION1_12 exists only to be referenced from init.go;
// combined with the build tag above, referencing it fails to compile on Go < 1.12.
const _SoftwareRequiresGOVERSION1_12 = uint8(0)
|
||||
17
vendor/github.com/prometheus/prometheus/tsdb/goversion/init.go
generated
vendored
Normal file
17
vendor/github.com/prometheus/prometheus/tsdb/goversion/init.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package goversion

// This will fail to compile if the Go runtime version isn't >= 1.12,
// because the referenced constant only exists under the go1.12 build tag.
var _ = _SoftwareRequiresGOVERSION1_12
|
||||
2054
vendor/github.com/prometheus/prometheus/tsdb/head.go
generated
vendored
Normal file
2054
vendor/github.com/prometheus/prometheus/tsdb/head.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1723
vendor/github.com/prometheus/prometheus/tsdb/index/index.go
generated
vendored
Normal file
1723
vendor/github.com/prometheus/prometheus/tsdb/index/index.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
742
vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
generated
vendored
Normal file
742
vendor/github.com/prometheus/prometheus/tsdb/index/postings.go
generated
vendored
Normal file
@@ -0,0 +1,742 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package index
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"encoding/binary"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
)
|
||||
|
||||
// allPostingsKey is the special empty label pair under which every series ID
// is also indexed.
var allPostingsKey = labels.Label{}

// AllPostingsKey returns the label key that is used to store the postings list of all existing IDs.
func AllPostingsKey() (name, value string) {
	return allPostingsKey.Name, allPostingsKey.Value
}
||||
|
||||
// MemPostings holds postings list for series ID per label pair. They may be written
|
||||
// to out of order.
|
||||
// ensureOrder() must be called once before any reads are done. This allows for quick
|
||||
// unordered batch fills on startup.
|
||||
type MemPostings struct {
|
||||
mtx sync.RWMutex
|
||||
m map[string]map[string][]uint64
|
||||
ordered bool
|
||||
}
|
||||
|
||||
// NewMemPostings returns a memPostings that's ready for reads and writes.
|
||||
func NewMemPostings() *MemPostings {
|
||||
return &MemPostings{
|
||||
m: make(map[string]map[string][]uint64, 512),
|
||||
ordered: true,
|
||||
}
|
||||
}
|
||||
|
||||
// NewUnorderedMemPostings returns a memPostings that is not safe to be read from
|
||||
// until ensureOrder was called once.
|
||||
func NewUnorderedMemPostings() *MemPostings {
|
||||
return &MemPostings{
|
||||
m: make(map[string]map[string][]uint64, 512),
|
||||
ordered: false,
|
||||
}
|
||||
}
|
||||
|
||||
// SortedKeys returns a list of sorted label keys of the postings.
|
||||
func (p *MemPostings) SortedKeys() []labels.Label {
|
||||
p.mtx.RLock()
|
||||
keys := make([]labels.Label, 0, len(p.m))
|
||||
|
||||
for n, e := range p.m {
|
||||
for v := range e {
|
||||
keys = append(keys, labels.Label{Name: n, Value: v})
|
||||
}
|
||||
}
|
||||
p.mtx.RUnlock()
|
||||
|
||||
sort.Slice(keys, func(i, j int) bool {
|
||||
if d := strings.Compare(keys[i].Name, keys[j].Name); d != 0 {
|
||||
return d < 0
|
||||
}
|
||||
return keys[i].Value < keys[j].Value
|
||||
})
|
||||
return keys
|
||||
}
|
||||
|
||||
// PostingsStats contains cardinality based statistics for postings.
|
||||
type PostingsStats struct {
|
||||
CardinalityMetricsStats []Stat
|
||||
CardinalityLabelStats []Stat
|
||||
LabelValueStats []Stat
|
||||
LabelValuePairsStats []Stat
|
||||
}
|
||||
|
||||
// Stats calculates the cardinality statistics from postings.
|
||||
func (p *MemPostings) Stats(label string) *PostingsStats {
|
||||
const maxNumOfRecords = 10
|
||||
var size uint64
|
||||
|
||||
p.mtx.RLock()
|
||||
|
||||
metrics := &maxHeap{}
|
||||
labels := &maxHeap{}
|
||||
labelValueLength := &maxHeap{}
|
||||
labelValuePairs := &maxHeap{}
|
||||
|
||||
metrics.init(maxNumOfRecords)
|
||||
labels.init(maxNumOfRecords)
|
||||
labelValueLength.init(maxNumOfRecords)
|
||||
labelValuePairs.init(maxNumOfRecords)
|
||||
|
||||
for n, e := range p.m {
|
||||
if n == "" {
|
||||
continue
|
||||
}
|
||||
labels.push(Stat{Name: n, Count: uint64(len(e))})
|
||||
size = 0
|
||||
for name, values := range e {
|
||||
if n == label {
|
||||
metrics.push(Stat{Name: name, Count: uint64(len(values))})
|
||||
}
|
||||
labelValuePairs.push(Stat{Name: n + "=" + name, Count: uint64(len(values))})
|
||||
size += uint64(len(name))
|
||||
}
|
||||
labelValueLength.push(Stat{Name: n, Count: size})
|
||||
}
|
||||
|
||||
p.mtx.RUnlock()
|
||||
|
||||
return &PostingsStats{
|
||||
CardinalityMetricsStats: metrics.get(),
|
||||
CardinalityLabelStats: labels.get(),
|
||||
LabelValueStats: labelValueLength.get(),
|
||||
LabelValuePairsStats: labelValuePairs.get(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a postings list for the given label pair.
// Returns the EmptyPostings sentinel when the pair is unknown.
func (p *MemPostings) Get(name, value string) Postings {
	var lp []uint64
	p.mtx.RLock()
	l := p.m[name]
	if l != nil {
		lp = l[value]
	}
	p.mtx.RUnlock()

	if lp == nil {
		return EmptyPostings()
	}
	return newListPostings(lp...)
}

// All returns a postings list over all documents ever added.
func (p *MemPostings) All() Postings {
	return p.Get(AllPostingsKey())
}
|
||||
|
||||
// EnsureOrder ensures that all postings lists are sorted. After it returns all further
// calls to add and addFor will insert new IDs in a sorted manner.
// Sorting is spread over GOMAXPROCS worker goroutines fed via a channel.
func (p *MemPostings) EnsureOrder() {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	if p.ordered {
		return
	}

	n := runtime.GOMAXPROCS(0)
	workc := make(chan []uint64)

	var wg sync.WaitGroup
	wg.Add(n)

	for i := 0; i < n; i++ {
		go func() {
			for l := range workc {
				sort.Slice(l, func(i, j int) bool { return l[i] < l[j] })
			}
			wg.Done()
		}()
	}

	for _, e := range p.m {
		for _, l := range e {
			workc <- l
		}
	}
	// All lists dispatched; wait for the workers to finish sorting.
	close(workc)
	wg.Wait()

	p.ordered = true
}
|
||||
|
||||
// Delete removes all ids in the given map from the postings lists.
func (p *MemPostings) Delete(deleted map[uint64]struct{}) {
	var keys, vals []string

	// Collect all keys relevant for deletion once. New keys added afterwards
	// can by definition not be affected by any of the given deletes.
	p.mtx.RLock()
	for n := range p.m {
		keys = append(keys, n)
	}
	p.mtx.RUnlock()

	for _, n := range keys {
		// Snapshot the values of this name under the read lock.
		p.mtx.RLock()
		vals = vals[:0]
		for v := range p.m[n] {
			vals = append(vals, v)
		}
		p.mtx.RUnlock()

		// For each posting we first analyse whether the postings list is affected by the deletes.
		// If yes, we actually reallocate a new postings list.
		for _, l := range vals {
			// Only lock for processing one postings list so we don't block reads for too long.
			p.mtx.Lock()

			found := false
			for _, id := range p.m[n][l] {
				if _, ok := deleted[id]; ok {
					found = true
					break
				}
			}
			if !found {
				p.mtx.Unlock()
				continue
			}
			repl := make([]uint64, 0, len(p.m[n][l]))

			for _, id := range p.m[n][l] {
				if _, ok := deleted[id]; !ok {
					repl = append(repl, id)
				}
			}
			if len(repl) > 0 {
				p.m[n][l] = repl
			} else {
				delete(p.m[n], l)
			}
			p.mtx.Unlock()
		}
		// Drop the label name entirely once it has no values left.
		p.mtx.Lock()
		if len(p.m[n]) == 0 {
			delete(p.m, n)
		}
		p.mtx.Unlock()
	}
}
|
||||
|
||||
// Iter calls f for each postings list. It aborts if f returns an error and returns it.
// The read lock is held for the whole iteration.
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	for n, e := range p.m {
		for v, p := range e { // NOTE: inner p shadows the receiver.
			if err := f(labels.Label{Name: n, Value: v}, newListPostings(p...)); err != nil {
				return err
			}
		}
	}
	return nil
}

// Add a label set to the postings index.
func (p *MemPostings) Add(id uint64, lset labels.Labels) {
	p.mtx.Lock()

	for _, l := range lset {
		p.addFor(id, l)
	}
	// Every series is also tracked under the all-postings key.
	p.addFor(id, allPostingsKey)

	p.mtx.Unlock()
}
|
||||
|
||||
// addFor appends id to the postings list of label l, creating the per-name
// map on first use. The caller must hold p.mtx.
func (p *MemPostings) addFor(id uint64, l labels.Label) {
	nm, ok := p.m[l.Name]
	if !ok {
		nm = map[string][]uint64{}
		p.m[l.Name] = nm
	}
	list := append(nm[l.Value], id)
	nm[l.Value] = list

	// In unordered mode sorting is deferred to EnsureOrder.
	if !p.ordered {
		return
	}
	// There is no guarantee that no higher ID was inserted before as they may
	// be generated independently before adding them to postings.
	// We repair order violations on insert. The invariant is that the first n-1
	// items in the list are already sorted.
	for i := len(list) - 1; i >= 1; i-- {
		if list[i] >= list[i-1] {
			break
		}
		list[i], list[i-1] = list[i-1], list[i]
	}
}
|
||||
|
||||
// ExpandPostings returns the postings expanded as a slice.
// On error, res holds the values consumed before the failure and err is p.Err().
func ExpandPostings(p Postings) (res []uint64, err error) {
	for p.Next() {
		res = append(res, p.At())
	}
	return res, p.Err()
}
|
||||
|
||||
// Postings provides iterative access over a postings list.
type Postings interface {
	// Next advances the iterator and returns true if another value was found.
	Next() bool

	// Seek advances the iterator to value v or greater and returns
	// true if a value was found.
	Seek(v uint64) bool

	// At returns the value at the current iterator position.
	At() uint64

	// Err returns the last error of the iterator.
	Err() error
}

// errPostings is an empty iterator that always errors.
type errPostings struct {
	err error
}

func (e errPostings) Next() bool       { return false }
func (e errPostings) Seek(uint64) bool { return false }
func (e errPostings) At() uint64       { return 0 }
func (e errPostings) Err() error       { return e.err }

// emptyPostings is the shared sentinel value handed out by EmptyPostings.
var emptyPostings = errPostings{}

// EmptyPostings returns a postings list that's always empty.
// NOTE: Returning EmptyPostings sentinel when index.Postings struct has no postings is recommended.
// It triggers optimized flow in other functions like Intersect, Without etc.
func EmptyPostings() Postings {
	return emptyPostings
}

// ErrPostings returns new postings that immediately error.
func ErrPostings(err error) Postings {
	return errPostings{err}
}
|
||||
|
||||
// Intersect returns a new postings list over the intersection of the
|
||||
// input postings.
|
||||
func Intersect(its ...Postings) Postings {
|
||||
if len(its) == 0 {
|
||||
return EmptyPostings()
|
||||
}
|
||||
if len(its) == 1 {
|
||||
return its[0]
|
||||
}
|
||||
for _, p := range its {
|
||||
if p == EmptyPostings() {
|
||||
return EmptyPostings()
|
||||
}
|
||||
}
|
||||
|
||||
return newIntersectPostings(its...)
|
||||
}
|
||||
|
||||
// intersectPostings yields only the IDs present in every input iterator.
type intersectPostings struct {
	arr []Postings
	cur uint64 // Current candidate/intersection value.
}

func newIntersectPostings(its ...Postings) *intersectPostings {
	return &intersectPostings{arr: its}
}

func (it *intersectPostings) At() uint64 {
	return it.cur
}

// doNext seeks every sub-iterator to it.cur; whenever one overshoots, the
// larger value becomes the new candidate and the scan restarts, until all
// iterators agree (true) or one is exhausted (false).
func (it *intersectPostings) doNext() bool {
Loop:
	for {
		for _, p := range it.arr {
			if !p.Seek(it.cur) {
				return false
			}
			if p.At() > it.cur {
				it.cur = p.At()
				continue Loop
			}
		}
		return true
	}
}

func (it *intersectPostings) Next() bool {
	// Advance each sub-iterator once, tracking the maximum as the candidate,
	// then converge on a common value.
	for _, p := range it.arr {
		if !p.Next() {
			return false
		}
		if p.At() > it.cur {
			it.cur = p.At()
		}
	}
	return it.doNext()
}

func (it *intersectPostings) Seek(id uint64) bool {
	it.cur = id
	return it.doNext()
}

func (it *intersectPostings) Err() error {
	// Report the first error of any sub-iterator.
	for _, p := range it.arr {
		if p.Err() != nil {
			return p.Err()
		}
	}
	return nil
}
|
||||
|
||||
// Merge returns a new iterator over the union of the input iterators.
func Merge(its ...Postings) Postings {
	if len(its) == 0 {
		return EmptyPostings()
	}
	if len(its) == 1 {
		return its[0]
	}

	p, ok := newMergedPostings(its)
	if !ok {
		return EmptyPostings()
	}
	return p
}

// postingsHeap is a min-heap of iterators keyed by their current value,
// driven via container/heap by mergedPostings.
type postingsHeap []Postings

func (h postingsHeap) Len() int           { return len(h) }
func (h postingsHeap) Less(i, j int) bool { return h[i].At() < h[j].At() }
func (h *postingsHeap) Swap(i, j int)     { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] }

func (h *postingsHeap) Push(x interface{}) {
	*h = append(*h, x.(Postings))
}

func (h *postingsHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
|
||||
|
||||
// mergedPostings is a k-way merge over several postings iterators that also
// deduplicates values occurring in more than one input.
type mergedPostings struct {
	h           postingsHeap
	initialized bool
	cur         uint64
	err         error
}

// newMergedPostings primes every input with one Next call and collects the
// non-exhausted ones. nonEmpty is false only when all inputs were empty
// without error; an input that errors immediately yields a non-empty,
// always-failing merged iterator instead.
func newMergedPostings(p []Postings) (m *mergedPostings, nonEmpty bool) {
	ph := make(postingsHeap, 0, len(p))

	for _, it := range p {
		// NOTE: mergedPostings struct requires the user to issue an initial Next.
		if it.Next() {
			ph = append(ph, it)
		} else {
			if it.Err() != nil {
				return &mergedPostings{err: it.Err()}, true
			}
		}
	}

	if len(ph) == 0 {
		return nil, false
	}
	return &mergedPostings{h: ph}, true
}
|
||||
|
||||
// Next advances to the next distinct value across all inputs.
func (it *mergedPostings) Next() bool {
	if it.h.Len() == 0 || it.err != nil {
		return false
	}

	// The user must issue an initial Next.
	if !it.initialized {
		heap.Init(&it.h)
		it.cur = it.h[0].At()
		it.initialized = true
		return true
	}

	for {
		cur := it.h[0]
		if !cur.Next() {
			// Exhausted iterators are dropped from the heap.
			heap.Pop(&it.h)
			if cur.Err() != nil {
				it.err = cur.Err()
				return false
			}
			if it.h.Len() == 0 {
				return false
			}
		} else {
			// Value of top of heap has changed, re-heapify.
			heap.Fix(&it.h, 0)
		}

		// Loop until the heap minimum differs from the last returned value,
		// skipping duplicates present in several inputs.
		if it.h[0].At() != it.cur {
			it.cur = it.h[0].At()
			return true
		}
	}
}
|
||||
|
||||
// Seek advances the merged iterator to the first value >= id.
func (it *mergedPostings) Seek(id uint64) bool {
	if it.h.Len() == 0 || it.err != nil {
		return false
	}
	if !it.initialized {
		if !it.Next() {
			return false
		}
	}
	// Repeatedly seek the heap top forward until the merged position
	// reaches id; exhausted iterators are dropped.
	for it.cur < id {
		cur := it.h[0]
		if !cur.Seek(id) {
			heap.Pop(&it.h)
			if cur.Err() != nil {
				it.err = cur.Err()
				return false
			}
			if it.h.Len() == 0 {
				return false
			}
		} else {
			// Value of top of heap has changed, re-heapify.
			heap.Fix(&it.h, 0)
		}

		it.cur = it.h[0].At()
	}
	return true
}
|
||||
|
||||
func (it mergedPostings) At() uint64 {
|
||||
return it.cur
|
||||
}
|
||||
|
||||
func (it mergedPostings) Err() error {
|
||||
return it.err
|
||||
}
|
||||
|
||||
// Without returns a new postings list that contains all elements from the full list that
// are not in the drop list.
func Without(full, drop Postings) Postings {
	if full == EmptyPostings() {
		return EmptyPostings()
	}

	// Nothing to drop: pass full through unchanged.
	if drop == EmptyPostings() {
		return full
	}
	return newRemovedPostings(full, drop)
}
|
||||
|
||||
// removedPostings yields the values of full that are absent from remove.
type removedPostings struct {
	full, remove Postings

	cur uint64

	initialized bool
	fok, rok    bool // Whether full/remove still hold a current value.
}

func newRemovedPostings(full, remove Postings) *removedPostings {
	return &removedPostings{
		full:   full,
		remove: remove,
	}
}

func (rp *removedPostings) At() uint64 {
	return rp.cur
}

func (rp *removedPostings) Next() bool {
	// Prime both sub-iterators on the first call.
	if !rp.initialized {
		rp.fok = rp.full.Next()
		rp.rok = rp.remove.Next()
		rp.initialized = true
	}
	for {
		if !rp.fok {
			return false
		}

		// remove is exhausted; pass through the rest of full.
		if !rp.rok {
			rp.cur = rp.full.At()
			rp.fok = rp.full.Next()
			return true
		}

		fcur, rcur := rp.full.At(), rp.remove.At()
		if fcur < rcur {
			rp.cur = fcur
			rp.fok = rp.full.Next()

			return true
		} else if rcur < fcur {
			// Forward the remove postings to the right position.
			rp.rok = rp.remove.Seek(fcur)
		} else {
			// Skip the current posting.
			rp.fok = rp.full.Next()
		}
	}
}

func (rp *removedPostings) Seek(id uint64) bool {
	if rp.cur >= id {
		return true
	}

	rp.fok = rp.full.Seek(id)
	rp.rok = rp.remove.Seek(id)
	rp.initialized = true

	// Next resolves the first surviving value at or after id.
	return rp.Next()
}

func (rp *removedPostings) Err() error {
	if rp.full.Err() != nil {
		return rp.full.Err()
	}

	return rp.remove.Err()
}
|
||||
|
||||
// ListPostings implements the Postings interface over a plain list.
type ListPostings struct {
	list []uint64 // Remaining (not yet consumed) values.
	cur  uint64
}

// NewListPostings wraps a list of IDs as a Postings iterator.
func NewListPostings(list []uint64) Postings {
	return newListPostings(list...)
}

func newListPostings(list ...uint64) *ListPostings {
	return &ListPostings{list: list}
}

func (it *ListPostings) At() uint64 {
	return it.cur
}

func (it *ListPostings) Next() bool {
	if len(it.list) > 0 {
		it.cur = it.list[0]
		it.list = it.list[1:]
		return true
	}
	it.cur = 0
	return false
}

func (it *ListPostings) Seek(x uint64) bool {
	// If the current value satisfies, then return.
	if it.cur >= x {
		return true
	}
	if len(it.list) == 0 {
		return false
	}

	// Do binary search between current position and end.
	i := sort.Search(len(it.list), func(i int) bool {
		return it.list[i] >= x
	})
	if i < len(it.list) {
		it.cur = it.list[i]
		it.list = it.list[i+1:]
		return true
	}
	it.list = nil
	return false
}

// Err always returns nil; iterating a plain slice cannot fail.
func (it *ListPostings) Err() error {
	return nil
}
|
||||
|
||||
// bigEndianPostings implements the Postings interface over a byte stream of
// big endian numbers.
type bigEndianPostings struct {
	list []byte // Remaining encoded values, 4 bytes each.
	cur  uint32
}

func newBigEndianPostings(list []byte) *bigEndianPostings {
	return &bigEndianPostings{list: list}
}

func (it *bigEndianPostings) At() uint64 {
	return uint64(it.cur)
}

func (it *bigEndianPostings) Next() bool {
	// Each value is a fixed-width 4-byte big-endian integer.
	if len(it.list) >= 4 {
		it.cur = binary.BigEndian.Uint32(it.list)
		it.list = it.list[4:]
		return true
	}
	return false
}

func (it *bigEndianPostings) Seek(x uint64) bool {
	if uint64(it.cur) >= x {
		return true
	}

	num := len(it.list) / 4
	// Do binary search between current position and end.
	i := sort.Search(num, func(i int) bool {
		return binary.BigEndian.Uint32(it.list[i*4:]) >= uint32(x)
	})
	if i < num {
		j := i * 4
		it.cur = binary.BigEndian.Uint32(it.list[j:])
		it.list = it.list[j+4:]
		return true
	}
	it.list = nil
	return false
}

// Err always returns nil; decoding a fixed-width stream cannot fail here.
func (it *bigEndianPostings) Err() error {
	return nil
}
|
||||
70
vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go
generated
vendored
Normal file
70
vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package index
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Stat holds values for a single cardinality statistic.
type Stat struct {
	Name  string
	Count uint64
}

// maxHeap retains the maxLength Stats with the highest Count seen so far.
// Despite the name it is a simple bounded slice that tracks its current
// minimum, not a container/heap structure.
type maxHeap struct {
	maxLength int
	minValue  uint64
	minIndex  int
	Items     []Stat
}

// init prepares the heap to retain at most n items.
// (The parameter was renamed from "len", which shadowed the builtin.)
func (m *maxHeap) init(n int) {
	m.maxLength = n
	m.minValue = math.MaxUint64
	m.Items = make([]Stat, 0, n)
}

// push offers item for retention. It is kept when the heap is not yet full
// or when item.Count is at least the current minimum, which it then evicts.
func (m *maxHeap) push(item Stat) {
	if len(m.Items) < m.maxLength {
		if item.Count < m.minValue {
			m.minValue = item.Count
			m.minIndex = len(m.Items)
		}
		m.Items = append(m.Items, item)
		return
	}
	if item.Count < m.minValue {
		return
	}

	// Replace the current minimum, then rescan to locate the new minimum.
	m.Items[m.minIndex] = item
	m.minValue = item.Count

	for i, stat := range m.Items {
		if stat.Count < m.minValue {
			m.minValue = stat.Count
			m.minIndex = i
		}
	}

}

// get returns the retained items sorted by Count in descending order.
func (m *maxHeap) get() []Stat {
	sort.Slice(m.Items, func(i, j int) bool {
		return m.Items[i].Count > m.Items[j].Count
	})
	return m.Items
}
|
||||
200
vendor/github.com/prometheus/prometheus/tsdb/isolation.go
generated
vendored
Normal file
200
vendor/github.com/prometheus/prometheus/tsdb/isolation.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
// Copyright 2020 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// isolationState holds the isolation information.
type isolationState struct {
	// We will ignore all appends above the max, or that are incomplete.
	maxAppendID       uint64
	incompleteAppends map[uint64]struct{}
	lowWatermark      uint64 // Lowest of incompleteAppends/maxAppendID.
	isolation         *isolation

	// Doubly linked list of active reads.
	next *isolationState
	prev *isolationState
}

// Close closes the state. It unlinks the state from the isolation's
// doubly-linked list of open reads under readMtx; it must be called once
// when the read is complete.
func (i *isolationState) Close() {
	i.isolation.readMtx.Lock()
	defer i.isolation.readMtx.Unlock()
	i.next.prev = i.prev
	i.prev.next = i.next
}

// isolation is the global isolation state.
type isolation struct {
	// Mutex for accessing lastAppendID and appendsOpen.
	appendMtx sync.Mutex
	// Each append is given an internal id.
	lastAppendID uint64
	// Which appends are currently in progress.
	appendsOpen map[uint64]struct{}
	// Mutex for accessing readsOpen.
	// If taking both appendMtx and readMtx, take appendMtx first.
	readMtx sync.Mutex
	// All current in use isolationStates. This is a doubly-linked list.
	readsOpen *isolationState
}

// newIsolation returns an isolation with no appends or reads open.
// readsOpen is a self-linked sentinel node, so list insertion and removal
// need no nil checks.
func newIsolation() *isolation {
	isoState := &isolationState{}
	isoState.next = isoState
	isoState.prev = isoState

	return &isolation{
		appendsOpen: map[uint64]struct{}{},
		readsOpen:   isoState,
	}
}

// lowWatermark returns the appendID below which we no longer need to track
// which appends were from which appendID.
func (i *isolation) lowWatermark() uint64 {
	i.appendMtx.Lock() // Take appendMtx first.
	defer i.appendMtx.Unlock()
	i.readMtx.Lock()
	defer i.readMtx.Unlock()
	// If any reads are open, the oldest one (tail of the list) bounds the
	// watermark.
	if i.readsOpen.prev != i.readsOpen {
		return i.readsOpen.prev.lowWatermark
	}
	// No open reads: the watermark is the oldest in-progress append, or the
	// last issued id if none are in progress.
	lw := i.lastAppendID
	for k := range i.appendsOpen {
		if k < lw {
			lw = k
		}
	}
	return lw
}

// State returns an object used to control isolation
// between a query and appends. Must be closed when complete.
func (i *isolation) State() *isolationState {
	i.appendMtx.Lock() // Take append mutex before read mutex.
	defer i.appendMtx.Unlock()
	isoState := &isolationState{
		maxAppendID:       i.lastAppendID,
		lowWatermark:      i.lastAppendID,
		incompleteAppends: make(map[uint64]struct{}, len(i.appendsOpen)),
		isolation:         i,
	}
	// Snapshot the currently open appends: the read must ignore them, and
	// the oldest of them lowers this state's watermark.
	for k := range i.appendsOpen {
		isoState.incompleteAppends[k] = struct{}{}
		if k < isoState.lowWatermark {
			isoState.lowWatermark = k
		}
	}

	// Insert the new state right after the sentinel (head of the list).
	i.readMtx.Lock()
	defer i.readMtx.Unlock()
	isoState.prev = i.readsOpen
	isoState.next = i.readsOpen.next
	i.readsOpen.next.prev = isoState
	i.readsOpen.next = isoState
	return isoState
}

// newAppendID increments the transaction counter and returns a new transaction
// ID. The first ID returned is 1. The id stays in appendsOpen until
// closeAppend is called for it.
func (i *isolation) newAppendID() uint64 {
	i.appendMtx.Lock()
	defer i.appendMtx.Unlock()
	i.lastAppendID++
	i.appendsOpen[i.lastAppendID] = struct{}{}
	return i.lastAppendID
}

// closeAppend marks the append with the given id as no longer in progress.
func (i *isolation) closeAppend(appendID uint64) {
	i.appendMtx.Lock()
	defer i.appendMtx.Unlock()
	delete(i.appendsOpen, appendID)
}
|
||||
|
||||
// txRing is a ring buffer of transaction (append) IDs, stored in insertion
// order. It grows by doubling when full.
type txRing struct {
	txIDs     []uint64
	txIDFirst int // Position of the first id in the ring.
	txIDCount int // How many ids in the ring.
}

// newTxRing returns a ring with room for capacity ids before it must grow.
// (Parameter renamed from cap to avoid shadowing the builtin.)
func newTxRing(capacity int) *txRing {
	return &txRing{
		txIDs: make([]uint64, capacity),
	}
}

// add appends appendID to the ring, expanding the ring if it is full.
func (txr *txRing) add(appendID uint64) {
	if txr.txIDCount == len(txr.txIDs) {
		// Ring buffer is full, expand by doubling.
		// Guard against a zero-capacity ring: doubling 0 would leave the
		// slice empty and the modulo below would divide by zero.
		newLen := txr.txIDCount * 2
		if newLen == 0 {
			newLen = 1
		}
		newRing := make([]uint64, newLen)
		// Unwrap the old contents so the new ring starts at index 0.
		idx := copy(newRing, txr.txIDs[txr.txIDFirst:])
		copy(newRing[idx:], txr.txIDs[:txr.txIDFirst])
		txr.txIDs = newRing
		txr.txIDFirst = 0
	}

	txr.txIDs[(txr.txIDFirst+txr.txIDCount)%len(txr.txIDs)] = appendID
	txr.txIDCount++
}

// cleanupAppendIDsBelow drops all ids strictly below bound from the front of
// the ring. It relies on ids having been added in increasing order.
func (txr *txRing) cleanupAppendIDsBelow(bound uint64) {
	if len(txr.txIDs) == 0 {
		// Nothing stored; also avoids a modulo by zero below.
		return
	}
	pos := txr.txIDFirst

	for txr.txIDCount > 0 {
		if txr.txIDs[pos] < bound {
			txr.txIDFirst++
			txr.txIDCount--
		} else {
			break
		}

		pos++
		if pos == len(txr.txIDs) {
			pos = 0
		}
	}

	// txIDFirst was advanced without wrapping; normalize it here.
	txr.txIDFirst %= len(txr.txIDs)
}

// iterator returns an endless iterator positioned at the oldest id.
func (txr *txRing) iterator() *txRingIterator {
	return &txRingIterator{
		pos: txr.txIDFirst,
		ids: txr.txIDs,
	}
}

// txRingIterator lets you iterate over the ring. It doesn't terminate,
// it DOESN'T terminate.
type txRingIterator struct {
	ids []uint64

	pos int
}

// At returns the id under the cursor.
func (it *txRingIterator) At() uint64 {
	return it.ids[it.pos]
}

// Next advances the cursor, wrapping around at the end of the ring.
func (it *txRingIterator) Next() {
	it.pos++
	if it.pos == len(it.ids) {
		it.pos = 0
	}
}
|
||||
1207
vendor/github.com/prometheus/prometheus/tsdb/querier.go
generated
vendored
Normal file
1207
vendor/github.com/prometheus/prometheus/tsdb/querier.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
227
vendor/github.com/prometheus/prometheus/tsdb/record/record.go
generated
vendored
Normal file
227
vendor/github.com/prometheus/prometheus/tsdb/record/record.go
generated
vendored
Normal file
@@ -0,0 +1,227 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package record
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/tsdb/encoding"
|
||||
"github.com/prometheus/prometheus/tsdb/tombstones"
|
||||
)
|
||||
|
||||
// Type represents the data type of a record.
type Type uint8

const (
	// Invalid is returned for unrecognised WAL record types.
	Invalid Type = 255
	// Series is used to match WAL records of type Series.
	Series Type = 1
	// Samples is used to match WAL records of type Samples.
	Samples Type = 2
	// Tombstones is used to match WAL records of type Tombstones.
	Tombstones Type = 3
)

var (
	// ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go.
	ErrNotFound = errors.New("not found")
)

// RefSeries is the series labels with the series ID.
type RefSeries struct {
	Ref    uint64 // Series reference id.
	Labels labels.Labels
}

// RefSample is a timestamp/value pair associated with a reference to a series.
type RefSample struct {
	Ref uint64  // Series reference id.
	T   int64   // Timestamp.
	V   float64 // Value.
}

// Decoder decodes series, sample, and tombstone records.
// The zero value is ready to use.
type Decoder struct {
}

// Type returns the type of the record.
// Returns RecordInvalid if no valid record type is found.
func (d *Decoder) Type(rec []byte) Type {
	// The record type is stored in the first byte of the record.
	if len(rec) < 1 {
		return Invalid
	}
	switch t := Type(rec[0]); t {
	case Series, Samples, Tombstones:
		return t
	}
	return Invalid
}
|
||||
|
||||
// Series appends series in rec to the given slice.
// It returns an error if the record is not a Series record or is malformed.
func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Series {
		return nil, errors.New("invalid record type")
	}
	for len(dec.B) > 0 && dec.Err() == nil {
		// Each entry is: big-endian series ref, a uvarint label count, then
		// that many uvarint-length-prefixed name/value string pairs.
		ref := dec.Be64()

		lset := make(labels.Labels, dec.Uvarint())

		for i := range lset {
			lset[i].Name = dec.UvarintStr()
			lset[i].Value = dec.UvarintStr()
		}
		// Sort to guarantee the label set is in canonical order.
		sort.Sort(lset)

		series = append(series, RefSeries{
			Ref:    ref,
			Labels: lset,
		})
	}
	if dec.Err() != nil {
		return nil, dec.Err()
	}
	// The loop above consumes the whole buffer on success; leftovers mean
	// the record is corrupt.
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return series, nil
}
|
||||
|
||||
// Samples appends samples in rec to the given slice.
// It returns an error if the record is not a Samples record or is malformed.
func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Samples {
		return nil, errors.New("invalid record type")
	}
	// An empty Samples record (just the type byte) is valid.
	if dec.Len() == 0 {
		return samples, nil
	}
	var (
		// The first sample's ref and timestamp are stored in full; every
		// sample (including the first) then encodes its ref and timestamp
		// as varint deltas against these base values.
		baseRef  = dec.Be64()
		baseTime = dec.Be64int64()
	)
	for len(dec.B) > 0 && dec.Err() == nil {
		dref := dec.Varint64()
		dtime := dec.Varint64()
		val := dec.Be64()

		samples = append(samples, RefSample{
			Ref: uint64(int64(baseRef) + dref),
			T:   baseTime + dtime,
			V:   math.Float64frombits(val),
		})
	}

	if dec.Err() != nil {
		return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples))
	}
	// Leftover bytes after a clean decode indicate a corrupt record.
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return samples, nil
}
|
||||
|
||||
// Tombstones appends tombstones in rec to the given slice.
// It returns an error if the record is not a Tombstones record or is malformed.
func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombstones.Stone, error) {
	dec := encoding.Decbuf{B: rec}

	if Type(dec.Byte()) != Tombstones {
		return nil, errors.New("invalid record type")
	}
	for dec.Len() > 0 && dec.Err() == nil {
		// Each entry is: big-endian series ref followed by one
		// varint-encoded (mint, maxt) deletion interval.
		tstones = append(tstones, tombstones.Stone{
			Ref: dec.Be64(),
			Intervals: tombstones.Intervals{
				{Mint: dec.Varint64(), Maxt: dec.Varint64()},
			},
		})
	}
	if dec.Err() != nil {
		return nil, dec.Err()
	}
	// Leftover bytes after a clean decode indicate a corrupt record.
	if len(dec.B) > 0 {
		return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B))
	}
	return tstones, nil
}
|
||||
|
||||
// Encoder encodes series, sample, and tombstones records.
// The zero value is ready to use.
type Encoder struct {
}

// Series appends the encoded series to b and returns the resulting slice.
// The layout mirrors what Decoder.Series expects.
func (e *Encoder) Series(series []RefSeries, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Series))

	for _, s := range series {
		// Per series: big-endian ref, uvarint label count, then
		// uvarint-length-prefixed name/value string pairs.
		buf.PutBE64(s.Ref)
		buf.PutUvarint(len(s.Labels))

		for _, l := range s.Labels {
			buf.PutUvarintStr(l.Name)
			buf.PutUvarintStr(l.Value)
		}
	}
	return buf.Get()
}

// Samples appends the encoded samples to b and returns the resulting slice.
// The layout mirrors what Decoder.Samples expects.
func (e *Encoder) Samples(samples []RefSample, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Samples))

	// An empty record carries only the type byte.
	if len(samples) == 0 {
		return buf.Get()
	}

	// Store base timestamp and base reference number of first sample.
	// All samples encode their timestamp and ref as delta to those.
	first := samples[0]

	buf.PutBE64(first.Ref)
	buf.PutBE64int64(first.T)

	for _, s := range samples {
		buf.PutVarint64(int64(s.Ref) - int64(first.Ref))
		buf.PutVarint64(s.T - first.T)
		buf.PutBE64(math.Float64bits(s.V))
	}
	return buf.Get()
}

// Tombstones appends the encoded tombstones to b and returns the resulting slice.
// The layout mirrors what Decoder.Tombstones expects.
func (e *Encoder) Tombstones(tstones []tombstones.Stone, b []byte) []byte {
	buf := encoding.Encbuf{B: b}
	buf.PutByte(byte(Tombstones))

	for _, s := range tstones {
		// Each interval is written as a separate (ref, mint, maxt) entry,
		// repeating the ref for every interval of the same stone.
		for _, iv := range s.Intervals {
			buf.PutBE64(s.Ref)
			buf.PutVarint64(iv.Mint)
			buf.PutVarint64(iv.Maxt)
		}
	}
	return buf.Get()
}
|
||||
133
vendor/github.com/prometheus/prometheus/tsdb/repair.go
generated
vendored
Normal file
133
vendor/github.com/prometheus/prometheus/tsdb/repair.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/pkg/errors"
|
||||
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||
)
|
||||
|
||||
// repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in
// commit 129773b41a565fde5156301e37f9a87158030443.
// It scans every block dir under dir, rewrites the index file's version byte,
// and resets the meta.json version for affected blocks.
func repairBadIndexVersion(logger log.Logger, dir string) error {
	// All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected.
	// We must actually set the index file version to 2 and revert the meta.json version back to 1.
	dirs, err := blockDirs(dir)
	if err != nil {
		return errors.Wrapf(err, "list block dirs in %q", dir)
	}

	wrapErr := func(err error, d string) error {
		return errors.Wrapf(err, "block dir: %q", d)
	}

	// Track temporary "index.repaired" files so they are removed on any
	// return path.
	tmpFiles := make([]string, 0, len(dirs))
	defer func() {
		for _, tmp := range tmpFiles {
			if err := os.RemoveAll(tmp); err != nil {
				level.Error(logger).Log("msg", "remove tmp file", "err", err.Error())
			}
		}
	}()

	for _, d := range dirs {
		meta, err := readBogusMetaFile(d)
		if err != nil {
			return wrapErr(err, d)
		}
		// Version 1 blocks are already in the correct format.
		if meta.Version == metaVersion1 {
			level.Info(logger).Log(
				"msg", "Found healthy block",
				"mint", meta.MinTime,
				"maxt", meta.MaxTime,
				"ulid", meta.ULID,
			)
			continue
		}
		level.Info(logger).Log(
			"msg", "Fixing broken block",
			"mint", meta.MinTime,
			"maxt", meta.MaxTime,
			"ulid", meta.ULID,
		)

		// Copy the broken index aside, patch the copy, then atomically
		// replace the original.
		repl, err := os.Create(filepath.Join(d, "index.repaired"))
		if err != nil {
			return wrapErr(err, d)
		}
		tmpFiles = append(tmpFiles, repl.Name())

		broken, err := os.Open(filepath.Join(d, indexFilename))
		if err != nil {
			return wrapErr(err, d)
		}
		// NOTE(review): repl and broken are not closed on this error path
		// (nor on the os.Open failure above for repl) — confirm whether the
		// leaked descriptors matter for callers of this one-shot repair.
		if _, err := io.Copy(repl, broken); err != nil {
			return wrapErr(err, d)
		}

		var merr tsdb_errors.MultiError

		// Set the 5th byte to 2 to indicate the correct file format version.
		if _, err := repl.WriteAt([]byte{2}, 4); err != nil {
			merr.Add(wrapErr(err, d))
			merr.Add(wrapErr(repl.Close(), d))
			return merr.Err()
		}
		if err := repl.Sync(); err != nil {
			merr.Add(wrapErr(err, d))
			merr.Add(wrapErr(repl.Close(), d))
			return merr.Err()
		}
		if err := repl.Close(); err != nil {
			return wrapErr(err, d)
		}
		if err := broken.Close(); err != nil {
			return wrapErr(err, d)
		}
		if err := fileutil.Replace(repl.Name(), broken.Name()); err != nil {
			return wrapErr(err, d)
		}
		// Reset version of meta.json to 1.
		meta.Version = metaVersion1
		if _, err := writeMetaFile(logger, d, meta); err != nil {
			return wrapErr(err, d)
		}
	}
	return nil
}

// readBogusMetaFile reads a block's meta.json, accepting both the valid
// version (metaVersion1) and the known-bogus version 2 that
// repairBadIndexVersion fixes. Any other version is an error.
func readBogusMetaFile(dir string) (*BlockMeta, error) {
	b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename))
	if err != nil {
		return nil, err
	}
	var m BlockMeta

	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
	if m.Version != metaVersion1 && m.Version != 2 {
		return nil, errors.Errorf("unexpected meta file version %d", m.Version)
	}
	return &m, nil
}
|
||||
84
vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
generated
vendored
Normal file
84
vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go
generated
vendored
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package tsdb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
)
|
||||
|
||||
// ErrInvalidTimes is returned when the requested time range is invalid
// (max time is lesser than min time).
var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time")

// MetricSample is a single sample: a value at a timestamp, belonging to the
// series identified by Labels.
type MetricSample struct {
	TimestampMs int64
	Value       float64
	Labels      labels.Labels
}

// CreateHead creates a TSDB writer head to write the sample data to.
// All samples are appended and committed before the head is returned.
// NOTE(review): the head is not closed on error paths — confirm whether
// callers need a Close there.
func CreateHead(samples []*MetricSample, chunkRange int64, logger log.Logger) (*Head, error) {
	head, err := NewHead(nil, logger, nil, chunkRange, DefaultStripeSize)
	if err != nil {
		return nil, err
	}
	app := head.Appender()
	for _, sample := range samples {
		_, err = app.Add(sample.Labels, sample.TimestampMs, sample.Value)
		if err != nil {
			return nil, err
		}
	}
	// Commit makes the appended samples visible in the head.
	err = app.Commit()
	if err != nil {
		return nil, err
	}
	return head, nil
}

// CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk.
// It returns the directory of the newly written block (dir joined with the
// block's ULID).
func CreateBlock(samples []*MetricSample, dir string, mint, maxt int64, logger log.Logger) (string, error) {
	// Derive the head's chunk range from the requested time span; an empty
	// span falls back to the default block duration.
	chunkRange := maxt - mint
	if chunkRange == 0 {
		chunkRange = DefaultBlockDuration
	}
	if chunkRange < 0 {
		return "", ErrInvalidTimes
	}
	head, err := CreateHead(samples, chunkRange, logger)
	if err != nil {
		return "", err
	}

	compactor, err := NewLeveledCompactor(context.Background(), nil, logger, ExponentialBlockRanges(DefaultBlockDuration, 3, 5), nil)
	if err != nil {
		return "", err
	}

	err = os.MkdirAll(dir, 0777)
	if err != nil {
		return "", err
	}

	// Write persists the head's data for the requested time range as a
	// block under dir and returns its ULID.
	ulid, err := compactor.Write(dir, head, mint, maxt, nil)
	if err != nil {
		return "", err
	}

	return filepath.Join(dir, ulid.String()), nil
}
|
||||
1306
vendor/github.com/prometheus/prometheus/tsdb/wal.go
generated
vendored
Normal file
1306
vendor/github.com/prometheus/prometheus/tsdb/wal.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
291
vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go
generated
vendored
Normal file
291
vendor/github.com/prometheus/prometheus/tsdb/wal/checkpoint.go
generated
vendored
Normal file
@@ -0,0 +1,291 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
|
||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||
"github.com/prometheus/prometheus/tsdb/record"
|
||||
"github.com/prometheus/prometheus/tsdb/tombstones"
|
||||
)
|
||||
|
||||
// CheckpointStats returns stats about a created checkpoint.
type CheckpointStats struct {
	DroppedSeries     int
	DroppedSamples    int
	DroppedTombstones int
	TotalSeries       int // Processed series including dropped ones.
	TotalSamples      int // Processed samples including dropped ones.
	TotalTombstones   int // Processed tombstones including dropped ones.
}

// LastCheckpoint returns the directory name and index of the most recent checkpoint.
// If dir does not contain any checkpoints, ErrNotFound is returned.
func LastCheckpoint(dir string) (string, int, error) {
	checkpoints, err := listCheckpoints(dir)
	if err != nil {
		return "", 0, err
	}

	if len(checkpoints) == 0 {
		return "", 0, record.ErrNotFound
	}

	// listCheckpoints sorts by ascending index, so the last entry is the
	// most recent checkpoint.
	checkpoint := checkpoints[len(checkpoints)-1]
	return filepath.Join(dir, checkpoint.name), checkpoint.index, nil
}

// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
func DeleteCheckpoints(dir string, maxIndex int) error {
	checkpoints, err := listCheckpoints(dir)
	if err != nil {
		return err
	}

	// Collect deletion failures rather than aborting, so one bad checkpoint
	// does not prevent removal of the others.
	var errs tsdb_errors.MultiError
	for _, checkpoint := range checkpoints {
		// Checkpoints are sorted ascending; everything from here on is kept.
		if checkpoint.index >= maxIndex {
			break
		}
		if err := os.RemoveAll(filepath.Join(dir, checkpoint.name)); err != nil {
			errs.Add(err)
		}
	}
	return errs.Err()
}
||||
|
||||
// checkpointPrefix is the name prefix of checkpoint directories in the WAL dir.
const checkpointPrefix = "checkpoint."

// Checkpoint creates a compacted checkpoint of segments in range [from, to] in the given WAL.
// It includes the most recent checkpoint if it exists.
// All series not satisfying keep and samples below mint are dropped.
//
// The checkpoint is stored in a directory named checkpoint.N in the same
// segmented format as the original WAL itself.
// This makes it easy to read it through the WAL package and concatenate
// it with the original WAL.
func Checkpoint(w *WAL, from, to int, keep func(id uint64) bool, mint int64) (*CheckpointStats, error) {
	stats := &CheckpointStats{}
	var sgmReader io.ReadCloser

	{
		// Build a reader over the most recent checkpoint (if any) followed
		// by WAL segments [from, to].
		var sgmRange []SegmentRange
		dir, idx, err := LastCheckpoint(w.Dir())
		if err != nil && err != record.ErrNotFound {
			return nil, errors.Wrap(err, "find last checkpoint")
		}
		last := idx + 1
		if err == nil {
			if from > last {
				return nil, fmt.Errorf("unexpected gap to last checkpoint. expected:%v, requested:%v", last, from)
			}
			// Ignore WAL files below the checkpoint. They shouldn't exist to begin with.
			from = last

			sgmRange = append(sgmRange, SegmentRange{Dir: dir, Last: math.MaxInt32})
		}

		sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to})
		sgmReader, err = NewSegmentsRangeReader(sgmRange...)
		if err != nil {
			return nil, errors.Wrap(err, "create segment reader")
		}
		defer sgmReader.Close()
	}

	// Write into a ".tmp" directory first and atomically promote it at the
	// end so a partial checkpoint is never visible under its final name.
	cpdir := checkpointDir(w.Dir(), to)
	cpdirtmp := cpdir + ".tmp"

	if err := os.RemoveAll(cpdirtmp); err != nil {
		return nil, errors.Wrap(err, "remove previous temporary checkpoint dir")
	}

	if err := os.MkdirAll(cpdirtmp, 0777); err != nil {
		return nil, errors.Wrap(err, "create checkpoint dir")
	}
	cp, err := New(nil, nil, cpdirtmp, w.CompressionEnabled())
	if err != nil {
		return nil, errors.Wrap(err, "open checkpoint")
	}

	// Ensures that an early return caused by an error doesn't leave any tmp files.
	defer func() {
		cp.Close()
		os.RemoveAll(cpdirtmp)
	}()

	r := NewReader(sgmReader)

	var (
		series  []record.RefSeries
		samples []record.RefSample
		tstones []tombstones.Stone
		dec     record.Decoder
		enc     record.Encoder
		buf     []byte
		recs    [][]byte
	)
	for r.Next() {
		series, samples, tstones = series[:0], samples[:0], tstones[:0]

		// We don't reset the buffer since we batch up multiple records
		// before writing them to the checkpoint.
		// Remember where the record for this iteration starts.
		start := len(buf)
		rec := r.Record()

		switch dec.Type(rec) {
		case record.Series:
			series, err = dec.Series(rec, series)
			if err != nil {
				return nil, errors.Wrap(err, "decode series")
			}
			// Drop irrelevant series in place.
			repl := series[:0]
			for _, s := range series {
				if keep(s.Ref) {
					repl = append(repl, s)
				}
			}
			if len(repl) > 0 {
				buf = enc.Series(repl, buf)
			}
			stats.TotalSeries += len(series)
			stats.DroppedSeries += len(series) - len(repl)

		case record.Samples:
			samples, err = dec.Samples(rec, samples)
			if err != nil {
				return nil, errors.Wrap(err, "decode samples")
			}
			// Drop irrelevant samples in place.
			repl := samples[:0]
			for _, s := range samples {
				if s.T >= mint {
					repl = append(repl, s)
				}
			}
			if len(repl) > 0 {
				buf = enc.Samples(repl, buf)
			}
			stats.TotalSamples += len(samples)
			stats.DroppedSamples += len(samples) - len(repl)

		case record.Tombstones:
			tstones, err = dec.Tombstones(rec, tstones)
			if err != nil {
				return nil, errors.Wrap(err, "decode deletes")
			}
			// Drop irrelevant tombstones in place. A stone is kept if any
			// of its intervals reaches mint or beyond.
			repl := tstones[:0]
			for _, s := range tstones {
				for _, iv := range s.Intervals {
					if iv.Maxt >= mint {
						repl = append(repl, s)
						break
					}
				}
			}
			if len(repl) > 0 {
				buf = enc.Tombstones(repl, buf)
			}
			stats.TotalTombstones += len(tstones)
			stats.DroppedTombstones += len(tstones) - len(repl)

		default:
			return nil, errors.New("invalid record type")
		}
		if len(buf[start:]) == 0 {
			continue // All contents discarded.
		}
		recs = append(recs, buf[start:])

		// Flush records in 1 MB increments.
		if len(buf) > 1*1024*1024 {
			if err := cp.Log(recs...); err != nil {
				return nil, errors.Wrap(err, "flush records")
			}
			buf, recs = buf[:0], recs[:0]
		}
	}
	// If we hit any corruption during checkpointing, repairing is not an option.
	// The head won't know which series records are lost.
	if r.Err() != nil {
		return nil, errors.Wrap(r.Err(), "read segments")
	}

	// Flush remaining records.
	if err := cp.Log(recs...); err != nil {
		return nil, errors.Wrap(err, "flush records")
	}
	if err := cp.Close(); err != nil {
		return nil, errors.Wrap(err, "close checkpoint")
	}
	// Atomically promote the temporary directory to its final name.
	if err := fileutil.Replace(cpdirtmp, cpdir); err != nil {
		return nil, errors.Wrap(err, "rename checkpoint directory")
	}

	return stats, nil
}
||||
|
||||
// checkpointDir returns the checkpoint directory path for index i inside dir.
func checkpointDir(dir string, i int) string {
	return filepath.Join(dir, fmt.Sprintf(checkpointPrefix+"%08d", i))
}

// checkpointRef pairs a checkpoint directory name with its parsed index.
type checkpointRef struct {
	name  string
	index int
}

// listCheckpoints returns all checkpoint directories found directly under dir,
// sorted by ascending checkpoint index. Entries whose numeric suffix does not
// parse are skipped; a checkpoint-prefixed entry that is not a directory is
// an error.
func listCheckpoints(dir string) (refs []checkpointRef, err error) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	for i := 0; i < len(files); i++ {
		fi := files[i]
		if !strings.HasPrefix(fi.Name(), checkpointPrefix) {
			continue
		}
		if !fi.IsDir() {
			return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name())
		}
		idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):])
		if err != nil {
			continue
		}

		refs = append(refs, checkpointRef{name: fi.Name(), index: idx})
	}

	sort.Slice(refs, func(i, j int) bool {
		return refs[i].index < refs[j].index
	})

	return refs, nil
}
|
||||
322
vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go
generated
vendored
Normal file
322
vendor/github.com/prometheus/prometheus/tsdb/wal/live_reader.go
generated
vendored
Normal file
@@ -0,0 +1,322 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// LiveReaderMetrics holds all metrics exposed by the LiveReader.
type LiveReaderMetrics struct {
	// readerCorruptionErrors counts corruption errors encountered while
	// reading, partitioned by an "error" label.
	readerCorruptionErrors *prometheus.CounterVec
}
|
||||
|
||||
// NewLiveReaderMetrics instantiates, registers and returns metrics to be injected
|
||||
// at LiveReader instantiation.
|
||||
func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics {
|
||||
m := &LiveReaderMetrics{
|
||||
readerCorruptionErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "prometheus_tsdb_wal_reader_corruption_errors_total",
|
||||
Help: "Errors encountered when reading the WAL.",
|
||||
}, []string{"error"}),
|
||||
}
|
||||
|
||||
if reg != nil {
|
||||
reg.MustRegister(m.readerCorruptionErrors)
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// NewLiveReader returns a new live reader.
|
||||
func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader {
|
||||
lr := &LiveReader{
|
||||
logger: logger,
|
||||
rdr: r,
|
||||
metrics: metrics,
|
||||
|
||||
// Until we understand how they come about, make readers permissive
|
||||
// to records spanning pages.
|
||||
permissive: true,
|
||||
}
|
||||
|
||||
return lr
|
||||
}
|
||||
|
||||
// LiveReader reads WAL records from an io.Reader. It allows reading of WALs
// that are still in the process of being written, and returns records as soon
// as they can be read.
type LiveReader struct {
	logger log.Logger
	rdr    io.Reader
	err    error
	// rec is the fully assembled current record; see Record().
	rec []byte
	// snappyBuf accumulates compressed fragments before decompression.
	snappyBuf []byte
	// hdr holds the header of the most recently read sub-record.
	hdr        [recordHeaderSize]byte
	buf        [pageSize]byte
	readIndex  int   // Index in buf to start at for next read.
	writeIndex int   // Index in buf to start at for next write.
	total      int64 // Total bytes processed during reading in calls to Next().
	index      int   // Used to track partial records, should be 0 at the start of every new record.

	// For testing, we can treat EOF as a non-error.
	eofNonErr bool

	// We sometime see records span page boundaries. Should never happen, but it
	// does. Until we track down why, set permissive to true to tolerate it.
	// NB the non-live Reader implementation allows for this.
	permissive bool

	metrics *LiveReaderMetrics
}
|
||||
|
||||
// Err returns any errors encountered reading the WAL. io.EOFs are not terminal
|
||||
// and Next can be tried again. Non-EOFs are terminal, and the reader should
|
||||
// not be used again. It is up to the user to decide when to stop trying should
|
||||
// io.EOF be returned.
|
||||
func (r *LiveReader) Err() error {
|
||||
if r.eofNonErr && r.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return r.err
|
||||
}
|
||||
|
||||
// Offset returns the number of bytes consumed from this segment.
func (r *LiveReader) Offset() int64 {
	return r.total
}
|
||||
|
||||
// fillBuffer reads from the underlying reader into the free tail of the page
// buffer and advances writeIndex by the number of bytes read.
func (r *LiveReader) fillBuffer() (int, error) {
	n, err := r.rdr.Read(r.buf[r.writeIndex:len(r.buf)])
	r.writeIndex += n
	return n, err
}
|
||||
|
||||
// Next returns true if Record() will contain a full record.
// If Next returns false, you should always check the contents of Err().
// A false return guarantees there are no more records if the segment is
// closed and not corrupt; otherwise, if Err() == io.EOF, you should try
// again when more data has been written.
func (r *LiveReader) Next() bool {
	for {
		// If buildRecord returns a non-EOF error, it's game up - the segment is
		// corrupt. If buildRecord returns an EOF, we try and read more in
		// fillBuffer later on. If that fails to read anything (n=0 && err=EOF),
		// we return EOF and the user can try again later. If we have a full
		// page, buildRecord is guaranteed to return a record or a non-EOF; it
		// has checks the records fit in pages.
		if ok, err := r.buildRecord(); ok {
			return true
		} else if err != nil && err != io.EOF {
			r.err = err
			return false
		}

		// If we've filled the page and not found a record, this
		// means records have started to span pages. Shouldn't happen
		// but does and until we found out why, we need to deal with this.
		if r.permissive && r.writeIndex == pageSize && r.readIndex > 0 {
			// Shift the unread tail of the buffer to the front to make room
			// for the remainder of the spanning record.
			copy(r.buf[:], r.buf[r.readIndex:])
			r.writeIndex -= r.readIndex
			r.readIndex = 0
			continue
		}

		// The whole page was consumed; recycle the buffer.
		if r.readIndex == pageSize {
			r.writeIndex = 0
			r.readIndex = 0
		}

		// Top up the buffer if there is room. A read of zero bytes or a
		// non-EOF read error ends the iteration.
		if r.writeIndex != pageSize {
			n, err := r.fillBuffer()
			if n == 0 || (err != nil && err != io.EOF) {
				r.err = err
				return false
			}
		}
	}
}
|
||||
|
||||
// Record returns the current record.
// The returned byte slice is only valid until the next call to Next.
func (r *LiveReader) Record() []byte {
	return r.rec
}
|
||||
|
||||
// buildRecord rebuilds a full record from potentially partial sub-records.
// Returns false if there was an error or if we weren't able to read a record
// for any reason. Returns true if we read a full record. Any record data is
// appended to LiveReader.rec (after snappy decompression if needed).
func (r *LiveReader) buildRecord() (bool, error) {
	for {
		// Check that we have data in the internal buffer to read.
		if r.writeIndex <= r.readIndex {
			return false, nil
		}

		// Attempt to read a record, partial or otherwise.
		temp, n, err := r.readRecord()
		if err != nil {
			return false, err
		}

		r.readIndex += n
		r.total += int64(n)
		// nil data with no error means only padding (or nothing) was
		// consumed; there is no record yet.
		if temp == nil {
			return false, nil
		}

		rt := recTypeFromHeader(r.hdr[0])
		if rt == recFirst || rt == recFull {
			// Start of a new logical record: drop any buffered partials.
			r.rec = r.rec[:0]
			r.snappyBuf = r.snappyBuf[:0]
		}

		compressed := r.hdr[0]&snappyMask != 0
		if compressed {
			r.snappyBuf = append(r.snappyBuf, temp...)
		} else {
			r.rec = append(r.rec, temp...)
		}

		if err := validateRecord(rt, r.index); err != nil {
			r.index = 0
			return false, err
		}
		if rt == recLast || rt == recFull {
			r.index = 0
			if compressed && len(r.snappyBuf) > 0 {
				// The snappy library uses `len` to calculate if we need a new buffer.
				// In order to allocate as few buffers as possible make the length
				// equal to the capacity.
				r.rec = r.rec[:cap(r.rec)]
				r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
				if err != nil {
					return false, err
				}
			}
			return true, nil
		}
		// Only increment i for non-zero records since we use it
		// to determine valid content record sequences.
		r.index++
	}
}
|
||||
|
||||
// Returns an error if the recType and i indicate an invalid record sequence.
|
||||
// As an example, if i is > 0 because we've read some amount of a partial record
|
||||
// (recFirst, recMiddle, etc. but not recLast) and then we get another recFirst or recFull
|
||||
// instead of a recLast or recMiddle we would have an invalid record.
|
||||
func validateRecord(typ recType, i int) error {
|
||||
switch typ {
|
||||
case recFull:
|
||||
if i != 0 {
|
||||
return errors.New("unexpected full record")
|
||||
}
|
||||
return nil
|
||||
case recFirst:
|
||||
if i != 0 {
|
||||
return errors.New("unexpected first record, dropping buffer")
|
||||
}
|
||||
return nil
|
||||
case recMiddle:
|
||||
if i == 0 {
|
||||
return errors.New("unexpected middle record, dropping buffer")
|
||||
}
|
||||
return nil
|
||||
case recLast:
|
||||
if i == 0 {
|
||||
return errors.New("unexpected last record, dropping buffer")
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return errors.Errorf("unexpected record type %d", typ)
|
||||
}
|
||||
}
|
||||
|
||||
// readRecord reads a sub-record (see recType) from the buffer. It could
// potentially be a full record (recFull) if the record fits within the bounds
// of a single page. Returns a byte slice of the record data read, the number
// of bytes read, and an error if there's a non-zero byte in a page term record
// or the record checksum fails.
// NOTE(review): it does not advance readIndex/writeIndex (the caller does
// that), but it does write r.hdr, so it is not free of mutation.
func (r *LiveReader) readRecord() ([]byte, int, error) {
	// Special case: for recPageTerm, check that are all zeros to end of page,
	// consume them but don't return them.
	if r.buf[r.readIndex] == byte(recPageTerm) {
		// End of page won't necessarily be end of buffer, as we may have
		// got misaligned by records spanning page boundaries.
		// r.total % pageSize is the offset into the current page
		// that r.readIndex points to in buf. Therefore
		// pageSize - (r.total % pageSize) is the amount left to read of
		// the current page.
		remaining := int(pageSize - (r.total % pageSize))
		if r.readIndex+remaining > r.writeIndex {
			return nil, 0, io.EOF
		}

		for i := r.readIndex; i < r.readIndex+remaining; i++ {
			if r.buf[i] != 0 {
				return nil, 0, errors.New("unexpected non-zero byte in page term bytes")
			}
		}

		return nil, remaining, nil
	}

	// Not a recPageTerm; read the record and check the checksum.
	if r.writeIndex-r.readIndex < recordHeaderSize {
		return nil, 0, io.EOF
	}

	// Header layout: type (1B), length (2B big-endian), CRC32 (4B big-endian).
	copy(r.hdr[:], r.buf[r.readIndex:r.readIndex+recordHeaderSize])
	length := int(binary.BigEndian.Uint16(r.hdr[1:]))
	crc := binary.BigEndian.Uint32(r.hdr[3:])
	if r.readIndex+recordHeaderSize+length > pageSize {
		if !r.permissive {
			return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize)
		}
		r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc()
		level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize)
	}
	if recordHeaderSize+length > pageSize {
		return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize)
	}
	if r.readIndex+recordHeaderSize+length > r.writeIndex {
		return nil, 0, io.EOF
	}

	rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length]
	if c := crc32.Checksum(rec, castagnoliTable); c != crc {
		return nil, 0, errors.Errorf("unexpected checksum %x, expected %x", c, crc)
	}

	return rec, length + recordHeaderSize, nil
}
|
||||
|
||||
// min returns the smaller of two ints.
func min(i, j int) int {
	if j < i {
		return j
	}
	return i
}
|
||||
200
vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go
generated
vendored
Normal file
200
vendor/github.com/prometheus/prometheus/tsdb/wal/reader.go
generated
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Reader reads WAL records from an io.Reader.
type Reader struct {
	rdr io.Reader
	err error
	// rec is the fully assembled current record; see Record().
	rec []byte
	// snappyBuf accumulates compressed fragments before decompression.
	snappyBuf []byte
	buf       [pageSize]byte
	total     int64   // Total bytes processed.
	curRecTyp recType // Used for checking that the last record is not torn.
}
|
||||
|
||||
// NewReader returns a new reader over r.
func NewReader(r io.Reader) *Reader {
	return &Reader{rdr: r}
}
|
||||
|
||||
// Next advances the reader to the next records and returns true if it exists.
|
||||
// It must not be called again after it returned false.
|
||||
func (r *Reader) Next() bool {
|
||||
err := r.next()
|
||||
if errors.Cause(err) == io.EOF {
|
||||
// The last WAL segment record shouldn't be torn(should be full or last).
|
||||
// The last record would be torn after a crash just before
|
||||
// the last record part could be persisted to disk.
|
||||
if r.curRecTyp == recFirst || r.curRecTyp == recMiddle {
|
||||
r.err = errors.New("last record is torn")
|
||||
}
|
||||
return false
|
||||
}
|
||||
r.err = err
|
||||
return r.err == nil
|
||||
}
|
||||
|
||||
// next reads sub-records from the underlying reader and assembles them into
// r.rec until a full logical record is complete. Returns io.EOF (possibly
// wrapped) at the clean end of the stream.
func (r *Reader) next() (err error) {
	// We have to use r.buf since allocating byte arrays here fails escape
	// analysis and ends up on the heap, even though it seemingly should not.
	hdr := r.buf[:recordHeaderSize]
	buf := r.buf[recordHeaderSize:]

	r.rec = r.rec[:0]
	r.snappyBuf = r.snappyBuf[:0]

	// i counts content sub-records seen for the current logical record.
	i := 0
	for {
		if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil {
			return errors.Wrap(err, "read first header byte")
		}
		r.total++
		r.curRecTyp = recTypeFromHeader(hdr[0])
		compressed := hdr[0]&snappyMask != 0

		// Gobble up zero bytes.
		if r.curRecTyp == recPageTerm {
			// recPageTerm is a single byte that indicates the rest of the page is padded.
			// If it's the first byte in a page, buf is too small and
			// needs to be resized to fit pageSize-1 bytes.
			buf = r.buf[1:]

			// We are pedantic and check whether the zeros are actually up
			// to a page boundary.
			// It's not strictly necessary but may catch sketchy state early.
			k := pageSize - (r.total % pageSize)
			if k == pageSize {
				continue // Initial 0 byte was last page byte.
			}
			n, err := io.ReadFull(r.rdr, buf[:k])
			if err != nil {
				return errors.Wrap(err, "read remaining zeros")
			}
			r.total += int64(n)

			for _, c := range buf[:k] {
				if c != 0 {
					return errors.New("unexpected non-zero byte in padded page")
				}
			}
			continue
		}
		n, err := io.ReadFull(r.rdr, hdr[1:])
		if err != nil {
			return errors.Wrap(err, "read remaining header")
		}
		r.total += int64(n)

		// Header layout: type (1B), length (2B big-endian), CRC32 (4B).
		var (
			length = binary.BigEndian.Uint16(hdr[1:])
			crc    = binary.BigEndian.Uint32(hdr[3:])
		)

		if length > pageSize-recordHeaderSize {
			return errors.Errorf("invalid record size %d", length)
		}
		n, err = io.ReadFull(r.rdr, buf[:length])
		if err != nil {
			return err
		}
		r.total += int64(n)

		if n != int(length) {
			return errors.Errorf("invalid size: expected %d, got %d", length, n)
		}
		if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc {
			return errors.Errorf("unexpected checksum %x, expected %x", c, crc)
		}

		if compressed {
			r.snappyBuf = append(r.snappyBuf, buf[:length]...)
		} else {
			r.rec = append(r.rec, buf[:length]...)
		}

		if err := validateRecord(r.curRecTyp, i); err != nil {
			return err
		}
		if r.curRecTyp == recLast || r.curRecTyp == recFull {
			if compressed && len(r.snappyBuf) > 0 {
				// The snappy library uses `len` to calculate if we need a new buffer.
				// In order to allocate as few buffers as possible make the length
				// equal to the capacity.
				r.rec = r.rec[:cap(r.rec)]
				r.rec, err = snappy.Decode(r.rec, r.snappyBuf)
				return err
			}
			return nil
		}

		// Only increment i for non-zero records since we use it
		// to determine valid content record sequences.
		i++
	}
}
|
||||
|
||||
// Err returns the last encountered error wrapped in a corruption error.
|
||||
// If the reader does not allow to infer a segment index and offset, a total
|
||||
// offset in the reader stream will be provided.
|
||||
func (r *Reader) Err() error {
|
||||
if r.err == nil {
|
||||
return nil
|
||||
}
|
||||
if b, ok := r.rdr.(*segmentBufReader); ok {
|
||||
return &CorruptionErr{
|
||||
Err: r.err,
|
||||
Dir: b.segs[b.cur].Dir(),
|
||||
Segment: b.segs[b.cur].Index(),
|
||||
Offset: int64(b.off),
|
||||
}
|
||||
}
|
||||
return &CorruptionErr{
|
||||
Err: r.err,
|
||||
Segment: -1,
|
||||
Offset: r.total,
|
||||
}
|
||||
}
|
||||
|
||||
// Record returns the current record. The returned byte slice is only
// valid until the next call to Next.
func (r *Reader) Record() []byte {
	return r.rec
}
|
||||
|
||||
// Segment returns the current segment being read, or -1 if the underlying
// reader is not segment-backed.
func (r *Reader) Segment() int {
	if b, ok := r.rdr.(*segmentBufReader); ok {
		return b.segs[b.cur].Index()
	}
	return -1
}
|
||||
|
||||
// Offset returns the current position of the segment being read. For a
// non-segment-backed reader it falls back to the total bytes processed.
func (r *Reader) Offset() int64 {
	if b, ok := r.rdr.(*segmentBufReader); ok {
		return int64(b.off)
	}
	return r.total
}
|
||||
899
vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
generated
vendored
Normal file
899
vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go
generated
vendored
Normal file
@@ -0,0 +1,899 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wal
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/golang/snappy"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/prometheus/tsdb/fileutil"
|
||||
)
|
||||
|
||||
const (
	// DefaultSegmentSize is the default size at which a new segment is
	// started.
	DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB
	// pageSize is the unit in which segments are written and zero-padded.
	pageSize = 32 * 1024 // 32KB
	// recordHeaderSize is the size of a record header:
	// type (1 byte), length (2 bytes), CRC32 (4 bytes).
	recordHeaderSize = 7
)
|
||||
|
||||
// The table gets initialized with sync.Once but may still cause a race
|
||||
// with any other use of the crc32 package anywhere. Thus we initialize it
|
||||
// before.
|
||||
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// page is an in memory buffer used to batch disk writes.
// Records bigger than the page size are split and flushed separately.
// A flush is triggered when a single records doesn't fit the page size or
// when the next record can't fit in the remaining free page space.
type page struct {
	// alloc is the number of bytes occupied in buf.
	alloc int
	// flushed is the number of bytes of buf already written to disk.
	flushed int
	buf     [pageSize]byte
}
|
||||
|
||||
// remaining returns the number of free bytes left in the page.
func (p *page) remaining() int {
	return pageSize - p.alloc
}
|
||||
|
||||
// full reports whether the page cannot hold even another record header.
func (p *page) full() bool {
	return pageSize-p.alloc < recordHeaderSize
}
|
||||
|
||||
func (p *page) reset() {
|
||||
for i := range p.buf {
|
||||
p.buf[i] = 0
|
||||
}
|
||||
p.alloc = 0
|
||||
p.flushed = 0
|
||||
}
|
||||
|
||||
// Segment represents a segment file. It embeds *os.File so it can be used
// directly for reads and writes.
type Segment struct {
	*os.File
	// dir is the directory the segment file lives in.
	dir string
	// i is the numeric index encoded in the file name.
	i int
}
|
||||
|
||||
// Index returns the index of the segment.
func (s *Segment) Index() int {
	return s.i
}
|
||||
|
||||
// Dir returns the directory of the segment.
func (s *Segment) Dir() string {
	return s.dir
}
|
||||
|
||||
// CorruptionErr is an error that's returned when corruption is encountered.
type CorruptionErr struct {
	// Dir is the directory of the corrupted segment, if known.
	Dir string
	// Segment is the corrupted segment index, or negative if unknown.
	Segment int
	// Offset is the byte offset of the corruption within the segment
	// (or within the whole stream when Segment is negative).
	Offset int64
	// Err is the underlying error.
	Err error
}
|
||||
|
||||
func (e *CorruptionErr) Error() string {
|
||||
if e.Segment < 0 {
|
||||
return fmt.Sprintf("corruption after %d bytes: %s", e.Offset, e.Err)
|
||||
}
|
||||
return fmt.Sprintf("corruption in segment %s at %d: %s", SegmentName(e.Dir, e.Segment), e.Offset, e.Err)
|
||||
}
|
||||
|
||||
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) {
	segName := SegmentName(dir, k)
	f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return nil, err
	}
	stat, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	// If the last page is torn, fill it with zeros.
	// In case it was torn after all records were written successfully, this
	// will just pad the page and everything will be fine.
	// If it was torn mid-record, a full read (which the caller should do anyway
	// to ensure integrity) will detect it as a corruption by the end.
	if d := stat.Size() % pageSize; d != 0 {
		level.Warn(logger).Log("msg", "Last page of the wal is torn, filling it with zeros", "segment", segName)
		if _, err := f.Write(make([]byte, pageSize-d)); err != nil {
			f.Close()
			return nil, errors.Wrap(err, "zero-pad torn page")
		}
	}
	return &Segment{File: f, i: k, dir: dir}, nil
}
|
||||
|
||||
// CreateSegment creates a new segment k in dir, opened for appending.
func CreateSegment(dir string, k int) (*Segment, error) {
	f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		return nil, err
	}
	return &Segment{File: f, i: k, dir: dir}, nil
}
|
||||
|
||||
// OpenReadSegment opens the segment with the given filename.
|
||||
func OpenReadSegment(fn string) (*Segment, error) {
|
||||
k, err := strconv.Atoi(filepath.Base(fn))
|
||||
if err != nil {
|
||||
return nil, errors.New("not a valid filename")
|
||||
}
|
||||
f, err := os.Open(fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil
|
||||
}
|
||||
|
||||
// WAL is a write ahead log that stores records in segment files.
// It must be read from start to end once before logging new data.
// If an error occurs during read, the repair procedure must be called
// before it's safe to do further writes.
//
// Segments are written to in pages of 32KB, with records possibly split
// across page boundaries.
// Records are never split across segments to allow full segments to be
// safely truncated. It also ensures that torn writes never corrupt records
// beyond the most recent segment.
type WAL struct {
	dir         string
	logger      log.Logger
	segmentSize int
	// mtx guards the fields below that are mutated by writes.
	mtx       sync.RWMutex
	segment   *Segment // Active segment.
	donePages int      // Pages written to the segment.
	page      *page    // Active page.
	// stopc receives a done channel; used to shut down the run goroutine.
	stopc chan chan struct{}
	// actorc queues functions to be executed by the run goroutine.
	actorc chan func()
	closed bool // To allow calling Close() more than once without blocking.
	// compress enables snappy compression of records.
	compress  bool
	snappyBuf []byte

	metrics *walMetrics
}
|
||||
|
||||
// walMetrics holds the metrics the WAL exports about its own operation.
type walMetrics struct {
	fsyncDuration   prometheus.Summary
	pageFlushes     prometheus.Counter
	pageCompletions prometheus.Counter
	truncateFail    prometheus.Counter
	truncateTotal   prometheus.Counter
	currentSegment  prometheus.Gauge
	writesFailed    prometheus.Counter
}
|
||||
|
||||
// newWALMetrics creates the WAL's metrics and, if r is non-nil, registers
// them with r.
func newWALMetrics(r prometheus.Registerer) *walMetrics {
	m := &walMetrics{}

	m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "prometheus_tsdb_wal_fsync_duration_seconds",
		Help:       "Duration of WAL fsync.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_page_flushes_total",
		Help: "Total number of page flushes.",
	})
	m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_completed_pages_total",
		Help: "Total number of completed pages.",
	})
	m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_truncations_failed_total",
		Help: "Total number of WAL truncations that failed.",
	})
	m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_truncations_total",
		Help: "Total number of WAL truncations attempted.",
	})
	m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "prometheus_tsdb_wal_segment_current",
		Help: "WAL segment index that TSDB is currently writing to.",
	})
	m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "prometheus_tsdb_wal_writes_failed_total",
		Help: "Total number of WAL writes that failed.",
	})

	if r != nil {
		r.MustRegister(
			m.fsyncDuration,
			m.pageFlushes,
			m.pageCompletions,
			m.truncateFail,
			m.truncateTotal,
			m.currentSegment,
			m.writesFailed,
		)
	}

	return m
}
|
||||
|
||||
// New returns a new WAL over the given directory, using the default segment
// size.
func New(logger log.Logger, reg prometheus.Registerer, dir string, compress bool) (*WAL, error) {
	return NewSize(logger, reg, dir, DefaultSegmentSize, compress)
}
|
||||
|
||||
// NewSize returns a new WAL over the given directory.
// New segments are created with the specified size, which must be a multiple
// of the page size.
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress bool) (*WAL, error) {
	if segmentSize%pageSize != 0 {
		return nil, errors.New("invalid segment size")
	}
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, errors.Wrap(err, "create dir")
	}
	if logger == nil {
		logger = log.NewNopLogger()
	}
	w := &WAL{
		dir:         dir,
		logger:      logger,
		segmentSize: segmentSize,
		page:        &page{},
		actorc:      make(chan func(), 100),
		stopc:       make(chan chan struct{}),
		compress:    compress,
	}
	w.metrics = newWALMetrics(reg)

	_, last, err := w.Segments()
	if err != nil {
		return nil, errors.Wrap(err, "get segment range")
	}

	// Index of the Segment we want to open and write to.
	writeSegmentIndex := 0
	// If some segments already exist create one with a higher index than the last segment.
	if last != -1 {
		writeSegmentIndex = last + 1
	}

	segment, err := CreateSegment(w.dir, writeSegmentIndex)
	if err != nil {
		return nil, err
	}

	if err := w.setSegment(segment); err != nil {
		return nil, err
	}

	// Start the background goroutine that executes queued actor functions.
	go w.run()

	return w, nil
}
|
||||
|
||||
// Open an existing WAL.
// NOTE(review): unlike NewSize, this does not set up metrics, a page buffer,
// the actor channels, or start the run goroutine — presumably the returned
// WAL is meant for read/truncate-style use only; confirm with callers before
// logging through it.
func Open(logger log.Logger, dir string) (*WAL, error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	w := &WAL{
		dir:    dir,
		logger: logger,
	}

	return w, nil
}
|
||||
|
||||
// CompressionEnabled returns if compression is enabled on this WAL.
func (w *WAL) CompressionEnabled() bool {
	return w.compress
}
|
||||
|
||||
// Dir returns the directory of the WAL.
func (w *WAL) Dir() string {
	return w.dir
}
|
||||
|
||||
// run is the WAL's background actor loop: it executes functions queued on
// actorc until a stop request arrives on stopc, then drains the remaining
// queue before signalling completion by closing the received done channel.
func (w *WAL) run() {
Loop:
	for {
		select {
		case f := <-w.actorc:
			f()
		case donec := <-w.stopc:
			// Closing actorc here makes the drain loop below terminate;
			// donec is closed last (via defer) to signal full shutdown.
			close(w.actorc)
			defer close(donec)
			break Loop
		}
	}
	// Drain and process any remaining functions.
	for f := range w.actorc {
		f()
	}
}
|
||||
|
||||
// Repair attempts to repair the WAL based on the error.
// It discards all data after the corruption.
func (w *WAL) Repair(origErr error) error {
	// We could probably have a mode that only discards torn records right around
	// the corruption to preserve as much data as possible.
	// But that's not generally applicable if the records have any kind of causality.
	// Maybe as an extra mode in the future if mid-WAL corruptions become
	// a frequent concern.
	err := errors.Cause(origErr) // So that we can pick up errors even if wrapped.

	cerr, ok := err.(*CorruptionErr)
	if !ok {
		return errors.Wrap(origErr, "cannot handle error")
	}
	if cerr.Segment < 0 {
		return errors.New("corruption error does not specify position")
	}
	level.Warn(w.logger).Log("msg", "Starting corruption repair",
		"segment", cerr.Segment, "offset", cerr.Offset)

	// All segments behind the corruption can no longer be used.
	segs, err := listSegments(w.dir)
	if err != nil {
		return errors.Wrap(err, "list segments")
	}
	level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment)

	for _, s := range segs {
		if w.segment.i == s.index {
			// The active segment needs to be removed,
			// close it first (Windows!). Can be closed safely
			// as we set the current segment to repaired file
			// below.
			if err := w.segment.Close(); err != nil {
				return errors.Wrap(err, "close active segment")
			}
		}
		if s.index <= cerr.Segment {
			continue
		}
		if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil {
			return errors.Wrapf(err, "delete segment:%v", s.index)
		}
	}
	// Regardless of the corruption offset, no record reaches into the previous segment.
	// So we can safely repair the WAL by removing the segment and re-inserting all
	// its records up to the corruption.
	level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment)

	fn := SegmentName(w.dir, cerr.Segment)
	tmpfn := fn + ".repair"

	if err := fileutil.Rename(fn, tmpfn); err != nil {
		return err
	}
	// Create a clean segment and make it the active one.
	s, err := CreateSegment(w.dir, cerr.Segment)
	if err != nil {
		return err
	}
	if err := w.setSegment(s); err != nil {
		return err
	}

	f, err := os.Open(tmpfn)
	if err != nil {
		return errors.Wrap(err, "open segment")
	}
	defer f.Close()

	r := NewReader(bufio.NewReader(f))

	for r.Next() {
		// Add records only up to the where the error was.
		if r.Offset() >= cerr.Offset {
			break
		}
		if err := w.Log(r.Record()); err != nil {
			return errors.Wrap(err, "insert record")
		}
	}
	// We expect an error here from r.Err(), so nothing to handle.

	// We need to pad to the end of the last page in the repaired segment
	if err := w.flushPage(true); err != nil {
		return errors.Wrap(err, "flush page in repair")
	}

	// We explicitly close even when there is a defer for Windows to be
	// able to delete it. The defer is in place to close it in-case there
	// are errors above.
	if err := f.Close(); err != nil {
		return errors.Wrap(err, "close corrupted file")
	}
	if err := os.Remove(tmpfn); err != nil {
		return errors.Wrap(err, "delete corrupted segment")
	}

	// Explicitly close the segment we just repaired to avoid issues with Windows.
	s.Close()

	// We always want to start writing to a new Segment rather than an existing
	// Segment, which is handled by NewSize, but earlier in Repair we're deleting
	// all segments that come after the corrupted Segment. Recreate a new Segment here.
	s, err = CreateSegment(w.dir, cerr.Segment+1)
	if err != nil {
		return err
	}
	if err := w.setSegment(s); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// SegmentName returns the file path of segment i inside dir, using the
// fixed zero-padded eight-digit naming scheme shared by all WAL segments.
func SegmentName(dir string, i int) string {
	name := fmt.Sprintf("%08d", i)
	return filepath.Join(dir, name)
}
|
||||
|
||||
// NextSegment creates the next segment and closes the previous one.
// It is the exported, mutex-guarded entry point for nextSegment.
func (w *WAL) NextSegment() error {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	return w.nextSegment()
}
|
||||
|
||||
// nextSegment creates the next segment and closes the previous one.
// Callers must hold w.mtx. Syncing and closing of the previous segment
// happen asynchronously on the actor goroutine so writes are not blocked.
func (w *WAL) nextSegment() error {
	// Only flush the current page if it actually holds data.
	if w.page.alloc > 0 {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}
	next, err := CreateSegment(w.dir, w.segment.Index()+1)
	if err != nil {
		return errors.Wrap(err, "create new segment file")
	}
	prev := w.segment
	if err := w.setSegment(next); err != nil {
		return err
	}

	// Don't block further writes by fsyncing the last segment.
	// Errors here are logged only; the new segment is already active.
	w.actorc <- func() {
		if err := w.fsync(prev); err != nil {
			level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
		}
		if err := prev.Close(); err != nil {
			level.Error(w.logger).Log("msg", "close previous segment", "err", err)
		}
	}
	return nil
}
|
||||
|
||||
// setSegment makes segment the active write target and derives the count
// of already completed pages from the file's current size, so appends to
// a pre-existing segment continue at the right page boundary.
func (w *WAL) setSegment(segment *Segment) error {
	w.segment = segment

	// Correctly initialize donePages.
	stat, err := segment.Stat()
	if err != nil {
		return err
	}
	w.donePages = int(stat.Size() / pageSize)
	w.metrics.currentSegment.Set(float64(segment.Index()))
	return nil
}
|
||||
|
||||
// flushPage writes the new contents of the page to disk. If no more records will fit into
// the page, the remaining bytes will be set to zero and a new page will be started.
// If clear is true, this is enforced regardless of how many bytes are left in the page.
func (w *WAL) flushPage(clear bool) error {
	w.metrics.pageFlushes.Inc()

	p := w.page
	clear = clear || p.full()

	// No more data will fit into the page or an implicit clear.
	// Enqueue and clear it.
	if clear {
		p.alloc = pageSize // Write till end of page.
	}
	n, err := w.segment.Write(p.buf[p.flushed:p.alloc])
	if err != nil {
		// p.flushed is not advanced on error, so the unwritten tail is
		// retried by the next flush.
		return err
	}
	p.flushed += n

	// We flushed an entire page, prepare a new one.
	if clear {
		p.reset()
		w.donePages++
		w.metrics.pageCompletions.Inc()
	}
	return nil
}
|
||||
|
||||
// First Byte of header format:
// [ 4 bits unallocated] [1 bit snappy compression flag] [ 3 bit record type ]
const (
	snappyMask  = 1 << 3
	recTypeMask = snappyMask - 1
)

// recType identifies the role of a record (or record fragment) within a page.
type recType uint8

const (
	recPageTerm recType = 0 // Rest of page is empty.
	recFull     recType = 1 // Full record.
	recFirst    recType = 2 // First fragment of a record.
	recMiddle   recType = 3 // Middle fragments of a record.
	recLast     recType = 4 // Final fragment of a record.
)

// recTypeFromHeader extracts the record type from the first header byte,
// masking off the compression flag and the unallocated high bits.
func recTypeFromHeader(header byte) recType {
	return recType(header & recTypeMask)
}
|
||||
|
||||
func (t recType) String() string {
|
||||
switch t {
|
||||
case recPageTerm:
|
||||
return "zero"
|
||||
case recFull:
|
||||
return "full"
|
||||
case recFirst:
|
||||
return "first"
|
||||
case recMiddle:
|
||||
return "middle"
|
||||
case recLast:
|
||||
return "last"
|
||||
default:
|
||||
return "<invalid>"
|
||||
}
|
||||
}
|
||||
|
||||
// pagesPerSegment returns how many fixed-size pages fit in one segment.
func (w *WAL) pagesPerSegment() int {
	return w.segmentSize / pageSize
}
|
||||
|
||||
// Log writes the records into the log.
// Multiple records can be passed at once to reduce writes and increase throughput.
func (w *WAL) Log(recs ...[]byte) error {
	w.mtx.Lock()
	defer w.mtx.Unlock()
	// Callers could just implement their own list record format but adding
	// a bit of extra logic here frees them from that overhead.
	for i, r := range recs {
		// Only the final record of the batch forces a page flush.
		if err := w.log(r, i == len(recs)-1); err != nil {
			w.metrics.writesFailed.Inc()
			return err
		}
	}
	return nil
}
|
||||
|
||||
// log writes rec to the log and forces a flush of the current page if:
// - the final record of a batch
// - the record is bigger than the page size
// - the current page is full.
func (w *WAL) log(rec []byte, final bool) error {
	// When the last page flush failed the page will remain full.
	// When the page is full, need to flush it before trying to add more records to it.
	if w.page.full() {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}
	// If the record is too big to fit within the active page in the current
	// segment, terminate the active segment and advance to the next one.
	// This ensures that records do not cross segment boundaries.
	left := w.page.remaining() - recordHeaderSize                                   // Free space in the active page.
	left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment.

	if len(rec) > left {
		if err := w.nextSegment(); err != nil {
			return err
		}
	}

	compressed := false
	if w.compress && len(rec) > 0 {
		// The snappy library uses `len` to calculate if we need a new buffer.
		// In order to allocate as few buffers as possible make the length
		// equal to the capacity.
		w.snappyBuf = w.snappyBuf[:cap(w.snappyBuf)]
		w.snappyBuf = snappy.Encode(w.snappyBuf, rec)
		// Only use the compressed form when it is actually smaller.
		if len(w.snappyBuf) < len(rec) {
			rec = w.snappyBuf
			compressed = true
		}
	}

	// Populate as many pages as necessary to fit the record.
	// Be careful to always do one pass to ensure we write zero-length records.
	for i := 0; i == 0 || len(rec) > 0; i++ {
		p := w.page

		// Find how much of the record we can fit into the page.
		var (
			l    = min(len(rec), (pageSize-p.alloc)-recordHeaderSize)
			part = rec[:l]
			buf  = p.buf[p.alloc:]
			typ  recType
		)

		switch {
		case i == 0 && len(part) == len(rec):
			typ = recFull
		case len(part) == len(rec):
			typ = recLast
		case i == 0:
			typ = recFirst
		default:
			typ = recMiddle
		}
		if compressed {
			typ |= snappyMask
		}

		// Record header: 1 byte type/flags, 2 bytes length, 4 bytes CRC32.
		buf[0] = byte(typ)
		crc := crc32.Checksum(part, castagnoliTable)
		binary.BigEndian.PutUint16(buf[1:], uint16(len(part)))
		binary.BigEndian.PutUint32(buf[3:], crc)

		copy(buf[recordHeaderSize:], part)
		p.alloc += len(part) + recordHeaderSize

		if w.page.full() {
			if err := w.flushPage(true); err != nil {
				return err
			}
		}
		rec = rec[l:]
	}

	// If it's the final record of the batch and the page is not empty, flush it.
	if final && w.page.alloc > 0 {
		if err := w.flushPage(false); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// Segments returns the range [first, last] of currently existing segments.
// If no segments are found, first and last are -1.
func (w *WAL) Segments() (first, last int, err error) {
	refs, err := listSegments(w.dir)
	if err != nil {
		return 0, 0, err
	}
	if len(refs) == 0 {
		return -1, -1, nil
	}
	// refs is sorted ascending by index, so first/last are the endpoints.
	return refs[0].index, refs[len(refs)-1].index, nil
}
|
||||
|
||||
// Truncate drops all segments before i.
// Failures are counted in the truncateFail metric via the deferred check.
func (w *WAL) Truncate(i int) (err error) {
	w.metrics.truncateTotal.Inc()
	defer func() {
		if err != nil {
			w.metrics.truncateFail.Inc()
		}
	}()
	refs, err := listSegments(w.dir)
	if err != nil {
		return err
	}
	for _, r := range refs {
		// refs is sorted ascending, so the first index >= i ends the scan.
		if r.index >= i {
			break
		}
		if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// fsync flushes the segment's file to stable storage and records the
// duration in the fsyncDuration metric.
func (w *WAL) fsync(f *Segment) error {
	start := time.Now()
	err := f.File.Sync()
	w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
	return err
}
|
||||
|
||||
// Close flushes all writes and closes active segment.
// Sync/close errors on the final segment are logged but not returned;
// note the inner `err :=` shadows the named return, so Close reports nil
// once the flush succeeded.
func (w *WAL) Close() (err error) {
	w.mtx.Lock()
	defer w.mtx.Unlock()

	if w.closed {
		return errors.New("wal already closed")
	}

	if w.segment == nil {
		w.closed = true
		return nil
	}

	// Flush the last page and zero out all its remaining size.
	// We must not flush an empty page as it would falsely signal
	// the segment is done if we start writing to it again after opening.
	if w.page.alloc > 0 {
		if err := w.flushPage(true); err != nil {
			return err
		}
	}

	// Signal the actor goroutine to drain and stop, then wait for it.
	donec := make(chan struct{})
	w.stopc <- donec
	<-donec

	if err = w.fsync(w.segment); err != nil {
		level.Error(w.logger).Log("msg", "sync previous segment", "err", err)
	}
	if err := w.segment.Close(); err != nil {
		level.Error(w.logger).Log("msg", "close previous segment", "err", err)
	}
	w.closed = true
	return nil
}
|
||||
|
||||
// segmentRef pairs a segment file name with its parsed numeric index.
type segmentRef struct {
	name  string // Base file name within the WAL directory.
	index int    // Numeric segment index parsed from the name.
}
|
||||
|
||||
func listSegments(dir string) (refs []segmentRef, err error) {
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range files {
|
||||
fn := f.Name()
|
||||
k, err := strconv.Atoi(fn)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
refs = append(refs, segmentRef{name: fn, index: k})
|
||||
}
|
||||
sort.Slice(refs, func(i, j int) bool {
|
||||
return refs[i].index < refs[j].index
|
||||
})
|
||||
for i := 0; i < len(refs)-1; i++ {
|
||||
if refs[i].index+1 != refs[i+1].index {
|
||||
return nil, errors.New("segments are not sequential")
|
||||
}
|
||||
}
|
||||
return refs, nil
|
||||
}
|
||||
|
||||
// SegmentRange groups segments by the directory and the first and last index it includes.
type SegmentRange struct {
	Dir         string // WAL directory holding the segments.
	First, Last int    // Inclusive index bounds; -1 leaves that end open.
}
|
||||
|
||||
// NewSegmentsReader returns a new reader over all segments in the directory.
func NewSegmentsReader(dir string) (io.ReadCloser, error) {
	// The open range (-1, -1) covers every existing segment.
	return NewSegmentsRangeReader(SegmentRange{dir, -1, -1})
}
|
||||
|
||||
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges.
// If first or last are -1, the range is open on the respective end.
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) {
	var segs []*Segment

	for _, sgmRange := range sr {
		refs, err := listSegments(sgmRange.Dir)
		if err != nil {
			return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir)
		}

		for _, r := range refs {
			if sgmRange.First >= 0 && r.index < sgmRange.First {
				continue
			}
			// refs is sorted ascending, so everything past Last can be skipped.
			if sgmRange.Last >= 0 && r.index > sgmRange.Last {
				break
			}
			s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name))
			if err != nil {
				return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir)
			}
			segs = append(segs, s)
		}
	}
	return NewSegmentBufReader(segs...), nil
}
|
||||
|
||||
// segmentBufReader is a buffered reader that reads in multiples of pages.
// The main purpose is that we are able to track segment and offset for
// corruption reporting. We have to be careful not to increment curr too
// early, as it is used by Reader.Err() to tell Repair which segment is corrupt.
// As such we pad the end of non-page-aligned segments with zeros.
type segmentBufReader struct {
	buf  *bufio.Reader
	segs []*Segment
	cur  int // Index into segs.
	off  int // Offset of read data into current segment.
}
|
||||
|
||||
// NewSegmentBufReader wraps the given segments in a page-aligned buffered
// reader. At least one segment must be provided; segs[0] is dereferenced
// unconditionally, so an empty call panics.
// nolint:golint // TODO: Consider exporting segmentBufReader
func NewSegmentBufReader(segs ...*Segment) *segmentBufReader {
	return &segmentBufReader{
		buf:  bufio.NewReaderSize(segs[0], 16*pageSize),
		segs: segs,
	}
}
|
||||
|
||||
func (r *segmentBufReader) Close() (err error) {
|
||||
for _, s := range r.segs {
|
||||
if e := s.Close(); e != nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Read implements io.Reader.
func (r *segmentBufReader) Read(b []byte) (n int, err error) {
	n, err = r.buf.Read(b)
	r.off += n

	// If we succeeded, or hit a non-EOF, we can stop.
	if err == nil || err != io.EOF {
		return n, err
	}

	// We hit EOF; fake out zero padding at the end of short segments, so we
	// don't increment curr too early and report the wrong segment as corrupt.
	if r.off%pageSize != 0 {
		i := 0
		for ; n+i < len(b) && (r.off+i)%pageSize != 0; i++ {
			b[n+i] = 0
		}

		// Return early, even if we didn't fill b.
		r.off += i
		return n + i, nil
	}

	// There is no more data left in the curr segment and there are no more
	// segments left. Return EOF.
	if r.cur+1 >= len(r.segs) {
		return n, io.EOF
	}

	// Move to next segment.
	r.cur++
	r.off = 0
	r.buf.Reset(r.segs[r.cur])
	return n, nil
}
|
||||
|
||||
// Size computes the size of the WAL.
// We do this by adding the sizes of all the files under the WAL dir.
func (w *WAL) Size() (int64, error) {
	return fileutil.DirSize(w.Dir())
}
|
||||
614
vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
generated
vendored
Normal file
614
vendor/github.com/prometheus/prometheus/tsdb/wal/watcher.go
generated
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package wal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
"github.com/go-kit/kit/log/level"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/prometheus/pkg/timestamp"
|
||||
"github.com/prometheus/prometheus/tsdb/record"
|
||||
)
|
||||
|
||||
const (
	readPeriod         = 10 * time.Millisecond  // Poll interval for new data in the tailed segment.
	checkpointPeriod   = 5 * time.Second        // Interval for checking for a new checkpoint.
	segmentCheckPeriod = 100 * time.Millisecond // Interval for checking for newer segments.
	consumer           = "consumer"             // Metric label name identifying the watcher.
)
|
||||
|
||||
// WriteTo is an interface used by the Watcher to send the samples it's read
// from the WAL on to somewhere else. Functions will be called concurrently
// and it is left to the implementer to make sure they are safe.
type WriteTo interface {
	// Append forwards a batch of samples read from the WAL.
	Append([]record.RefSample) bool
	// StoreSeries caches series records together with the segment they came from.
	StoreSeries([]record.RefSeries, int)
	// SeriesReset is called after reading a checkpoint to allow the deletion
	// of all series created in a segment lower than the argument.
	SeriesReset(int)
}
|
||||
|
||||
// WatcherMetrics holds the metric vectors shared across Watcher instances;
// each Watcher curries them with its own consumer label in setMetrics.
type WatcherMetrics struct {
	recordsRead           *prometheus.CounterVec
	recordDecodeFails     *prometheus.CounterVec
	samplesSentPreTailing *prometheus.CounterVec
	currentSegment        *prometheus.GaugeVec
}
|
||||
|
||||
// Watcher watches the TSDB WAL for a given WriteTo.
type Watcher struct {
	name           string
	writer         WriteTo
	logger         log.Logger
	walDir         string
	lastCheckpoint string // Path of the checkpoint processed most recently.
	metrics        *WatcherMetrics
	readerMetrics  *LiveReaderMetrics

	startTime      time.Time
	startTimestamp int64 // the start time as a Prometheus timestamp
	sendSamples    bool  // Becomes true once replay has caught up past startTimestamp.

	// Per-consumer views of the shared metrics, bound in setMetrics.
	recordsReadMetric       *prometheus.CounterVec
	recordDecodeFailsMetric prometheus.Counter
	samplesSentPreTailing   prometheus.Counter
	currentSegmentMetric    prometheus.Gauge

	quit chan struct{} // Closed by Stop to request shutdown.
	done chan struct{} // Closed by loop once shutdown completes.

	// For testing, stop when we hit this segment.
	MaxSegment int
}
|
||||
|
||||
// NewWatcherMetrics builds the shared watcher metric vectors and, when reg
// is non-nil, registers them. Pass a nil registerer for unregistered metrics
// (e.g. in tests).
func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics {
	m := &WatcherMetrics{
		recordsRead: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: "prometheus",
				Subsystem: "wal_watcher",
				Name:      "records_read_total",
				Help:      "Number of records read by the WAL watcher from the WAL.",
			},
			[]string{consumer, "type"},
		),
		recordDecodeFails: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: "prometheus",
				Subsystem: "wal_watcher",
				Name:      "record_decode_failures_total",
				Help:      "Number of records read by the WAL watcher that resulted in an error when decoding.",
			},
			[]string{consumer},
		),
		samplesSentPreTailing: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: "prometheus",
				Subsystem: "wal_watcher",
				Name:      "samples_sent_pre_tailing_total",
				Help:      "Number of sample records read by the WAL watcher and sent to remote write during replay of existing WAL.",
			},
			[]string{consumer},
		),
		currentSegment: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: "prometheus",
				Subsystem: "wal_watcher",
				Name:      "current_segment",
				Help:      "Current segment the WAL watcher is reading records from.",
			},
			[]string{consumer},
		),
	}

	if reg != nil {
		reg.MustRegister(m.recordsRead)
		reg.MustRegister(m.recordDecodeFails)
		reg.MustRegister(m.samplesSentPreTailing)
		reg.MustRegister(m.currentSegment)
	}

	return m
}
|
||||
|
||||
// NewWatcher creates a new WAL watcher for a given WriteTo.
// walDir is the storage directory; the watcher tails its "wal" subdirectory.
// A nil logger is replaced with a no-op logger.
func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, walDir string) *Watcher {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	return &Watcher{
		logger:        logger,
		writer:        writer,
		metrics:       metrics,
		readerMetrics: readerMetrics,
		walDir:        path.Join(walDir, "wal"),
		name:          name,
		quit:          make(chan struct{}),
		done:          make(chan struct{}),

		// Negative sentinel disables the test-only early stop.
		MaxSegment: -1,
	}
}
|
||||
|
||||
// setMetrics binds the shared metric vectors to this watcher's consumer label.
func (w *Watcher) setMetrics() {
	// Setup the WAL Watchers metrics. We do this here rather than in the
	// constructor because of the ordering of creating Queue Managers's,
	// stopping them, and then starting new ones in storage/remote/storage.go ApplyConfig.
	if w.metrics != nil {
		w.recordsReadMetric = w.metrics.recordsRead.MustCurryWith(prometheus.Labels{consumer: w.name})
		w.recordDecodeFailsMetric = w.metrics.recordDecodeFails.WithLabelValues(w.name)
		w.samplesSentPreTailing = w.metrics.samplesSentPreTailing.WithLabelValues(w.name)
		w.currentSegmentMetric = w.metrics.currentSegment.WithLabelValues(w.name)
	}
}
|
||||
|
||||
// Start the Watcher. The tailing loop runs on its own goroutine until Stop
// is called.
func (w *Watcher) Start() {
	w.setMetrics()
	level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name)

	go w.loop()
}
|
||||
|
||||
// Stop the Watcher. Blocks until the tailing loop has exited, then removes
// this watcher's label values from the shared metric vectors.
func (w *Watcher) Stop() {
	close(w.quit)
	<-w.done

	// Records read metric has series and samples.
	if w.metrics != nil {
		w.metrics.recordsRead.DeleteLabelValues(w.name, "series")
		w.metrics.recordsRead.DeleteLabelValues(w.name, "samples")
		w.metrics.recordDecodeFails.DeleteLabelValues(w.name)
		w.metrics.samplesSentPreTailing.DeleteLabelValues(w.name)
		w.metrics.currentSegment.DeleteLabelValues(w.name)
	}

	level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name)
}
|
||||
|
||||
// loop runs Run repeatedly, backing off five seconds between attempts,
// until the quit channel is closed.
func (w *Watcher) loop() {
	defer close(w.done)

	// We may encounter failures processing the WAL; we should wait and retry.
	for !isClosed(w.quit) {
		w.SetStartTime(time.Now())
		if err := w.Run(); err != nil {
			level.Error(w.logger).Log("msg", "error tailing WAL", "err", err)
		}

		select {
		case <-w.quit:
			return
		case <-time.After(5 * time.Second):
		}
	}
}
|
||||
|
||||
// Run the watcher, which will tail the WAL until the quit channel is closed
// or an error case is hit.
func (w *Watcher) Run() error {
	_, lastSegment, err := w.firstAndLast()
	if err != nil {
		return errors.Wrap(err, "wal.Segments")
	}

	// We want to ensure this is false across iterations since
	// Run will be called again if there was a failure to read the WAL.
	w.sendSamples = false

	level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name)

	// Backfill from the checkpoint first if it exists.
	lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir)
	if err != nil && err != record.ErrNotFound {
		return errors.Wrap(err, "tsdb.LastCheckpoint")
	}

	if err == nil {
		if err = w.readCheckpoint(lastCheckpoint); err != nil {
			return errors.Wrap(err, "readCheckpoint")
		}
	}
	w.lastCheckpoint = lastCheckpoint

	currentSegment, err := w.findSegmentForIndex(checkpointIndex)
	if err != nil {
		return err
	}

	level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment)
	for !isClosed(w.quit) {
		w.currentSegmentMetric.Set(float64(currentSegment))
		level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment)

		// On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment.
		// On subsequent calls to this function, currentSegment will have been incremented and we should open that segment.
		if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil {
			return err
		}

		// For testing: stop when you hit a specific segment.
		if currentSegment == w.MaxSegment {
			return nil
		}

		currentSegment++
	}

	return nil
}
|
||||
|
||||
// findSegmentForIndex finds the first segment greater than or equal to index.
|
||||
func (w *Watcher) findSegmentForIndex(index int) (int, error) {
|
||||
refs, err := w.segments(w.walDir)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
for _, r := range refs {
|
||||
if r >= index {
|
||||
return r, nil
|
||||
}
|
||||
}
|
||||
|
||||
return -1, errors.New("failed to find segment for index")
|
||||
}
|
||||
|
||||
// firstAndLast returns the lowest and highest existing segment index, or
// -1, -1 when the WAL directory holds no segments.
func (w *Watcher) firstAndLast() (int, int, error) {
	refs, err := w.segments(w.walDir)
	if err != nil {
		return -1, -1, err
	}

	if len(refs) == 0 {
		return -1, -1, nil
	}
	return refs[0], refs[len(refs)-1], nil
}
|
||||
|
||||
// Copied from tsdb/wal/wal.go so we do not have to open a WAL.
// Plan is to move WAL watcher to TSDB and dedupe these implementations.
func (w *Watcher) segments(dir string) ([]int, error) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	var refs []int
	var last int
	for _, f := range files {
		k, err := strconv.Atoi(f.Name())
		if err != nil {
			// Not a segment file (e.g. a checkpoint directory); skip it.
			continue
		}
		// NOTE(review): this gap check runs in directory-listing order,
		// before the sort below; it appears to rely on ReadDir's
		// lexicographic ordering matching numeric order for the
		// fixed-width zero-padded segment names — confirm upstream.
		if len(refs) > 0 && k > last+1 {
			return nil, errors.New("segments are not sequential")
		}
		refs = append(refs, k)
		last = k
	}
	sort.Ints(refs)

	return refs, nil
}
|
||||
|
||||
// Use tail true to indicate that the reader is currently on a segment that is
// actively being written to. If false, assume it's a full segment and we're
// replaying it on start to cache the series records.
func (w *Watcher) watch(segmentNum int, tail bool) error {
	segment, err := OpenReadSegment(SegmentName(w.walDir, segmentNum))
	if err != nil {
		return err
	}
	defer segment.Close()

	reader := NewLiveReader(w.logger, w.readerMetrics, segment)

	readTicker := time.NewTicker(readPeriod)
	defer readTicker.Stop()

	checkpointTicker := time.NewTicker(checkpointPeriod)
	defer checkpointTicker.Stop()

	segmentTicker := time.NewTicker(segmentCheckPeriod)
	defer segmentTicker.Stop()

	// If we're replaying the segment we need to know the size of the file to know
	// when to return from watch and move on to the next segment.
	size := int64(math.MaxInt64)
	if !tail {
		// Replay mode: no checkpoint/segment polling needed; read to EOF once.
		segmentTicker.Stop()
		checkpointTicker.Stop()
		var err error
		size, err = getSegmentSize(w.walDir, segmentNum)
		if err != nil {
			return errors.Wrap(err, "getSegmentSize")
		}
	}

	// Single-slot semaphore so at most one checkpoint GC runs at a time.
	gcSem := make(chan struct{}, 1)
	for {
		select {
		case <-w.quit:
			return nil

		case <-checkpointTicker.C:
			// Periodically check if there is a new checkpoint so we can garbage
			// collect labels. As this is considered an optimisation, we ignore
			// errors during checkpoint processing. Doing the process asynchronously
			// allows the current WAL segment to be processed while reading the
			// checkpoint.
			select {
			case gcSem <- struct{}{}:
				go func() {
					defer func() {
						<-gcSem
					}()
					if err := w.garbageCollectSeries(segmentNum); err != nil {
						level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err)
					}
				}()
			default:
				// Currently doing a garbage collect, try again later.
			}

		case <-segmentTicker.C:
			_, last, err := w.firstAndLast()
			if err != nil {
				return errors.Wrap(err, "segments")
			}

			// Check if new segments exists.
			if last <= segmentNum {
				continue
			}

			// A newer segment exists: drain the current one, then return so
			// Run can advance to the next segment.
			err = w.readSegment(reader, segmentNum, tail)

			// Ignore errors reading to end of segment whilst replaying the WAL.
			if !tail {
				if err != nil && err != io.EOF {
					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err)
				} else if reader.Offset() != size {
					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
				}
				return nil
			}

			// Otherwise, when we are tailing, non-EOFs are fatal.
			if err != io.EOF {
				return err
			}

			return nil

		case <-readTicker.C:
			err = w.readSegment(reader, segmentNum, tail)

			// Ignore all errors reading to end of segment whilst replaying the WAL.
			if !tail {
				if err != nil && err != io.EOF {
					level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err)
				} else if reader.Offset() != size {
					level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size)
				}
				return nil
			}

			// Otherwise, when we are tailing, non-EOFs are fatal.
			if err != io.EOF {
				return err
			}
		}
	}
}
|
||||
|
||||
// garbageCollectSeries reads the newest checkpoint (if it changed since the
// last call) and tells the writer to drop series cached from segments older
// than that checkpoint.
func (w *Watcher) garbageCollectSeries(segmentNum int) error {
	dir, _, err := LastCheckpoint(w.walDir)
	if err != nil && err != record.ErrNotFound {
		return errors.Wrap(err, "tsdb.LastCheckpoint")
	}

	// No checkpoint, or the same one we already processed: nothing to do.
	if dir == "" || dir == w.lastCheckpoint {
		return nil
	}
	w.lastCheckpoint = dir

	index, err := checkpointNum(dir)
	if err != nil {
		return errors.Wrap(err, "error parsing checkpoint filename")
	}

	if index >= segmentNum {
		level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir)
		return nil
	}

	level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum)

	if err = w.readCheckpoint(dir); err != nil {
		return errors.Wrap(err, "readCheckpoint")
	}

	// Clear series with a checkpoint or segment index # lower than the checkpoint we just read.
	w.writer.SeriesReset(index)
	return nil
}
|
||||
|
||||
// readSegment drains records from r, caching series in the writer and — when
// tailing — forwarding samples newer than the watcher's start timestamp.
// It returns the reader's terminal error (io.EOF at a clean end of segment).
func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error {
	var (
		dec     record.Decoder
		series  []record.RefSeries
		samples []record.RefSample
		send    []record.RefSample
	)
	for r.Next() && !isClosed(w.quit) {
		rec := r.Record()
		w.recordsReadMetric.WithLabelValues(recordType(dec.Type(rec))).Inc()

		switch dec.Type(rec) {
		case record.Series:
			// NOTE(review): `series, err :=` shadows the outer buffer, so
			// decoded capacity is not retained across iterations — presumably
			// intended as buffer reuse; verify upstream.
			series, err := dec.Series(rec, series[:0])
			if err != nil {
				w.recordDecodeFailsMetric.Inc()
				return err
			}
			w.writer.StoreSeries(series, segmentNum)

		case record.Samples:
			// If we're not tailing a segment we can ignore any samples records we see.
			// This speeds up replay of the WAL by > 10x.
			if !tail {
				break
			}
			samples, err := dec.Samples(rec, samples[:0])
			if err != nil {
				w.recordDecodeFailsMetric.Inc()
				return err
			}
			for _, s := range samples {
				// Only forward samples produced after the watcher started;
				// the first such sample marks the end of replay.
				if s.T > w.startTimestamp {
					if !w.sendSamples {
						w.sendSamples = true
						duration := time.Since(w.startTime)
						level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration)
					}
					send = append(send, s)
				}
			}
			if len(send) > 0 {
				// Blocks until the sample is sent to all remote write endpoints or closed (because enqueue blocks).
				w.writer.Append(send)
				send = send[:0]
			}

		case record.Tombstones:
			// noop
		case record.Invalid:
			return errors.New("invalid record")

		default:
			w.recordDecodeFailsMetric.Inc()
			return errors.New("unknown TSDB record type")
		}
	}
	return r.Err()
}
|
||||
|
||||
// SetStartTime records the replay start time, both as a time.Time (used for
// the "Done replaying WAL" duration log) and as a millisecond timestamp used
// to filter out samples older than the start in readSegment.
func (w *Watcher) SetStartTime(t time.Time) {
	w.startTime = t
	w.startTimestamp = timestamp.FromTime(t)
}
|
||||
|
||||
func recordType(rt record.Type) string {
|
||||
switch rt {
|
||||
case record.Invalid:
|
||||
return "invalid"
|
||||
case record.Series:
|
||||
return "series"
|
||||
case record.Samples:
|
||||
return "samples"
|
||||
case record.Tombstones:
|
||||
return "tombstones"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Read all the series records from a Checkpoint directory.
|
||||
func (w *Watcher) readCheckpoint(checkpointDir string) error {
|
||||
level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir)
|
||||
index, err := checkpointNum(checkpointDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "checkpointNum")
|
||||
}
|
||||
|
||||
// Ensure we read the whole contents of every segment in the checkpoint dir.
|
||||
segs, err := w.segments(checkpointDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Unable to get segments checkpoint dir")
|
||||
}
|
||||
for _, seg := range segs {
|
||||
size, err := getSegmentSize(checkpointDir, seg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getSegmentSize")
|
||||
}
|
||||
|
||||
sr, err := OpenReadSegment(SegmentName(checkpointDir, seg))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to open segment")
|
||||
}
|
||||
defer sr.Close()
|
||||
|
||||
r := NewLiveReader(w.logger, w.readerMetrics, sr)
|
||||
if err := w.readSegment(r, index, false); err != io.EOF && err != nil {
|
||||
return errors.Wrap(err, "readSegment")
|
||||
}
|
||||
|
||||
if r.Offset() != size {
|
||||
return fmt.Errorf("readCheckpoint wasn't able to read all data from the checkpoint %s/%08d, size: %d, totalRead: %d", checkpointDir, seg, size, r.Offset())
|
||||
}
|
||||
}
|
||||
|
||||
level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkpointNum(dir string) (int, error) {
|
||||
// Checkpoint dir names are in the format checkpoint.000001
|
||||
// dir may contain a hidden directory, so only check the base directory
|
||||
chunks := strings.Split(path.Base(dir), ".")
|
||||
if len(chunks) != 2 {
|
||||
return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
|
||||
}
|
||||
|
||||
result, err := strconv.Atoi(chunks[1])
|
||||
if err != nil {
|
||||
return 0, errors.Errorf("invalid checkpoint dir string: %s", dir)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Get size of segment.
|
||||
func getSegmentSize(dir string, index int) (int64, error) {
|
||||
i := int64(-1)
|
||||
fi, err := os.Stat(SegmentName(dir, index))
|
||||
if err == nil {
|
||||
i = fi.Size()
|
||||
}
|
||||
return i, err
|
||||
}
|
||||
|
||||
// isClosed performs a non-blocking receive on c and reports whether it
// succeeded. For a close-only signal channel this means "c has been closed".
func isClosed(c chan struct{}) bool {
	select {
	case <-c:
		return true
	default:
	}
	return false
}
|
||||
157
vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
generated
vendored
Normal file
157
vendor/github.com/prometheus/prometheus/util/stats/query_stats.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// QueryTiming identifies the code area or functionality in which time is spent
// during a query.
type QueryTiming int

// Query timings. Values follow iota declaration order; each one names a
// phase of query execution (see String and SpanOperation for their labels).
const (
	EvalTotalTime QueryTiming = iota
	ResultSortTime
	QueryPreparationTime
	InnerEvalTime
	ExecQueueTime
	ExecTotalTime
)
|
||||
|
||||
// Return a string representation of a QueryTiming identifier.
|
||||
func (s QueryTiming) String() string {
|
||||
switch s {
|
||||
case EvalTotalTime:
|
||||
return "Eval total time"
|
||||
case ResultSortTime:
|
||||
return "Result sorting time"
|
||||
case QueryPreparationTime:
|
||||
return "Query preparation time"
|
||||
case InnerEvalTime:
|
||||
return "Inner eval time"
|
||||
case ExecQueueTime:
|
||||
return "Exec queue wait time"
|
||||
case ExecTotalTime:
|
||||
return "Exec total time"
|
||||
default:
|
||||
return "Unknown query timing"
|
||||
}
|
||||
}
|
||||
|
||||
// SpanOperation returns a string representation of a QueryTiming span operation.
|
||||
func (s QueryTiming) SpanOperation() string {
|
||||
switch s {
|
||||
case EvalTotalTime:
|
||||
return "promqlEval"
|
||||
case ResultSortTime:
|
||||
return "promqlSort"
|
||||
case QueryPreparationTime:
|
||||
return "promqlPrepare"
|
||||
case InnerEvalTime:
|
||||
return "promqlInnerEval"
|
||||
case ExecQueueTime:
|
||||
return "promqlExecQueue"
|
||||
case ExecTotalTime:
|
||||
return "promqlExec"
|
||||
default:
|
||||
return "Unknown query timing"
|
||||
}
|
||||
}
|
||||
|
||||
// queryTimings with all query timers mapped to durations. Values are in
// seconds, as produced by Timer.Duration.
type queryTimings struct {
	EvalTotalTime        float64 `json:"evalTotalTime"`
	ResultSortTime       float64 `json:"resultSortTime"`
	QueryPreparationTime float64 `json:"queryPreparationTime"`
	InnerEvalTime        float64 `json:"innerEvalTime"`
	ExecQueueTime        float64 `json:"execQueueTime"`
	ExecTotalTime        float64 `json:"execTotalTime"`
}
|
||||
|
||||
// QueryStats currently only holding query timings.
type QueryStats struct {
	// Timings holds the per-phase query durations, serialized as "timings".
	Timings queryTimings `json:"timings,omitempty"`
}
|
||||
|
||||
// NewQueryStats makes a QueryStats struct with all QueryTimings found in the
|
||||
// given TimerGroup.
|
||||
func NewQueryStats(tg *QueryTimers) *QueryStats {
|
||||
var qt queryTimings
|
||||
|
||||
for s, timer := range tg.TimerGroup.timers {
|
||||
switch s {
|
||||
case EvalTotalTime:
|
||||
qt.EvalTotalTime = timer.Duration()
|
||||
case ResultSortTime:
|
||||
qt.ResultSortTime = timer.Duration()
|
||||
case QueryPreparationTime:
|
||||
qt.QueryPreparationTime = timer.Duration()
|
||||
case InnerEvalTime:
|
||||
qt.InnerEvalTime = timer.Duration()
|
||||
case ExecQueueTime:
|
||||
qt.ExecQueueTime = timer.Duration()
|
||||
case ExecTotalTime:
|
||||
qt.ExecTotalTime = timer.Duration()
|
||||
}
|
||||
}
|
||||
|
||||
qs := QueryStats{Timings: qt}
|
||||
return &qs
|
||||
}
|
||||
|
||||
// SpanTimer unifies tracing and timing, to reduce repetition.
type SpanTimer struct {
	timer     *Timer                // started at construction, stopped on Finish
	observers []prometheus.Observer // each receives the elapsed seconds on Finish

	span opentracing.Span // span opened at construction, finished on Finish
}

// NewSpanTimer starts a tracing span for operation, starts the given timer,
// and returns the SpanTimer plus the context carrying the new span.
func NewSpanTimer(ctx context.Context, operation string, timer *Timer, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
	span, ctx := opentracing.StartSpanFromContext(ctx, operation)
	timer.Start()

	return &SpanTimer{
		timer:     timer,
		observers: observers,

		span: span,
	}, ctx
}

// Finish stops the timer, finishes the span, and reports the seconds elapsed
// since the timer was last started to every observer.
func (s *SpanTimer) Finish() {
	s.timer.Stop()
	s.span.Finish()

	for _, obs := range s.observers {
		obs.Observe(s.timer.ElapsedTime().Seconds())
	}
}
|
||||
|
||||
// QueryTimers groups the timers for the phases of a single query.
type QueryTimers struct {
	*TimerGroup
}

// NewQueryTimers returns a QueryTimers backed by a fresh TimerGroup.
func NewQueryTimers() *QueryTimers {
	return &QueryTimers{NewTimerGroup()}
}

// GetSpanTimer starts and returns a SpanTimer for the given query phase,
// reusing that phase's timer from the underlying TimerGroup.
func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
	return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...)
}
|
||||
113
vendor/github.com/prometheus/prometheus/util/stats/timer.go
generated
vendored
Normal file
113
vendor/github.com/prometheus/prometheus/util/stats/timer.go
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stats
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Timer that can be started and stopped and accumulates the total time it
// was running (the time between Start() and Stop()).
type Timer struct {
	name     fmt.Stringer  // identifies the timed code section
	created  int           // creation ordinal within its TimerGroup, used for sorting
	start    time.Time     // set by Start; basis for Stop and ElapsedTime
	duration time.Duration // total accumulated over Start/Stop cycles
}

// Start the timer.
func (t *Timer) Start() *Timer {
	t.start = time.Now()
	return t
}

// Stop the timer. Adds the time since the last Start to the accumulated total.
func (t *Timer) Stop() {
	t.duration += time.Since(t.start)
}

// ElapsedTime returns the time that passed since starting the timer.
// Unlike Duration, this ignores the accumulated total from earlier cycles.
func (t *Timer) ElapsedTime() time.Duration {
	return time.Since(t.start)
}

// Duration returns the duration value of the timer in seconds.
func (t *Timer) Duration() float64 {
	return t.duration.Seconds()
}

// Return a string representation of the Timer.
func (t *Timer) String() string {
	return fmt.Sprintf("%s: %s", t.name, t.duration)
}
|
||||
|
||||
// A TimerGroup represents a group of timers relevant to a single query.
type TimerGroup struct {
	timers map[fmt.Stringer]*Timer // keyed by section name; populated lazily by GetTimer
}

// NewTimerGroup constructs a new TimerGroup.
func NewTimerGroup() *TimerGroup {
	return &TimerGroup{timers: map[fmt.Stringer]*Timer{}}
}
|
||||
|
||||
// GetTimer gets (and creates, if necessary) the Timer for a given code section.
|
||||
func (t *TimerGroup) GetTimer(name fmt.Stringer) *Timer {
|
||||
if timer, exists := t.timers[name]; exists {
|
||||
return timer
|
||||
}
|
||||
timer := &Timer{
|
||||
name: name,
|
||||
created: len(t.timers),
|
||||
}
|
||||
t.timers[name] = timer
|
||||
return timer
|
||||
}
|
||||
|
||||
// Timers is a slice of Timer pointers that implements Len and Swap from
// sort.Interface.
type Timers []*Timer

// byCreationTimeSorter supplies the Less needed to sort Timers by their
// creation ordinal.
type byCreationTimeSorter struct{ Timers }

// Len implements sort.Interface.
func (t Timers) Len() int {
	return len(t)
}

// Swap implements sort.Interface.
func (t Timers) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}

// Less orders timers by the sequence in which they were created.
func (s byCreationTimeSorter) Less(i, j int) bool {
	return s.Timers[i].created < s.Timers[j].created
}
|
||||
|
||||
// Return a string representation of a TimerGroup.
|
||||
func (t *TimerGroup) String() string {
|
||||
timers := byCreationTimeSorter{}
|
||||
for _, timer := range t.timers {
|
||||
timers.Timers = append(timers.Timers, timer)
|
||||
}
|
||||
sort.Sort(timers)
|
||||
result := &bytes.Buffer{}
|
||||
for _, timer := range timers.Timers {
|
||||
fmt.Fprintf(result, "%s\n", timer)
|
||||
}
|
||||
return result.String()
|
||||
}
|
||||
55
vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
generated
vendored
Normal file
55
vendor/github.com/prometheus/prometheus/util/teststorage/storage.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package teststorage
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
"github.com/prometheus/prometheus/util/testutil"
|
||||
)
|
||||
|
||||
// New returns a new TestStorage for testing purposes
// that removes all associated files on closing.
// Failures during setup abort the calling test via t.Fatalf.
func New(t testutil.T) *TestStorage {
	dir, err := ioutil.TempDir("", "test_storage")
	if err != nil {
		t.Fatalf("Opening test dir failed: %s", err)
	}

	// Tests just load data for a series sequentially. Thus we
	// need a long appendable window.
	// Block durations are presumably in milliseconds (24h expressed as a
	// millisecond count) — TODO confirm against tsdb.Options.
	opts := tsdb.DefaultOptions()
	opts.MinBlockDuration = int64(24 * time.Hour / time.Millisecond)
	opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond)
	db, err := tsdb.Open(dir, nil, nil, opts)
	if err != nil {
		t.Fatalf("Opening test storage failed: %s", err)
	}
	return &TestStorage{DB: db, dir: dir}
}
|
||||
|
||||
// TestStorage wraps a tsdb.DB together with the temporary directory backing
// it, so Close can also remove the on-disk state.
type TestStorage struct {
	*tsdb.DB
	dir string // temp dir created by New; deleted by Close
}

// Close shuts down the database and then deletes its temporary directory.
func (s TestStorage) Close() error {
	if err := s.DB.Close(); err != nil {
		return err
	}
	return os.RemoveAll(s.dir)
}
|
||||
42
vendor/github.com/prometheus/prometheus/util/testutil/context.go
generated
vendored
Normal file
42
vendor/github.com/prometheus/prometheus/util/testutil/context.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutil
|
||||
|
||||
import "time"
|
||||
|
||||
// A MockContext provides a simple stub implementation of a Context
type MockContext struct {
	Error  error         // returned unchanged by Err
	DoneCh chan struct{} // returned unchanged by Done
}

// Deadline always will return not set
func (c *MockContext) Deadline() (deadline time.Time, ok bool) {
	return time.Time{}, false
}

// Done returns a read channel for listening to the Done event
func (c *MockContext) Done() <-chan struct{} {
	return c.DoneCh
}

// Err returns the error, is nil if not set.
func (c *MockContext) Err() error {
	return c.Error
}

// Value ignores the Value and always returns nil
func (c *MockContext) Value(key interface{}) interface{} {
	return nil
}
|
||||
168
vendor/github.com/prometheus/prometheus/util/testutil/directory.go
generated
vendored
Normal file
168
vendor/github.com/prometheus/prometheus/util/testutil/directory.go
generated
vendored
Normal file
@@ -0,0 +1,168 @@
|
||||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
	// The base directory used for test emissions, which instructs the operating
	// system to use the default temporary directory as the base or TMPDIR
	// environment variable.
	defaultDirectory = ""

	// NilCloser is a no-op Closer.
	NilCloser = nilCloser(true)

	// The number of times that a TemporaryDirectory will retry its removal
	// before giving up and failing the test.
	temporaryDirectoryRemoveRetries = 2
)
|
||||
|
||||
type (
	// Closer is the interface that wraps the Close method.
	Closer interface {
		// Close reaps the underlying directory and its children. The directory
		// could be deleted by its users already.
		Close()
	}

	// nilCloser is a Closer whose Close does nothing; see NilCloser.
	nilCloser bool

	// TemporaryDirectory models a closeable path for transient POSIX disk
	// activities.
	TemporaryDirectory interface {
		Closer

		// Path returns the underlying path for access.
		Path() string
	}

	// temporaryDirectory is kept as a private type due to private fields and
	// their interactions.
	temporaryDirectory struct {
		path   string // directory created by NewTemporaryDirectory
		tester T      // test handle used to report removal failures
	}

	// callbackCloser is a Closer that invokes an arbitrary callback; see
	// NewCallbackCloser.
	callbackCloser struct {
		fn func()
	}

	// T implements the needed methods of testing.TB so that we do not need
	// to actually import testing (which has the side effect of adding all
	// the test flags, which we do not want in non-test binaries even if
	// they make use of these utilities for some reason).
	T interface {
		Fatal(args ...interface{})
		Fatalf(format string, args ...interface{})
	}
)
|
||||
|
||||
// Close is a no-op.
func (c nilCloser) Close() {
}

// Close invokes the wrapped callback.
func (c callbackCloser) Close() {
	c.fn()
}

// NewCallbackCloser returns a Closer that calls the provided function upon
// closing.
func NewCallbackCloser(fn func()) Closer {
	return &callbackCloser{
		fn: fn,
	}
}
|
||||
|
||||
func (t temporaryDirectory) Close() {
|
||||
retries := temporaryDirectoryRemoveRetries
|
||||
err := os.RemoveAll(t.path)
|
||||
for err != nil && retries > 0 {
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
err = nil
|
||||
default:
|
||||
retries--
|
||||
err = os.RemoveAll(t.path)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
t.tester.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Path returns the underlying directory path for access.
func (t temporaryDirectory) Path() string {
	return t.path
}
|
||||
|
||||
// NewTemporaryDirectory creates a new temporary directory for transient POSIX
// activities. The name is used as the directory-name prefix; creation
// failures abort the test via t.Fatal.
func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) {
	var (
		directory string
		err       error
	)

	directory, err = ioutil.TempDir(defaultDirectory, name)
	if err != nil {
		t.Fatal(err)
	}

	handler = temporaryDirectory{
		path:   directory,
		tester: t,
	}

	return
}
|
||||
|
||||
// DirHash returns a hash of all files attributes and their content within a directory.
// For every regular file the content, size, name and modification time are
// folded into a single SHA-256 digest; filepath.Walk visits entries in
// lexical order, so identical trees hash identically. Any error fails t.
func DirHash(t *testing.T, path string) []byte {
	hash := sha256.New()
	err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
		Ok(t, err)

		// Directories contribute nothing themselves; only their files do.
		if info.IsDir() {
			return nil
		}
		f, err := os.Open(path)
		Ok(t, err)
		defer f.Close()

		_, err = io.Copy(hash, f)
		Ok(t, err)

		_, err = io.WriteString(hash, strconv.Itoa(int(info.Size())))
		Ok(t, err)

		_, err = io.WriteString(hash, info.Name())
		Ok(t, err)

		// Mix in the modification time via its gob encoding.
		modTime, err := info.ModTime().GobEncode()
		Ok(t, err)

		_, err = io.WriteString(hash, string(modTime))
		Ok(t, err)
		return nil
	})
	Ok(t, err)

	return hash.Sum(nil)
}
|
||||
35
vendor/github.com/prometheus/prometheus/util/testutil/logging.go
generated
vendored
Normal file
35
vendor/github.com/prometheus/prometheus/util/testutil/logging.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
// Copyright 2019 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-kit/kit/log"
|
||||
)
|
||||
|
||||
// logger adapts a *testing.T to the go-kit log.Logger interface.
type logger struct {
	t *testing.T
}

// NewLogger returns a gokit compatible Logger which calls t.Log.
func NewLogger(t *testing.T) log.Logger {
	return logger{t: t}
}

// Log implements log.Logger by forwarding the key/value pairs to t.Log.
// It never returns an error.
func (t logger) Log(keyvals ...interface{}) error {
	t.t.Log(keyvals...)
	return nil
}
|
||||
47
vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go
generated
vendored
Normal file
47
vendor/github.com/prometheus/prometheus/util/testutil/roundtrip.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// roundTrip is a canned http.RoundTripper that always answers with fixed
// response and error values.
type roundTrip struct {
	theResponse *http.Response
	theError    error
}

// RoundTrip ignores the request and returns the canned response and error.
func (rt *roundTrip) RoundTrip(r *http.Request) (*http.Response, error) {
	return rt.theResponse, rt.theError
}

// roundTripCheckRequest additionally runs checkRequest on each request before
// answering with the canned values from the embedded roundTrip.
type roundTripCheckRequest struct {
	checkRequest func(*http.Request)
	roundTrip
}

// RoundTrip invokes checkRequest on r, then returns the canned values.
func (rt *roundTripCheckRequest) RoundTrip(r *http.Request) (*http.Response, error) {
	rt.checkRequest(r)
	return rt.theResponse, rt.theError
}
|
||||
|
||||
// NewRoundTripCheckRequest creates a new instance of a type that implements http.RoundTripper,
|
||||
// which before returning theResponse and theError, executes checkRequest against a http.Request.
|
||||
func NewRoundTripCheckRequest(checkRequest func(*http.Request), theResponse *http.Response, theError error) http.RoundTripper {
|
||||
return &roundTripCheckRequest{
|
||||
checkRequest: checkRequest,
|
||||
roundTrip: roundTrip{
|
||||
theResponse: theResponse,
|
||||
theError: theError}}
|
||||
}
|
||||
156
vendor/github.com/prometheus/prometheus/util/testutil/testing.go
generated
vendored
Normal file
156
vendor/github.com/prometheus/prometheus/util/testutil/testing.go
generated
vendored
Normal file
@@ -0,0 +1,156 @@
|
||||
// The MIT License (MIT)
|
||||
|
||||
// Copyright (c) 2014 Ben Johnson
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
)
|
||||
|
||||
// This package is imported by non-test code and therefore cannot import the
// testing package, which has side effects such as adding flags. Hence we use an
// interface to testing.{T,B}.
type TB interface {
	Helper()
	Fatalf(string, ...interface{})
}
|
||||
|
||||
// Assert fails the test if the condition is false.
|
||||
func Assert(tb TB, condition bool, format string, a ...interface{}) {
|
||||
tb.Helper()
|
||||
if !condition {
|
||||
tb.Fatalf("\033[31m"+format+"\033[39m\n", a...)
|
||||
}
|
||||
}
|
||||
|
||||
// Ok fails the test if an err is not nil.
func Ok(tb TB, err error) {
	tb.Helper()
	// ANSI red so the failure stands out in terminal output.
	tb.Fatalf("\033[31munexpected error: %v\033[39m\n", err)
}
|
||||
|
||||
// NotOk fails the test if an err is nil.
|
||||
func NotOk(tb TB, err error, a ...interface{}) {
|
||||
tb.Helper()
|
||||
if err == nil {
|
||||
if len(a) != 0 {
|
||||
format := a[0].(string)
|
||||
tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a[1:]...)
|
||||
}
|
||||
tb.Fatalf("\033[31mexpected error, got none\033[39m")
|
||||
}
|
||||
}
|
||||
|
||||
// Equals fails the test if exp is not equal to act.
// Equality is judged with reflect.DeepEqual; the failure message carries an
// optional prefix from msgAndArgs (see formatMessage) plus a structured diff.
func Equals(tb TB, exp, act interface{}, msgAndArgs ...interface{}) {
	tb.Helper()
	if !reflect.DeepEqual(exp, act) {
		tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v%s\033[39m\n", formatMessage(msgAndArgs), exp, act, diff(exp, act))
	}
}
|
||||
|
||||
// typeAndKind reports the reflect.Type and reflect.Kind of v, dereferencing
// a single level of pointer indirection so *T is reported as T.
func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
	typ := reflect.TypeOf(v)
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	return typ, typ.Kind()
}
|
||||
|
||||
// diff returns a diff of both values as long as both are of the same type and
// are a struct, map, slice, array or string. Otherwise it returns an empty string.
func diff(expected interface{}, actual interface{}) string {
	if expected == nil || actual == nil {
		return ""
	}

	et, ek := typeAndKind(expected)
	at, _ := typeAndKind(actual)
	// Diffing values of different types would be meaningless.
	if et != at {
		return ""
	}

	if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
		return ""
	}

	// Render both values to comparable multi-line strings: spew dumps for
	// composite values, the raw string value for strings.
	var e, a string
	c := spew.ConfigState{
		Indent:                  " ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
		SortKeys:                true,
	}
	if et != reflect.TypeOf("") {
		e = c.Sdump(expected)
		a = c.Sdump(actual)
	} else {
		e = reflect.ValueOf(expected).String()
		a = reflect.ValueOf(actual).String()
	}

	// Unified diff, expected vs. actual, with one line of context.
	diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
		A:        difflib.SplitLines(e),
		B:        difflib.SplitLines(a),
		FromFile: "Expected",
		FromDate: "",
		ToFile:   "Actual",
		ToDate:   "",
		Context:  1,
	})
	return "\n\nDiff:\n" + diff
}
|
||||
|
||||
// ErrorEqual compares Go errors for equality.
// Identical errors (including both nil) pass; two non-nil errors are compared
// by their Error() strings via Equals; any other combination fails the test.
func ErrorEqual(tb TB, left, right error, msgAndArgs ...interface{}) {
	tb.Helper()
	if left == right {
		return
	}

	if left != nil && right != nil {
		Equals(tb, left.Error(), right.Error(), msgAndArgs...)
		return
	}

	tb.Fatalf("\033[31m%s\n\nexp: %#v\n\ngot: %#v\033[39m\n", formatMessage(msgAndArgs), left, right)
}
|
||||
|
||||
// formatMessage renders an optional message prefix for a test failure: the
// first element is treated as a format string for the rest. An empty slice,
// or a non-string first element, yields the empty string.
func formatMessage(msgAndArgs []interface{}) string {
	if len(msgAndArgs) == 0 {
		return ""
	}
	msg, ok := msgAndArgs[0].(string)
	if !ok {
		return ""
	}
	return fmt.Sprintf("\n\nmsg: "+msg, msgAndArgs[1:]...)
}
|
||||
Reference in New Issue
Block a user