From f4a1d30c79668f2d1fa8e6cae153283ddca453b3 Mon Sep 17 00:00:00 2001 From: tommyers-elastic <106530686+tommyers-elastic@users.noreply.github.com> Date: Tue, 4 Mar 2025 10:58:59 +0000 Subject: [PATCH 1/3] Vendor github.com/prometheus/prometheus dependency to avoid conflicts with Otel collector deps in elastic agent. [git-generate] # this is pretty nasty, but it's the best way i could find to reliably get only the required source # # the way the whole thing works in general is: in go.mod, you replace the module you want to vendor # with a different name, using the major version suffix notation. you then add a 'replace' statement # which points to a local copy of that module at a specific version. # # this script will not work if the module you are trying to vendor is already vendored. before running # it, the module you wish to vendor should currently be used as-is in the beats source. if you need to # update an already vendored module, you should first 'un-vendor' it, then run this script fresh. # # when you're ready, run this script from the root go module directory. module=github.com/prometheus/prometheus version=v0.54.1 go_import_path=${module}@${version} go_mod_path=https://raw.githubusercontent.com/prometheus/prometheus/refs/tags/${version}/go.mod # ensure the correct version is in the root go.mod file so the right sources are downloaded in the next step go get ${go_import_path} # there's no way to selectively vendor, so we have to get everything ... go mod vendor # copy our module to a non-go-managed vendor directory and add a go.mod mkdir -p _vendor/${go_import_path} cp -R vendor/${module}/ _vendor/${go_import_path} curl ${go_mod_path} -o _vendor/${go_import_path}/go.mod pushd _vendor/${go_import_path} # this removes unnecessary dependencies from the vendored source go mod tidy popd rm -r vendor # update go.mod with the vendored module # note: a module imported via a /v2 path suffix must declare a semver v2 version, so upstream v0.54.1 is pinned as v2.54.1 below go mod edit -droprequire=${module} go mod edit -require=${module}/v2@v2.54.1 go mod edit -replace=${module}/v2=./_vendor/${go_import_path} # replace the existing imports grep -Rl --include="*.go" ${module} | xargs sed -i "" "s|${module}|${module}/v2|g" go mod tidy --- .../prometheus/prometheus@v0.54.1/LICENSE | 201 + .../prometheus/prometheus@v0.54.1/NOTICE | 108 + .../prometheus/prometheus@v0.54.1/go.mod | 32 + .../prometheus/prometheus@v0.54.1/go.sum | 45 + .../model/exemplar/exemplar.go | 65 + .../model/histogram/float_histogram.go | 1340 +++++ .../model/histogram/generic.go | 786 +++ .../model/histogram/histogram.go | 632 +++ .../model/histogram/test_utils.go | 52 + .../prometheus@v0.54.1/model/labels/labels.go | 489 ++ .../model/labels/labels_common.go | 222 + .../model/labels/labels_dedupelabels.go | 817 +++ .../model/labels/labels_stringlabels.go | 696 +++ .../model/labels/matcher.go | 170 + .../prometheus@v0.54.1/model/labels/regexp.go | 1086 ++++ .../model/labels/sharding.go | 47 + .../model/labels/sharding_dedupelabels.go | 52 + .../model/labels/sharding_stringlabels.go | 54 + .../model/labels/test_utils.go | 87 + .../model/textparse/README.md | 6 + .../model/textparse/interface.go | 113 + .../model/textparse/openmetricslex.l | 83 + .../model/textparse/openmetricslex.l.go | 870 ++++ .../model/textparse/openmetricsparse.go | 530 ++ .../model/textparse/promlex.l | 106 + .../model/textparse/promlex.l.go | 656 +++ .../model/textparse/promparse.go | 514 ++ .../model/textparse/promtestdata.nometa.txt | 411 ++ .../model/textparse/promtestdata.txt | 529 ++ .../model/textparse/protobufparse.go | 644 +++ .../model/timestamp/timestamp.go | 34 + 
.../prometheus@v0.54.1/model/value/value.go | 34 + .../prometheus@v0.54.1/prompb/README.md | 9 + .../prometheus@v0.54.1/prompb/buf.lock | 10 + .../prometheus@v0.54.1/prompb/buf.yaml | 21 + .../prometheus@v0.54.1/prompb/codec.go | 201 + .../prometheus@v0.54.1/prompb/custom.go | 31 + .../prompb/io/prometheus/client/metrics.pb.go | 4300 ++++++++++++++++ .../prompb/io/prometheus/client/metrics.proto | 160 + .../prometheus@v0.54.1/prompb/remote.pb.go | 1702 +++++++ .../prometheus@v0.54.1/prompb/remote.proto | 88 + .../prometheus@v0.54.1/prompb/types.pb.go | 4440 +++++++++++++++++ .../prometheus@v0.54.1/prompb/types.proto | 187 + go.mod | 4 +- go.sum | 8 +- metricbeat/helper/prometheus/textparse.go | 8 +- .../helper/prometheus/textparse_test.go | 2 +- .../module/openmetrics/collector/collector.go | 2 +- .../openmetrics/collector/collector_test.go | 2 +- .../module/prometheus/collector/collector.go | 2 +- .../prometheus/collector/collector_test.go | 2 +- .../prometheus/remote_write/remote_write.go | 2 +- 52 files changed, 22674 insertions(+), 18 deletions(-) create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/LICENSE create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/NOTICE create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/go.mod create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/go.sum create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/exemplar/exemplar.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/float_histogram.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/generic.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/histogram.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/test_utils.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_common.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_dedupelabels.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_stringlabels.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/matcher.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/regexp.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_dedupelabels.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_stringlabels.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/test_utils.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/README.md create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/interface.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricsparse.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l.go create mode 100644 
_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promparse.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.nometa.txt create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.txt create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/protobufparse.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/timestamp/timestamp.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/model/value/value.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/README.md create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.lock create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.yaml create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/codec.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/custom.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.pb.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.proto create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.pb.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.proto create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.pb.go create mode 100644 _vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.proto diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/LICENSE b/_vendor/github.com/prometheus/prometheus@v0.54.1/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/NOTICE b/_vendor/github.com/prometheus/prometheus@v0.54.1/NOTICE new file mode 100644 index 000000000000..8605c258e32d --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/NOTICE @@ -0,0 +1,108 @@ +The Prometheus systems and service monitoring server +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (https://soundcloud.com/). + + +The following components are included in this product: + +Bootstrap +https://getbootstrap.com +Copyright 2011-2014 Twitter, Inc. 
+Licensed under the MIT License + +bootstrap3-typeahead.js +https://github.com/bassjobsen/Bootstrap-3-Typeahead +Original written by @mdo and @fat +Copyright 2014 Bass Jobsen @bassjobsen +Licensed under the Apache License, Version 2.0 + +fuzzy +https://github.com/mattyork/fuzzy +Original written by @mattyork +Copyright 2012 Matt York +Licensed under the MIT License + +bootstrap-datetimepicker.js +https://github.com/Eonasdan/bootstrap-datetimepicker +Copyright 2015 Jonathan Peterson (@Eonasdan) +Licensed under the MIT License + +moment.js +https://github.com/moment/moment/ +Copyright JS Foundation and other contributors +Licensed under the MIT License + +Rickshaw +https://github.com/shutterstock/rickshaw +Copyright 2011-2014 by Shutterstock Images, LLC +See https://github.com/shutterstock/rickshaw/blob/master/LICENSE for license details + +mustache.js +https://github.com/janl/mustache.js +Copyright 2009 Chris Wanstrath (Ruby) +Copyright 2010-2014 Jan Lehnardt (JavaScript) +Copyright 2010-2015 The mustache.js community +Licensed under the MIT License + +jQuery +https://jquery.org +Copyright jQuery Foundation and other contributors +Licensed under the MIT License + +Protocol Buffers for Go with Gadgets +https://github.com/gogo/protobuf/ +Copyright (c) 2013, The GoGo Authors. +See source code for license details. + +Go support for leveled logs, analogous to +https://code.google.com/p/google-glog/ +Copyright 2013 Google Inc. +Licensed under the Apache License, Version 2.0 + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 + +DNS library in Go +https://miek.nl/2014/august/16/go-dns-package/ +Copyright 2009 The Go Authors, 2011 Miek Gieben +See https://github.com/miekg/dns/blob/master/LICENSE for license details. + +LevelDB key/value database in Go +https://github.com/syndtr/goleveldb +Copyright 2012 Suryandaru Triandana +See https://github.com/syndtr/goleveldb/blob/master/LICENSE for license details. + +gosnappy - a fork of code.google.com/p/snappy-go +https://github.com/syndtr/gosnappy +Copyright 2011 The Snappy-Go Authors +See https://github.com/syndtr/gosnappy/blob/master/LICENSE for license details. + +go-zookeeper - Native ZooKeeper client for Go +https://github.com/samuel/go-zookeeper +Copyright (c) 2013, Samuel Stauffer +See https://github.com/samuel/go-zookeeper/blob/master/LICENSE for license details. + +Time series compression algorithm from Facebook's Gorilla paper +https://github.com/dgryski/go-tsz +Copyright (c) 2015,2016 Damian Gryski +See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details. + +The Go programming language +https://go.dev/ +Copyright (c) 2009 The Go Authors +See https://go.dev/LICENSE for license details. + +The Codicon icon font from Microsoft +https://github.com/microsoft/vscode-codicons +Copyright (c) Microsoft Corporation and other contributors +See https://github.com/microsoft/vscode-codicons/blob/main/LICENSE for license details. + +We also use code from a large number of npm packages. For details, see: +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package.json +- https://github.com/prometheus/prometheus/blob/main/web/ui/react-app/package-lock.json +- The individual package licenses as copied from the node_modules directory can be found in + the npm_licenses.tar.bz2 archive in release tarballs and Docker images. 
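[Editor's note — illustrative sketch, not part of the generated patch. After the script in the commit message runs, the net effect on the root go.mod is a require/replace pair along these lines (the paths mirror the script's `go mod edit` arguments; the exact surrounding content will differ):

	require github.com/prometheus/prometheus/v2 v2.54.1

	replace github.com/prometheus/prometheus/v2 => ./_vendor/github.com/prometheus/prometheus@v0.54.1

The sed pass then rewrites every Go import to the renamed path, e.g. `import "github.com/prometheus/prometheus/v2/model/labels"`, which is why the vendored sources below also reference the /v2 module path.]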
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/go.mod b/_vendor/github.com/prometheus/prometheus@v0.54.1/go.mod new file mode 100644 index 000000000000..512113db89a4 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/go.mod @@ -0,0 +1,32 @@ +module github.com/prometheus/prometheus + +go 1.21.0 + +toolchain go1.22.5 + +require ( + github.com/cespare/xxhash/v2 v2.3.0 + github.com/gogo/protobuf v1.3.2 + github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc + github.com/prometheus/common v0.55.0 + golang.org/x/text v0.16.0 +) + +require ( + github.com/prometheus/client_model v0.6.1 // indirect + google.golang.org/protobuf v1.34.2 // indirect +) + +replace ( + k8s.io/klog => github.com/simonpasquier/klog-gokit v0.3.0 + k8s.io/klog/v2 => github.com/simonpasquier/klog-gokit/v3 v3.3.0 +) + +// Exclude linodego v1.0.0 as it is no longer published on github. +exclude github.com/linode/linodego v1.0.0 + +// Exclude grpc v1.30.0 because of breaking changes. See #7621. +exclude ( + github.com/grpc-ecosystem/grpc-gateway v1.14.7 + google.golang.org/api v0.30.0 +) diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/go.sum b/_vendor/github.com/prometheus/prometheus@v0.54.1/go.sum new file mode 100644 index 000000000000..3ba5ea20b92c --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/go.sum @@ -0,0 +1,45 @@ +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/exemplar/exemplar.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/exemplar/exemplar.go new file mode 100644 index 000000000000..c9ecf21659a5 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/exemplar/exemplar.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package exemplar + +import "github.com/prometheus/prometheus/v2/model/labels" + +// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +const ExemplarMaxLabelSetLength = 128 + +// Exemplar is additional information associated with a time series. +type Exemplar struct { + Labels labels.Labels `json:"labels"` + Value float64 `json:"value"` + Ts int64 `json:"timestamp"` + HasTs bool +} + +type QueryResult struct { + SeriesLabels labels.Labels `json:"seriesLabels"` + Exemplars []Exemplar `json:"exemplars"` +} + +// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for +// both exemplars then the timestamps will be ignored for the comparison. This can come up +// when an exemplar is exported without its own timestamp, in which case the scrape timestamp +// is assigned to the Ts field. However, we still want to treat the same exemplar, scraped without +// an exported timestamp, as a duplicate of itself for each subsequent scrape. +func (e Exemplar) Equals(e2 Exemplar) bool { + if !labels.Equal(e.Labels, e2.Labels) { + return false + } + + if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts { + return false + } + + return e.Value == e2.Value +} + +// Compare orders exemplars first by timestamp, then value, then labels. +func Compare(a, b Exemplar) int { + if a.Ts < b.Ts { + return -1 + } else if a.Ts > b.Ts { + return 1 + } + if a.Value < b.Value { + return -1 + } else if a.Value > b.Value { + return 1 + } + return labels.Compare(a.Labels, b.Labels) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/float_histogram.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/float_histogram.go new file mode 100644 index 000000000000..2a37ea66d457 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/float_histogram.go @@ -0,0 +1,1340 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// FloatHistogram is similar to Histogram but uses float64 for all +// counts. Additionally, bucket counts are absolute and not deltas. +// +// A FloatHistogram is needed by PromQL to handle operations that might result +// in fractional counts. Since the counts in a histogram are unlikely to be too +// large to be represented precisely by a float64, a FloatHistogram can also be +// used to represent a histogram with integer counts and thus serves as a more +// generalized representation. +type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. 
+ // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. Must be zero or positive. + ZeroCount float64 + // Total number of observations. Must be zero or positive. + Count float64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. Each represents an absolute count and + // must be zero or positive. + PositiveBuckets, NegativeBuckets []float64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +func (h *FloatHistogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + +// Copy returns a deep copy of the Histogram. +func (h *FloatHistogram) Copy() *FloatHistogram { + c := FloatHistogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + + return &c +} + +// CopyTo makes a deep copy into the given FloatHistogram. +// The destination object has to be a non-nil pointer. 
+func (h *FloatHistogram) CopyTo(to *FloatHistogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + +// CopyToSchema works like Copy, but the returned deep copy has the provided +// target schema, which must be ≤ the original schema (i.e. it must have a lower +// resolution). This method panics if a custom buckets schema is used in the +// receiving FloatHistogram or as the provided targetSchema. +func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { + if targetSchema == h.Schema { + // Fast path. + return h.Copy() + } + if h.UsesCustomBuckets() { + panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema)) + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) + } + c := FloatHistogram{ + Schema: targetSchema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.ZeroCount, + Count: h.Count, + Sum: h.Sum, + } + + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false) + + return &c +} + +// String returns a string representation of the Histogram. +func (h *FloatHistogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[float64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. 
+ + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + if m.UsesCustomBuckets() { + res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. +func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } + return Bucket[float64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + // Index is irrelevant for the zero bucket. + } +} + +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all +// bucket counts including the zero bucket and the count and the sum of +// observations. The bucket layout stays the same. This method changes the +// receiving histogram directly (rather than acting on a copy). It returns a +// pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { + h.ZeroCount *= factor + h.Count *= factor + h.Sum *= factor + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] *= factor + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] *= factor + } + return h +} + +// Div works like Mul but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + +// Add adds the provided other histogram to the receiving histogram. Count, Sum, +// and buckets from the other histogram are added to the corresponding +// components of the receiving histogram. Buckets in the other histogram that do +// not exist in the receiving histogram are inserted into the latter. The +// resulting histogram might have buckets with a population of zero or directly +// adjacent spans (offset=0). To normalize those, call the Compact method. +// +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. 
+// Adding is currently only supported between 2 exponential histograms, or between +// 2 custom buckets histograms with the exact same custom bounds. +// +// This method returns a pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". + case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. + h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + } + h.Count += other.Count + h.Sum += other.Sum + + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, 
hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil +} + +// Sub works like Add but subtracts the other histogram. +func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + } + h.Count -= other.Count + h.Sum -= other.Sum + + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil +} + +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. +// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns +// because this method is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. 
+func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || + math.Float64bits(h.Count) != math.Float64bits(h2.Count) || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { + return false + } + + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + + return true +} + +// Size returns the total size of the FloatHistogram, which includes the size of the pointer +// to FloatHistogram, all its fields, and all elements contained in slices. +// NOTE: this is only valid for 64 bit architectures. +func (h *FloatHistogram) Size() int { + // Size of each slice separately. + posSpanSize := len(h.PositiveSpans) * 8 // 8 bytes (int32 + uint32). + negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). + posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). + negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64). + + // Total size of the struct. + + // fh is 8 bytes. + // fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding). + // fh.Schema is 4 bytes. + // fh.ZeroThreshold is 8 bytes. + // fh.ZeroCount is 8 bytes. + // fh.Count is 8 bytes. + // fh.Sum is 8 bytes. + // fh.PositiveSpans is 24 bytes. + // fh.NegativeSpans is 24 bytes. + // fh.PositiveBuckets is 24 bytes. + // fh.NegativeBuckets is 24 bytes. + // fh.CustomValues is 24 bytes. + structSize := 168 + + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize +} + +// Compact eliminates empty buckets at the beginning and end of each span, then +// merges spans that are consecutive or at most maxEmptyBuckets apart, and +// finally splits spans that contain more consecutive empty buckets than +// maxEmptyBuckets. (The actual implementation might do something more efficient +// but with the same result.) The compaction happens "in place" in the +// receiving histogram, but a pointer to it is returned for convenience. +// +// The ideal value for maxEmptyBuckets depends on circumstances. The motivation +// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to +// represent very few empty buckets explicitly within one span than cutting the +// one span into two to treat the empty buckets as a gap between the two spans, +// both in terms of storage requirement as well as in terms of encoding and +// decoding effort. However, the tradeoffs are subtle. For one, they are +// different in the exposition format vs. in a TSDB chunk vs. for the in-memory +// representation as Go types. In the TSDB, as an additional aspect, the span +// layout is only stored once per chunk, while many histograms with that same +// chunk layout are then only stored with their buckets (so that even a single +// empty bucket will be stored many times). +// +// For the Go types, an additional Span takes 8 bytes. Similarly, an additional +// bucket takes 8 bytes. 
Therefore, with a single separating empty bucket, both +// options have the same storage requirement, but the single-span solution is +// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0 +// and only use a larger number if you know what you are doing. +func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false, + ) + return h +} + +// DetectReset returns true if the receiving histogram is missing any buckets +// that have a non-zero population in the provided previous histogram. It also +// returns true if any count (in any bucket, in the zero count, or in the count +// of observations, but NOT the sum of observations) is smaller in the receiving +// histogram compared to the previous histogram. Otherwise, it returns false. +// +// This method will shortcut to true if a CounterReset is detected, and shortcut +// to false if NotCounterReset is detected. Otherwise it will do the work to detect +// a reset. +// +// Special behavior in case the Schema or the ZeroThreshold are not the same in +// both histograms: +// +// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an +// increase of resolution) can only happen together with a reset. Thus, the +// method returns true in either case. +// +// - Upon an increase of the ZeroThreshold, the buckets in the previous +// histogram that fall within the new ZeroThreshold are added to the ZeroCount +// of the previous histogram (without mutating the provided previous +// histogram). The scenario that a populated bucket of the previous histogram +// is partially within, partially outside of the new ZeroThreshold, can only +// happen together with a counter reset and therefore shortcuts to returning +// true. +// +// - Upon a decrease of the Schema, the buckets of the previous histogram are +// merged so that they match the new, lower-resolution schema (again without +// mutating the provided previous histogram). +func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { + if h.CounterResetHint == CounterReset { + return true + } + if h.CounterResetHint == NotCounterReset { + return false + } + // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType), + // we go on as we would otherwise, for reasons explained below. + // + // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes + // with a counter reset. Therefore, we have to do all the detailed work to find out if there + // is a counter reset or not. + // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still + // allows the user to apply functions to gauge histograms that are only meant for counter histograms. + // In this case, we treat the gauge histograms as counter histograms. A warning should be returned + // to the user in this case. + if h.Count < previous.Count { + return true + } + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) { + // Mark that something has changed or that the application has been restarted. However, this does + // not matter so much since the change in schema will be handled directly in the chunks and PromQL + // functions. 
+ return true + } + if h.Schema > previous.Schema { + return true + } + if h.ZeroThreshold < previous.ZeroThreshold { + // ZeroThreshold decreased. + return true + } + previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold) + if newThreshold != h.ZeroThreshold { + // ZeroThreshold is within a populated bucket in previous + // histogram. + return true + } + if h.ZeroCount < previousZeroCount { + return true + } + currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + if detectReset(&currIt, &prevIt) { + return true + } + currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + return detectReset(&currIt, &prevIt) +} + +func detectReset(currIt, prevIt *floatBucketIterator) bool { + if !prevIt.Next() { + return false // If no buckets in previous histogram, nothing can be reset. + } + prevBucket := prevIt.strippedAt() + if !currIt.Next() { + // No bucket in current, but at least one in previous + // histogram. Check if any of those are non-zero, in which case + // this is a reset. + for { + if prevBucket.count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket := currIt.strippedAt() + for { + // Forward currIt until we find the bucket corresponding to prevBucket. + for currBucket.index < prevBucket.index { + if !currIt.Next() { + // Reached end of currIt early, therefore + // previous histogram has a bucket that the + // current one does not have. Unless all + // remaining buckets in the previous histogram + // are unpopulated, this is a reset. + for { + if prevBucket.count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket = currIt.strippedAt() + } + if currBucket.index > prevBucket.index { + // Previous histogram has a bucket the current one does + // not have. If it's populated, it's a reset. + if prevBucket.count != 0 { + return true + } + } else { + // We have reached corresponding buckets in both iterators. + // We can finally compare the counts. + if currBucket.count < prevBucket.count { + return true + } + } + if !prevIt.Next() { + // Reached end of prevIt without finding offending buckets. + return false + } + prevBucket = prevIt.strippedAt() + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { + it := h.floatBucketIterator(true, 0, h.Schema) + return &it +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going +// down). +func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { + it := h.floatBucketIterator(false, 0, h.Schema) + return &it +} + +// PositiveReverseBucketIterator returns a BucketIterator to iterate over all +// positive buckets in descending order (starting at the highest bucket and +// going down towards the zero bucket). 
+func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it +} + +// NegativeReverseBucketIterator returns a BucketIterator to iterate over all +// negative buckets in ascending order (starting at the lowest bucket and going +// up towards the zero bucket). +func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it +} + +// AllBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in ascending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil), + rightIter: h.floatBucketIterator(true, 0, h.Schema), + state: -1, + } +} + +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues), + rightIter: h.floatBucketIterator(false, 0, h.Schema), + state: -1, + } +} + +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// We do not check for h.Count being at least as large as the sum of the +// counts in the buckets because floating point precision issues can +// create false positives here. 
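+//
+// A minimal usage sketch (for illustration; h is any *FloatHistogram decoded
+// from an untrusted source):
+//
+//	if err := h.Validate(); err != nil {
+//		return fmt.Errorf("rejecting inconsistent histogram: %w", err)
+//	}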
+func (h *FloatHistogram) Validate() error {
+	var nCount, pCount float64
+	if h.UsesCustomBuckets() {
+		if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("custom buckets: %w", err)
+		}
+		if h.ZeroCount != 0 {
+			return fmt.Errorf("custom buckets: must have zero count of 0")
+		}
+		if h.ZeroThreshold != 0 {
+			return fmt.Errorf("custom buckets: must have zero threshold of 0")
+		}
+		if len(h.NegativeSpans) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative spans")
+		}
+		if len(h.NegativeBuckets) > 0 {
+			return fmt.Errorf("custom buckets: must not have negative buckets")
+		}
+	} else {
+		if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil {
+			return fmt.Errorf("positive side: %w", err)
+		}
+		if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false)
+		if err != nil {
+			return fmt.Errorf("negative side: %w", err)
+		}
+		if h.CustomValues != nil {
+			return fmt.Errorf("histogram with exponential schema must not have custom bounds")
+		}
+	}
+	err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false)
+	if err != nil {
+		return fmt.Errorf("positive side: %w", err)
+	}
+
+	return nil
+}
+
+// zeroCountForLargerThreshold returns what the histogram's zero count would be
+// if the ZeroThreshold had the provided larger (or equal) value. If the
+// provided value is less than the histogram's ZeroThreshold, the method panics.
+// If the largerThreshold ends up within a populated bucket of the histogram, it
+// is adjusted upwards to the lower limit of that bucket (all in terms of
+// absolute values) and that bucket's count is included in the returned
+// count. The adjusted threshold is returned, too.
+func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) {
+	// Fast path.
+	if largerThreshold == h.ZeroThreshold {
+		return h.ZeroCount, largerThreshold
+	}
+	if largerThreshold < h.ZeroThreshold {
+		panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold))
+	}
+outer:
+	for {
+		count = h.ZeroCount
+		i := h.PositiveBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Lower >= largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
+			if b.Upper > largerThreshold {
+				// New threshold ended up within a bucket. If it's
+				// populated, we need to adjust largerThreshold before
+				// we are done here.
+				if b.Count != 0 {
+					largerThreshold = b.Upper
+				}
+				break
+			}
+		}
+		i = h.NegativeBucketIterator()
+		for i.Next() {
+			b := i.At()
+			if b.Upper <= -largerThreshold {
+				break
+			}
+			count += b.Count // Bucket to be merged into zero bucket.
+			if b.Lower < -largerThreshold {
+				// New threshold ended up within a bucket. If
+				// it's populated, we need to adjust
+				// largerThreshold and have to redo the whole
+				// thing because the treatment of the positive
+				// buckets is invalid now.
+				if b.Count != 0 {
+					largerThreshold = -b.Lower
+					continue outer
+				}
+				break
+			}
+		}
+		return count, largerThreshold
+	}
+}
+
+// trimBucketsInZeroBucket removes all buckets that are within the zero
+// bucket. It assumes that the zero threshold is at a bucket boundary and that
+// the counts in the buckets to remove are already part of the zero count.
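+//
+// For illustration, assuming schema 0 (bucket boundaries at powers of two)
+// and a ZeroThreshold of 1: the positive buckets (0.25,0.5] and (0.5,1] lie
+// entirely within the zero bucket, so their counts are zeroed here and the
+// Compact(0) call below removes them from the spans.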
+func (h *FloatHistogram) trimBucketsInZeroBucket() {
+	i := h.PositiveBucketIterator()
+	bucketsIdx := 0
+	for i.Next() {
+		b := i.At()
+		if b.Lower >= h.ZeroThreshold {
+			break
+		}
+		h.PositiveBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	i = h.NegativeBucketIterator()
+	bucketsIdx = 0
+	for i.Next() {
+		b := i.At()
+		if b.Upper <= -h.ZeroThreshold {
+			break
+		}
+		h.NegativeBuckets[bucketsIdx] = 0
+		bucketsIdx++
+	}
+	// We are abusing Compact to trim the buckets set to zero
+	// above. Premature compacting could cause additional cost, but this
+	// code path is probably rarely used anyway.
+	h.Compact(0)
+}
+
+// reconcileZeroBuckets finds a zero bucket large enough to include the zero
+// buckets of both histograms (the receiving histogram and the other histogram)
+// with a zero threshold that is not within a populated bucket in either
+// histogram. This method modifies the receiving histogram accordingly, but
+// leaves the other histogram as is. Instead, it returns the zero count the
+// other histogram would have if it were modified.
+func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 {
+	otherZeroCount := other.ZeroCount
+	otherZeroThreshold := other.ZeroThreshold
+
+	for otherZeroThreshold != h.ZeroThreshold {
+		if h.ZeroThreshold > otherZeroThreshold {
+			otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold)
+		}
+		if otherZeroThreshold > h.ZeroThreshold {
+			h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold)
+			h.trimBucketsInZeroBucket()
+		}
+	}
+	return otherZeroCount
+}
+
+// floatBucketIterator is a low-level constructor for bucket iterators.
+//
+// If positive is true, the returned iterator iterates through the positive
+// buckets, otherwise through the negative buckets.
+//
+// Only for exponential schemas, if absoluteStartValue is < the lowest absolute
+// value of any upper bucket boundary, the iterator starts with the first bucket.
+// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤
+// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and
+// no buckets are skipped.
+//
+// targetSchema must be ≤ the schema of FloatHistogram (and of course within the
+// legal values for schemas in general). The buckets are merged to match the
+// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets
+// schemas cannot be merged with other schemas.
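+//
+// A minimal sketch of merging to a coarser schema while iterating (for
+// illustration; it assumes an exponential-schema histogram h with
+// h.Schema >= 0):
+//
+//	it := h.floatBucketIterator(true, 0, 0) // positive buckets, merged to schema 0
+//	for it.Next() {
+//		b := it.At() // bounds here follow the target schema 0
+//		fmt.Println(b.Lower, b.Upper, b.Count)
+//	}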
+func (h *FloatHistogram) floatBucketIterator(
+	positive bool, absoluteStartValue float64, targetSchema int32,
+) floatBucketIterator {
+	if h.UsesCustomBuckets() && targetSchema != h.Schema {
+		panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema"))
+	}
+	if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) {
+		panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema"))
+	}
+	if targetSchema > h.Schema {
+		panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema))
+	}
+	i := floatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:   h.Schema,
+			positive: positive,
+		},
+		targetSchema:           targetSchema,
+		absoluteStartValue:     absoluteStartValue,
+		boundReachedStartValue: absoluteStartValue == 0,
+	}
+	if positive {
+		i.spans = h.PositiveSpans
+		i.buckets = h.PositiveBuckets
+		i.customValues = h.CustomValues
+	} else {
+		i.spans = h.NegativeSpans
+		i.buckets = h.NegativeBuckets
+	}
+	return i
+}
+
+// newReverseFloatBucketIterator is a low-level constructor for reverse bucket iterators.
+func newReverseFloatBucketIterator(
+	spans []Span, buckets []float64, schema int32, positive bool, customValues []float64,
+) reverseFloatBucketIterator {
+	r := reverseFloatBucketIterator{
+		baseBucketIterator: baseBucketIterator[float64, float64]{
+			schema:       schema,
+			spans:        spans,
+			buckets:      buckets,
+			positive:     positive,
+			customValues: customValues,
+		},
+	}
+
+	r.spansIdx = len(r.spans) - 1
+	r.bucketsIdx = len(r.buckets) - 1
+	if r.spansIdx >= 0 {
+		r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1
+	}
+	r.currIdx = 0
+	for _, s := range r.spans {
+		r.currIdx += s.Offset + int32(s.Length)
+	}
+
+	return r
+}
+
+type floatBucketIterator struct {
+	baseBucketIterator[float64, float64]
+
+	targetSchema       int32   // targetSchema is the schema to merge to and must be ≤ schema.
+	origIdx            int32   // The bucket index within the original schema.
+	absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value.
+
+	boundReachedStartValue bool // Has getBound reached absoluteStartValue already?
+}
+
+func (i *floatBucketIterator) At() Bucket[float64] {
+	// Need to use i.targetSchema rather than i.baseBucketIterator.schema.
+	return i.baseBucketIterator.at(i.targetSchema)
+}
+
+func (i *floatBucketIterator) Next() bool {
+	if i.spansIdx >= len(i.spans) {
+		return false
+	}
+
+	if i.schema == i.targetSchema {
+		// Fast path for the common case.
+		span := i.spans[i.spansIdx]
+		if i.bucketsIdx == 0 {
+			// Seed currIdx for the first bucket.
+			i.currIdx = span.Offset
+		} else {
+			i.currIdx++
+		}
+
+		for i.idxInSpan >= span.Length {
+			// We have exhausted the current span and have to find a new
+			// one. We even handle pathologic spans of length 0 here.
+			i.idxInSpan = 0
+			i.spansIdx++
+			if i.spansIdx >= len(i.spans) {
+				return false
+			}
+			span = i.spans[i.spansIdx]
+			i.currIdx += span.Offset
+		}
+
+		i.currCount = i.buckets[i.bucketsIdx]
+		i.idxInSpan++
+		i.bucketsIdx++
+	} else {
+		// Copy all of these into local variables so that we can forward to the
+		// next bucket and then roll back if needed.
+		origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan
+		span := i.spans[spansIdx]
+		firstPass := true
+		i.currCount = 0
+
+	mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema.
+		for {
+			if i.bucketsIdx == 0 {
+				// Seed origIdx for the first bucket.
+ origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } + } + } + + // Skip buckets before absoluteStartValue for exponential schemas. + // TODO(beorn7): Maybe do something more efficient than this recursive call. + if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + return i.Next() + } + i.boundReachedStartValue = true + return true +} + +type reverseFloatBucketIterator struct { + baseBucketIterator[float64, float64] + idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. +} + +func (i *reverseFloatBucketIterator) Next() bool { + i.currIdx-- + if i.bucketsIdx < 0 { + return false + } + + for i.idxInSpan < 0 { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + i.spansIdx-- + i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1 + i.currIdx -= i.spans[i.spansIdx+1].Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.bucketsIdx-- + i.idxInSpan-- + return true +} + +type allFloatBucketIterator struct { + h *FloatHistogram + leftIter reverseFloatBucketIterator + rightIter floatBucketIterator + // -1 means we are iterating negative buckets. + // 0 means it is time for the zero bucket. + // 1 means we are iterating positive buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allFloatBucketIterator) Next() bool { + switch i.state { + case -1: + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = 1 + if i.h.ZeroCount > 0 { + i.currBucket = i.h.ZeroBucket() + return true + } + return i.Next() + case 1: + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} + +// targetIdx returns the bucket index in the target schema for the given bucket +// index idx in the original schema. 
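+//
+// For illustration: reducing from schema 2 to schema 0 merges runs of four
+// consecutive origin buckets into one target bucket, e.g.
+//
+//	targetIdx(5, 2, 0) == ((5-1)>>2)+1 == 2
+//
+// i.e. origin bucket 5 (upper bound 2^(5/4) ≈ 2.38) lands in target bucket 2,
+// which covers (2,4].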
+func targetIdx(idx, originSchema, targetSchema int32) int32 { + return ((idx - 1) >> (originSchema - targetSchema)) + 1 +} + +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { + var ( + iSpan = -1 + iBucket = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. 
+ deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} + +func FloatBucketsMatch(b1, b2 []float64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if math.Float64bits(b) != math.Float64bits(b2[i]) { + return false + } + } + return true +} + +// ReduceResolution reduces the float histogram's spans, buckets into target schema. +// The target schema must be smaller than the current float histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. +func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true) + + h.Schema = targetSchema + return h +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/generic.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/generic.go new file mode 100644 index 000000000000..a36b58d0696c --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/generic.go @@ -0,0 +1,786 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package histogram + +import ( + "errors" + "fmt" + "math" + "strings" +) + +const ( + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMin int32 = -4 + CustomBucketsSchema int32 = -53 +) + +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") + ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") + ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") + ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") +) + +func IsCustomBucketsSchema(s int32) bool { + return s == CustomBucketsSchema +} + +func IsExponentialSchema(s int32) bool { + return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax +} + +// BucketCount is a type constraint for the count in a bucket, which can be +// float64 (for type FloatHistogram) or uint64 (for type Histogram). +type BucketCount interface { + float64 | uint64 +} + +// InternalBucketCount is used internally by Histogram and FloatHistogram. The +// difference to the BucketCount above is that Histogram internally uses deltas +// between buckets rather than absolute counts (while FloatHistogram uses +// absolute counts directly). Go type parameters don't allow type +// specialization. Therefore, where special treatment of deltas between buckets +// vs. absolute counts is important, this information has to be provided as a +// separate boolean parameter "deltaBuckets". +type InternalBucketCount interface { + float64 | int64 +} + +// Bucket represents a bucket with lower and upper limit and the absolute count +// of samples in the bucket. It also specifies if each limit is inclusive or +// not. (Mathematically, inclusive limits create a closed interval, and +// non-inclusive limits an open interval.) +// +// To represent cumulative buckets, Lower is set to -Inf, and the Count is then +// cumulative (including the counts of all buckets for smaller values). +type Bucket[BC BucketCount] struct { + Lower, Upper float64 + LowerInclusive, UpperInclusive bool + Count BC + + // Index within schema. To easily compare buckets that share the same + // schema and sign (positive or negative). Irrelevant for the zero bucket. + Index int32 +} + +// strippedBucket is Bucket without bound values (which are expensive to calculate +// and not used in certain use cases). +type strippedBucket[BC BucketCount] struct { + count BC + index int32 +} + +// String returns a string representation of a Bucket, using the usual +// mathematical notation of '['/']' for inclusive bounds and '('/')' for +// non-inclusive bounds. 
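+//
+// For illustration:
+//
+//	b := Bucket[uint64]{Lower: 0.5, Upper: 1, UpperInclusive: true, Count: 3}
+//	b.String() // "(0.5,1]:3"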
+func (b Bucket[BC]) String() string { + var sb strings.Builder + if b.LowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if b.UpperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +// BucketIterator iterates over the buckets of a Histogram, returning decoded +// buckets. +type BucketIterator[BC BucketCount] interface { + // Next advances the iterator by one. + Next() bool + // At returns the current bucket. + At() Bucket[BC] +} + +// baseBucketIterator provides a struct that is shared by most BucketIterator +// implementations, together with an implementation of the At method. This +// iterator can be embedded in full implementations of BucketIterator to save on +// code replication. +type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { + schema int32 + spans []Span + buckets []IBC + + positive bool // Whether this is for positive buckets. + + spansIdx int // Current span within spans slice. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + bucketsIdx int // Current bucket within buckets slice. + + currCount IBC // Count in the current bucket. + currIdx int32 // The actual bucket index. + + customValues []float64 // Bounds (usually upper) for histograms with custom buckets. +} + +func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { + return b.at(b.schema) +} + +// at is an internal version of the exported At to enable using a different schema. +func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { + bucket := Bucket[BC]{ + Count: BC(b.currCount), + Index: b.currIdx, + } + if b.positive { + bucket.Upper = getBound(b.currIdx, schema, b.customValues) + bucket.Lower = getBound(b.currIdx-1, schema, b.customValues) + } else { + bucket.Lower = -getBound(b.currIdx, schema, b.customValues) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues) + } + if IsCustomBucketsSchema(schema) { + bucket.LowerInclusive = b.currIdx == 0 + bucket.UpperInclusive = true + } else { + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 + } + return bucket +} + +// strippedAt returns current strippedBucket (which lacks bucket bounds but is cheaper to compute). +func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] { + return strippedBucket[BC]{ + count: BC(b.currCount), + index: b.currIdx, + } +} + +// compactBuckets is a generic function used by both Histogram.Compact and +// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { + // Fast path: If there are no empty buckets AND no offset in any span is + // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return + // immediately. We check that first because it's cheap and presumably + // common. 
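+	// For illustration of the overall effect (not specific to the fast
+	// path): with maxEmptyBuckets == 0, absolute buckets [1, 0, 0, 2] under
+	// a single span {Offset: 0, Length: 4} compact to buckets [1, 2] under
+	// spans {Offset: 0, Length: 1} and {Offset: 2, Length: 1}.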
+ nothingToDo := true + var currentBucketAbsolute IBC + for _, bucket := range buckets { + if deltaBuckets { + currentBucketAbsolute += bucket + } else { + currentBucketAbsolute = bucket + } + if currentBucketAbsolute == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + for _, span := range spans { + if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + return buckets, spans + } + } + + var iBucket, iSpan int + var posInSpan uint32 + currentBucketAbsolute = 0 + + // Helper function. + emptyBucketsHere := func() int { + i := 0 + abs := currentBucketAbsolute + for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 { + i++ + if i+iBucket >= len(buckets) { + break + } + abs = buckets[i+iBucket] + } + return i + } + + // Merge spans with zero-offset to avoid special cases later. + if len(spans) > 1 { + for i, span := range spans[1:] { + if span.Offset == 0 { + spans[iSpan].Length += span.Length + continue + } + iSpan++ + if i+1 != iSpan { + spans[iSpan] = span + } + } + spans = spans[:iSpan+1] + iSpan = 0 + } + + // Merge spans with zero-length to avoid special cases later. + for i, span := range spans { + if span.Length == 0 { + if i+1 < len(spans) { + spans[i+1].Offset += span.Offset + } + continue + } + if i != iSpan { + spans[iSpan] = span + } + iSpan++ + } + spans = spans[:iSpan] + iSpan = 0 + + // Cut out empty buckets from start and end of spans, no matter + // what. Also cut out empty buckets from the middle of a span but only + // if there are more than maxEmptyBuckets consecutive empty buckets. + for iBucket < len(buckets) { + if deltaBuckets { + currentBucketAbsolute += buckets[iBucket] + } else { + currentBucketAbsolute = buckets[iBucket] + } + if nEmpty := emptyBucketsHere(); nEmpty > 0 { + if posInSpan > 0 && + nEmpty < int(spans[iSpan].Length-posInSpan) && + nEmpty <= maxEmptyBuckets { + // The empty buckets are in the middle of a + // span, and there are few enough to not bother. + // Just fast-forward. + iBucket += nEmpty + if deltaBuckets { + currentBucketAbsolute = 0 + } + posInSpan += uint32(nEmpty) + continue + } + // In all other cases, we cut out the empty buckets. + if deltaBuckets && iBucket+nEmpty < len(buckets) { + currentBucketAbsolute = -buckets[iBucket] + buckets[iBucket+nEmpty] += buckets[iBucket] + } + buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...) + if posInSpan == 0 { + // Start of span. + if nEmpty == int(spans[iSpan].Length) { + // The whole span is empty. + offset := spans[iSpan].Offset + spans = append(spans[:iSpan], spans[iSpan+1:]...) + if len(spans) > iSpan { + spans[iSpan].Offset += offset + int32(nEmpty) + } + continue + } + spans[iSpan].Length -= uint32(nEmpty) + spans[iSpan].Offset += int32(nEmpty) + continue + } + // It's in the middle or in the end of the span. + // Split the current span. + newSpan := Span{ + Offset: int32(nEmpty), + Length: spans[iSpan].Length - posInSpan - uint32(nEmpty), + } + spans[iSpan].Length = posInSpan + // In any case, we have to split to the next span. + iSpan++ + posInSpan = 0 + if newSpan.Length == 0 { + // The span is empty, so we were already at the end of a span. + // We don't have to insert the new span, just adjust the next + // span's offset, if there is one. + if iSpan < len(spans) { + spans[iSpan].Offset += int32(nEmpty) + } + continue + } + // Insert the new span. 
+ spans = append(spans, Span{}) + if iSpan+1 < len(spans) { + copy(spans[iSpan+1:], spans[iSpan:]) + } + spans[iSpan] = newSpan + continue + } + iBucket++ + posInSpan++ + if posInSpan >= spans[iSpan].Length { + posInSpan = 0 + iSpan++ + } + } + if maxEmptyBuckets == 0 || len(buckets) == 0 { + return buckets, spans + } + + // Finally, check if any offsets between spans are small enough to merge + // the spans. + iBucket = int(spans[0].Length) + if deltaBuckets { + currentBucketAbsolute = 0 + for _, bucket := range buckets[:iBucket] { + currentBucketAbsolute += bucket + } + } + iSpan = 1 + for iSpan < len(spans) { + if int(spans[iSpan].Offset) > maxEmptyBuckets { + l := int(spans[iSpan].Length) + if deltaBuckets { + for _, bucket := range buckets[iBucket : iBucket+l] { + currentBucketAbsolute += bucket + } + } + iBucket += l + iSpan++ + continue + } + // Merge span with previous one and insert empty buckets. + offset := int(spans[iSpan].Offset) + spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length + spans = append(spans[:iSpan], spans[iSpan+1:]...) + newBuckets := make([]IBC, len(buckets)+offset) + copy(newBuckets, buckets[:iBucket]) + copy(newBuckets[iBucket+offset:], buckets[iBucket:]) + if deltaBuckets { + newBuckets[iBucket] = -currentBucketAbsolute + newBuckets[iBucket+offset] += currentBucketAbsolute + } + iBucket += offset + buckets = newBuckets + currentBucketAbsolute = buckets[iBucket] + // Note that with many merges, it would be more efficient to + // first record all the chunks of empty buckets to insert and + // then do it in one go through all the buckets. + } + + return buckets, spans +} + +func checkHistogramSpans(spans []Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount) + } + last = c + *count += BC(c) + } + + return nil +} + +func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error { + prev := math.Inf(-1) + for _, curr := range bounds { + if curr <= prev { + return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid) + } + prev = curr + } + if prev == math.Inf(1) { + return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite) + } + + var spanBuckets int + var totalSpanLength int + for n, span := range spans { + if span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + totalSpanLength += int(span.Length) + int(span.Offset) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + if (len(bounds) + 1) < totalSpanLength { + 
return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch) + } + + return nil +} + +func getBound(idx, schema int32, customValues []float64) float64 { + if IsCustomBucketsSchema(schema) { + length := int32(len(customValues)) + switch { + case idx > length || idx < -1: + panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) + case idx == length: + return math.Inf(1) + case idx == -1: + return math.Inf(-1) + default: + return customValues[idx] + } + } + return getBoundExponential(idx, schema) +} + +func getBoundExponential(idx, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with an idx + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its idx + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := int(idx) << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := idx & ((1 << schema) - 1) + frac := exponentialBounds[schema][fracIdx] + exp := (int(idx) >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// exponentialBounds is a precalculated table of bucket bounds in the interval +// [0.5,1) in schema 0 to 8. 
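+// Each entry follows the same formula: for illustration, entry i of schema s
+// is 2^(i*2^-s-1), so exponentialBounds[1][1] == 2^(1/2-1) ≈ 0.7071067811865475.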
+var exponentialBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 
0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 
0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// reduceResolution reduces the input spans, buckets in origin schema to the spans, buckets in 
target schema. +// The target schema must be smaller than the original schema. +// Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +// Set inplace to true to reuse input slices and avoid allocations (otherwise +// new slices will be allocated for result). +func reduceResolution[IBC InternalBucketCount]( + originSpans []Span, + originBuckets []IBC, + originSchema, + targetSchema int32, + deltaBuckets bool, + inplace bool, +) ([]Span, []IBC) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []IBC // The bucket counts in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`. + targetBucketIdx int32 // The index of bucket in the target schema. + lastBucketCount IBC // The last visited bucket's count in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + lastTargetBucketCount IBC + ) + + if inplace { + // Slice reuse is safe because when reducing the resolution, + // target slices don't grow faster than origin slices are being read. + targetSpans = originSpans[:0] + targetBuckets = originBuckets[:0] + } + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. + targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + lastTargetBucketIdx = targetBucketIdx + lastBucketCount = originBuckets[bucketCountIdx] + lastTargetBucketCount = originBuckets[bucketCountIdx] + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets[len(targetBuckets)-1] += lastBucketCount + lastTargetBucketCount += lastBucketCount + } else { + targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx] + } + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + lastTargetBucketIdx++ + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. 
+				span := Span{
+					Offset: targetBucketIdx - lastTargetBucketIdx - 1,
+					Length: 1,
+				}
+				targetSpans = append(targetSpans, span)
+				lastTargetBucketIdx = targetBucketIdx
+				if deltaBuckets {
+					lastBucketCount += originBuckets[bucketCountIdx]
+					targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount)
+					lastTargetBucketCount = lastBucketCount
+				} else {
+					targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx])
+				}
+			}
+
+			bucketIdx++
+			bucketCountIdx++
+		}
+	}
+
+	return targetSpans, targetBuckets
+}
+
+func clearIfNotNil[T any](items []T) []T {
+	if items == nil {
+		return nil
+	}
+	return items[:0]
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/histogram.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/histogram.go
new file mode 100644
index 000000000000..e4b99ec420ae
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/histogram.go
@@ -0,0 +1,632 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+import (
+	"fmt"
+	"math"
+	"slices"
+	"strings"
+)
+
+// CounterResetHint contains the known information about a counter reset,
+// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply.
+type CounterResetHint byte
+
+const (
+	UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not.
+	CounterReset                                // CounterReset means there was definitely a counter reset starting from this histogram.
+	NotCounterReset                             // NotCounterReset means there was definitely no counter reset with this histogram.
+	GaugeType                                   // GaugeType means this is a gauge histogram, where counter resets do not happen.
+)
+
+// Histogram encodes a sparse, high-resolution histogram. See the design
+// document for full details:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit#
+//
+// The most tricky bit is how bucket indices represent real bucket boundaries.
+// An example for schema 0 (by which each bucket is twice as wide as the
+// previous bucket):
+//
+// Bucket boundaries → [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] ....
+// ↑ ↑ ↑ ↑ ↑ ↑ ↑
+// Zero bucket (width e.g. 0.001) → | | | ZB | | |
+// Positive bucket indices → | | | ... -1 0 1 2 3
+// Negative bucket indices → 3 2 1 0 -1 ...
+//
+// Which bucket indices are actually used is determined by the spans.
+type Histogram struct {
+	// Counter reset information.
+	CounterResetHint CounterResetHint
+	// Currently valid schema numbers are -4 <= n <= 8 for exponential buckets.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in
+	// each case, and then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times
+	// 2^(2^-n).
Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. + ZeroCount uint64 + // Total number of observations. + Count uint64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. The first element is an absolute + // count. All following ones are deltas relative to the previous + // element. + PositiveBuckets, NegativeBuckets []int64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +// A Span defines a continuous sequence of buckets. +type Span struct { + // Gap to previous span (always positive), or starting index for the 1st + // span (which can be negative). + Offset int32 + // Length of the span. + Length uint32 +} + +func (h *Histogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + +// Copy returns a deep copy of the Histogram. +func (h *Histogram) Copy() *Histogram { + c := Histogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + + return &c +} + +// CopyTo makes a deep copy into the given Histogram object. +// The destination object has to be a non-nil pointer. 
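+//
+// A minimal usage sketch (for illustration; it assumes histograms is a
+// []*Histogram and reuses one destination across iterations to limit
+// allocations):
+//
+//	var scratch Histogram
+//	for _, h := range histograms {
+//		h.CopyTo(&scratch)
+//		// Work with scratch without retaining references into h.
+//	}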
+func (h *Histogram) CopyTo(to *Histogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + +// String returns a string representation of the Histogram. +func (h *Histogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[uint64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. +func (h *Histogram) ZeroBucket() Bucket[uint64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } + return Bucket[uint64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going down). +func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it +} + +// CumulativeBucketIterator returns a BucketIterator to iterate over a +// cumulative view of the buckets. This method currently only supports +// Histograms without negative buckets and panics if the Histogram has negative +// buckets. It is currently only used for testing. +func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { + if len(h.NegativeBuckets) > 0 { + panic("CumulativeBucketIterator called on Histogram with negative buckets") + } + return &cumulativeBucketIterator{h: h, posSpansIdx: -1} +} + +// Equals returns true if the given histogram matches exactly. 
+// Exact match is when there are no new buckets (even empty) and no missing
+// buckets, and all the bucket values match. The two span layouts may include
+// different zero-length spans in between, but they must represent the same
+// bucket layout to match.
+// Sum is compared based on its bit pattern because this method
+// is about data equality rather than mathematical equality.
+// We ignore fields that are not used based on the exponential / custom buckets schema,
+// but check fields where differences may cause unintended behaviour even if they are not
+// supposed to be used according to the schema.
+func (h *Histogram) Equals(h2 *Histogram) bool {
+	if h2 == nil {
+		return false
+	}
+
+	if h.Schema != h2.Schema || h.Count != h2.Count ||
+		math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) {
+		return false
+	}
+
+	if h.UsesCustomBuckets() {
+		if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) {
+			return false
+		}
+	}
+
+	if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount {
+		return false
+	}
+
+	if !spansMatch(h.NegativeSpans, h2.NegativeSpans) {
+		return false
+	}
+	if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) {
+		return false
+	}
+
+	if !spansMatch(h.PositiveSpans, h2.PositiveSpans) {
+		return false
+	}
+	if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) {
+		return false
+	}
+
+	return true
+}
+
+// spansMatch returns true if both spans represent the same bucket layout
+// after combining zero length spans with the next non-zero length span.
+func spansMatch(s1, s2 []Span) bool {
+	if len(s1) == 0 && len(s2) == 0 {
+		return true
+	}
+
+	s1idx, s2idx := 0, 0
+	for {
+		if s1idx >= len(s1) {
+			return allEmptySpans(s2[s2idx:])
+		}
+		if s2idx >= len(s2) {
+			return allEmptySpans(s1[s1idx:])
+		}
+
+		currS1, currS2 := s1[s1idx], s2[s2idx]
+		s1idx++
+		s2idx++
+		if currS1.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ {
+				currS1.Offset += s1[s1idx].Offset
+			}
+			if s1idx < len(s1) {
+				currS1.Offset += s1[s1idx].Offset
+				currS1.Length = s1[s1idx].Length
+				s1idx++
+			}
+		}
+		if currS2.Length == 0 {
+			// This span is zero length, so we add consecutive such spans
+			// until we find a non-zero span.
+			for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ {
+				currS2.Offset += s2[s2idx].Offset
+			}
+			if s2idx < len(s2) {
+				currS2.Offset += s2[s2idx].Offset
+				currS2.Length = s2[s2idx].Length
+				s2idx++
+			}
+		}
+
+		if currS1.Length == 0 && currS2.Length == 0 {
+			// The last spans of both sets are zero length. Previous spans match.
+			return true
+		}
+
+		if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length {
+			return false
+		}
+	}
+}
+
+func allEmptySpans(s []Span) bool {
+	for _, ss := range s {
+		if ss.Length > 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Compact works like FloatHistogram.Compact. See there for detailed
+// explanations.
+func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram {
+	h.PositiveBuckets, h.PositiveSpans = compactBuckets(
+		h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true,
+	)
+	h.NegativeBuckets, h.NegativeSpans = compactBuckets(
+		h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true,
+	)
+	return h
+}
+
+// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep
+// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an
+// argument whose memory will be reused and overwritten if provided.
If this +// argument is nil, a new FloatHistogram will be allocated. +func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} + } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + if h.UsesCustomBuckets() { + fh.ZeroThreshold = 0 + fh.ZeroCount = 0 + fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) + fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) + + fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) + copy(fh.CustomValues, h.CustomValues) + } else { + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + fh.CustomValues = clearIfNotNil(fh.CustomValues) + } + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive + } + + return fh +} + +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) + } + return items[:n] +} + +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a +// strict h.Count = nCount + pCount + h.ZeroCount check is performed. +// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), +// because NaN observations do not increment the values of buckets (but they do increment +// the total h.Count). 
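+// For example, a histogram that observed a single NaN counts it in h.Count
+// but in no bucket, so the bucket counts can legitimately sum to one less
+// than h.Count.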
+func (h *Histogram) Validate() error { + var nCount, pCount uint64 + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } + } + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough) + } + } else { + if sumOfBuckets != h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch) + } + } + + return nil +} + +type regularBucketIterator struct { + baseBucketIterator[uint64, int64] +} + +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator { + i := baseBucketIterator[uint64, int64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, + } + return regularBucketIterator{i} +} + +func (r *regularBucketIterator) Next() bool { + if r.spansIdx >= len(r.spans) { + return false + } + span := r.spans[r.spansIdx] + // Seed currIdx for the first bucket. + if r.bucketsIdx == 0 { + r.currIdx = span.Offset + } else { + r.currIdx++ + } + for r.idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + r.idxInSpan = 0 + r.spansIdx++ + if r.spansIdx >= len(r.spans) { + return false + } + span = r.spans[r.spansIdx] + r.currIdx += span.Offset + } + + r.currCount += r.buckets[r.bucketsIdx] + r.idxInSpan++ + r.bucketsIdx++ + return true +} + +type cumulativeBucketIterator struct { + h *Histogram + + posSpansIdx int // Index in h.PositiveSpans we are in. -1 means 0 bucket. + posBucketsIdx int // Index in h.PositiveBuckets. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + + initialized bool + currIdx int32 // The actual bucket index after decoding from spans. + currUpper float64 // The upper boundary of the current bucket. + currCount int64 // Current non-cumulative count for the current bucket. Does not apply for empty bucket. + currCumulativeCount uint64 // Current "cumulative" count for the current bucket. 
+
+	// Between two spans there could be some empty buckets which
+	// still need to be counted for cumulative buckets.
+	// When we hit the end of a span, we use this to iterate
+	// through the empty buckets.
+	emptyBucketCount int32
+}
+
+func (c *cumulativeBucketIterator) Next() bool {
+	if c.posSpansIdx == -1 {
+		// Zero bucket.
+		c.posSpansIdx++
+		if c.h.ZeroCount == 0 {
+			return c.Next()
+		}
+
+		c.currUpper = c.h.ZeroThreshold
+		c.currCount = int64(c.h.ZeroCount)
+		c.currCumulativeCount = uint64(c.currCount)
+		return true
+	}
+
+	if c.posSpansIdx >= len(c.h.PositiveSpans) {
+		return false
+	}
+
+	if c.emptyBucketCount > 0 {
+		// We are traversing through empty buckets at the moment.
+		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+		c.currIdx++
+		c.emptyBucketCount--
+		return true
+	}
+
+	span := c.h.PositiveSpans[c.posSpansIdx]
+	if c.posSpansIdx == 0 && !c.initialized {
+		// Initializing.
+		c.currIdx = span.Offset
+		// The first bucket is an absolute value and not a delta relative to the zero bucket.
+		c.currCount = 0
+		c.initialized = true
+	}
+
+	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
+	c.currCumulativeCount += uint64(c.currCount)
+	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+
+	c.posBucketsIdx++
+	c.idxInSpan++
+	c.currIdx++
+	if c.idxInSpan >= span.Length {
+		// Move to the next span. This one is done.
+		c.posSpansIdx++
+		c.idxInSpan = 0
+		if c.posSpansIdx < len(c.h.PositiveSpans) {
+			c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
+		}
+	}
+
+	return true
+}
+
+func (c *cumulativeBucketIterator) At() Bucket[uint64] {
+	return Bucket[uint64]{
+		Upper:          c.currUpper,
+		Lower:          math.Inf(-1),
+		UpperInclusive: true,
+		LowerInclusive: true,
+		Count:          c.currCumulativeCount,
+		Index:          c.currIdx - 1,
+	}
+}
+
+// ReduceResolution reduces the histogram's spans and buckets into the target
+// schema. The target schema must be smaller than the current histogram's
+// schema. This will panic if the histogram has custom buckets or if the
+// target schema is a custom buckets schema.
+func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = reduceResolution(
+		h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.NegativeSpans, h.NegativeBuckets = reduceResolution(
+		h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.Schema = targetSchema
+	return h
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/test_utils.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/test_utils.go
new file mode 100644
index 000000000000..9e9a711c29ab
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/histogram/test_utils.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+// GenerateBigTestHistograms generates a slice of histograms with the given number of buckets each.
+func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
+	numSpans := numBuckets / 10
+	bucketsPerSide := numBuckets / 2
+	spanLength := uint32(bucketsPerSide / numSpans)
+	// With all bucket deltas set to 1, each side holds 1+2+...+bucketsPerSide
+	// observations, so both sides together hold bucketsPerSide*(1+bucketsPerSide).
+	observationCount := bucketsPerSide * (1 + bucketsPerSide)
+
+	var histograms []*Histogram
+	for i := 0; i < numHistograms; i++ {
+		h := &Histogram{
+			Count:           uint64(i + observationCount),
+			ZeroCount:       uint64(i),
+			ZeroThreshold:   1e-128,
+			Sum:             18.4 * float64(i+1),
+			Schema:          2,
+			NegativeSpans:   make([]Span, numSpans),
+			PositiveSpans:   make([]Span, numSpans),
+			NegativeBuckets: make([]int64, bucketsPerSide),
+			PositiveBuckets: make([]int64, bucketsPerSide),
+		}
+
+		for j := 0; j < numSpans; j++ {
+			s := Span{Offset: 1, Length: spanLength}
+			h.NegativeSpans[j] = s
+			h.PositiveSpans[j] = s
+		}
+
+		for j := 0; j < bucketsPerSide; j++ {
+			h.NegativeBuckets[j] = 1
+			h.PositiveBuckets[j] = 1
+		}
+
+		histograms = append(histograms, h)
+	}
+	return histograms
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels.go
new file mode 100644
index 000000000000..01514abf3858
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels.go
@@ -0,0 +1,489 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !stringlabels && !dedupelabels
+
+package labels
+
+import (
+	"bytes"
+	"slices"
+	"strings"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+// Labels is a sorted set of labels. Order has to be guaranteed upon
+// instantiation.
+type Labels []Label
+
+func (ls Labels) Len() int           { return len(ls) }
+func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
+func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+// Bytes returns ls as a byte slice.
+// It uses an invalid byte character as a separator and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	b.WriteByte(labelSep)
+	for i, l := range ls {
+		if i > 0 {
+			b.WriteByte(seps[0])
+		}
+		b.WriteString(l.Name)
+		b.WriteByte(seps[0])
+		b.WriteString(l.Value)
+	}
+	return b.Bytes()
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + matchedLabels := Labels{} + + nameSet := make(map[string]struct{}, len(names)) + for _, n := range names { + nameSet[n] = struct{}{} + } + + for _, v := range ls { + if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) { + matchedLabels = append(matchedLabels, v) + } + } + + return matchedLabels +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + i, j := 0, 0 + for i < len(ls) && j < len(names) { + switch { + case names[j] < ls[i].Name: + j++ + case ls[i].Name < names[j]: + i++ + default: + b = append(b, ls[i].Name...) + b = append(b, seps[0]) + b = append(b, ls[i].Value...) + b = append(b, seps[0]) + i++ + j++ + } + } + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { + continue + } + b = append(b, ls[i].Name...) + b = append(b, seps[0]) + b = append(b, ls[i].Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + b.WriteByte(labelSep) + i, j := 0, 0 + for i < len(ls) && j < len(names) { + switch { + case names[j] < ls[i].Name: + j++ + case ls[i].Name < names[j]: + i++ + default: + if b.Len() > 1 { + b.WriteByte(seps[0]) + } + b.WriteString(ls[i].Name) + b.WriteByte(seps[0]) + b.WriteString(ls[i].Value) + i++ + j++ + } + } + return b.Bytes() +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. 
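+// For example, on the set {a="1", b="2", c="3"}, calling
+// BytesWithoutLabels(buf, "a", "c") encodes only b="2".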
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	b.WriteByte(labelSep)
+	j := 0
+	for i := range ls {
+		for j < len(names) && names[j] < ls[i].Name {
+			j++
+		}
+		if j < len(names) && ls[i].Name == names[j] {
+			continue
+		}
+		if b.Len() > 1 {
+			b.WriteByte(seps[0])
+		}
+		b.WriteString(ls[i].Name)
+		b.WriteByte(seps[0])
+		b.WriteString(ls[i].Value)
+	}
+	return b.Bytes()
+}
+
+// Copy returns a copy of the labels.
+func (ls Labels) Copy() Labels {
+	res := make(Labels, len(ls))
+	copy(res, ls)
+	return res
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+	for _, l := range ls {
+		if l.Name == name {
+			return l.Value
+		}
+	}
+	return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+	for _, l := range ls {
+		if l.Name == name {
+			return true
+		}
+	}
+	return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+	for i, l := range ls {
+		if i == 0 {
+			continue
+		}
+		if l.Name == ls[i-1].Name {
+			return l.Name, true
+		}
+	}
+	return "", false
+}
+
+// WithoutEmpty returns the labelset without empty labels.
+// May return the same labelset.
+func (ls Labels) WithoutEmpty() Labels {
+	for _, v := range ls {
+		if v.Value != "" {
+			continue
+		}
+		// Do not copy the slice until it's necessary.
+		els := make(Labels, 0, len(ls)-1)
+		for _, v := range ls {
+			if v.Value != "" {
+				els = append(els, v)
+			}
+		}
+		return els
+	}
+	return ls
+}
+
+// Equal returns whether the two label sets are equal.
+func Equal(ls, o Labels) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for i, l := range ls {
+		if l != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// EmptyLabels returns an empty Labels value, for convenience.
+func EmptyLabels() Labels {
+	return Labels{}
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+func New(ls ...Label) Labels {
+	set := make(Labels, 0, len(ls))
+	set = append(set, ls...)
+	slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+
+	return set
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+	if len(ss)%2 != 0 {
+		panic("invalid number of strings")
+	}
+	res := make(Labels, 0, len(ss)/2)
+	for i := 0; i < len(ss); i += 2 {
+		res = append(res, Label{Name: ss[i], Value: ss[i+1]})
+	}
+
+	slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+	return res
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+	l := len(a)
+	if len(b) < l {
+		l = len(b)
+	}
+
+	for i := 0; i < l; i++ {
+		if a[i].Name != b[i].Name {
+			if a[i].Name < b[i].Name {
+				return -1
+			}
+			return 1
+		}
+		if a[i].Value != b[i].Value {
+			if a[i].Value < b[i].Value {
+				return -1
+			}
+			return 1
+		}
+	}
+	// If all labels so far were in common, the set with fewer labels comes first.
+	return len(a) - len(b)
+}
+
+// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+	(*ls) = append((*ls)[:0], b...)
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool { + return len(ls) == 0 +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for _, l := range ls { + f(l) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// DropMetricName returns Labels with "__name__" removed. +func (ls Labels) DropMetricName() Labels { + for i, l := range ls { + if l.Name == MetricName { + if i == 0 { // Make common case fast with no allocations. + return ls[1:] + } + // Avoid modifying original Labels - use [:i:i] so that left slice would not + // have any spare capacity and append would have to allocate a new slice for the result. + return append(ls[:i:i], ls[i+1:]...) + } + } + return ls +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 + } + res := make(Labels, 0, expectedSize) + for _, l := range b.base { + if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { + continue + } + res = append(res, l) + } + if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. + res = append(res, b.add...) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + } + return res +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// Symbol-table is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. 
+// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Add a name/value pair, using []byte instead of string. +// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: string(name), Value: string(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. +} + +// Return the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]Label{}, b.add...) +} + +// Write the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_common.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_common.go new file mode 100644 index 000000000000..4bc94f84fe51 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_common.go @@ -0,0 +1,222 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "slices" + "strconv" + "unsafe" + + "github.com/prometheus/common/model" +) + +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' +) + +var seps = []byte{'\xff'} + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +func (ls Labels) String() string { + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + + b.WriteByte('{') + i := 0 + ls.Range(func(l Label) { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) + i++ + }) + b.WriteByte('}') + return b.String() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. 
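+// Labels are decoded from a flat JSON object of name/value pairs, the same
+// shape that MarshalJSON produces via Map().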
+func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string) + ls.Range(func(l Label) { + m[l.Name] = l.Value + }) + return m +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l Label) { + for _, n := range ns { + if l.Name == n { + return + } + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) 
+ b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_dedupelabels.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_dedupelabels.go new file mode 100644 index 000000000000..0e5bb048beb0 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_dedupelabels.go @@ -0,0 +1,817 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "bytes" + "slices" + "strings" + "sync" + + "github.com/cespare/xxhash/v2" +) + +// Labels is implemented by a SymbolTable and string holding name/value +// pairs encoded as indexes into the table in varint encoding. +// Names are in alphabetical order. +type Labels struct { + syms *nameTable + data string +} + +// Split SymbolTable into the part used by Labels and the part used by Builder. Only the latter needs the map. + +// This part is used by Labels. All fields are immutable after construction. +type nameTable struct { + byNum []string // This slice header is never changed, even while we are building the symbol table. + symbolTable *SymbolTable // If we need to use it in a Builder. +} + +// SymbolTable is used to map strings into numbers so they can be packed together. +type SymbolTable struct { + mx sync.Mutex + *nameTable + nextNum int + byName map[string]int +} + +const defaultSymbolTableSize = 1024 + +func NewSymbolTable() *SymbolTable { + t := &SymbolTable{ + nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)}, + byName: make(map[string]int, defaultSymbolTableSize), + } + t.nameTable.symbolTable = t + return t +} + +func (t *SymbolTable) Len() int { + t.mx.Lock() + defer t.mx.Unlock() + return len(t.byName) +} + +// ToNum maps a string to an integer, adding the string to the table if it is not already there. +// Note: copies the string before adding, in case the caller passed part of +// a buffer that should not be kept alive by this SymbolTable. +func (t *SymbolTable) ToNum(name string) int { + t.mx.Lock() + defer t.mx.Unlock() + return t.toNumUnlocked(name) +} + +func (t *SymbolTable) toNumUnlocked(name string) int { + if i, found := t.byName[name]; found { + return i + } + i := t.nextNum + if t.nextNum == cap(t.byNum) { + // Name table is full; copy to a new one. Don't touch the existing slice, as nameTable is immutable after construction. 
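+		// Existing Labels keep referencing the old, still-valid nameTable;
+		// lookups for new entries go through the doubled slice below.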
+		newSlice := make([]string, cap(t.byNum)*2)
+		copy(newSlice, t.byNum)
+		t.nameTable = &nameTable{byNum: newSlice, symbolTable: t}
+	}
+	name = strings.Clone(name)
+	t.byNum[i] = name
+	t.byName[name] = i
+	t.nextNum++
+	return i
+}
+
+func (t *SymbolTable) checkNum(name string) (int, bool) {
+	t.mx.Lock()
+	defer t.mx.Unlock()
+	num, ok := t.byName[name]
+	return num, ok
+}
+
+// ToName maps an integer to a string.
+func (t *nameTable) ToName(num int) string {
+	return t.byNum[num]
+}
+
+// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
+// because we expect most Prometheus servers to have more than 127 unique strings.
+// And we don't encode numbers in more than 4 bytes because we don't expect more than 536,870,912 unique strings.
+func decodeVarint(data string, index int) (int, int) {
+	b := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if b < 0x8000 {
+		return b, index
+	}
+	return decodeVarintRest(b, data, index)
+}
+
+func decodeVarintRest(b int, data string, index int) (int, int) {
+	value := int(b & 0x7FFF)
+	b = int(data[index])
+	index++
+	if b < 0x80 {
+		return value | (b << 15), index
+	}
+
+	value |= (b & 0x7f) << 15
+	b = int(data[index])
+	index++
+	return value | (b << 22), index
+}
+
+func decodeString(t *nameTable, data string, index int) (string, int) {
+	// Copy decodeVarint here, because the Go compiler says it's too big to inline.
+	num := int(data[index]) + int(data[index+1])<<8
+	index += 2
+	if num >= 0x8000 {
+		num, index = decodeVarintRest(num, data, index)
+	}
+	return t.ToName(num), index
+}
+
+// Bytes returns ls as a byte slice.
+// It uses non-printing characters and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	for i := 0; i < len(ls.data); {
+		if i > 0 {
+			b.WriteByte(seps[0])
+		}
+		var name, value string
+		name, i = decodeString(ls.syms, ls.data, i)
+		value, i = decodeString(ls.syms, ls.data, i)
+		b.WriteString(name)
+		b.WriteByte(seps[0])
+		b.WriteString(value)
+	}
+	return b.Bytes()
+}
+
+// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
+func (ls Labels) IsZero() bool {
+	return len(ls.data) == 0
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
+// TODO: This is only used in printing an error message
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	b := NewBuilder(ls)
+	if on {
+		b.Keep(names...)
+	} else {
+		b.Del(MetricName)
+		b.Del(names...)
+	}
+	return b.Labels()
+}
+
+// Hash returns a hash value for the label set.
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
+func (ls Labels) Hash() uint64 {
+	// Use xxhash.Sum64(b) for fast path as it's faster.
+	b := make([]byte, 0, 1024)
+	for pos := 0; pos < len(ls.data); {
+		name, newPos := decodeString(ls.syms, ls.data, pos)
+		value, newPos := decodeString(ls.syms, ls.data, newPos)
+		if len(b)+len(name)+len(value)+2 >= cap(b) {
+			// If labels entry is 1KB+, hash the rest of them via Write().
+ h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + pos = newPos + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + if b.Len() > 1 { + b.WriteByte(seps[0]) + } + b.WriteString(lName) + b.WriteByte(seps[0]) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + if b.Len() > 1 { + b.WriteByte(seps[0]) + } + b.WriteString(lName) + b.WriteByte(seps[0]) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + return Labels{syms: ls.syms, data: strings.Clone(ls.data)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. 
+ } + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + lValue, _ = decodeString(ls.syms, ls.data, i) + return lValue + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + return true + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + prevNum := -1 + for i := 0; i < len(ls.data); { + var lNum int + lNum, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + if lNum == prevNum { + return ls.syms.ToName(lNum), true + } + prevNum = lNum + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + if ls.IsEmpty() { + return ls + } + // Idea: have a constant symbol for blank, then we don't have to look it up. + blank, ok := ls.syms.symbolTable.checkNum("") + if !ok { // Symbol table has no entry for blank - none of the values can be blank. + return ls + } + for pos := 0; pos < len(ls.data); { + _, newPos := decodeVarint(ls.data, pos) + lValue, newPos := decodeVarint(ls.data, newPos) + if lValue != blank { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeVarint(ls.data, pos) + lValue, newPos = decodeVarint(ls.data, newPos) + if lValue != blank { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{syms: ls.syms, data: yoloString(buf)} + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(a, b Labels) bool { + if a.syms == b.syms { + return a.data == b.data + } + + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + return false + } + } + if ia != la || ib != lb { + return false + } + return true +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +// New returns a sorted Labels from the given labels. 
+// The caller has to guarantee that all label names are unique. +// Note this function is not efficient; should not be used in performance-critical places. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + syms := NewSymbolTable() + var stackSpace [16]int + size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:]) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(nums, buf) + return Labels{syms: syms.nameTable, data: yoloString(buf)} +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aName, bName string + aName, ia = decodeString(a.syms, a.data, ia) + bName, ib = decodeString(b.syms, b.data, ib) + if aName != bName { + if aName < bName { + return -1 + } + return 1 + } + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + if aValue < bValue { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return (la - ia) - (lb - ib) +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + *ls = b // Straightforward memberwise copy is all we need. +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + _, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + // TODO: remove these calls as there is nothing to do. +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + // TODO: remove these calls as there is nothing to do. +} + +// DropMetricName returns Labels with "__name__" removed. 
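+// The result may share underlying data with ls, which is safe because
+// Labels values are treated as immutable.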
+func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.syms, ls.data, i) + _, i2 = decodeVarint(ls.data, i2) + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// Builder allows modifying Labels. +type Builder struct { + syms *SymbolTable + nums []int + base Labels + del []string + add []Label +} + +// NewBuilderWithSymbolTable returns a new LabelsBuilder not based on any labels, but with the SymbolTable. +func NewBuilderWithSymbolTable(s *SymbolTable) *Builder { + return &Builder{ + syms: s, + } +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + if base.syms != nil { // If base has a symbol table, use that. + b.syms = base.syms.symbolTable + } else if b.syms == nil { // Or continue using previous symbol table in builder. + b.syms = NewSymbolTable() // Don't do this in performance-sensitive code. + } + + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + slices.Sort(b.del) + a, d, newSize := 0, 0, 0 + + newSize, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + bufSize := len(b.base.data) + newSize + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.syms, b.base.data, pos) + _, pos = decodeVarint(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) // If base had a symbol-table we are using it, so we don't need to look up these symbols. + } + // We have come to the end of the base set; add any remaining labels. 
+ for ; a < len(b.add); a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + } + return Labels{syms: b.syms.nameTable, data: yoloString(buf)} +} + +func marshalNumbersToSizedBuffer(nums []int, data []byte) int { + i := len(data) + for index := len(nums) - 1; index >= 0; index-- { + i = encodeVarint(data, i, nums[index]) + } + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<15 { + return 2 + } + if x < 1<<22 { + return 3 + } + if x >= 1<<29 { + panic("Number too large to represent") + } + return 4 +} + +func encodeVarintSlow(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + data[offset] = uint8(v) + v >>= 8 + offset++ + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a value is less than 32768 +func encodeVarint(data []byte, offset, v int) int { + if v < 1<<15 { + offset -= 2 + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + return offset + } + return encodeVarintSlow(data, offset, uint64(v)) +} + +// Map all the strings in lbls to the symbol table; return the total size required to hold them and all the individual mappings. +func mapLabelsToNumbers(t *SymbolTable, lbls []Label, buf []int) (totalSize int, nums []int) { + nums = buf[:0] + t.mx.Lock() + defer t.mx.Unlock() + // we just encode name/value/name/value, without any extra tags or length bytes + for _, m := range lbls { + // strings are encoded as a single varint, the index into the symbol table. + i := t.toNumUnlocked(m.Name) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + i = t.toNumUnlocked(m.Value) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + } + return totalSize, nums +} + +func appendLabelTo(nameNum, valueNum int, buf []byte) []byte { + size := sizeVarint(uint64(nameNum)) + sizeVarint(uint64(valueNum)) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + i := sizeRequired + i = encodeVarint(buf, i, valueNum) + i = encodeVarint(buf, i, nameNum) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + syms *SymbolTable + nums []int + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +// Warning: expensive; don't call in tight loops. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{syms: NewSymbolTable(), add: make([]Label, 0, n)} +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilderWithSymbolTable(s *SymbolTable, n int) ScratchBuilder { + return ScratchBuilder{syms: s, add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) { + b.syms = s +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. 
+func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + var size int + size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(b.nums, buf) + b.output = Labels{syms: b.syms.nameTable, data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + var size int + size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalNumbersToSizedBuffer(b.nums, b.overwriteBuffer) + ls.syms = b.syms.nameTable + ls.data = yoloString(b.overwriteBuffer) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_stringlabels.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_stringlabels.go new file mode 100644 index 000000000000..bccceb61fe1b --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/labels_stringlabels.go @@ -0,0 +1,696 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "reflect" + "slices" + "strings" + "unsafe" + + "github.com/cespare/xxhash/v2" +) + +// Labels is implemented by a single flat string holding name/value pairs. +// Each name and value is preceded by its length in varint encoding. +// Names are in order. +type Labels struct { + data string +} + +func decodeSize(data string, index int) (int, int) { + // Fast-path for common case of a single byte, value 0..127. + b := data[index] + index++ + if b < 0x80 { + return int(b), index + } + size := int(b & 0x7F) + for shift := uint(7); ; shift += 7 { + // Just panic if we go of the end of data, since all Labels strings are constructed internally and + // malformed data indicates a bug, or memory corruption. 
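+		// Illustrative example: a size of 300 is stored as the two bytes
+		// 0xAC 0x02; the first byte carries the low 7 bits (44) plus the
+		// continuation bit, and the second contributes 2<<7 = 256.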
+ b := data[index] + index++ + size |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + return size, index +} + +func decodeString(data string, index int) (string, int) { + var size int + size, index = decodeSize(data, index) + return data[index : index+size], index + size +} + +// Bytes returns ls as a byte slice. +// It uses non-printing characters and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + if cap(buf) < len(ls.data) { + buf = make([]byte, len(ls.data)) + } else { + buf = buf[:len(ls.data)] + } + copy(buf, ls.data) + return buf +} + +// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. +func (ls Labels) IsZero() bool { + return len(ls.data) == 0 +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// TODO: This is only used in printing an error message +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + b := NewBuilder(ls) + if on { + b.Keep(names...) + } else { + b.Del(MetricName) + b.Del(names...) + } + return b.Labels() +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + return xxhash.Sum64(yoloBytes(ls.data)) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. 
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + return Labels{data: strings.Clone(ls.data)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size + } + size, i = decodeSize(ls.data, i) + i += size + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size + } + size, i = decodeSize(ls.data, i) + i += size + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + var lName, prevName string + for i := 0; i < len(ls.data); { + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == prevName { + return lName, true + } + prevName = lName + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for pos := 0; pos < len(ls.data); { + _, newPos := decodeString(ls.data, pos) + lValue, newPos := decodeString(ls.data, newPos) + if lValue != "" { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeString(ls.data, pos) + lValue, newPos = decodeString(ls.data, newPos) + if lValue != "" { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{data: yoloString(buf)} + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return ls.data == o.data +} + +// EmptyLabels returns an empty Labels value, for convenience. 
+func EmptyLabels() Labels { + return Labels{} +} +func yoloBytes(s string) (b []byte) { + *(*string)(unsafe.Pointer(&b)) = s + (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) + return +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) + lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break + } + } + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent, i := i, 0 + size, nextI := decodeSize(a.data, i) + for nextI+size <= firstCharDifferent { + i = nextI + size + size, nextI = decodeSize(a.data, i) + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + ls.data = b.data // strings are immutable +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + i += size + size, i = decodeSize(ls.data, i) + i += size + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. 
+func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// DropMetricName returns Labels with "__name__" removed. +func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.data, i) + size, i2 := decodeSize(ls.data, i2) + i2 += size + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// InternStrings is a no-op because it would only save when the whole set of labels is identical. +func (ls *Labels) InternStrings(intern func(string) string) { +} + +// ReleaseStrings is a no-op for the same reason as InternStrings. +func (ls Labels) ReleaseStrings(release func(string)) { +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + slices.Sort(b.del) + a, d := 0, 0 + + bufSize := len(b.base.data) + labelsSize(b.add) + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.data, pos) + _, pos = decodeString(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(buf, &b.add[a]) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) + } + // We have come to the end of the base set; add any remaining labels. 
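+	// (The loop above is a sorted three-way merge: the base labels, the sorted
+	// del list and the sorted add list are walked in lockstep, so the output
+	// stays sorted by name without a separate sorting pass.)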
+ for ; a < len(b.add); a++ { + buf = appendLabelTo(buf, &b.add[a]) + } + return Labels{data: yoloString(buf)} +} + +func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { + i := len(data) + for index := len(lbls) - 1; index >= 0; index-- { + size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) + i -= size + } + return len(data) - i +} + +func marshalLabelToSizedBuffer(m *Label, data []byte) int { + i := len(data) + i -= len(m.Value) + copy(data[i:], m.Value) + i = encodeSize(data, i, len(m.Value)) + i -= len(m.Name) + copy(data[i:], m.Name) + i = encodeSize(data, i, len(m.Name)) + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<7 { + return 1 + } + if x >= 1<<56 { + return 9 + } + if x >= 1<<28 { + x >>= 28 + n = 4 + } + if x >= 1<<14 { + x >>= 14 + n += 2 + } + if x >= 1<<7 { + n++ + } + return n + 1 +} + +func encodeVarint(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a size is less than 128 +func encodeSize(data []byte, offset, v int) int { + if v < 1<<7 { + offset-- + data[offset] = uint8(v) + return offset + } + return encodeVarint(data, offset, uint64(v)) +} + +func labelsSize(lbls []Label) (n int) { + // we just encode name/value/name/value, without any extra tags or length bytes + for _, e := range lbls { + n += labelSize(&e) + } + return n +} + +func labelSize(m *Label) (n int) { + // strings are encoded as length followed by contents. + l := len(m.Name) + n += l + sizeVarint(uint64(l)) + l = len(m.Value) + n += l + sizeVarint(uint64(l)) + return n +} + +func appendLabelTo(buf []byte, m *Label) []byte { + size := labelSize(m) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + marshalLabelToSizedBuffer(m, buf) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. 
+func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + size := labelsSize(b.add) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(b.add, buf) + b.output = Labels{data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + size := labelsSize(b.add) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) + ls.data = yoloString(b.overwriteBuffer) +} + +// Symbol-table is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/matcher.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/matcher.go new file mode 100644 index 000000000000..a09c838e3f86 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/matcher.go @@ -0,0 +1,170 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "strconv" +) + +// MatchType is an enum for label matching types. +type MatchType int + +// Possible MatchTypes. +const ( + MatchEqual MatchType = iota + MatchNotEqual + MatchRegexp + MatchNotRegexp +) + +var matchTypeToStr = [...]string{ + MatchEqual: "=", + MatchNotEqual: "!=", + MatchRegexp: "=~", + MatchNotRegexp: "!~", +} + +func (m MatchType) String() string { + if m < MatchEqual || m > MatchNotRegexp { + panic("unknown match type") + } + return matchTypeToStr[m] +} + +// Matcher models the matching of a label. +type Matcher struct { + Type MatchType + Name string + Value string + + re *FastRegexMatcher +} + +// NewMatcher returns a matcher object. 
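+//
+// For example (illustrative):
+//
+//	m, _ := NewMatcher(MatchRegexp, "job", "api|web")
+//	m.Matches("api") // true
+//	m.Matches("db")  // false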
+func NewMatcher(t MatchType, n, v string) (*Matcher, error) { + m := &Matcher{ + Type: t, + Name: n, + Value: v, + } + if t == MatchRegexp || t == MatchNotRegexp { + re, err := NewFastRegexMatcher(v) + if err != nil { + return nil, err + } + m.re = re + } + return m, nil +} + +// MustNewMatcher panics on error - only for use in tests! +func MustNewMatcher(mt MatchType, name, val string) *Matcher { + m, err := NewMatcher(mt, name, val) + if err != nil { + panic(err) + } + return m +} + +func (m *Matcher) String() string { + // Start a buffer with a pre-allocated size on stack to cover most needs. + var bytea [1024]byte + b := bytes.NewBuffer(bytea[:0]) + + if m.shouldQuoteName() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name)) + } else { + b.WriteString(m.Name) + } + b.WriteString(m.Type.String()) + b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value)) + + return b.String() +} + +func (m *Matcher) shouldQuoteName() bool { + for i, c := range m.Name { + if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') { + continue + } + return true + } + return len(m.Name) == 0 +} + +// Matches returns whether the matcher matches the given string value. +func (m *Matcher) Matches(s string) bool { + switch m.Type { + case MatchEqual: + return s == m.Value + case MatchNotEqual: + return s != m.Value + case MatchRegexp: + return m.re.MatchString(s) + case MatchNotRegexp: + return !m.re.MatchString(s) + } + panic("labels.Matcher.Matches: invalid match type") +} + +// Inverse returns a matcher that matches the opposite. +func (m *Matcher) Inverse() (*Matcher, error) { + switch m.Type { + case MatchEqual: + return NewMatcher(MatchNotEqual, m.Name, m.Value) + case MatchNotEqual: + return NewMatcher(MatchEqual, m.Name, m.Value) + case MatchRegexp: + return NewMatcher(MatchNotRegexp, m.Name, m.Value) + case MatchNotRegexp: + return NewMatcher(MatchRegexp, m.Name, m.Value) + } + panic("labels.Matcher.Matches: invalid match type") +} + +// GetRegexString returns the regex string. +func (m *Matcher) GetRegexString() string { + if m.re == nil { + return "" + } + return m.re.GetRegexString() +} + +// SetMatches returns a set of equality matchers for the current regex matchers if possible. +// For examples the regexp `a(b|f)` will returns "ab" and "af". +// Returns nil if we can't replace the regexp by only equality matchers. +func (m *Matcher) SetMatches() []string { + if m.re == nil { + return nil + } + return m.re.SetMatches() +} + +// Prefix returns the required prefix of the value to match, if possible. +// It will be empty if it's an equality matcher or if the prefix can't be determined. +func (m *Matcher) Prefix() string { + if m.re == nil { + return "" + } + return m.re.prefix +} + +// IsRegexOptimized returns whether regex is optimized. +func (m *Matcher) IsRegexOptimized() bool { + if m.re == nil { + return false + } + return m.re.IsOptimized() +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/regexp.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/regexp.go new file mode 100644 index 000000000000..d2151d83ddb9 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/regexp.go @@ -0,0 +1,1086 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "slices" + "strings" + "unicode" + "unicode/utf8" + + "github.com/grafana/regexp" + "github.com/grafana/regexp/syntax" + "golang.org/x/text/unicode/norm" +) + +const ( + maxSetMatches = 256 + + // The minimum number of alternate values a regex should have to trigger + // the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map + // to match values instead of iterating over a list. This value has + // been computed running BenchmarkOptimizeEqualStringMatchers. + minEqualMultiStringMatcherMapThreshold = 16 +) + +type FastRegexMatcher struct { + // Under some conditions, re is nil because the expression is never parsed. + // We store the original string to be able to return it in GetRegexString(). + reString string + re *regexp.Regexp + + setMatches []string + stringMatcher StringMatcher + prefix string + suffix string + contains []string + + // matchString is the "compiled" function to run by MatchString(). + matchString func(string) bool +} + +func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + m := &FastRegexMatcher{ + reString: v, + } + + m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v) + if m.stringMatcher != nil { + // If we already have a string matcher, we don't need to parse the regex + // or compile the matchString function. This also avoids the behavior in + // compileMatchStringFunction where it prefers to use setMatches when + // available, even if the string matcher is faster. + m.matchString = m.stringMatcher.Matches + } else { + parsed, err := syntax.Parse(v, syntax.Perl) + if err != nil { + return nil, err + } + // Simplify the syntax tree to run faster. + parsed = parsed.Simplify() + m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + if err != nil { + return nil, err + } + if parsed.Op == syntax.OpConcat { + m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) + } + if matches, caseSensitive := findSetMatches(parsed); caseSensitive { + m.setMatches = matches + } + m.stringMatcher = stringMatcherFromRegexp(parsed) + m.matchString = m.compileMatchStringFunction() + } + + return m, nil +} + +// compileMatchStringFunction returns the function to run by MatchString(). +func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { + // If the only optimization available is the string matcher, then we can just run it. 
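+	// Otherwise the returned closure applies the cheap checks in order (fixed
+	// set matches, prefix, suffix, ordered substrings, string matcher) and
+	// only falls back to the compiled regexp last.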
+ if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil { + return m.stringMatcher.Matches + } + + return func(s string) bool { + if len(m.setMatches) != 0 { + for _, match := range m.setMatches { + if match == s { + return true + } + } + return false + } + if m.prefix != "" && !strings.HasPrefix(s, m.prefix) { + return false + } + if m.suffix != "" && !strings.HasSuffix(s, m.suffix) { + return false + } + if len(m.contains) > 0 && !containsInOrder(s, m.contains) { + return false + } + if m.stringMatcher != nil { + return m.stringMatcher.Matches(s) + } + return m.re.MatchString(s) + } +} + +// IsOptimized returns true if any fast-path optimization is applied to the +// regex matcher. +func (m *FastRegexMatcher) IsOptimized() bool { + return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0 +} + +// findSetMatches extract equality matches from a regexp. +// Returns nil if we can't replace the regexp by only equality matchers or the regexp contains +// a mix of case sensitive and case insensitive matchers. +func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) { + clearBeginEndText(re) + + return findSetMatchesInternal(re, "") +} + +func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) { + switch re.Op { + case syntax.OpBeginText: + // Correctly handling the begin text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil, false + case syntax.OpEndText: + // Correctly handling the end text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. + return nil, false + case syntax.OpLiteral: + return []string{base + string(re.Rune)}, isCaseSensitive(re) + case syntax.OpEmptyMatch: + if base != "" { + return []string{base}, isCaseSensitive(re) + } + case syntax.OpAlternate: + return findSetMatchesFromAlternate(re, base) + case syntax.OpCapture: + clearCapture(re) + return findSetMatchesInternal(re, base) + case syntax.OpConcat: + return findSetMatchesFromConcat(re, base) + case syntax.OpCharClass: + if len(re.Rune)%2 != 0 { + return nil, false + } + var matches []string + var totalSet int + for i := 0; i+1 < len(re.Rune); i += 2 { + totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1 + } + // limits the total characters that can be used to create matches. + // In some case like negation [^0-9] a lot of possibilities exists and that + // can create thousands of possible matches at which points we're better off using regexp. + if totalSet > maxSetMatches { + return nil, false + } + for i := 0; i+1 < len(re.Rune); i += 2 { + lo, hi := re.Rune[i], re.Rune[i+1] + for c := lo; c <= hi; c++ { + matches = append(matches, base+string(c)) + } + } + return matches, isCaseSensitive(re) + default: + return nil, false + } + return nil, false +} + +func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) { + if len(re.Sub) == 0 { + return nil, false + } + clearCapture(re.Sub...) + + matches = []string{base} + + for i := 0; i < len(re.Sub); i++ { + var newMatches []string + for j, b := range matches { + m, caseSensitive := findSetMatchesInternal(re.Sub[i], b) + if m == nil { + return nil, false + } + if tooManyMatches(newMatches, m...) { + return nil, false + } + + // All matches must have the same case sensitivity. 
+			// If it's the first set of matches returned, we store its sensitivity
+			// as the expected case, and then we'll check all other ones.
+			if i == 0 && j == 0 {
+				matchesCaseSensitive = caseSensitive
+			}
+			if matchesCaseSensitive != caseSensitive {
+				return nil, false
+			}
+
+			newMatches = append(newMatches, m...)
+		}
+		matches = newMatches
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
+	for i, sub := range re.Sub {
+		found, caseSensitive := findSetMatchesInternal(sub, base)
+		if found == nil {
+			return nil, false
+		}
+		if tooManyMatches(matches, found...) {
+			return nil, false
+		}
+
+		// All matches must have the same case sensitivity. If it's the first set of matches
+		// returned, we store its sensitivity as the expected case, and then we'll check all
+		// other ones.
+		if i == 0 {
+			matchesCaseSensitive = caseSensitive
+		}
+		if matchesCaseSensitive != caseSensitive {
+			return nil, false
+		}
+
+		matches = append(matches, found...)
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+// clearCapture removes capture operations, as they are not used for matching.
+func clearCapture(regs ...*syntax.Regexp) {
+	for _, r := range regs {
+		// Iterate on the regexp because capture groups could be nested.
+		for r.Op == syntax.OpCapture {
+			*r = *r.Sub[0]
+		}
+	}
+}
+
+// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
+func clearBeginEndText(re *syntax.Regexp) {
+	// Do not clear begin/end text from an alternate operator because it could
+	// change the actual regexp properties.
+	if re.Op == syntax.OpAlternate {
+		return
+	}
+
+	if len(re.Sub) == 0 {
+		return
+	}
+	if len(re.Sub) == 1 {
+		if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
+			// We need to remove this element. Since it's the only one, we convert it into a matcher of an empty string.
+			// OpEmptyMatch is regexp's nop operator.
+			re.Op = syntax.OpEmptyMatch
+			re.Sub = nil
+			return
+		}
+	}
+	if re.Sub[0].Op == syntax.OpBeginText {
+		re.Sub = re.Sub[1:]
+	}
+	if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
+		re.Sub = re.Sub[:len(re.Sub)-1]
+	}
+}
+
+// isCaseInsensitive tells if a regexp is case insensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseInsensitive(reg *syntax.Regexp) bool {
+	return (reg.Flags & syntax.FoldCase) != 0
+}
+
+// isCaseSensitive tells if a regexp is case sensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseSensitive(reg *syntax.Regexp) bool {
+	return !isCaseInsensitive(reg)
+}
+
+// tooManyMatches guards against creating too many set matches.
+func tooManyMatches(matches []string, added ...string) bool {
+	return len(matches)+len(added) > maxSetMatches
+}
+
+func (m *FastRegexMatcher) MatchString(s string) bool {
+	return m.matchString(s)
+}
+
+func (m *FastRegexMatcher) SetMatches() []string {
+	// IMPORTANT: always return a copy, otherwise if the caller manipulates this slice it will
+	// also get manipulated in the cached FastRegexMatcher instance.
+ return slices.Clone(m.setMatches) +} + +func (m *FastRegexMatcher) GetRegexString() string { + return m.reString +} + +// optimizeAlternatingLiterals optimizes a regex of the form +// +// `literal1|literal2|literal3|...` +// +// this function returns an optimized StringMatcher or nil if the regex +// cannot be optimized in this way, and a list of setMatches up to maxSetMatches. +func optimizeAlternatingLiterals(s string) (StringMatcher, []string) { + if len(s) == 0 { + return emptyStringMatcher{}, nil + } + + estimatedAlternates := strings.Count(s, "|") + 1 + + // If there are no alternates, check if the string is a literal + if estimatedAlternates == 1 { + if regexp.QuoteMeta(s) == s { + return &equalStringMatcher{s: s, caseSensitive: true}, []string{s} + } + return nil, nil + } + + multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0) + + for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') { + // Split the string into the next literal and the remainder + subMatch := s[:end] + s = s[end+1:] + + // break if any of the submatches are not literals + if regexp.QuoteMeta(subMatch) != subMatch { + return nil, nil + } + + multiMatcher.add(subMatch) + } + + // break if the remainder is not a literal + if regexp.QuoteMeta(s) != s { + return nil, nil + } + multiMatcher.add(s) + + return multiMatcher, multiMatcher.setMatches() +} + +// optimizeConcatRegex returns literal prefix/suffix text that can be safely +// checked against the label value before running the regexp matcher. +func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) { + sub := r.Sub + clearCapture(sub...) + + // We can safely remove begin and end text matchers respectively + // at the beginning and end of the regexp. + if len(sub) > 0 && sub[0].Op == syntax.OpBeginText { + sub = sub[1:] + } + if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText { + sub = sub[:len(sub)-1] + } + + if len(sub) == 0 { + return + } + + // Given Prometheus regex matchers are always anchored to the begin/end + // of the text, if the first/last operations are literals, we can safely + // treat them as prefix/suffix. + if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 { + prefix = string(sub[0].Rune) + } + if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 { + suffix = string(sub[last].Rune) + } + + // If contains any literal which is not a prefix/suffix, we keep track of + // all the ones which are case-sensitive. + for i := 1; i < len(sub)-1; i++ { + if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 { + contains = append(contains, string(sub[i].Rune)) + } + } + + return +} + +// StringMatcher is a matcher that matches a string in place of a regular expression. +type StringMatcher interface { + Matches(s string) bool +} + +// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher. +// It returns nil if the regexp is not supported. +func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher { + clearBeginEndText(re) + + m := stringMatcherFromRegexpInternal(re) + m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold) + + return m +} + +func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher { + clearCapture(re) + + switch re.Op { + case syntax.OpBeginText: + // Correctly handling the begin text operator inside a regex is tricky, + // so in this case we fallback to the regex engine. 
+		return nil
+	case syntax.OpEndText:
+		// Correctly handling the end text operator inside a regex is tricky,
+		// so in this case we fall back to the regex engine.
+		return nil
+	case syntax.OpPlus:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+		return &anyNonEmptyStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpStar:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+
+		// If the newline is valid, then this matcher literally matches any string (even an empty one).
+		if re.Sub[0].Op == syntax.OpAnyChar {
+			return trueMatcher{}
+		}
+
+		// Any string is fine (including an empty one), as long as it doesn't contain a newline.
+		return anyStringWithoutNewlineMatcher{}
+	case syntax.OpQuest:
+		// Only optimize for ".?".
+		if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
+			return nil
+		}
+
+		return &zeroOrOneCharacterStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpEmptyMatch:
+		return emptyStringMatcher{}
+
+	case syntax.OpLiteral:
+		return &equalStringMatcher{
+			s:             string(re.Rune),
+			caseSensitive: !isCaseInsensitive(re),
+		}
+	case syntax.OpAlternate:
+		or := make([]StringMatcher, 0, len(re.Sub))
+		for _, sub := range re.Sub {
+			m := stringMatcherFromRegexpInternal(sub)
+			if m == nil {
+				return nil
+			}
+			or = append(or, m)
+		}
+		return orStringMatcher(or)
+	case syntax.OpConcat:
+		clearCapture(re.Sub...)
+
+		if len(re.Sub) == 0 {
+			return emptyStringMatcher{}
+		}
+		if len(re.Sub) == 1 {
+			return stringMatcherFromRegexpInternal(re.Sub[0])
+		}
+
+		var left, right StringMatcher
+
+		// Check whether the concat starts and/or ends with an "any" matcher.
+		if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
+			left = stringMatcherFromRegexpInternal(re.Sub[0])
+			if left == nil {
+				return nil
+			}
+			re.Sub = re.Sub[1:]
+		}
+		if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
+			right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
+			if right == nil {
+				return nil
+			}
+			re.Sub = re.Sub[:len(re.Sub)-1]
+		}
+
+		matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
+
+		if len(matches) == 0 && len(re.Sub) == 2 {
+			// We have not found fixed set matches. Look for other known cases
+			// that we can optimize.
+			switch {
+			// Prefix is literal.
+			case right == nil && re.Sub[0].Op == syntax.OpLiteral:
+				right = stringMatcherFromRegexpInternal(re.Sub[1])
+				if right != nil {
+					matches = []string{string(re.Sub[0].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
+				}
+
+			// Suffix is literal.
+			case left == nil && re.Sub[1].Op == syntax.OpLiteral:
+				left = stringMatcherFromRegexpInternal(re.Sub[0])
+				if left != nil {
+					matches = []string{string(re.Sub[1].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
+				}
+			}
+		}
+
+		// Ensure we've found some literals to match (optionally with a left and/or right matcher).
+		// If not, then this optimization doesn't trigger.
+		if len(matches) == 0 {
+			return nil
+		}
+
+		// Use the right (and best) matcher based on what we've found.
+		switch {
+		// No left and right matchers (only fixed set matches).
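+		// (Illustratively, a concat like (foo|bar)-suffix expands here into the
+		// fixed set {"foo-suffix", "bar-suffix"}, matched by an orStringMatcher
+		// of equalStringMatcher values that optimizeEqualOrPrefixStringMatchers
+		// may later collapse into a map-based matcher.)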
+ case left == nil && right == nil: + // if there's no any matchers on both side it's a concat of literals + or := make([]StringMatcher, 0, len(matches)) + for _, match := range matches { + or = append(or, &equalStringMatcher{ + s: match, + caseSensitive: matchesCaseSensitive, + }) + } + return orStringMatcher(or) + + // Right matcher with 1 fixed set match. + case left == nil && len(matches) == 1: + return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right) + + // Left matcher with 1 fixed set match. + case right == nil && len(matches) == 1: + return &literalSuffixStringMatcher{ + left: left, + suffix: matches[0], + suffixCaseSensitive: matchesCaseSensitive, + } + + // We found literals in the middle. We can trigger the fast path only if + // the matches are case sensitive because containsStringMatcher doesn't + // support case insensitive. + case matchesCaseSensitive: + return &containsStringMatcher{ + substrings: matches, + left: left, + right: right, + } + } + } + return nil +} + +// containsStringMatcher matches a string if it contains any of the substrings. +// If left and right are not nil, it's a contains operation where left and right must match. +// If left is nil, it's a hasPrefix operation and right must match. +// Finally, if right is nil it's a hasSuffix operation and left must match. +type containsStringMatcher struct { + // The matcher that must match the left side. Can be nil. + left StringMatcher + + // At least one of these strings must match in the "middle", between left and right matchers. + substrings []string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *containsStringMatcher) Matches(s string) bool { + for _, substr := range m.substrings { + switch { + case m.right != nil && m.left != nil: + searchStartPos := 0 + + for { + pos := strings.Index(s[searchStartPos:], substr) + if pos < 0 { + break + } + + // Since we started searching from searchStartPos, we have to add that offset + // to get the actual position of the substring inside the text. + pos += searchStartPos + + // If both the left and right matchers match, then we can stop searching because + // we've found a match. + if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) { + return true + } + + // Continue searching for another occurrence of the substring inside the text. + searchStartPos = pos + 1 + } + case m.left != nil: + // If we have to check for characters on the left then we need to match a suffix. + if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) { + return true + } + case m.right != nil: + if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) { + return true + } + } + } + return false +} + +func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher { + if prefixCaseSensitive { + return &literalPrefixSensitiveStringMatcher{ + prefix: prefix, + right: right, + } + } + + return &literalPrefixInsensitiveStringMatcher{ + prefix: prefix, + right: right, + } +} + +// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher. +type literalPrefixSensitiveStringMatcher struct { + prefix string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool { + if !strings.HasPrefix(s, m.prefix) { + return false + } + + // Ensure the right side matches. 
+ return m.right.Matches(s[len(m.prefix):]) +} + +// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher. +type literalPrefixInsensitiveStringMatcher struct { + prefix string + + // The matcher that must match the right side. Can be nil. + right StringMatcher +} + +func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool { + if !hasPrefixCaseInsensitive(s, m.prefix) { + return false + } + + // Ensure the right side matches. + return m.right.Matches(s[len(m.prefix):]) +} + +// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher. +type literalSuffixStringMatcher struct { + // The matcher that must match the left side. Can be nil. + left StringMatcher + + suffix string + suffixCaseSensitive bool +} + +func (m *literalSuffixStringMatcher) Matches(s string) bool { + // Ensure the suffix matches. + if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) { + return false + } + if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) { + return false + } + + // Ensure the left side matches. + return m.left.Matches(s[:len(s)-len(m.suffix)]) +} + +// emptyStringMatcher matches an empty string. +type emptyStringMatcher struct{} + +func (m emptyStringMatcher) Matches(s string) bool { + return len(s) == 0 +} + +// orStringMatcher matches any of the sub-matchers. +type orStringMatcher []StringMatcher + +func (m orStringMatcher) Matches(s string) bool { + for _, matcher := range m { + if matcher.Matches(s) { + return true + } + } + return false +} + +// equalStringMatcher matches a string exactly and support case insensitive. +type equalStringMatcher struct { + s string + caseSensitive bool +} + +func (m *equalStringMatcher) Matches(s string) bool { + if m.caseSensitive { + return m.s == s + } + return strings.EqualFold(m.s, s) +} + +type multiStringMatcherBuilder interface { + StringMatcher + add(s string) + addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) + setMatches() []string +} + +func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder { + // If the estimated size is low enough, it's faster to use a slice instead of a map. + if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 { + return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)} + } + + return &equalMultiStringMapMatcher{ + values: make(map[string]struct{}, estimatedSize), + prefixes: make(map[string][]StringMatcher, estimatedPrefixes), + minPrefixLen: minPrefixLength, + caseSensitive: caseSensitive, + } +} + +// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values. 
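+// For a small number of values (below minEqualMultiStringMatcherMapThreshold)
+// a linear scan over a slice is cheaper than map lookups, which is why
+// newEqualMultiStringMatcher picks this implementation for small sets.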
+type equalMultiStringSliceMatcher struct { + values []string + + caseSensitive bool +} + +func (m *equalMultiStringSliceMatcher) add(s string) { + m.values = append(m.values, s) +} + +func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) { + panic("not implemented") +} + +func (m *equalMultiStringSliceMatcher) setMatches() []string { + return m.values +} + +func (m *equalMultiStringSliceMatcher) Matches(s string) bool { + if m.caseSensitive { + for _, v := range m.values { + if s == v { + return true + } + } + } else { + for _, v := range m.values { + if strings.EqualFold(s, v) { + return true + } + } + } + return false +} + +// equalMultiStringMapMatcher matches a string exactly against a map of valid values +// or against a set of prefix matchers. +type equalMultiStringMapMatcher struct { + // values contains values to match a string against. If the matching is case insensitive, + // the values here must be lowercase. + values map[string]struct{} + // prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string. + // If the matching is case insensitive, prefixes are all lowercase. + prefixes map[string][]StringMatcher + // minPrefixLen can be zero, meaning there are no prefix matchers. + minPrefixLen int + caseSensitive bool +} + +func (m *equalMultiStringMapMatcher) add(s string) { + if !m.caseSensitive { + s = toNormalisedLower(s) + } + + m.values[s] = struct{}{} +} + +func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) { + if m.minPrefixLen == 0 { + panic("addPrefix called when no prefix length defined") + } + if len(prefix) < m.minPrefixLen { + panic("addPrefix called with a too short prefix") + } + if m.caseSensitive != prefixCaseSensitive { + panic("addPrefix called with a prefix whose case sensitivity is different than the expected one") + } + + s := prefix[:m.minPrefixLen] + if !m.caseSensitive { + s = strings.ToLower(s) + } + + m.prefixes[s] = append(m.prefixes[s], matcher) +} + +func (m *equalMultiStringMapMatcher) setMatches() []string { + if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 { + return nil + } + + matches := make([]string, 0, len(m.values)) + for s := range m.values { + matches = append(matches, s) + } + return matches +} + +func (m *equalMultiStringMapMatcher) Matches(s string) bool { + if !m.caseSensitive { + s = toNormalisedLower(s) + } + + if _, ok := m.values[s]; ok { + return true + } + if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen { + for _, matcher := range m.prefixes[s[:m.minPrefixLen]] { + if matcher.Matches(s) { + return true + } + } + } + return false +} + +// toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert +// it to lower case. +func toNormalisedLower(s string) string { + var buf []byte + for i := 0; i < len(s); i++ { + c := s[i] + if c >= utf8.RuneSelf { + return strings.Map(unicode.ToLower, norm.NFKD.String(s)) + } + if 'A' <= c && c <= 'Z' { + if buf == nil { + buf = []byte(s) + } + buf[i] = c + 'a' - 'A' + } + } + if buf == nil { + return s + } + return yoloString(buf) +} + +// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string +// (including an empty one) as far as it doesn't contain any newline character. +type anyStringWithoutNewlineMatcher struct{} + +func (m anyStringWithoutNewlineMatcher) Matches(s string) bool { + // We need to make sure it doesn't contain a newline. 
Since the newline is + // an ASCII character, we can use strings.IndexByte(). + return strings.IndexByte(s, '\n') == -1 +} + +// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string. +type anyNonEmptyStringMatcher struct { + matchNL bool +} + +func (m *anyNonEmptyStringMatcher) Matches(s string) bool { + if m.matchNL { + // It's OK if the string contains a newline so we just need to make + // sure it's non-empty. + return len(s) > 0 + } + + // We need to make sure it non-empty and doesn't contain a newline. + // Since the newline is an ASCII character, we can use strings.IndexByte(). + return len(s) > 0 && strings.IndexByte(s, '\n') == -1 +} + +// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence +// of any character. The newline character is matches only if matchNL is set to true. +type zeroOrOneCharacterStringMatcher struct { + matchNL bool +} + +func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool { + // If there's more than one rune in the string, then it can't match. + if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError { + // Size is 0 for empty strings, 1 for invalid rune. + // Empty string matches, invalid rune matches if there isn't anything else. + return size == len(s) + } else if size < len(s) { + return false + } + + // No need to check for the newline if the string is empty or matching a newline is OK. + if m.matchNL || len(s) == 0 { + return true + } + + return s[0] != '\n' +} + +// trueMatcher is a stringMatcher which matches any string (always returns true). +type trueMatcher struct{} + +func (m trueMatcher) Matches(_ string) bool { + return true +} + +// optimizeEqualOrPrefixStringMatchers optimize a specific case where all matchers are made by an +// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or +// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher). +// +// In this specific case, when we have many strings to match against we can use a map instead +// of iterating over the list of strings. +func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher { + var ( + caseSensitive bool + caseSensitiveSet bool + numValues int + numPrefixes int + minPrefixLength int + ) + + // Analyse the input StringMatcher to count the number of occurrences + // and ensure all of them have the same case sensitivity. + analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool { + // Ensure we don't have mixed case sensitivity. + if caseSensitiveSet && caseSensitive != matcher.caseSensitive { + return false + } else if !caseSensitiveSet { + caseSensitive = matcher.caseSensitive + caseSensitiveSet = true + } + + numValues++ + return true + } + + analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + // Ensure we don't have mixed case sensitivity. + if caseSensitiveSet && caseSensitive != prefixCaseSensitive { + return false + } else if !caseSensitiveSet { + caseSensitive = prefixCaseSensitive + caseSensitiveSet = true + } + if numPrefixes == 0 || len(prefix) < minPrefixLength { + minPrefixLength = len(prefix) + } + + numPrefixes++ + return true + } + + if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) { + return input + } + + // If the number of values and prefixes found is less than the threshold, then we should skip the optimization. 
+	if (numValues + numPrefixes) < threshold {
+		return input
+	}
+
+	// Parse the input StringMatcher again to extract all values and store them.
+	// We can skip the case sensitivity check because we've already checked it, and
+	// if the code reaches this point it means all matchers have the same case sensitivity.
+	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
+
+	// Ignore the return value because we already iterated over the input StringMatcher
+	// and it was all good.
+	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
+		multiMatcher.add(matcher.s)
+		return true
+	}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
+		return true
+	})
+
+	return multiMatcher
+}
+
+// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls equalMatcherCallback for each
+// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
+//
+// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
+// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
+	orInput, ok := input.(orStringMatcher)
+	if !ok {
+		return false
+	}
+
+	for _, m := range orInput {
+		switch casted := m.(type) {
+		case orStringMatcher:
+			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
+				return false
+			}
+
+		case *equalStringMatcher:
+			if !equalMatcherCallback(casted) {
+				return false
+			}
+
+		case *literalPrefixSensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
+				return false
+			}
+
+		case *literalPrefixInsensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
+				return false
+			}
+
+		default:
+			// It's not an equal or prefix string matcher, so we have to stop searching
+			// because this optimization can't be applied.
+			return false
+		}
+	}
+
+	return true
+}
+
+func hasPrefixCaseInsensitive(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
+
+func hasSuffixCaseInsensitive(s, suffix string) bool {
+	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
+}
+
+func containsInOrder(s string, contains []string) bool {
+	// Optimization for the case we only have to look for 1 substring.
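+	// Illustrative example: containsInOrder("foo-bar-baz", []string{"foo", "baz"})
+	// is true, while swapping the substrings makes it false, because each
+	// substring must be found after the end of the previous match.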
+ if len(contains) == 1 { + return strings.Contains(s, contains[0]) + } + + return containsInOrderMulti(s, contains) +} + +func containsInOrderMulti(s string, contains []string) bool { + offset := 0 + + for _, substr := range contains { + at := strings.Index(s[offset:], substr) + if at == -1 { + return false + } + + offset += at + len(substr) + } + + return true +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding.go new file mode 100644 index 000000000000..5e3e89fbbba4 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !stringlabels && !dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) + } + return xxhash.Sum64(b) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_dedupelabels.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_dedupelabels.go new file mode 100644 index 000000000000..5912724f9b0a --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_dedupelabels.go @@ -0,0 +1,52 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. 
+func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for pos := 0; pos < len(ls.data); { + name, newPos := decodeString(ls.syms, ls.data, pos) + value, newPos := decodeString(ls.syms, ls.data, newPos) + if len(b)+len(name)+len(value)+2 >= cap(b) { + // If labels entry is 1KB+, hash the rest of them via Write(). + h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, seps[0]) + b = append(b, value...) + b = append(b, seps[0]) + pos = newPos + } + return xxhash.Sum64(b) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_stringlabels.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_stringlabels.go new file mode 100644 index 000000000000..3ad2027d8cd1 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/sharding_stringlabels.go @@ -0,0 +1,54 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + var h *xxhash.Digest + for i := 0; i < len(ls.data); { + var v Label + v.Name, i = decodeString(ls.data, i) + v.Value, i = decodeString(ls.data, i) + if h == nil && len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+, switch to Write API. Copy in the values up to this point. + h = xxhash.New() + _, _ = h.Write(b) + } + if h != nil { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + continue + } + + b = append(b, v.Name...) + b = append(b, seps[0]) + b = append(b, v.Value...) + b = append(b, seps[0]) + } + if h != nil { + return h.Sum64() + } + return xxhash.Sum64(b) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/test_utils.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/test_utils.go new file mode 100644 index 000000000000..d060def48115 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/labels/test_utils.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bufio" + "fmt" + "os" + "strings" +) + +// Slice is a sortable slice of label sets. +type Slice []Labels + +func (s Slice) Len() int { return len(s) } +func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 } + +// Selector holds constraints for matching against a label set. +type Selector []*Matcher + +// Matches returns whether the labels satisfy all matchers. +func (s Selector) Matches(labels Labels) bool { + for _, m := range s { + if v := labels.Get(m.Name); !m.Matches(v) { + return false + } + } + return true +} + +// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful +// to load testing data. +func ReadLabels(fn string, n int) ([]Labels, error) { + f, err := os.Open(fn) + if err != nil { + return nil, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + b := NewScratchBuilder(0) + + var mets []Labels + hashes := map[uint64]struct{}{} + i := 0 + + for scanner.Scan() && i < n { + b.Reset() + + r := strings.NewReplacer("\"", "", "{", "", "}", "") + s := r.Replace(scanner.Text()) + + labelChunks := strings.Split(s, ",") + for _, labelChunk := range labelChunks { + split := strings.Split(labelChunk, ":") + b.Add(split[0], split[1]) + } + // Order of the k/v labels matters, don't assume we'll always receive them already sorted. + b.Sort() + m := b.Labels() + + h := m.Hash() + if _, ok := hashes[h]; ok { + continue + } + mets = append(mets, m) + hashes[h] = struct{}{} + i++ + } + + if i != n { + return mets, fmt.Errorf("requested %d metrics but found %d", n, i) + } + return mets, nil +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/README.md b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/README.md new file mode 100644 index 000000000000..697966f0975a --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/README.md @@ -0,0 +1,6 @@ +# Making changes to textparse lexers +In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command: +`golex -o=promlex.l.go promlex.l` + +Note that you need golex installed: +`go get -u modernc.org/golex` \ No newline at end of file diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/interface.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/interface.go new file mode 100644 index 000000000000..82920542cd47 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/interface.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "mime" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/v2/model/exemplar" + "github.com/prometheus/prometheus/v2/model/histogram" + "github.com/prometheus/prometheus/v2/model/labels" +) + +// Parser parses samples from a byte slice of samples in the official +// Prometheus and OpenMetrics text exposition formats. +type Parser interface { + // Series returns the bytes of a series with a simple float64 as a + // value, the timestamp if set, and the value of the current sample. + Series() ([]byte, *int64, float64) + + // Histogram returns the bytes of a series with a sparse histogram as a + // value, the timestamp if set, and the histogram in the current sample. + // Depending on the parsed input, the function returns an (integer) Histogram + // or a FloatHistogram, with the respective other return value being nil. + Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) + + // Help returns the metric name and help text in the current entry. + // Must only be called after Next returned a help entry. + // The returned byte slices become invalid after the next call to Next. + Help() ([]byte, []byte) + + // Type returns the metric name and type in the current entry. + // Must only be called after Next returned a type entry. + // The returned byte slices become invalid after the next call to Next. + Type() ([]byte, model.MetricType) + + // Unit returns the metric name and unit in the current entry. + // Must only be called after Next returned a unit entry. + // The returned byte slices become invalid after the next call to Next. + Unit() ([]byte, []byte) + + // Comment returns the text of the current comment. + // Must only be called after Next returned a comment entry. + // The returned byte slice becomes invalid after the next call to Next. + Comment() []byte + + // Metric writes the labels of the current sample into the passed labels. + // It returns the string from which the metric was parsed. + Metric(l *labels.Labels) string + + // Exemplar writes the exemplar of the current sample into the passed + // exemplar. It can be called repeatedly to retrieve multiple exemplars + // for the same sample. It returns false once all exemplars are + // retrieved (including the case where no exemplars exist at all). + Exemplar(l *exemplar.Exemplar) bool + + // CreatedTimestamp returns the created timestamp (in milliseconds) for the + // current sample. It returns nil if it is unknown e.g. if it wasn't set, + // if the scrape protocol or metric type does not support created timestamps. + CreatedTimestamp() *int64 + + // Next advances the parser to the next sample. + // It returns (EntryInvalid, io.EOF) if no samples were read. + Next() (Entry, error) +} + +// New returns a new parser of the byte slice. +// +// This function always returns a valid parser, but might additionally +// return an error if the content type cannot be parsed. 
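+//
+// A minimal usage sketch (the content type and scrape body are hypothetical):
+//
+//	p, _ := New(body, "application/openmetrics-text", false, labels.NewSymbolTable())
+//	for {
+//		entry, err := p.Next()
+//		if err != nil {
+//			break // io.EOF marks the end of the input
+//		}
+//		if entry == EntrySeries {
+//			series, ts, val := p.Series()
+//			_, _, _ = series, ts, val
+//		}
+//	}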
+func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) {
+	if contentType == "" {
+		return NewPromParser(b, st), nil
+	}
+
+	mediaType, _, err := mime.ParseMediaType(contentType)
+	if err != nil {
+		return NewPromParser(b, st), err
+	}
+	switch mediaType {
+	case "application/openmetrics-text":
+		return NewOpenMetricsParser(b, st), nil
+	case "application/vnd.google.protobuf":
+		return NewProtobufParser(b, parseClassicHistograms, st), nil
+	default:
+		return NewPromParser(b, st), nil
+	}
+}
+
+// Entry represents the type of a parsed entry.
+type Entry int
+
+const (
+	EntryInvalid   Entry = -1
+	EntryType      Entry = 0
+	EntryHelp      Entry = 1
+	EntrySeries    Entry = 2 // A series with a simple float64 as value.
+	EntryComment   Entry = 3
+	EntryUnit      Entry = 4
+	EntryHistogram Entry = 5 // A series with a native histogram as a value.
+)
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l
new file mode 100644
index 000000000000..9afbbbd8bd55
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l
@@ -0,0 +1,83 @@
+%{
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"fmt"
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *openMetricsLexer) Lex() token {
+	if l.i >= len(l.b) {
+		return tEOF
+	}
+	c := l.b[l.i]
+	l.start = l.i
+
+%}
+
+D [0-9]
+L [a-zA-Z_]
+M [a-zA-Z_:]
+C [^\n]
+S [ ]
+
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp
+
+%yyc c
+%yyn c = l.next()
+%yyt l.state
+
+
+%%
+
+#{S} l.state = sComment
+<sComment>HELP{S} l.state = sMeta1; return tHelp
+<sComment>TYPE{S} l.state = sMeta1; return tType
+<sComment>UNIT{S} l.state = sMeta1; return tUnit
+<sComment>"EOF"\n? l.state = sInit; return tEOFWord
+<sMeta1>\"(\\.|[^\\"])*\" l.state = sMeta2; return tMName
+<sMeta1>{M}({M}|{D})* l.state = sMeta2; return tMName
+<sMeta2>{S}{C}*\n l.state = sInit; return tText
+
+{M}({M}|{D})* l.state = sValue; return tMName
+<sValue>\{ l.state = sLabels; return tBraceOpen
+\{ l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})* return tLName
+<sLabels>\"(\\.|[^\\"])*\" l.state = sLabels; return tQString
+<sLabels>\} l.state = sValue; return tBraceClose
+<sLabels>= l.state = sLValue; return tEqual
+<sLabels>, return tComma
+<sLValue>\"(\\.|[^\\"\n])*\" l.state = sLabels; return tLValue
+<sValue>{S}[^ \n]+ l.state = sTimestamp; return tValue
+<sTimestamp>{S}[^ \n]+ return tTimestamp
+<sTimestamp>\n l.state = sInit; return tLinebreak
+<sTimestamp>{S}#{S}\{ l.state = sExemplar; return tComment
+
+<sExemplar>{L}({L}|{D})* return tLName
+<sExemplar>\} l.state = sEValue; return tBraceClose
+<sExemplar>= l.state = sEValue; return tEqual
+<sEValue>\"(\\.|[^\\"\n])*\" l.state = sExemplar; return tLValue
+<sExemplar>, return tComma
+<sEValue>{S}[^ \n]+ l.state = sETimestamp; return tValue
+<sETimestamp>{S}[^ \n]+ return tTimestamp
+<sETimestamp>\n l.state = sInit; return tLinebreak
+
+%%
+
+	return tInvalid
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l.go
new file mode 100644
index 000000000000..c8789ef60d4a
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricslex.l.go
@@ -0,0 +1,870 @@
+// Code generated by golex. DO NOT EDIT.
+
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"fmt"
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
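+//
+// The yystate/yyrule labels below form the golex-generated table-driven state
+// machine: l.state selects a start condition, each yystateN consumes one input
+// byte, and each yyruleN emits a token and/or switches the start condition.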
+func (l *openMetricsLexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(fmt.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart6 + case 2: // start condition: sMeta1 + goto yystart26 + case 3: // start condition: sMeta2 + goto yystart31 + case 4: // start condition: sLabels + goto yystart34 + case 5: // start condition: sLValue + goto yystart42 + case 6: // start condition: sValue + goto yystart46 + case 7: // start condition: sTimestamp + goto yystart50 + case 8: // start condition: sExemplar + goto yystart57 + case 9: // start condition: sEValue + goto yystart62 + case 10: // start condition: sETimestamp + goto yystart68 + } + +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate2 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + case c == '{': + goto yystate5 + } + +yystate2: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate3 + } + +yystate3: + c = l.next() + goto yyrule1 + +yystate4: + c = l.next() + switch { + default: + goto yyrule9 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate4 + } + +yystate5: + c = l.next() + goto yyrule11 + +yystate6: + c = l.next() +yystart6: + switch { + default: + goto yyabort + case c == 'E': + goto yystate7 + case c == 'H': + goto yystate11 + case c == 'T': + goto yystate16 + case c == 'U': + goto yystate21 + } + +yystate7: + c = l.next() + switch { + default: + goto yyabort + case c == 'O': + goto yystate8 + } + +yystate8: + c = l.next() + switch { + default: + goto yyabort + case c == 'F': + goto yystate9 + } + +yystate9: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\n': + goto yystate10 + } + +yystate10: + c = l.next() + goto yyrule5 + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate14 + } + +yystate14: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate15 + } + +yystate15: + c = l.next() + goto yyrule2 + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate19 + } + +yystate19: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate20 + } + +yystate20: + c = l.next() + goto yyrule3 + +yystate21: + c = l.next() + switch { + default: + goto yyabort + case c == 'N': + goto yystate22 + } + +yystate22: + c = l.next() + switch { + default: + goto yyabort + case c == 'I': + goto yystate23 + } + +yystate23: + c = l.next() + switch { + default: + goto yyabort + case c == 'T': + goto yystate24 + } + +yystate24: + c = l.next() + switch { + default: + goto yyabort + case c == ' ': + goto yystate25 + } + +yystate25: + c = l.next() + goto yyrule4 + +yystate26: + c = l.next() +yystart26: + switch { + default: + goto yyabort + case c == '"': + goto yystate27 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' 
|| c >= 'a' && c <= 'z': + goto yystate30 + } + +yystate27: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate28 + case c == '\\': + goto yystate29 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate27 + } + +yystate28: + c = l.next() + goto yyrule6 + +yystate29: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate27 + } + +yystate30: + c = l.next() + switch { + default: + goto yyrule7 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate30 + } + +yystate31: + c = l.next() +yystart31: + switch { + default: + goto yyabort + case c == ' ': + goto yystate32 + } + +yystate32: + c = l.next() + switch { + default: + goto yyabort + case c == '\n': + goto yystate33 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate32 + } + +yystate33: + c = l.next() + goto yyrule8 + +yystate34: + c = l.next() +yystart34: + switch { + default: + goto yyabort + case c == '"': + goto yystate35 + case c == ',': + goto yystate38 + case c == '=': + goto yystate39 + case c == '}': + goto yystate41 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate40 + } + +yystate35: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate36 + case c == '\\': + goto yystate37 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate35 + } + +yystate36: + c = l.next() + goto yyrule13 + +yystate37: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate35 + } + +yystate38: + c = l.next() + goto yyrule16 + +yystate39: + c = l.next() + goto yyrule15 + +yystate40: + c = l.next() + switch { + default: + goto yyrule12 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate40 + } + +yystate41: + c = l.next() + goto yyrule14 + +yystate42: + c = l.next() +yystart42: + switch { + default: + goto yyabort + case c == '"': + goto yystate43 + } + +yystate43: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate44 + case c == '\\': + goto yystate45 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate43 + } + +yystate44: + c = l.next() + goto yyrule17 + +yystate45: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate43 + } + +yystate46: + c = l.next() +yystart46: + switch { + default: + goto yyabort + case c == ' ': + goto yystate47 + case c == '{': + goto yystate49 + } + +yystate47: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate48 + } + +yystate48: + c = l.next() + switch { + default: + goto yyrule18 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate48 + } + +yystate49: + c = l.next() + goto yyrule10 + +yystate50: + c = l.next() +yystart50: + switch { + default: + goto yyabort + case c == ' ': + goto yystate52 + case c == '\n': + goto yystate51 + } + +yystate51: + c = l.next() + goto yyrule20 + +yystate52: + c = l.next() + switch { + default: + goto yyabort + case c == '#': + goto yystate54 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' 
|| c == '"' || c >= '$' && c <= 'ÿ': + goto yystate53 + } + +yystate53: + c = l.next() + switch { + default: + goto yyrule19 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate53 + } + +yystate54: + c = l.next() + switch { + default: + goto yyrule19 + case c == ' ': + goto yystate55 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate53 + } + +yystate55: + c = l.next() + switch { + default: + goto yyabort + case c == '{': + goto yystate56 + } + +yystate56: + c = l.next() + goto yyrule21 + +yystate57: + c = l.next() +yystart57: + switch { + default: + goto yyabort + case c == ',': + goto yystate58 + case c == '=': + goto yystate59 + case c == '}': + goto yystate61 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate60 + } + +yystate58: + c = l.next() + goto yyrule26 + +yystate59: + c = l.next() + goto yyrule24 + +yystate60: + c = l.next() + switch { + default: + goto yyrule22 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate60 + } + +yystate61: + c = l.next() + goto yyrule23 + +yystate62: + c = l.next() +yystart62: + switch { + default: + goto yyabort + case c == ' ': + goto yystate63 + case c == '"': + goto yystate65 + } + +yystate63: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yystate64: + c = l.next() + switch { + default: + goto yyrule27 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate64 + } + +yystate65: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate66 + case c == '\\': + goto yystate67 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate65 + } + +yystate66: + c = l.next() + goto yyrule25 + +yystate67: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate65 + } + +yystate68: + c = l.next() +yystart68: + switch { + default: + goto yyabort + case c == ' ': + goto yystate70 + case c == '\n': + goto yystate69 + } + +yystate69: + c = l.next() + goto yyrule29 + +yystate70: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate71 + } + +yystate71: + c = l.next() + switch { + default: + goto yyrule28 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate71 + } + +yyrule1: // #{S} + { + l.state = sComment + goto yystate0 + } +yyrule2: // HELP{S} + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule3: // TYPE{S} + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule4: // UNIT{S} + { + l.state = sMeta1 + return tUnit + goto yystate0 + } +yyrule5: // "EOF"\n? 
+ { + l.state = sInit + return tEOFWord + goto yystate0 + } +yyrule6: // \"(\\.|[^\\"])*\" + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule7: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule8: // {S}{C}*\n + { + l.state = sInit + return tText + goto yystate0 + } +yyrule9: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule10: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule11: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule12: // {L}({L}|{D})* + { + return tLName + } +yyrule13: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tQString + goto yystate0 + } +yyrule14: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule15: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule16: // , + { + return tComma + } +yyrule17: // \"(\\.|[^\\"\n])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule18: // {S}[^ \n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule19: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule20: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule21: // {S}#{S}\{ + { + l.state = sExemplar + return tComment + goto yystate0 + } +yyrule22: // {L}({L}|{D})* + { + return tLName + } +yyrule23: // \} + { + l.state = sEValue + return tBraceClose + goto yystate0 + } +yyrule24: // = + { + l.state = sEValue + return tEqual + goto yystate0 + } +yyrule25: // \"(\\.|[^\\"\n])*\" + { + l.state = sExemplar + return tLValue + goto yystate0 + } +yyrule26: // , + { + return tComma + } +yyrule27: // {S}[^ \n]+ + { + l.state = sETimestamp + return tValue + goto yystate0 + } +yyrule28: // {S}[^ \n]+ + { + return tTimestamp + } +yyrule29: // \n + if true { // avoid go vet determining the below panic will not be reached + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + +yyabort: // no lexem recognized + // silence unused label errors for build and satisfy go vet reachability analysis + { + if false { + goto yyabort + } + if false { + goto yystate0 + } + if false { + goto yystate1 + } + if false { + goto yystate6 + } + if false { + goto yystate26 + } + if false { + goto yystate31 + } + if false { + goto yystate34 + } + if false { + goto yystate42 + } + if false { + goto yystate46 + } + if false { + goto yystate50 + } + if false { + goto yystate57 + } + if false { + goto yystate62 + } + if false { + goto yystate68 + } + } + + return tInvalid +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricsparse.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricsparse.go new file mode 100644 index 000000000000..b011d8204bea --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/openmetricsparse.go @@ -0,0 +1,530 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
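+
+// A minimal end-to-end sketch of the parser in this file (the input is
+// hypothetical; note that OpenMetrics requires the trailing "# EOF" marker):
+//
+//	p := NewOpenMetricsParser([]byte("foo_total 17.0\n# EOF\n"), labels.NewSymbolTable())
+//	for {
+//		entry, err := p.Next()
+//		if err != nil {
+//			break // io.EOF once "# EOF" has been consumed
+//		}
+//		if entry == EntrySeries {
+//			var lbls labels.Labels
+//			p.Metric(&lbls)
+//		}
+//	}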
+ +//go:generate go get -u modernc.org/golex +//go:generate golex -o=openmetricslex.l.go openmetricslex.l + +package textparse + +import ( + "errors" + "fmt" + "io" + "math" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/v2/model/exemplar" + "github.com/prometheus/prometheus/v2/model/histogram" + "github.com/prometheus/prometheus/v2/model/labels" + "github.com/prometheus/prometheus/v2/model/value" +) + +type openMetricsLexer struct { + b []byte + i int + start int + err error + state int +} + +// buf returns the buffer of the current token. +func (l *openMetricsLexer) buf() []byte { + return l.b[l.start:l.i] +} + +// next advances the openMetricsLexer to the next character. +func (l *openMetricsLexer) next() byte { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + // Lex struggles with null bytes. If we are in a label value or help string, where + // they are allowed, consume them here immediately. + for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + } + return l.b[l.i] +} + +func (l *openMetricsLexer) Error(es string) { + l.err = errors.New(es) +} + +// OpenMetricsParser parses samples from a byte slice of samples in the official +// OpenMetrics text exposition format. +// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit +type OpenMetricsParser struct { + l *openMetricsLexer + builder labels.ScratchBuilder + series []byte + text []byte + mtype model.MetricType + val float64 + ts int64 + hasTS bool + start int + // offsets is a list of offsets into series that describe the positions + // of the metric name and label names and values for this series. + // p.offsets[0] is the start character of the metric name. + // p.offsets[1] is the end of the metric name. + // Subsequently, p.offsets is a pair of pair of offsets for the positions + // of the label name and value start and end characters. + offsets []int + + eOffsets []int + exemplar []byte + exemplarVal float64 + exemplarTs int64 + hasExemplarTs bool +} + +// NewOpenMetricsParser returns a new parser of the byte slice. +func NewOpenMetricsParser(b []byte, st *labels.SymbolTable) Parser { + return &OpenMetricsParser{ + l: &openMetricsLexer{b: b}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + } +} + +// Series returns the bytes of the series, the timestamp if set, and the value +// of the current sample. +func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) { + if p.hasTS { + ts := p.ts + return p.series, &ts, p.val + } + return p.series, nil, p.val +} + +// Histogram returns (nil, nil, nil, nil) for now because OpenMetrics does not +// support sparse histograms yet. +func (p *OpenMetricsParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Help() ([]byte, []byte) { + m := p.l.b[p.offsets[0]:p.offsets[1]] + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + // OpenMetrics always uses the Prometheus format label value escaping. 
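+		// The backslash escapes are resolved with the same replacer that the
+		// Prometheus text parser uses for label values.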
+ return m, []byte(lvalReplacer.Replace(string(p.text))) + } + return m, p.text +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Type() ([]byte, model.MetricType) { + return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype +} + +// Unit returns the metric name and unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. +func (p *OpenMetricsParser) Unit() ([]byte, []byte) { + return p.l.b[p.offsets[0]:p.offsets[1]], p.text +} + +// Comment returns the text of the current comment. +// Must only be called after Next returned a comment entry. +// The returned byte slice becomes invalid after the next call to Next. +func (p *OpenMetricsParser) Comment() []byte { + return p.text +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *OpenMetricsParser) Metric(l *labels.Labels) string { + // Copy the buffer to a string: this is only necessary for the return value. + s := string(p.series) + + p.builder.Reset() + metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) + p.builder.Add(labels.MetricName, metricName) + + for i := 2; i < len(p.offsets); i += 4 { + a := p.offsets[i] - p.start + b := p.offsets[i+1] - p.start + label := unreplace(s[a:b]) + c := p.offsets[i+2] - p.start + d := p.offsets[i+3] - p.start + value := unreplace(s[c:d]) + + p.builder.Add(label, value) + } + + p.builder.Sort() + *l = p.builder.Labels() + + return s +} + +// Exemplar writes the exemplar of the current sample into the passed exemplar. +// It returns whether an exemplar exists. As OpenMetrics only ever has one +// exemplar per sample, every call after the first (for the same sample) will +// always return false. +func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { + if len(p.exemplar) == 0 { + return false + } + + // Allocate the full immutable string immediately, so we just + // have to create references on it below. + s := string(p.exemplar) + + e.Value = p.exemplarVal + if p.hasExemplarTs { + e.HasTs = true + e.Ts = p.exemplarTs + } + + p.builder.Reset() + for i := 0; i < len(p.eOffsets); i += 4 { + a := p.eOffsets[i] - p.start + b := p.eOffsets[i+1] - p.start + c := p.eOffsets[i+2] - p.start + d := p.eOffsets[i+3] - p.start + + p.builder.Add(s[a:b], s[c:d]) + } + + p.builder.Sort() + e.Labels = p.builder.Labels() + + // Wipe exemplar so that future calls return false. + p.exemplar = p.exemplar[:0] + return true +} + +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/v2/issues/12980 +func (p *OpenMetricsParser) CreatedTimestamp() *int64 { + return nil +} + +// nextToken returns the next token from the openMetricsLexer. +func (p *OpenMetricsParser) nextToken() token { + tok := p.l.Lex() + return tok +} + +func (p *OpenMetricsParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) +} + +// Next advances the parser to the next sample. +// It returns (EntryInvalid, io.EOF) if no samples were read. 
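+// After an EntrySeries result, Series, Metric and Exemplar may be called to
+// retrieve the sample; after EntryHelp, EntryType or EntryUnit the matching
+// metadata accessor applies until the next call to Next.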
+func (p *OpenMetricsParser) Next() (Entry, error) { + var err error + + p.start = p.l.i + p.offsets = p.offsets[:0] + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + + switch t := p.nextToken(); t { + case tEOFWord: + if t := p.nextToken(); t != tEOF { + return EntryInvalid, errors.New("unexpected data after # EOF") + } + return EntryInvalid, io.EOF + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tHelp, tType, tUnit: + switch t2 := p.nextToken(); t2 { + case tMName: + mStart := p.l.start + mEnd := p.l.i + if p.l.b[mStart] == '"' && p.l.b[mEnd-1] == '"' { + mStart++ + mEnd-- + } + p.offsets = append(p.offsets, mStart, mEnd) + default: + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) + } + switch t2 := p.nextToken(); t2 { + case tText: + if len(p.l.buf()) > 1 { + p.text = p.l.buf()[1 : len(p.l.buf())-1] + } else { + p.text = []byte{} + } + default: + return EntryInvalid, fmt.Errorf("expected text in %s", t.String()) + } + switch t { + case tType: + switch s := yoloString(p.text); s { + case "counter": + p.mtype = model.MetricTypeCounter + case "gauge": + p.mtype = model.MetricTypeGauge + case "histogram": + p.mtype = model.MetricTypeHistogram + case "gaugehistogram": + p.mtype = model.MetricTypeGaugeHistogram + case "summary": + p.mtype = model.MetricTypeSummary + case "info": + p.mtype = model.MetricTypeInfo + case "stateset": + p.mtype = model.MetricTypeStateset + case "unknown": + p.mtype = model.MetricTypeUnknown + default: + return EntryInvalid, fmt.Errorf("invalid metric type %q", s) + } + case tHelp: + if !utf8.Valid(p.text) { + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) + } + } + switch t { + case tHelp: + return EntryHelp, nil + case tType: + return EntryType, nil + case tUnit: + m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]]) + u := yoloString(p.text) + if len(u) > 0 { + if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' { + return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", u, m) + } + } + return EntryUnit, nil + } + + case tBraceOpen: + // We found a brace, so make room for the eventual metric name. If these + // values aren't updated, then the metric name was not set inside the + // braces and we can return an error. + if len(p.offsets) == 0 { + p.offsets = []int{-1, -1} + } + if p.offsets, err = p.parseLVals(p.offsets, false); err != nil { + return EntryInvalid, err + } + + p.series = p.l.b[p.start:p.l.i] + return p.parseMetricSuffix(p.nextToken()) + case tMName: + p.offsets = append(p.offsets, p.start, p.l.i) + p.series = p.l.b[p.start:p.l.i] + + t2 := p.nextToken() + if t2 == tBraceOpen { + p.offsets, err = p.parseLVals(p.offsets, false) + if err != nil { + return EntryInvalid, err + } + p.series = p.l.b[p.start:p.l.i] + t2 = p.nextToken() + } + return p.parseMetricSuffix(t2) + + default: + err = p.parseError("expected a valid start token", t) + } + return EntryInvalid, err +} + +func (p *OpenMetricsParser) parseComment() error { + var err error + // Parse the labels. + p.eOffsets, err = p.parseLVals(p.eOffsets, true) + if err != nil { + return err + } + p.exemplar = p.l.b[p.start:p.l.i] + + // Get the value. + p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels") + if err != nil { + return err + } + + // Read the optional timestamp. 
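+	// Per the OpenMetrics text format, the exemplar timestamp is optional: a
+	// linebreak directly after the exemplar value is accepted below.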
+ p.hasExemplarTs = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return errors.New("data does not end with # EOF") + case tLinebreak: + break + case tTimestamp: + p.hasExemplarTs = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { + return fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + if math.IsNaN(ts) || math.IsInf(ts, 0) { + return fmt.Errorf("invalid exemplar timestamp %f", ts) + } + p.exemplarTs = int64(ts * 1000) + switch t3 := p.nextToken(); t3 { + case tLinebreak: + default: + return p.parseError("expected next entry after exemplar timestamp", t3) + } + default: + return p.parseError("expected timestamp or comment", t2) + } + return nil +} + +func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, error) { + t := p.nextToken() + for { + curTStart := p.l.start + curTI := p.l.i + switch t { + case tBraceClose: + return offsets, nil + case tLName: + case tQString: + default: + return nil, p.parseError("expected label name", t) + } + + t = p.nextToken() + // A quoted string followed by a comma or brace is a metric name. Set the + // offsets and continue processing. If this is an exemplar, this format + // is not allowed. + if t == tComma || t == tBraceClose { + if isExemplar { + return nil, p.parseError("expected label name", t) + } + if offsets[0] != -1 || offsets[1] != -1 { + return nil, fmt.Errorf("metric name already set while parsing: %q", p.l.b[p.start:p.l.i]) + } + offsets[0] = curTStart + 1 + offsets[1] = curTI - 1 + if t == tBraceClose { + return offsets, nil + } + t = p.nextToken() + continue + } + // We have a label name, and it might be quoted. + if p.l.b[curTStart] == '"' { + curTStart++ + curTI-- + } + offsets = append(offsets, curTStart, curTI) + + if t != tEqual { + return nil, p.parseError("expected equal", t) + } + if t := p.nextToken(); t != tLValue { + return nil, p.parseError("expected label value", t) + } + if !utf8.Valid(p.l.buf()) { + return nil, fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) + } + + // The openMetricsLexer ensures the value string is quoted. Strip first + // and last character. + offsets = append(offsets, p.l.start+1, p.l.i-1) + + // Free trailing commas are allowed. + t = p.nextToken() + if t == tComma { + t = p.nextToken() + } else if t != tBraceClose { + return nil, p.parseError("expected comma or brace close", t) + } + } +} + +// parseMetricSuffix parses the end of the line after the metric name and +// labels. It starts parsing with the provided token. +func (p *OpenMetricsParser) parseMetricSuffix(t token) (Entry, error) { + if p.offsets[0] == -1 { + return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + } + + var err error + p.val, err = p.getFloatValue(t, "metric") + if err != nil { + return EntryInvalid, err + } + + p.hasTS = false + switch t2 := p.nextToken(); t2 { + case tEOF: + return EntryInvalid, errors.New("data does not end with # EOF") + case tLinebreak: + break + case tComment: + if err := p.parseComment(); err != nil { + return EntryInvalid, err + } + case tTimestamp: + p.hasTS = true + var ts float64 + // A float is enough to hold what we need for millisecond resolution. 
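+		// Timestamps are given in (possibly fractional) seconds and converted
+		// to integer milliseconds below.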
+		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
+			return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
+		}
+		if math.IsNaN(ts) || math.IsInf(ts, 0) {
+			return EntryInvalid, fmt.Errorf("invalid timestamp %f", ts)
+		}
+		p.ts = int64(ts * 1000)
+		switch t3 := p.nextToken(); t3 {
+		case tLinebreak:
+		case tComment:
+			if err := p.parseComment(); err != nil {
+				return EntryInvalid, err
+			}
+		default:
+			return EntryInvalid, p.parseError("expected next entry after timestamp", t3)
+		}
+	}
+	return EntrySeries, nil
+}
+
+func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
+	if t != tValue {
+		return 0, p.parseError(fmt.Sprintf("expected value after %v", after), t)
+	}
+	val, err := parseFloat(yoloString(p.l.buf()[1:]))
+	if err != nil {
+		return 0, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i])
+	}
+	// Ensure canonical NaN value.
+	if math.IsNaN(p.exemplarVal) {
+		val = math.Float64frombits(value.NormalNaN)
+	}
+	return val, nil
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l
new file mode 100644
index 000000000000..e9fa1fb71c6b
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l
@@ -0,0 +1,106 @@
+%{
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"fmt"
+)
+
+const (
+	sInit = iota
+	sComment
+	sMeta1
+	sMeta2
+	sLabels
+	sLValue
+	sValue
+	sTimestamp
+	sExemplar
+	sEValue
+	sETimestamp
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *promlexer) Lex() token {
+	if l.i >= len(l.b) {
+		return tEOF
+	}
+	c := l.b[l.i]
+	l.start = l.i
+
+%}
+
+D [0-9]
+L [a-zA-Z_]
+M [a-zA-Z_:]
+C [^\n]
+
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp
+
+%yyc c
+%yyn c = l.next()
+%yyt l.state
+
+
+%%
+
+\0 return tEOF
+\n l.state = sInit; return tLinebreak
+<*>[ \t]+ return tWhitespace
+
+#[ \t]+ l.state = sComment
+# return l.consumeComment()
+<sComment>HELP[\t ]+ l.state = sMeta1; return tHelp
+<sComment>TYPE[\t ]+ l.state = sMeta1; return tType
+<sMeta1>\"(\\.|[^\\"])*\" l.state = sMeta2; return tMName
+<sMeta1>{M}({M}|{D})* l.state = sMeta2; return tMName
+<sMeta2>{C}* l.state = sInit; return tText
+
+{M}({M}|{D})* l.state = sValue; return tMName
+<sValue>\{ l.state = sLabels; return tBraceOpen
+\{ l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})* return tLName
+<sLabels>\"(\\.|[^\\"])*\" l.state = sLabels; return tQString
+<sLabels>\} l.state = sValue; return tBraceClose
+<sLabels>= l.state = sLValue; return tEqual
+<sLabels>, return tComma
+<sLValue>\"(\\.|[^\\"])*\" l.state = sLabels; return tLValue
+<sValue>[^{ \t\n]+ l.state = sTimestamp; return tValue
+<sTimestamp>{D}+ return tTimestamp
+<sTimestamp>\n l.state = sInit; return tLinebreak
+
+%%
+	// Workaround to gobble up comments that started with a HELP or TYPE
+	// prefix.
We just consume all characters until we reach a newline. + // This saves us from adding disproportionate complexity to the parser. + if l.state == sComment { + return l.consumeComment() + } + return tInvalid +} + +func (l *promlexer) consumeComment() token { + for c := l.cur(); ; c = l.next() { + switch c { + case 0: + return tEOF + case '\n': + l.state = sInit + return tComment + } + } +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l.go new file mode 100644 index 000000000000..a083e5549b64 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promlex.l.go @@ -0,0 +1,656 @@ +// Code generated by golex. DO NOT EDIT. + +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "fmt" +) + +const ( + sInit = iota + sComment + sMeta1 + sMeta2 + sLabels + sLValue + sValue + sTimestamp + sExemplar + sEValue + sETimestamp +) + +// Lex is called by the parser generated by "go tool yacc" to obtain each +// token. The method is opened before the matching rules block and closed at +// the end of the file. +func (l *promlexer) Lex() token { + if l.i >= len(l.b) { + return tEOF + } + c := l.b[l.i] + l.start = l.i + +yystate0: + + switch yyt := l.state; yyt { + default: + panic(fmt.Errorf(`invalid start condition %d`, yyt)) + case 0: // start condition: INITIAL + goto yystart1 + case 1: // start condition: sComment + goto yystart9 + case 2: // start condition: sMeta1 + goto yystart20 + case 3: // start condition: sMeta2 + goto yystart25 + case 4: // start condition: sLabels + goto yystart28 + case 5: // start condition: sLValue + goto yystart36 + case 6: // start condition: sValue + goto yystart40 + case 7: // start condition: sTimestamp + goto yystart43 + } + +yystate1: + c = l.next() +yystart1: + switch { + default: + goto yyabort + case c == '#': + goto yystate5 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + case c == '\n': + goto yystate4 + case c == '\t' || c == ' ': + goto yystate3 + case c == '\x00': + goto yystate2 + case c == '{': + goto yystate8 + } + +yystate2: + c = l.next() + goto yyrule1 + +yystate3: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate4: + c = l.next() + goto yyrule2 + +yystate5: + c = l.next() + switch { + default: + goto yyrule5 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate6: + c = l.next() + switch { + default: + goto yyrule4 + case c == '\t' || c == ' ': + goto yystate6 + } + +yystate7: + c = l.next() + switch { + default: + goto yyrule11 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate7 + } + +yystate8: + c = l.next() + goto yyrule13 + +yystate9: + c = l.next() +yystart9: + switch { + default: + goto yyabort + case c == 'H': + goto yystate10 + case c == 'T': + goto yystate15 
+ case c == '\t' || c == ' ': + goto yystate3 + } + +yystate10: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate11 + } + +yystate11: + c = l.next() + switch { + default: + goto yyabort + case c == 'L': + goto yystate12 + } + +yystate12: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate13 + } + +yystate13: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate14 + } + +yystate14: + c = l.next() + switch { + default: + goto yyrule6 + case c == '\t' || c == ' ': + goto yystate14 + } + +yystate15: + c = l.next() + switch { + default: + goto yyabort + case c == 'Y': + goto yystate16 + } + +yystate16: + c = l.next() + switch { + default: + goto yyabort + case c == 'P': + goto yystate17 + } + +yystate17: + c = l.next() + switch { + default: + goto yyabort + case c == 'E': + goto yystate18 + } + +yystate18: + c = l.next() + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate19 + } + +yystate19: + c = l.next() + switch { + default: + goto yyrule7 + case c == '\t' || c == ' ': + goto yystate19 + } + +yystate20: + c = l.next() +yystart20: + switch { + default: + goto yyabort + case c == '"': + goto yystate21 + case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate24 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate21: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate22 + case c == '\\': + goto yystate23 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate21 + } + +yystate22: + c = l.next() + goto yyrule8 + +yystate23: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate21 + } + +yystate24: + c = l.next() + switch { + default: + goto yyrule9 + case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate24 + } + +yystate25: + c = l.next() +yystart25: + switch { + default: + goto yyrule10 + case c == '\t' || c == ' ': + goto yystate27 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate26 + } + +yystate26: + c = l.next() + switch { + default: + goto yyrule10 + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate26 + } + +yystate27: + c = l.next() + switch { + default: + goto yyrule3 + case c == '\t' || c == ' ': + goto yystate27 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ': + goto yystate26 + } + +yystate28: + c = l.next() +yystart28: + switch { + default: + goto yyabort + case c == '"': + goto yystate29 + case c == ',': + goto yystate32 + case c == '=': + goto yystate33 + case c == '\t' || c == ' ': + goto yystate3 + case c == '}': + goto yystate35 + case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate34 + } + +yystate29: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate30 + case c == '\\': + goto yystate31 + case c >= '\x01' && c <= '!' 
|| c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate29 + } + +yystate30: + c = l.next() + goto yyrule15 + +yystate31: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate29 + } + +yystate32: + c = l.next() + goto yyrule18 + +yystate33: + c = l.next() + goto yyrule17 + +yystate34: + c = l.next() + switch { + default: + goto yyrule14 + case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z': + goto yystate34 + } + +yystate35: + c = l.next() + goto yyrule16 + +yystate36: + c = l.next() +yystart36: + switch { + default: + goto yyabort + case c == '"': + goto yystate37 + case c == '\t' || c == ' ': + goto yystate3 + } + +yystate37: + c = l.next() + switch { + default: + goto yyabort + case c == '"': + goto yystate38 + case c == '\\': + goto yystate39 + case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ': + goto yystate37 + } + +yystate38: + c = l.next() + goto yyrule19 + +yystate39: + c = l.next() + switch { + default: + goto yyabort + case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ': + goto yystate37 + } + +yystate40: + c = l.next() +yystart40: + switch { + default: + goto yyabort + case c == '\t' || c == ' ': + goto yystate3 + case c == '{': + goto yystate42 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate41 + } + +yystate41: + c = l.next() + switch { + default: + goto yyrule20 + case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ': + goto yystate41 + } + +yystate42: + c = l.next() + goto yyrule12 + +yystate43: + c = l.next() +yystart43: + switch { + default: + goto yyabort + case c == '\n': + goto yystate44 + case c == '\t' || c == ' ': + goto yystate3 + case c >= '0' && c <= '9': + goto yystate45 + } + +yystate44: + c = l.next() + goto yyrule22 + +yystate45: + c = l.next() + switch { + default: + goto yyrule21 + case c >= '0' && c <= '9': + goto yystate45 + } + +yyrule1: // \0 + { + return tEOF + } +yyrule2: // \n + { + l.state = sInit + return tLinebreak + goto yystate0 + } +yyrule3: // [ \t]+ + { + return tWhitespace + } +yyrule4: // #[ \t]+ + { + l.state = sComment + goto yystate0 + } +yyrule5: // # + { + return l.consumeComment() + } +yyrule6: // HELP[\t ]+ + { + l.state = sMeta1 + return tHelp + goto yystate0 + } +yyrule7: // TYPE[\t ]+ + { + l.state = sMeta1 + return tType + goto yystate0 + } +yyrule8: // \"(\\.|[^\\"])*\" + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule9: // {M}({M}|{D})* + { + l.state = sMeta2 + return tMName + goto yystate0 + } +yyrule10: // {C}* + { + l.state = sInit + return tText + goto yystate0 + } +yyrule11: // {M}({M}|{D})* + { + l.state = sValue + return tMName + goto yystate0 + } +yyrule12: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule13: // \{ + { + l.state = sLabels + return tBraceOpen + goto yystate0 + } +yyrule14: // {L}({L}|{D})* + { + return tLName + } +yyrule15: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tQString + goto yystate0 + } +yyrule16: // \} + { + l.state = sValue + return tBraceClose + goto yystate0 + } +yyrule17: // = + { + l.state = sLValue + return tEqual + goto yystate0 + } +yyrule18: // , + { + return tComma + } +yyrule19: // \"(\\.|[^\\"])*\" + { + l.state = sLabels + return tLValue + goto yystate0 + } +yyrule20: // [^{ \t\n]+ + { + l.state = sTimestamp + return tValue + goto yystate0 + } +yyrule21: // {D}+ + 
{ + return tTimestamp + } +yyrule22: // \n + if true { // avoid go vet determining the below panic will not be reached + l.state = sInit + return tLinebreak + goto yystate0 + } + panic("unreachable") + +yyabort: // no lexeme recognized + // silence unused label errors for build and satisfy go vet reachability analysis + { + if false { + goto yyabort + } + if false { + goto yystate0 + } + if false { + goto yystate1 + } + if false { + goto yystate9 + } + if false { + goto yystate20 + } + if false { + goto yystate25 + } + if false { + goto yystate28 + } + if false { + goto yystate36 + } + if false { + goto yystate40 + } + if false { + goto yystate43 + } + } + + // Workaround to gobble up comments that started with a HELP or TYPE + // prefix. We just consume all characters until we reach a newline. + // This saves us from adding disproportionate complexity to the parser. + if l.state == sComment { + return l.consumeComment() + } + return tInvalid +} + +func (l *promlexer) consumeComment() token { + for c := l.cur(); ; c = l.next() { + switch c { + case 0: + return tEOF + case '\n': + l.state = sInit + return tComment + } + } +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promparse.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promparse.go new file mode 100644 index 000000000000..9acd6090060b --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promparse.go @@ -0,0 +1,514 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
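For orientation, here is a minimal sketch of how the PromParser defined in this file is typically driven. This is an illustration, not part of the vendored sources: the /v2 import paths mirror the rewrite performed by this patch, and the sample input and printing are assumptions.

package main

import (
	"fmt"
	"io"

	"github.com/prometheus/prometheus/v2/model/labels"
	"github.com/prometheus/prometheus/v2/model/textparse"
)

func main() {
	// A tiny scrape body in the Prometheus text exposition format.
	input := []byte("# TYPE go_goroutines gauge\ngo_goroutines 33\n")

	p := textparse.NewPromParser(input, labels.NewSymbolTable())
	for {
		entry, err := p.Next()
		if err == io.EOF {
			break // io.EOF signals the end of the input.
		}
		if err != nil {
			panic(err)
		}
		switch entry {
		case textparse.EntryType:
			name, typ := p.Type()
			fmt.Printf("type: %s = %s\n", name, typ)
		case textparse.EntrySeries:
			var lbls labels.Labels
			p.Metric(&lbls)
			_, ts, val := p.Series() // ts is nil when no timestamp was scraped.
			if ts != nil {
				fmt.Println(lbls, *ts, val)
			} else {
				fmt.Println(lbls, val)
			}
		}
	}
}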
+ +//go:generate go get -u modernc.org/golex +//go:generate golex -o=promlex.l.go promlex.l + +package textparse + +import ( + "errors" + "fmt" + "io" + "math" + "strconv" + "strings" + "unicode/utf8" + "unsafe" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/v2/model/exemplar" + "github.com/prometheus/prometheus/v2/model/histogram" + "github.com/prometheus/prometheus/v2/model/labels" + "github.com/prometheus/prometheus/v2/model/value" +) + +type promlexer struct { + b []byte + i int + start int + err error + state int +} + +type token int + +const ( + tInvalid token = -1 + tEOF token = 0 + tLinebreak token = iota + tWhitespace + tHelp + tType + tUnit + tEOFWord + tText + tComment + tBlank + tMName + tQString + tBraceOpen + tBraceClose + tLName + tLValue + tComma + tEqual + tTimestamp + tValue +) + +func (t token) String() string { + switch t { + case tInvalid: + return "INVALID" + case tEOF: + return "EOF" + case tLinebreak: + return "LINEBREAK" + case tWhitespace: + return "WHITESPACE" + case tHelp: + return "HELP" + case tType: + return "TYPE" + case tUnit: + return "UNIT" + case tEOFWord: + return "EOFWORD" + case tText: + return "TEXT" + case tComment: + return "COMMENT" + case tBlank: + return "BLANK" + case tMName: + return "MNAME" + case tQString: + return "QSTRING" + case tBraceOpen: + return "BOPEN" + case tBraceClose: + return "BCLOSE" + case tLName: + return "LNAME" + case tLValue: + return "LVALUE" + case tEqual: + return "EQUAL" + case tComma: + return "COMMA" + case tTimestamp: + return "TIMESTAMP" + case tValue: + return "VALUE" + } + return fmt.Sprintf("<invalid: %d>", t) +} + +// buf returns the buffer of the current token. +func (l *promlexer) buf() []byte { + return l.b[l.start:l.i] +} + +func (l *promlexer) cur() byte { + return l.b[l.i] +} + +// next advances the promlexer to the next character. +func (l *promlexer) next() byte { + l.i++ + if l.i >= len(l.b) { + l.err = io.EOF + return byte(tEOF) + } + // Lex struggles with null bytes. If we are in a label value or help string, where + // they are allowed, consume them here immediately. + for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) { + l.i++ + } + return l.b[l.i] +} + +func (l *promlexer) Error(es string) { + l.err = errors.New(es) +} + +// PromParser parses samples from a byte slice of samples in the official +// Prometheus text exposition format. +type PromParser struct { + l *promlexer + builder labels.ScratchBuilder + series []byte + text []byte + mtype model.MetricType + val float64 + ts int64 + hasTS bool + start int + // offsets is a list of offsets into series that describe the positions + // of the metric name and label names and values for this series. + // p.offsets[0] is the start character of the metric name. + // p.offsets[1] is the end of the metric name. + // Subsequently, p.offsets holds two pairs of offsets per label: the start + // and end of the label name, followed by the start and end of its value. + offsets []int +} + +// NewPromParser returns a new parser of the byte slice. +func NewPromParser(b []byte, st *labels.SymbolTable) Parser { + return &PromParser{ + l: &promlexer{b: append(b, '\n')}, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + } +} + +// Series returns the bytes of the series, the timestamp if set, and the value +// of the current sample.
+func (p *PromParser) Series() ([]byte, *int64, float64) { + if p.hasTS { + return p.series, &p.ts, p.val + } + return p.series, nil, p.val +} + +// Histogram returns (nil, nil, nil, nil) for now because the Prometheus text +// format does not support sparse histograms yet. +func (p *PromParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + return nil, nil, nil, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *PromParser) Help() ([]byte, []byte) { + m := p.l.b[p.offsets[0]:p.offsets[1]] + + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 { + return m, []byte(helpReplacer.Replace(string(p.text))) + } + return m, p.text +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. +func (p *PromParser) Type() ([]byte, model.MetricType) { + return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype +} + +// Unit returns the metric name and unit in the current entry. +// Must only be called after Next returned a unit entry. +// The returned byte slices become invalid after the next call to Next. +func (p *PromParser) Unit() ([]byte, []byte) { + // The Prometheus format does not have units. + return nil, nil +} + +// Comment returns the text of the current comment. +// Must only be called after Next returned a comment entry. +// The returned byte slice becomes invalid after the next call to Next. +func (p *PromParser) Comment() []byte { + return p.text +} + +// Metric writes the labels of the current sample into the passed labels. +// It returns the string from which the metric was parsed. +func (p *PromParser) Metric(l *labels.Labels) string { + // Copy the buffer to a string: this is only necessary for the return value. + s := string(p.series) + + p.builder.Reset() + metricName := unreplace(s[p.offsets[0]-p.start : p.offsets[1]-p.start]) + p.builder.Add(labels.MetricName, metricName) + + for i := 2; i < len(p.offsets); i += 4 { + a := p.offsets[i] - p.start + b := p.offsets[i+1] - p.start + label := unreplace(s[a:b]) + c := p.offsets[i+2] - p.start + d := p.offsets[i+3] - p.start + value := unreplace(s[c:d]) + p.builder.Add(label, value) + } + + p.builder.Sort() + *l = p.builder.Labels() + + return s +} + +// Exemplar implements the Parser interface. However, since the classic +// Prometheus text format does not support exemplars, this implementation simply +// returns false and does nothing else. +func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { + return false +} + +// CreatedTimestamp returns nil as it's not implemented yet. +// TODO(bwplotka): https://github.com/prometheus/prometheus/issues/12980 +func (p *PromParser) CreatedTimestamp() *int64 { + return nil +} + +// nextToken returns the next token from the promlexer. It skips over tabs +// and spaces. +func (p *PromParser) nextToken() token { + for { + if tok := p.l.Lex(); tok != tWhitespace { + return tok + } + } +} + +func (p *PromParser) parseError(exp string, got token) error { + e := p.l.i + 1 + if len(p.l.b) < e { + e = len(p.l.b) + } + return fmt.Errorf("%s, got %q (%q) while parsing: %q", exp, p.l.b[p.l.start:e], got, p.l.b[p.start:e]) +} + +// Next advances the parser to the next sample.
+// It returns (EntryInvalid, io.EOF) if no samples were read. +func (p *PromParser) Next() (Entry, error) { + var err error + + p.start = p.l.i + p.offsets = p.offsets[:0] + + switch t := p.nextToken(); t { + case tEOF: + return EntryInvalid, io.EOF + case tLinebreak: + // Allow full blank lines. + return p.Next() + + case tHelp, tType: + switch t2 := p.nextToken(); t2 { + case tMName: + mStart := p.l.start + mEnd := p.l.i + if p.l.b[mStart] == '"' && p.l.b[mEnd-1] == '"' { + mStart++ + mEnd-- + } + p.offsets = append(p.offsets, mStart, mEnd) + default: + return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) + } + switch t2 := p.nextToken(); t2 { + case tText: + if len(p.l.buf()) > 1 { + p.text = p.l.buf()[1:] + } else { + p.text = []byte{} + } + default: + return EntryInvalid, fmt.Errorf("expected text in %s, got %v", t.String(), t2.String()) + } + switch t { + case tType: + switch s := yoloString(p.text); s { + case "counter": + p.mtype = model.MetricTypeCounter + case "gauge": + p.mtype = model.MetricTypeGauge + case "histogram": + p.mtype = model.MetricTypeHistogram + case "summary": + p.mtype = model.MetricTypeSummary + case "untyped": + p.mtype = model.MetricTypeUnknown + default: + return EntryInvalid, fmt.Errorf("invalid metric type %q", s) + } + case tHelp: + if !utf8.Valid(p.text) { + return EntryInvalid, fmt.Errorf("help text %q is not a valid utf8 string", p.text) + } + } + if t := p.nextToken(); t != tLinebreak { + return EntryInvalid, p.parseError("linebreak expected after metadata", t) + } + switch t { + case tHelp: + return EntryHelp, nil + case tType: + return EntryType, nil + } + case tComment: + p.text = p.l.buf() + if t := p.nextToken(); t != tLinebreak { + return EntryInvalid, p.parseError("linebreak expected after comment", t) + } + return EntryComment, nil + case tBraceOpen: + // We found a brace, so make room for the eventual metric name. If these + // values aren't updated, then the metric name was not set inside the + // braces and we can return an error. + if len(p.offsets) == 0 { + p.offsets = []int{-1, -1} + } + if err := p.parseLVals(); err != nil { + return EntryInvalid, err + } + + p.series = p.l.b[p.start:p.l.i] + return p.parseMetricSuffix(p.nextToken()) + case tMName: + p.offsets = append(p.offsets, p.start, p.l.i) + p.series = p.l.b[p.start:p.l.i] + t2 := p.nextToken() + // If there's a brace, consume and parse the label values. + if t2 == tBraceOpen { + if err := p.parseLVals(); err != nil { + return EntryInvalid, err + } + p.series = p.l.b[p.start:p.l.i] + t2 = p.nextToken() + } + return p.parseMetricSuffix(t2) + + default: + err = p.parseError("expected a valid start token", t) + } + return EntryInvalid, err +} + +// parseLVals parses the contents inside the braces. +func (p *PromParser) parseLVals() error { + t := p.nextToken() + for { + curTStart := p.l.start + curTI := p.l.i + switch t { + case tBraceClose: + return nil + case tLName: + case tQString: + default: + return p.parseError("expected label name", t) + } + + t = p.nextToken() + // A quoted string followed by a comma or brace is a metric name. Set the + // offsets and continue processing. + if t == tComma || t == tBraceClose { + if p.offsets[0] != -1 || p.offsets[1] != -1 { + return fmt.Errorf("metric name already set while parsing: %q", p.l.b[p.start:p.l.i]) + } + p.offsets[0] = curTStart + 1 + p.offsets[1] = curTI - 1 + if t == tBraceClose { + return nil + } + t = p.nextToken() + continue + } + // We have a label name, and it might be quoted. 
+ if p.l.b[curTStart] == '"' { + curTStart++ + curTI-- + } + p.offsets = append(p.offsets, curTStart, curTI) + if t != tEqual { + return p.parseError("expected equal", t) + } + if t := p.nextToken(); t != tLValue { + return p.parseError("expected label value", t) + } + if !utf8.Valid(p.l.buf()) { + return fmt.Errorf("invalid UTF-8 label value: %q", p.l.buf()) + } + + // The promlexer ensures the value string is quoted. Strip first + // and last character. + p.offsets = append(p.offsets, p.l.start+1, p.l.i-1) + + // Free trailing commas are allowed. NOTE: this allows spaces between label + // names, unlike in OpenMetrics. It is not clear if this is intended or an + // accidental bug. + if t = p.nextToken(); t == tComma { + t = p.nextToken() + } + } +} + +// parseMetricSuffix parses the end of the line after the metric name and +// labels. It starts parsing with the provided token. +func (p *PromParser) parseMetricSuffix(t token) (Entry, error) { + if p.offsets[0] == -1 { + return EntryInvalid, fmt.Errorf("metric name not set while parsing: %q", p.l.b[p.start:p.l.i]) + } + if t != tValue { + return EntryInvalid, p.parseError("expected value after metric", t) + } + var err error + if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + // Ensure canonical NaN value. + if math.IsNaN(p.val) { + p.val = math.Float64frombits(value.NormalNaN) + } + p.hasTS = false + switch t := p.nextToken(); t { + case tLinebreak: + break + case tTimestamp: + p.hasTS = true + if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil { + return EntryInvalid, fmt.Errorf("%w while parsing: %q", err, p.l.b[p.start:p.l.i]) + } + if t2 := p.nextToken(); t2 != tLinebreak { + return EntryInvalid, p.parseError("expected next entry after timestamp", t2) + } + default: + return EntryInvalid, p.parseError("expected timestamp or new record", t) + } + + return EntrySeries, nil +} + +var lvalReplacer = strings.NewReplacer( + `\"`, "\"", + `\\`, "\\", + `\n`, "\n", +) + +var helpReplacer = strings.NewReplacer( + `\\`, "\\", + `\n`, "\n", +) + +func unreplace(s string) string { + // Replacer causes allocations. Replace only when necessary. + if strings.IndexByte(s, byte('\\')) >= 0 { + return lvalReplacer.Replace(s) + } + return s +} + +func yoloString(b []byte) string { + return *((*string)(unsafe.Pointer(&b))) +} + +func parseFloat(s string) (float64, error) { + // Keep to pre-Go 1.13 float formats. 
+ if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.nometa.txt b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.nometa.txt new file mode 100644 index 000000000000..235f0aa464b9 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.nometa.txt @@ -0,0 +1,411 @@ +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +go_goroutines 33 +go_memstats_alloc_bytes 1.7518624e+07 +go_memstats_alloc_bytes_total 8.3062296e+08 +go_memstats_buck_hash_sys_bytes 1.494637e+06 +go_memstats_frees_total 4.65658e+06 +go_memstats_gc_sys_bytes 1.107968e+06 +go_memstats_heap_alloc_bytes 1.7518624e+07 +go_memstats_heap_idle_bytes 6.668288e+06 +go_memstats_heap_inuse_bytes 1.8956288e+07 +go_memstats_heap_objects 72755 +go_memstats_heap_released_bytes_total 0 +go_memstats_heap_sys_bytes 2.5624576e+07 +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +go_memstats_lookups_total 2089 +go_memstats_mallocs_total 4.729335e+06 +go_memstats_mcache_inuse_bytes 9600 +go_memstats_mcache_sys_bytes 16384 +go_memstats_mspan_inuse_bytes 211520 +go_memstats_mspan_sys_bytes 245760 +go_memstats_next_gc_bytes 2.033527e+07 +go_memstats_other_sys_bytes 2.077323e+06 +go_memstats_stack_inuse_bytes 1.6384e+06 +go_memstats_stack_sys_bytes 1.6384e+06 +go_memstats_sys_bytes 3.2205048e+07 +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 
+http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 +http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 
0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN 
+http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 +http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +http_requests_total{code="200",handler="graph",method="get"} 3 
+http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 
+http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 +http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +prometheus_config_last_reload_successful 1 +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +prometheus_evaluator_iterations_skipped_total 0 +prometheus_notifications_dropped_total 0 +prometheus_notifications_queue_capacity 10000 +prometheus_notifications_queue_length 0 +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN 
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +prometheus_sd_azure_refresh_failures_total 0 +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_failures_total 0 +prometheus_sd_dns_lookup_failures_total 0 +prometheus_sd_dns_lookups_total 0 +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +prometheus_sd_ec2_refresh_failures_total 0 +prometheus_sd_file_read_errors_total 0 +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +prometheus_sd_gce_refresh_failures_total 0 +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +prometheus_sd_marathon_refresh_failures_total 0 
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +prometheus_target_skipped_scrapes_total 0 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +prometheus_treecache_watcher_goroutines 0 +prometheus_treecache_zookeeper_failures_total 0 +# EOF diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.txt b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.txt new file mode 100644 index 000000000000..174f383e911f --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/promtestdata.txt @@ -0,0 +1,529 @@ +# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 4.9351e-05 +go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 +go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 +go_gc_duration_seconds{quantile="0.75"} 0.000106744 +go_gc_duration_seconds{quantile="1"} 0.002072195 +go_gc_duration_seconds_sum 0.012139815 +go_gc_duration_seconds_count 99 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 33 +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +go_memstats_alloc_bytes 1.7518624e+07 +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +go_memstats_alloc_bytes_total 8.3062296e+08 +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +go_memstats_buck_hash_sys_bytes 1.494637e+06 +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +go_memstats_frees_total 4.65658e+06 +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +go_memstats_gc_sys_bytes 1.107968e+06 +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +go_memstats_heap_alloc_bytes 1.7518624e+07 +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. 
+# TYPE go_memstats_heap_idle_bytes gauge +go_memstats_heap_idle_bytes 6.668288e+06 +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +go_memstats_heap_inuse_bytes 1.8956288e+07 +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +go_memstats_heap_objects 72755 +# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes_total counter +go_memstats_heap_released_bytes_total 0 +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +go_memstats_heap_sys_bytes 2.5624576e+07 +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +go_memstats_last_gc_time_seconds 1.4843955586166437e+09 +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +go_memstats_lookups_total 2089 +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +go_memstats_mallocs_total 4.729335e+06 +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +go_memstats_mcache_inuse_bytes 9600 +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +go_memstats_mcache_sys_bytes 16384 +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +go_memstats_mspan_inuse_bytes 211520 +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +go_memstats_mspan_sys_bytes 245760 +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +go_memstats_next_gc_bytes 2.033527e+07 +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +go_memstats_other_sys_bytes 2.077323e+06 +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. +# TYPE go_memstats_stack_inuse_bytes gauge +go_memstats_stack_inuse_bytes 1.6384e+06 +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +go_memstats_stack_sys_bytes 1.6384e+06 +# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. +# TYPE go_memstats_sys_bytes gauge +go_memstats_sys_bytes 3.2205048e+07 +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN +http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="alerts"} 0 +http_request_duration_microseconds_count{handler="alerts"} 0 +http_request_duration_microseconds{handler="config",quantile="0.5"} NaN +http_request_duration_microseconds{handler="config",quantile="0.9"} NaN +http_request_duration_microseconds{handler="config",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="config"} 0 +http_request_duration_microseconds_count{handler="config"} 0 +http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN +http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="consoles"} 0 +http_request_duration_microseconds_count{handler="consoles"} 0 +http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="drop_series"} 0 +http_request_duration_microseconds_count{handler="drop_series"} 0 +http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN +http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="federate"} 0 +http_request_duration_microseconds_count{handler="federate"} 0 +http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN +http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="flags"} 0 +http_request_duration_microseconds_count{handler="flags"} 0 +http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 +http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 +http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 +http_request_duration_microseconds_sum{handler="graph"} 5803.93 +http_request_duration_microseconds_count{handler="graph"} 3 +http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN +http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="heap"} 0 +http_request_duration_microseconds_count{handler="heap"} 0 +http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 +http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 +http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 +http_request_duration_microseconds_sum{handler="label_values"} 3995.574 +http_request_duration_microseconds_count{handler="label_values"} 3 +http_request_duration_microseconds{handler="options",quantile="0.5"} NaN +http_request_duration_microseconds{handler="options",quantile="0.9"} NaN +http_request_duration_microseconds{handler="options",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="options"} 0 
+http_request_duration_microseconds_count{handler="options"} 0 +http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 +http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 +http_request_duration_microseconds_count{handler="prometheus"} 462 +http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 +http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 +http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 +http_request_duration_microseconds_sum{handler="query"} 26074.11 +http_request_duration_microseconds_count{handler="query"} 6 +http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN +http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="query_range"} 0 +http_request_duration_microseconds_count{handler="query_range"} 0 +http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN +http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="rules"} 0 +http_request_duration_microseconds_count{handler="rules"} 0 +http_request_duration_microseconds{handler="series",quantile="0.5"} NaN +http_request_duration_microseconds{handler="series",quantile="0.9"} NaN +http_request_duration_microseconds{handler="series",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="series"} 0 +http_request_duration_microseconds_count{handler="series"} 0 +http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 +http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 +http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 +http_request_duration_microseconds_sum{handler="static"} 6458.621 +http_request_duration_microseconds_count{handler="static"} 3 +http_request_duration_microseconds{handler="status",quantile="0.5"} NaN +http_request_duration_microseconds{handler="status",quantile="0.9"} NaN +http_request_duration_microseconds{handler="status",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="status"} 0 +http_request_duration_microseconds_count{handler="status"} 0 +http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN +http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="targets"} 0 +http_request_duration_microseconds_count{handler="targets"} 0 +http_request_duration_microseconds{handler="version",quantile="0.5"} NaN +http_request_duration_microseconds{handler="version",quantile="0.9"} NaN +http_request_duration_microseconds{handler="version",quantile="0.99"} NaN +http_request_duration_microseconds_sum{handler="version"} 0 +http_request_duration_microseconds_count{handler="version"} 0 +# HELP http_request_size_bytes The HTTP request sizes in bytes. 
+# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="alerts",quantile="0.5"} NaN +http_request_size_bytes{handler="alerts",quantile="0.9"} NaN +http_request_size_bytes{handler="alerts",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="alerts"} 0 +http_request_size_bytes_count{handler="alerts"} 0 +http_request_size_bytes{handler="config",quantile="0.5"} NaN +http_request_size_bytes{handler="config",quantile="0.9"} NaN +http_request_size_bytes{handler="config",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="config"} 0 +http_request_size_bytes_count{handler="config"} 0 +http_request_size_bytes{handler="consoles",quantile="0.5"} NaN +http_request_size_bytes{handler="consoles",quantile="0.9"} NaN +http_request_size_bytes{handler="consoles",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="consoles"} 0 +http_request_size_bytes_count{handler="consoles"} 0 +http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="drop_series"} 0 +http_request_size_bytes_count{handler="drop_series"} 0 +http_request_size_bytes{handler="federate",quantile="0.5"} NaN +http_request_size_bytes{handler="federate",quantile="0.9"} NaN +http_request_size_bytes{handler="federate",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="federate"} 0 +http_request_size_bytes_count{handler="federate"} 0 +http_request_size_bytes{handler="flags",quantile="0.5"} NaN +http_request_size_bytes{handler="flags",quantile="0.9"} NaN +http_request_size_bytes{handler="flags",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="flags"} 0 +http_request_size_bytes_count{handler="flags"} 0 +http_request_size_bytes{handler="graph",quantile="0.5"} 367 +http_request_size_bytes{handler="graph",quantile="0.9"} 389 +http_request_size_bytes{handler="graph",quantile="0.99"} 389 +http_request_size_bytes_sum{handler="graph"} 1145 +http_request_size_bytes_count{handler="graph"} 3 +http_request_size_bytes{handler="heap",quantile="0.5"} NaN +http_request_size_bytes{handler="heap",quantile="0.9"} NaN +http_request_size_bytes{handler="heap",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="heap"} 0 +http_request_size_bytes_count{handler="heap"} 0 +http_request_size_bytes{handler="label_values",quantile="0.5"} 416 +http_request_size_bytes{handler="label_values",quantile="0.9"} 416 +http_request_size_bytes{handler="label_values",quantile="0.99"} 416 +http_request_size_bytes_sum{handler="label_values"} 1248 +http_request_size_bytes_count{handler="label_values"} 3 +http_request_size_bytes{handler="options",quantile="0.5"} NaN +http_request_size_bytes{handler="options",quantile="0.9"} NaN +http_request_size_bytes{handler="options",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="options"} 0 +http_request_size_bytes_count{handler="options"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 +http_request_size_bytes_sum{handler="prometheus"} 109956 +http_request_size_bytes_count{handler="prometheus"} 462 +http_request_size_bytes{handler="query",quantile="0.5"} 531 +http_request_size_bytes{handler="query",quantile="0.9"} 531 +http_request_size_bytes{handler="query",quantile="0.99"} 531 +http_request_size_bytes_sum{handler="query"} 3186 
+http_request_size_bytes_count{handler="query"} 6 +http_request_size_bytes{handler="query_range",quantile="0.5"} NaN +http_request_size_bytes{handler="query_range",quantile="0.9"} NaN +http_request_size_bytes{handler="query_range",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="query_range"} 0 +http_request_size_bytes_count{handler="query_range"} 0 +http_request_size_bytes{handler="rules",quantile="0.5"} NaN +http_request_size_bytes{handler="rules",quantile="0.9"} NaN +http_request_size_bytes{handler="rules",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="rules"} 0 +http_request_size_bytes_count{handler="rules"} 0 +http_request_size_bytes{handler="series",quantile="0.5"} NaN +http_request_size_bytes{handler="series",quantile="0.9"} NaN +http_request_size_bytes{handler="series",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="series"} 0 +http_request_size_bytes_count{handler="series"} 0 +http_request_size_bytes{handler="static",quantile="0.5"} 379 +http_request_size_bytes{handler="static",quantile="0.9"} 379 +http_request_size_bytes{handler="static",quantile="0.99"} 379 +http_request_size_bytes_sum{handler="static"} 1137 +http_request_size_bytes_count{handler="static"} 3 +http_request_size_bytes{handler="status",quantile="0.5"} NaN +http_request_size_bytes{handler="status",quantile="0.9"} NaN +http_request_size_bytes{handler="status",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="status"} 0 +http_request_size_bytes_count{handler="status"} 0 +http_request_size_bytes{handler="targets",quantile="0.5"} NaN +http_request_size_bytes{handler="targets",quantile="0.9"} NaN +http_request_size_bytes{handler="targets",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="targets"} 0 +http_request_size_bytes_count{handler="targets"} 0 +http_request_size_bytes{handler="version",quantile="0.5"} NaN +http_request_size_bytes{handler="version",quantile="0.9"} NaN +http_request_size_bytes{handler="version",quantile="0.99"} NaN +http_request_size_bytes_sum{handler="version"} 0 +http_request_size_bytes_count{handler="version"} 0 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="graph",method="get"} 3 +http_requests_total{code="200",handler="label_values",method="get"} 3 +http_requests_total{code="200",handler="prometheus",method="get"} 462 +http_requests_total{code="200",handler="query",method="get"} 6 +http_requests_total{code="200",handler="static",method="get"} 3 +# HELP http_response_size_bytes The HTTP response sizes in bytes. 
+# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="alerts",quantile="0.5"} NaN +http_response_size_bytes{handler="alerts",quantile="0.9"} NaN +http_response_size_bytes{handler="alerts",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="alerts"} 0 +http_response_size_bytes_count{handler="alerts"} 0 +http_response_size_bytes{handler="config",quantile="0.5"} NaN +http_response_size_bytes{handler="config",quantile="0.9"} NaN +http_response_size_bytes{handler="config",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="config"} 0 +http_response_size_bytes_count{handler="config"} 0 +http_response_size_bytes{handler="consoles",quantile="0.5"} NaN +http_response_size_bytes{handler="consoles",quantile="0.9"} NaN +http_response_size_bytes{handler="consoles",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="consoles"} 0 +http_response_size_bytes_count{handler="consoles"} 0 +http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN +http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="drop_series"} 0 +http_response_size_bytes_count{handler="drop_series"} 0 +http_response_size_bytes{handler="federate",quantile="0.5"} NaN +http_response_size_bytes{handler="federate",quantile="0.9"} NaN +http_response_size_bytes{handler="federate",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="federate"} 0 +http_response_size_bytes_count{handler="federate"} 0 +http_response_size_bytes{handler="flags",quantile="0.5"} NaN +http_response_size_bytes{handler="flags",quantile="0.9"} NaN +http_response_size_bytes{handler="flags",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="flags"} 0 +http_response_size_bytes_count{handler="flags"} 0 +http_response_size_bytes{handler="graph",quantile="0.5"} 3619 +http_response_size_bytes{handler="graph",quantile="0.9"} 3619 +http_response_size_bytes{handler="graph",quantile="0.99"} 3619 +http_response_size_bytes_sum{handler="graph"} 10857 +http_response_size_bytes_count{handler="graph"} 3 +http_response_size_bytes{handler="heap",quantile="0.5"} NaN +http_response_size_bytes{handler="heap",quantile="0.9"} NaN +http_response_size_bytes{handler="heap",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="heap"} 0 +http_response_size_bytes_count{handler="heap"} 0 +http_response_size_bytes{handler="label_values",quantile="0.5"} 642 +http_response_size_bytes{handler="label_values",quantile="0.9"} 642 +http_response_size_bytes{handler="label_values",quantile="0.99"} 642 +http_response_size_bytes_sum{handler="label_values"} 1926 +http_response_size_bytes_count{handler="label_values"} 3 +http_response_size_bytes{handler="options",quantile="0.5"} NaN +http_response_size_bytes{handler="options",quantile="0.9"} NaN +http_response_size_bytes{handler="options",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="options"} 0 +http_response_size_bytes_count{handler="options"} 0 +http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 +http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 +http_response_size_bytes_count{handler="prometheus"} 462 +http_response_size_bytes{handler="query",quantile="0.5"} 776 +http_response_size_bytes{handler="query",quantile="0.9"} 781 +http_response_size_bytes{handler="query",quantile="0.99"} 781 
+http_response_size_bytes_sum{handler="query"} 4656 +http_response_size_bytes_count{handler="query"} 6 +http_response_size_bytes{handler="query_range",quantile="0.5"} NaN +http_response_size_bytes{handler="query_range",quantile="0.9"} NaN +http_response_size_bytes{handler="query_range",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="query_range"} 0 +http_response_size_bytes_count{handler="query_range"} 0 +http_response_size_bytes{handler="rules",quantile="0.5"} NaN +http_response_size_bytes{handler="rules",quantile="0.9"} NaN +http_response_size_bytes{handler="rules",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="rules"} 0 +http_response_size_bytes_count{handler="rules"} 0 +http_response_size_bytes{handler="series",quantile="0.5"} NaN +http_response_size_bytes{handler="series",quantile="0.9"} NaN +http_response_size_bytes{handler="series",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="series"} 0 +http_response_size_bytes_count{handler="series"} 0 +http_response_size_bytes{handler="static",quantile="0.5"} 6316 +http_response_size_bytes{handler="static",quantile="0.9"} 6316 +http_response_size_bytes{handler="static",quantile="0.99"} 6316 +http_response_size_bytes_sum{handler="static"} 18948 +http_response_size_bytes_count{handler="static"} 3 +http_response_size_bytes{handler="status",quantile="0.5"} NaN +http_response_size_bytes{handler="status",quantile="0.9"} NaN +http_response_size_bytes{handler="status",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="status"} 0 +http_response_size_bytes_count{handler="status"} 0 +http_response_size_bytes{handler="targets",quantile="0.5"} NaN +http_response_size_bytes{handler="targets",quantile="0.9"} NaN +http_response_size_bytes{handler="targets",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="targets"} 0 +http_response_size_bytes_count{handler="targets"} 0 +http_response_size_bytes{handler="version",quantile="0.5"} NaN +http_response_size_bytes{handler="version",quantile="0.9"} NaN +http_response_size_bytes{handler="version",quantile="0.99"} NaN +http_response_size_bytes_sum{handler="version"} 0 +http_response_size_bytes_count{handler="version"} 0 +# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. +# TYPE prometheus_build_info gauge +prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 +# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. +# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge +prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 +# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. +# TYPE prometheus_config_last_reload_successful gauge +prometheus_config_last_reload_successful 1 +# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
+# TYPE prometheus_evaluator_duration_seconds summary +prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 +prometheus_evaluator_duration_seconds_count 1 +# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. +# TYPE prometheus_evaluator_iterations_skipped_total counter +prometheus_evaluator_iterations_skipped_total 0 +# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. +# TYPE prometheus_notifications_dropped_total counter +prometheus_notifications_dropped_total 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 10000 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 +prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 +# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. +# TYPE prometheus_sd_azure_refresh_duration_seconds summary +prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_azure_refresh_duration_seconds_sum 0 +prometheus_sd_azure_refresh_duration_seconds_count 0 +# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. +# TYPE prometheus_sd_azure_refresh_failures_total counter +prometheus_sd_azure_refresh_failures_total 0 +# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. +# TYPE prometheus_sd_consul_rpc_duration_seconds summary +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN +prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN +prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 +prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 +# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
+# TYPE prometheus_sd_consul_rpc_failures_total counter +prometheus_sd_consul_rpc_failures_total 0 +# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_sd_dns_lookup_failures_total counter +prometheus_sd_dns_lookup_failures_total 0 +# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_sd_dns_lookups_total counter +prometheus_sd_dns_lookups_total 0 +# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. +# TYPE prometheus_sd_ec2_refresh_duration_seconds summary +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_ec2_refresh_duration_seconds_sum 0 +prometheus_sd_ec2_refresh_duration_seconds_count 0 +# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. +# TYPE prometheus_sd_ec2_refresh_failures_total counter +prometheus_sd_ec2_refresh_failures_total 0 +# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. +# TYPE prometheus_sd_file_read_errors_total counter +prometheus_sd_file_read_errors_total 0 +# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. +# TYPE prometheus_sd_file_scan_duration_seconds summary +prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN +prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN +prometheus_sd_file_scan_duration_seconds_sum 0 +prometheus_sd_file_scan_duration_seconds_count 0 +# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. +# TYPE prometheus_sd_gce_refresh_duration summary +prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN +prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN +prometheus_sd_gce_refresh_duration_sum 0 +prometheus_sd_gce_refresh_duration_count 0 +# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. +# TYPE prometheus_sd_gce_refresh_failures_total counter +prometheus_sd_gce_refresh_failures_total 0 +# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. +# TYPE prometheus_sd_kubernetes_events_total counter +prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 +prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 +# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN +prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN +prometheus_sd_marathon_refresh_duration_seconds_sum 0 +prometheus_sd_marathon_refresh_duration_seconds_count 0 +# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. +# TYPE prometheus_sd_marathon_refresh_failures_total counter +prometheus_sd_marathon_refresh_failures_total 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. +# TYPE prometheus_target_interval_length_seconds summary +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 +prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 +prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 +prometheus_target_interval_length_seconds_count{interval="50ms"} 685 +# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. +# TYPE prometheus_target_scrape_pool_sync_total counter +prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 +# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. +# TYPE prometheus_target_skipped_scrapes_total counter +prometheus_target_skipped_scrapes_total 0 +# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. +# TYPE prometheus_target_sync_length_seconds summary +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 +prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 +prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 +# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. +# TYPE prometheus_treecache_watcher_goroutines gauge +prometheus_treecache_watcher_goroutines 0 +# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. 
+# TYPE prometheus_treecache_zookeeper_failures_total counter
+prometheus_treecache_zookeeper_failures_total 0
+# EOF
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/protobufparse.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/protobufparse.go
new file mode 100644
index 000000000000..cf5b82231951
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/textparse/protobufparse.go
@@ -0,0 +1,644 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/v2/model/exemplar"
+	"github.com/prometheus/prometheus/v2/model/histogram"
+	"github.com/prometheus/prometheus/v2/model/labels"
+
+	dto "github.com/prometheus/prometheus/v2/prompb/io/prometheus/client"
+)
+
+// ProtobufParser is a very inefficient way of unmarshaling the old Prometheus
+// protobuf format and then presenting it as if it were parsed by a
+// Prometheus-2-style text parser. This is only done so that we can easily plug
+// in the protobuf format into Prometheus 2. For future use (with the final
+// format that will be used for native histograms), we have to revisit the
+// parsing. A lot of the efficiency tricks of the Prometheus-2-style parsing
+// could be used in a similar fashion (byte-slice pointers into the raw
+// payload), which requires some hand-coded protobuf handling. But the current
+// parsers all expect the full series name (metric name plus label pairs) as one
+// string, which is not how things are represented in the protobuf format. If
+// the re-arrangement work is actually causing problems (which has to be seen),
+// that expectation needs to be changed.
+type ProtobufParser struct {
+	in        []byte // The input to parse.
+	inPos     int    // Position within the input.
+	metricPos int    // Position within Metric slice.
+	// fieldPos is the position within a Summary or (legacy) Histogram. -2
+	// is the count. -1 is the sum. Otherwise it is the index within
+	// quantiles/buckets.
+	fieldPos    int
+	fieldsDone  bool // true if no more fields of a Summary or (legacy) Histogram to be processed.
+	redoClassic bool // true after parsing a native histogram if we need to parse it again as a classic histogram.
+	// exemplarPos is the position within the exemplars slice of a native histogram.
+	exemplarPos int
+
+	// exemplarReturned is set to true each time an exemplar has been
+	// returned, and set back to false upon each Next() call.
+	exemplarReturned bool
+
+	// state is marked by the entry we are processing. EntryInvalid implies
+	// that we have to decode the next MetricFamily.
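+	//
+	// As a rough sketch (editor's note, derived from the Next method below
+	// rather than from the upstream comment), the progression for each
+	// MetricFamily is:
+	//
+	//	EntryInvalid -> EntryHelp -> EntryType
+	//	             -> EntrySeries/EntryHistogram (repeated, one per sample)
+	//	             -> EntryInvalid (next MetricFamily, or io.EOF)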
+	state Entry
+
+	builder labels.ScratchBuilder // held here to reduce allocations when building Labels
+
+	mf *dto.MetricFamily
+
+	// Whether to also parse a classic histogram that is also present as a
+	// native histogram.
+	parseClassicHistograms bool
+
+	// The following are just shenanigans to satisfy the Parser interface.
+	metricBytes *bytes.Buffer // A somewhat fluid representation of the current metric.
+}
+
+// NewProtobufParser returns a parser for the payload in the byte slice.
+func NewProtobufParser(b []byte, parseClassicHistograms bool, st *labels.SymbolTable) Parser {
+	return &ProtobufParser{
+		in:                     b,
+		state:                  EntryInvalid,
+		mf:                     &dto.MetricFamily{},
+		metricBytes:            &bytes.Buffer{},
+		parseClassicHistograms: parseClassicHistograms,
+		builder:                labels.NewScratchBuilderWithSymbolTable(st, 16),
+	}
+}
+
+// Series returns the bytes of a series with a simple float64 as a
+// value, the timestamp if set, and the value of the current sample.
+func (p *ProtobufParser) Series() ([]byte, *int64, float64) {
+	var (
+		m  = p.mf.GetMetric()[p.metricPos]
+		ts = m.GetTimestampMs()
+		v  float64
+	)
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		v = m.GetCounter().GetValue()
+	case dto.MetricType_GAUGE:
+		v = m.GetGauge().GetValue()
+	case dto.MetricType_UNTYPED:
+		v = m.GetUntyped().GetValue()
+	case dto.MetricType_SUMMARY:
+		s := m.GetSummary()
+		switch p.fieldPos {
+		case -2:
+			v = float64(s.GetSampleCount())
+		case -1:
+			v = s.GetSampleSum()
+			// Need to detect summaries without quantile here.
+			if len(s.GetQuantile()) == 0 {
+				p.fieldsDone = true
+			}
+		default:
+			v = s.GetQuantile()[p.fieldPos].GetValue()
+		}
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		// This should only happen for a classic histogram.
+		h := m.GetHistogram()
+		switch p.fieldPos {
+		case -2:
+			v = h.GetSampleCountFloat()
+			if v == 0 {
+				v = float64(h.GetSampleCount())
+			}
+		case -1:
+			v = h.GetSampleSum()
+		default:
+			bb := h.GetBucket()
+			if p.fieldPos >= len(bb) {
+				v = h.GetSampleCountFloat()
+				if v == 0 {
+					v = float64(h.GetSampleCount())
+				}
+			} else {
+				v = bb[p.fieldPos].GetCumulativeCountFloat()
+				if v == 0 {
+					v = float64(bb[p.fieldPos].GetCumulativeCount())
+				}
+			}
+		}
+	default:
+		panic("encountered unexpected metric type, this is a bug")
+	}
+	if ts != 0 {
+		return p.metricBytes.Bytes(), &ts, v
+	}
+	// TODO(beorn7): We assume here that ts==0 means no timestamp. That's
+	// not true in general, but proto3 originally has no distinction between
+	// unset and default. At a later stage, the `optional` keyword was
+	// (re-)introduced in proto3, but gogo-protobuf never got updated to
+	// support it. (Note that setting `[(gogoproto.nullable) = true]` for
+	// the `timestamp_ms` field doesn't help, either.) We plan to migrate
+	// away from gogo-protobuf to an actively maintained protobuf
+	// implementation. Once that's done, we can simply use the `optional`
+	// keyword and check for the unset state explicitly.
+	return p.metricBytes.Bytes(), nil, v
+}
+
+// Histogram returns the bytes of a series with a native histogram as a value,
+// the timestamp if set, and the native histogram in the current sample.
+//
+// The Compact method is called before returning the Histogram (or FloatHistogram).
+//
+// If the SampleCountFloat or the ZeroCountFloat in the proto message is > 0,
+// the histogram is parsed and returned as a FloatHistogram and nil is returned
+// as the (integer) Histogram return value.
Otherwise, it is parsed and returned +// as an (integer) Histogram and nil is returned as the FloatHistogram return +// value. +func (p *ProtobufParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + var ( + m = p.mf.GetMetric()[p.metricPos] + ts = m.GetTimestampMs() + h = m.GetHistogram() + ) + if p.parseClassicHistograms && len(h.GetBucket()) > 0 { + p.redoClassic = true + } + if h.GetSampleCountFloat() > 0 || h.GetZeroCountFloat() > 0 { + // It is a float histogram. + fh := histogram.FloatHistogram{ + Count: h.GetSampleCountFloat(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCountFloat(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveCount(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeCount(), + } + for i, span := range h.GetPositiveSpan() { + fh.PositiveSpans[i].Offset = span.GetOffset() + fh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + fh.NegativeSpans[i].Offset = span.GetOffset() + fh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + fh.CounterResetHint = histogram.GaugeType + } + fh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, nil, &fh + } + // Nasty hack: Assume that ts==0 means no timestamp. That's not true in + // general, but proto3 has no distinction between unset and + // default. Need to avoid in the final format. + return p.metricBytes.Bytes(), nil, nil, &fh + } + + sh := histogram.Histogram{ + Count: h.GetSampleCount(), + Sum: h.GetSampleSum(), + ZeroThreshold: h.GetZeroThreshold(), + ZeroCount: h.GetZeroCount(), + Schema: h.GetSchema(), + PositiveSpans: make([]histogram.Span, len(h.GetPositiveSpan())), + PositiveBuckets: h.GetPositiveDelta(), + NegativeSpans: make([]histogram.Span, len(h.GetNegativeSpan())), + NegativeBuckets: h.GetNegativeDelta(), + } + for i, span := range h.GetPositiveSpan() { + sh.PositiveSpans[i].Offset = span.GetOffset() + sh.PositiveSpans[i].Length = span.GetLength() + } + for i, span := range h.GetNegativeSpan() { + sh.NegativeSpans[i].Offset = span.GetOffset() + sh.NegativeSpans[i].Length = span.GetLength() + } + if p.mf.GetType() == dto.MetricType_GAUGE_HISTOGRAM { + sh.CounterResetHint = histogram.GaugeType + } + sh.Compact(0) + if ts != 0 { + return p.metricBytes.Bytes(), &ts, &sh, nil + } + return p.metricBytes.Bytes(), nil, &sh, nil +} + +// Help returns the metric name and help text in the current entry. +// Must only be called after Next returned a help entry. +// The returned byte slices become invalid after the next call to Next. +func (p *ProtobufParser) Help() ([]byte, []byte) { + return p.metricBytes.Bytes(), []byte(p.mf.GetHelp()) +} + +// Type returns the metric name and type in the current entry. +// Must only be called after Next returned a type entry. +// The returned byte slices become invalid after the next call to Next. 
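+//
+// For example (editor's note, illustrative): a MetricFamily of protobuf type
+// GAUGE_HISTOGRAM is reported as model.MetricTypeGaugeHistogram, and any
+// unrecognized protobuf type falls through to model.MetricTypeUnknown.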
+func (p *ProtobufParser) Type() ([]byte, model.MetricType) {
+	n := p.metricBytes.Bytes()
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		return n, model.MetricTypeCounter
+	case dto.MetricType_GAUGE:
+		return n, model.MetricTypeGauge
+	case dto.MetricType_HISTOGRAM:
+		return n, model.MetricTypeHistogram
+	case dto.MetricType_GAUGE_HISTOGRAM:
+		return n, model.MetricTypeGaugeHistogram
+	case dto.MetricType_SUMMARY:
+		return n, model.MetricTypeSummary
+	}
+	return n, model.MetricTypeUnknown
+}
+
+// Unit returns the metric unit in the current entry.
+// Must only be called after Next returned a unit entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *ProtobufParser) Unit() ([]byte, []byte) {
+	return p.metricBytes.Bytes(), []byte(p.mf.GetUnit())
+}
+
+// Comment always returns nil because comments aren't supported by the protobuf
+// format.
+func (p *ProtobufParser) Comment() []byte {
+	return nil
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *ProtobufParser) Metric(l *labels.Labels) string {
+	p.builder.Reset()
+	p.builder.Add(labels.MetricName, p.getMagicName())
+
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		p.builder.Add(lp.GetName(), lp.GetValue())
+	}
+	if needed, name, value := p.getMagicLabel(); needed {
+		p.builder.Add(name, value)
+	}
+
+	// Sort labels to maintain the sorted labels invariant.
+	p.builder.Sort()
+	*l = p.builder.Labels()
+
+	return p.metricBytes.String()
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists. In case of a native
+// histogram, the exemplars in the native histogram will be returned.
+// If this field is empty, the classic bucket section is still used for exemplars.
+// To ingest all exemplars, call the Exemplar method repeatedly until it returns false.
+func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool {
+	if p.exemplarReturned && p.state == EntrySeries {
+		// We only ever return one exemplar per (non-native-histogram) series.
+		return false
+	}
+	m := p.mf.GetMetric()[p.metricPos]
+	var exProto *dto.Exemplar
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		exProto = m.GetCounter().GetExemplar()
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		isClassic := p.state == EntrySeries
+		if !isClassic && len(m.GetHistogram().GetExemplars()) > 0 {
+			exs := m.GetHistogram().GetExemplars()
+			for p.exemplarPos < len(exs) {
+				exProto = exs[p.exemplarPos]
+				p.exemplarPos++
+				if exProto != nil && exProto.GetTimestamp() != nil {
+					break
+				}
+			}
+			if exProto != nil && exProto.GetTimestamp() == nil {
+				return false
+			}
+		} else {
+			bb := m.GetHistogram().GetBucket()
+			if p.fieldPos < 0 {
+				if isClassic {
+					return false // At _count or _sum.
+				}
+				p.fieldPos = 0 // Start at 1st bucket for native histograms.
+			}
+			for p.fieldPos < len(bb) {
+				exProto = bb[p.fieldPos].GetExemplar()
+				if isClassic {
+					break
+				}
+				p.fieldPos++
+				// We deliberately drop exemplars with no timestamp only for native histograms.
+				if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) {
+					break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp.
+				}
+			}
+			// If the last exemplar for native histograms has no timestamp, ignore it.
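+			// (Editor's note) The gogo-generated GetTimestamp getter is
+			// nil-safe, so the check below also covers the case where no
+			// bucket carried an exemplar at all and exProto is still nil.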
+			if !isClassic && exProto.GetTimestamp() == nil {
+				return false
+			}
+		}
+	default:
+		return false
+	}
+	if exProto == nil {
+		return false
+	}
+	ex.Value = exProto.GetValue()
+	if ts := exProto.GetTimestamp(); ts != nil {
+		ex.HasTs = true
+		ex.Ts = ts.GetSeconds()*1000 + int64(ts.GetNanos()/1_000_000)
+	}
+	p.builder.Reset()
+	for _, lp := range exProto.GetLabel() {
+		p.builder.Add(lp.GetName(), lp.GetValue())
+	}
+	p.builder.Sort()
+	ex.Labels = p.builder.Labels()
+	p.exemplarReturned = true
+	return true
+}
+
+// CreatedTimestamp returns the created timestamp (CT) or nil if the CT is not
+// present or invalid (e.g. a negative timestamp) on counters, summaries or
+// histograms.
+func (p *ProtobufParser) CreatedTimestamp() *int64 {
+	var ct *types.Timestamp
+	switch p.mf.GetType() {
+	case dto.MetricType_COUNTER:
+		ct = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp()
+	case dto.MetricType_SUMMARY:
+		ct = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp()
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		ct = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp()
+	default:
+	}
+	ctAsTime, err := types.TimestampFromProto(ct)
+	if err != nil {
+		// An error means ct == nil or an invalid timestamp, which we silently ignore.
+		return nil
+	}
+	ctMilis := ctAsTime.UnixMilli()
+	return &ctMilis
+}
+
+// Next advances the parser to the next "sample" (emulating the behavior of a
+// text format parser). It returns (EntryInvalid, io.EOF) if no samples were
+// read.
+func (p *ProtobufParser) Next() (Entry, error) {
+	p.exemplarReturned = false
+	switch p.state {
+	case EntryInvalid:
+		p.metricPos = 0
+		p.fieldPos = -2
+		n, err := readDelimited(p.in[p.inPos:], p.mf)
+		p.inPos += n
+		if err != nil {
+			return p.state, err
+		}
+
+		// Skip empty metric families.
+		if len(p.mf.GetMetric()) == 0 {
+			return p.Next()
+		}
+
+		// We are at the beginning of a metric family. Put only the name
+		// into metricBytes and validate only name, help, and type for now.
+		name := p.mf.GetName()
+		if !model.IsValidMetricName(model.LabelValue(name)) {
+			return EntryInvalid, fmt.Errorf("invalid metric name: %s", name)
+		}
+		if help := p.mf.GetHelp(); !utf8.ValidString(help) {
+			return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help)
+		}
+		switch p.mf.GetType() {
+		case dto.MetricType_COUNTER,
+			dto.MetricType_GAUGE,
+			dto.MetricType_HISTOGRAM,
+			dto.MetricType_GAUGE_HISTOGRAM,
+			dto.MetricType_SUMMARY,
+			dto.MetricType_UNTYPED:
+			// All good.
+		default:
+			return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType())
+		}
+		unit := p.mf.GetUnit()
+		if len(unit) > 0 {
+			if p.mf.GetType() == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+				if !strings.HasSuffix(name[:len(name)-6], unit) || len(name)-6 < len(unit)+1 || name[len(name)-6-len(unit)-1] != '_' {
+					return EntryInvalid, fmt.Errorf("unit %q not a suffix of counter %q", unit, name)
+				}
+			} else if !strings.HasSuffix(name, unit) || len(name) < len(unit)+1 || name[len(name)-len(unit)-1] != '_' {
+				return EntryInvalid, fmt.Errorf("unit %q not a suffix of metric %q", unit, name)
+			}
+		}
+		p.metricBytes.Reset()
+		p.metricBytes.WriteString(name)
+
+		p.state = EntryHelp
+	case EntryHelp:
+		p.state = EntryType
+	case EntryType:
+		t := p.mf.GetType()
+		if (t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+			isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+			p.state = EntryHistogram
+		} else {
+			p.state = EntrySeries
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	case EntryHistogram, EntrySeries:
+		if p.redoClassic {
+			p.redoClassic = false
+			p.state = EntrySeries
+			p.fieldPos = -3
+			p.fieldsDone = false
+		}
+		t := p.mf.GetType()
+		if p.state == EntrySeries && !p.fieldsDone &&
+			(t == dto.MetricType_SUMMARY ||
+				t == dto.MetricType_HISTOGRAM ||
+				t == dto.MetricType_GAUGE_HISTOGRAM) {
+			p.fieldPos++
+		} else {
+			p.metricPos++
+			p.fieldPos = -2
+			p.fieldsDone = false
+			// If this is a metric family containing native
+			// histograms, we have to switch back to native
+			// histograms after parsing a classic histogram.
+			if p.state == EntrySeries &&
+				(t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM) &&
+				isNativeHistogram(p.mf.GetMetric()[0].GetHistogram()) {
+				p.state = EntryHistogram
+			}
+		}
+		if p.metricPos >= len(p.mf.GetMetric()) {
+			p.state = EntryInvalid
+			return p.Next()
+		}
+		if err := p.updateMetricBytes(); err != nil {
+			return EntryInvalid, err
+		}
+	default:
+		return EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state)
+	}
+	return p.state, nil
+}
+
+func (p *ProtobufParser) updateMetricBytes() error {
+	b := p.metricBytes
+	b.Reset()
+	b.WriteString(p.getMagicName())
+	for _, lp := range p.mf.GetMetric()[p.metricPos].GetLabel() {
+		b.WriteByte(model.SeparatorByte)
+		n := lp.GetName()
+		if !model.LabelName(n).IsValid() {
+			return fmt.Errorf("invalid label name: %s", n)
+		}
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		v := lp.GetValue()
+		if !utf8.ValidString(v) {
+			return fmt.Errorf("invalid label value: %s", v)
+		}
+		b.WriteString(v)
+	}
+	if needed, n, v := p.getMagicLabel(); needed {
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(n)
+		b.WriteByte(model.SeparatorByte)
+		b.WriteString(v)
+	}
+	return nil
+}
+
+// getMagicName usually just returns p.mf.GetName() but adds a magic suffix
+// ("_count", "_sum", "_bucket") if needed according to the current parser
+// state.
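+//
+// For example (editor's note, the metric name is illustrative): for a summary
+// named "rpc_duration_seconds", fieldPos == -2 yields
+// "rpc_duration_seconds_count", fieldPos == -1 yields
+// "rpc_duration_seconds_sum", and any quantile position returns the name
+// unchanged.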
+func (p *ProtobufParser) getMagicName() string {
+	t := p.mf.GetType()
+	if p.state == EntryHistogram || (t != dto.MetricType_HISTOGRAM && t != dto.MetricType_GAUGE_HISTOGRAM && t != dto.MetricType_SUMMARY) {
+		return p.mf.GetName()
+	}
+	if p.fieldPos == -2 {
+		return p.mf.GetName() + "_count"
+	}
+	if p.fieldPos == -1 {
+		return p.mf.GetName() + "_sum"
+	}
+	if t == dto.MetricType_HISTOGRAM || t == dto.MetricType_GAUGE_HISTOGRAM {
+		return p.mf.GetName() + "_bucket"
+	}
+	return p.mf.GetName()
+}
+
+// getMagicLabel returns whether a magic label ("quantile" or "le") is needed
+// and, if so, its name and value. It also sets p.fieldsDone if applicable.
+func (p *ProtobufParser) getMagicLabel() (bool, string, string) {
+	if p.state == EntryHistogram || p.fieldPos < 0 {
+		return false, "", ""
+	}
+	switch p.mf.GetType() {
+	case dto.MetricType_SUMMARY:
+		qq := p.mf.GetMetric()[p.metricPos].GetSummary().GetQuantile()
+		q := qq[p.fieldPos]
+		p.fieldsDone = p.fieldPos == len(qq)-1
+		return true, model.QuantileLabel, formatOpenMetricsFloat(q.GetQuantile())
+	case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM:
+		bb := p.mf.GetMetric()[p.metricPos].GetHistogram().GetBucket()
+		if p.fieldPos >= len(bb) {
+			p.fieldsDone = true
+			return true, model.BucketLabel, "+Inf"
+		}
+		b := bb[p.fieldPos]
+		p.fieldsDone = math.IsInf(b.GetUpperBound(), +1)
+		return true, model.BucketLabel, formatOpenMetricsFloat(b.GetUpperBound())
+	}
+	return false, "", ""
+}
+
+var errInvalidVarint = errors.New("protobufparse: invalid varint encountered")
+
+// readDelimited is essentially doing what the function of the same name in
+// github.com/matttproud/golang_protobuf_extensions/pbutil is doing, but it is
+// specific to a MetricFamily, utilizes the more efficient gogo-protobuf
+// unmarshaling, and acts on a byte slice directly without any additional
+// staging buffers.
+func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) {
+	if len(b) == 0 {
+		return 0, io.EOF
+	}
+	messageLength, varIntLength := proto.DecodeVarint(b)
+	if varIntLength == 0 || varIntLength > binary.MaxVarintLen32 {
+		return 0, errInvalidVarint
+	}
+	totalLength := varIntLength + int(messageLength)
+	if totalLength > len(b) {
+		return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b))
+	}
+	mf.Reset()
+	return totalLength, mf.Unmarshal(b[varIntLength:totalLength])
+}
+
+// formatOpenMetricsFloat works like the usual Go string formatting of a float
+// but appends ".0" if the resulting number would otherwise contain neither a
+// "." nor an "e".
+func formatOpenMetricsFloat(f float64) string {
+	// A few common cases hardcoded.
+	switch {
+	case f == 1:
+		return "1.0"
+	case f == 0:
+		return "0.0"
+	case f == -1:
+		return "-1.0"
+	case math.IsNaN(f):
+		return "NaN"
+	case math.IsInf(f, +1):
+		return "+Inf"
+	case math.IsInf(f, -1):
+		return "-Inf"
+	}
+	s := fmt.Sprint(f)
+	if strings.ContainsAny(s, "e.") {
+		return s
+	}
+	return s + ".0"
+}
+
+// isNativeHistogram returns false iff the provided histogram has no spans at
+// all (neither positive nor negative) and a zero threshold of 0 and a zero
+// count of 0. In principle, this could still be meant to be a native histogram
+// with a zero threshold of 0 and no observations yet. In that case,
+// instrumentation libraries should add a "no-op" span (e.g. length zero, offset
+// zero) to signal that the histogram is meant to be parsed as a native
+// histogram.
Failing to do so will cause Prometheus to parse it as a classic +// histogram as long as no observations have happened. +func isNativeHistogram(h *dto.Histogram) bool { + return len(h.GetPositiveSpan()) > 0 || + len(h.GetNegativeSpan()) > 0 || + h.GetZeroThreshold() > 0 || + h.GetZeroCount() > 0 +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/timestamp/timestamp.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/timestamp/timestamp.go new file mode 100644 index 000000000000..93458f644d1b --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/timestamp/timestamp.go @@ -0,0 +1,34 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package timestamp + +import ( + "math" + "time" +) + +// FromTime returns a new millisecond timestamp from a time. +func FromTime(t time.Time) int64 { + return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond) +} + +// Time returns a new time.Time object from a millisecond timestamp. +func Time(ts int64) time.Time { + return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() +} + +// FromFloatSeconds returns a millisecond timestamp from float seconds. +func FromFloatSeconds(ts float64) int64 { + return int64(math.Round(ts * 1000)) +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/model/value/value.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/value/value.go new file mode 100644 index 000000000000..655ce852d51d --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/model/value/value.go @@ -0,0 +1,34 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package value + +import ( + "math" +) + +const ( + // NormalNaN is a quiet NaN. This is also math.NaN(). + NormalNaN uint64 = 0x7ff8000000000001 + + // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0. + // This value is chosen with many leading 0s, so we have scope to store more + // complicated values in the future. It is 2 rather than 1 to make + // it easier to distinguish from the NormalNaN by a human when debugging. + StaleNaN uint64 = 0x7ff0000000000002 +) + +// IsStaleNaN returns true when the provided NaN value is a stale marker. 
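+//
+// Illustrative usage (editor's note, not part of the upstream file):
+//
+//	v := math.Float64frombits(value.StaleNaN)
+//	value.IsStaleNaN(v)          // true
+//	value.IsStaleNaN(math.NaN()) // false: math.NaN() is NormalNaN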
+func IsStaleNaN(v float64) bool {
+	return math.Float64bits(v) == StaleNaN
+}
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/README.md b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/README.md
new file mode 100644
index 000000000000..a33d7bfb8816
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/README.md
@@ -0,0 +1,9 @@
+The compiled protobufs are version controlled and you won't normally need to
+re-compile them when building Prometheus.
+
+If, however, you have modified the definitions and need to re-compile them, run
+`make proto` from the parent directory.
+
+In order for the [script](../scripts/genproto.sh) to run, you'll need `protoc` (version 3.15.8) in
+your PATH.
+
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.lock b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.lock
new file mode 100644
index 000000000000..30b0f08479be
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.lock
@@ -0,0 +1,10 @@
+# Generated by buf. DO NOT EDIT.
+version: v1
+deps:
+  - remote: buf.build
+    owner: gogo
+    repository: protobuf
+    branch: main
+    commit: 4df00b267f944190a229ce3695781e99
+    digest: b1-sjLgsg7CzrkOrIjBDh3s-l0aMjE6oqTj85-OsoopKAw=
+    create_time: 2021-08-10T00:14:28.345069Z
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.yaml b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.yaml
new file mode 100644
index 000000000000..0f7ec13401b7
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/buf.yaml
@@ -0,0 +1,21 @@
+version: v1
+name: buf.build/prometheus/prometheus
+lint:
+  ignore_only:
+    ENUM_VALUE_PREFIX:
+      - remote.proto
+      - types.proto
+      - io/prometheus/client/metrics.proto
+    ENUM_ZERO_VALUE_SUFFIX:
+      - remote.proto
+      - types.proto
+      - io/prometheus/client/metrics.proto
+    PACKAGE_DIRECTORY_MATCH:
+      - remote.proto
+      - types.proto
+    PACKAGE_VERSION_SUFFIX:
+      - remote.proto
+      - types.proto
+      - io/prometheus/client/metrics.proto
+deps:
+  - buf.build/gogo/protobuf
diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/codec.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/codec.go
new file mode 100644
index 000000000000..9a770bc8a8db
--- /dev/null
+++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/codec.go
@@ -0,0 +1,201 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prompb
+
+import (
+	"strings"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/v2/model/exemplar"
+	"github.com/prometheus/prometheus/v2/model/histogram"
+	"github.com/prometheus/prometheus/v2/model/labels"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
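+// (Editor's note) The passed ScratchBuilder is reset on every call, so a
+// single builder can be reused across many series to limit allocations; the
+// second parameter is ignored by this implementation.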
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+	return labelProtosToLabels(b, m.GetLabels())
+}
+
+// ToLabels returns model labels.Labels from the chunked series' remote labels.
+func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+	return labelProtosToLabels(b, m.GetLabels())
+}
+
+func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels {
+	b.Reset()
+	for _, l := range labelPairs {
+		b.Add(l.Name, l.Value)
+	}
+	b.Sort()
+	return b.Labels()
+}
+
+// FromLabels transforms labels into prompb labels. The buffer slice
+// will be used to avoid allocations if it is big enough to store the labels.
+func FromLabels(lbls labels.Labels, buf []Label) []Label {
+	result := buf[:0]
+	lbls.Range(func(l labels.Label) {
+		result = append(result, Label{
+			Name:  l.Name,
+			Value: l.Value,
+		})
+	})
+	return result
+}
+
+// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum.
+func FromMetadataType(t model.MetricType) MetricMetadata_MetricType {
+	mt := strings.ToUpper(string(t))
+	v, ok := MetricMetadata_MetricType_value[mt]
+	if !ok {
+		return MetricMetadata_UNKNOWN
+	}
+	return MetricMetadata_MetricType(v)
+}
+
+// IsFloatHistogram returns true if the histogram is float.
+func (h Histogram) IsFloatHistogram() bool {
+	_, ok := h.GetCount().(*Histogram_CountFloat)
+	return ok
+}
+
+// ToIntHistogram returns integer Prometheus histogram from the remote implementation
+// of integer histogram. If it's a float histogram, the method returns nil.
+func (h Histogram) ToIntHistogram() *histogram.Histogram {
+	if h.IsFloatHistogram() {
+		return nil
+	}
+	return &histogram.Histogram{
+		CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+		Schema:           h.Schema,
+		ZeroThreshold:    h.ZeroThreshold,
+		ZeroCount:        h.GetZeroCountInt(),
+		Count:            h.GetCountInt(),
+		Sum:              h.Sum,
+		PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
+		PositiveBuckets:  h.GetPositiveDeltas(),
+		NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
+		NegativeBuckets:  h.GetNegativeDeltas(),
+	}
+}
+
+// ToFloatHistogram returns float Prometheus histogram from the remote implementation
+// of float histogram. If the underlying implementation is an integer histogram, a
+// conversion is performed.
+func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram {
+	if h.IsFloatHistogram() {
+		return &histogram.FloatHistogram{
+			CounterResetHint: histogram.CounterResetHint(h.ResetHint),
+			Schema:           h.Schema,
+			ZeroThreshold:    h.ZeroThreshold,
+			ZeroCount:        h.GetZeroCountFloat(),
+			Count:            h.GetCountFloat(),
+			Sum:              h.Sum,
+			PositiveSpans:    spansProtoToSpans(h.GetPositiveSpans()),
+			PositiveBuckets:  h.GetPositiveCounts(),
+			NegativeSpans:    spansProtoToSpans(h.GetNegativeSpans()),
+			NegativeBuckets:  h.GetNegativeCounts(),
+		}
+	}
+	// Conversion from integer histogram.
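+	// (Editor's note) deltasToCounts below turns the delta-encoded integer
+	// buckets into the absolute float counts that FloatHistogram expects,
+	// e.g. deltas [2, -1, 3] become counts [2, 1, 4].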
+ return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. +func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +// ToExemplar converts remote exemplar to model exemplar. +func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(b, m.GetLabels()), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/custom.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/custom.go new file mode 100644 index 000000000000..f73ddd446b5b --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/custom.go @@ -0,0 +1,31 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prompb + +import ( + "sync" +) + +func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { + size := r.Size() + data, ok := p.Get().(*[]byte) + if ok && cap(*data) >= size { + n, err := r.MarshalToSizedBuffer((*data)[:size]) + if err != nil { + return nil, err + } + return (*data)[:n], nil + } + return r.Marshal() +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.pb.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.pb.go new file mode 100644 index 000000000000..17cab6081e25 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.pb.go @@ -0,0 +1,4300 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: io/prometheus/client/metrics.proto + +package io_prometheus_client + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + types "github.com/gogo/protobuf/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricType int32 + +const ( + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". + MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". 
+ MetricType_GAUGE_HISTOGRAM MetricType = 5 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} + +type LabelPair struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(m, src) +} +func (m *LabelPair) XXX_Size() int { + return m.Size() +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Gauge struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return m.Size() +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Counter struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp,proto3" 
json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(m, src) +} +func (m *Counter) XXX_Size() int { + return m.Size() +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Counter) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +func (m *Counter) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + +type Quantile struct { + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(m, src) +} +func (m *Quantile) XXX_Size() int { + return m.Size() +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Summary struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + Quantile []Quantile `protobuf:"bytes,3,rep,name=quantile,proto3" json:"quantile"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return 
proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +func (m *Summary) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + +type Untyped struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(m, src) +} +func (m *Untyped) XXX_Size() int { + return m.Size() +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +type Histogram struct { + SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` + SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` + SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` + // Buckets for the classic histogram. + Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` + CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). 
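+	// For example, with schema n=3 each power of two is split into 2^3 = 8
+	// buckets, so each boundary is 2^(2^-3) ≈ 1.0905 times the previous one;
+	// with n=0 the bucket boundaries are exactly the powers of two.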
+ // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema int32 `protobuf:"zigzag32,5,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + ZeroCount uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + ZeroCountFloat float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat,proto3" json:"zero_count_float,omitempty"` + // Negative buckets for the native histogram. + NegativeSpan []BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan,proto3" json:"negative_span"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,packed,name=negative_delta,json=negativeDelta,proto3" json:"negative_delta,omitempty"` + NegativeCount []float64 `protobuf:"fixed64,11,rep,packed,name=negative_count,json=negativeCount,proto3" json:"negative_count,omitempty"` + // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. + PositiveSpan []BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan,proto3" json:"positive_span"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,packed,name=positive_delta,json=positiveDelta,proto3" json:"positive_delta,omitempty"` + PositiveCount []float64 `protobuf:"fixed64,14,rep,packed,name=positive_count,json=positiveCount,proto3" json:"positive_count,omitempty"` + // Only used for native histograms. These exemplars MUST have a timestamp. 
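+	// (Unlike classic-histogram exemplars, which hang off individual Bucket
+	// messages, these are attached to the histogram as a whole.)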
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil { + return m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleCountFloat() float64 { + if m != nil { + return m.SampleCountFloat + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil { + return m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Histogram) GetCreatedTimestamp() *types.Timestamp { + if m != nil { + return m.CreatedTimestamp + } + return nil +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() uint64 { + if m != nil { + return m.ZeroCount + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if m != nil { + return m.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpan() []BucketSpan { + if m != nil { + return m.NegativeSpan + } + return nil +} + +func (m *Histogram) GetNegativeDelta() []int64 { + if m != nil { + return m.NegativeDelta + } + return nil +} + +func (m *Histogram) GetNegativeCount() []float64 { + if m != nil { + return m.NegativeCount + } + return nil +} + +func (m *Histogram) GetPositiveSpan() []BucketSpan { + if m != nil { + return m.PositiveSpan + } + return nil +} + +func (m *Histogram) GetPositiveDelta() []int64 { + if m != nil { + return m.PositiveDelta + } + return nil +} + +func (m *Histogram) GetPositiveCount() []float64 { + if m != nil { + return m.PositiveCount + } + return nil +} + +func (m *Histogram) GetExemplars() []*Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +type Bucket struct { + CumulativeCount uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount,proto3" json:"cumulative_count,omitempty"` + CumulativeCountFloat float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat,proto3" json:"cumulative_count_float,omitempty"` + UpperBound float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound,proto3" json:"upper_bound,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return m.Size() +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil { + return m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetCumulativeCountFloat() float64 { + if m != nil { + return m.CumulativeCountFloat + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil { + return m.UpperBound + } + return 0 +} + +func (m *Bucket) GetExemplar() *Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{8} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +type Exemplar struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + Timestamp *types.Timestamp `protobuf:"bytes,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` 
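+	// The XXX_ fields are gogo/protobuf bookkeeping: XXX_NoUnkeyedLiteral
+	// discourages unkeyed struct literals, XXX_unrecognized preserves unknown
+	// wire-format fields so they survive a re-marshal, and XXX_sizecache
+	// caches the result of Size(); all are excluded from JSON.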
+ XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{9} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() *types.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type Metric struct { + Label []LabelPair `protobuf:"bytes,1,rep,name=label,proto3" json:"label"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge,proto3" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter,proto3" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped,proto3" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram,proto3" json:"histogram,omitempty"` + TimestampMs int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{10} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if 
m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil { + return m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` + Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_d1e5ddb18987a258, []int{11} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(m, src) +} +func (m *MetricFamily) XXX_Size() int { + return m.Size() +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil { + return m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []Metric { + if m != nil { + return m.Metric + } + return nil +} + +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") + proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") +} + +func init() { + proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) +} + +var fileDescriptor_d1e5ddb18987a258 = []byte{ + // 982 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0xae, 0x9b, 0x4f, 0xbf, 0xd9, 
0x6c, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0xb2, 0x48, 0xec, 0xb6, 0xdb, 0x14, 0x95, 0xb4, 0x65, 0x92, + 0x1c, 0xca, 0xc5, 0x9a, 0x24, 0xb3, 0x8e, 0x85, 0xbf, 0xb0, 0xc7, 0x15, 0xcb, 0x9d, 0xdf, 0xc0, + 0x1f, 0xe0, 0x67, 0x70, 0x46, 0x3d, 0x72, 0xe2, 0x88, 0xd0, 0xfe, 0x0e, 0x0e, 0x68, 0xbe, 0xec, + 0x6c, 0xe5, 0x2c, 0x2c, 0xdc, 0x3c, 0x8f, 0x9f, 0x67, 0xe6, 0x79, 0x1f, 0xdb, 0xef, 0x6b, 0x70, + 0xfc, 0x78, 0x94, 0xa4, 0x71, 0x48, 0xd9, 0x9a, 0xe6, 0xd9, 0x68, 0x19, 0xf8, 0x34, 0x62, 0xa3, + 0x90, 0xb2, 0xd4, 0x5f, 0x66, 0xc3, 0x24, 0x8d, 0x59, 0x8c, 0x7a, 0x7e, 0x3c, 0x2c, 0x39, 0x43, + 0xc9, 0xd9, 0xef, 0x79, 0xb1, 0x17, 0x0b, 0xc2, 0x88, 0x5f, 0x49, 0xee, 0x7e, 0xdf, 0x8b, 0x63, + 0x2f, 0xa0, 0x23, 0xb1, 0x5a, 0xe4, 0xe7, 0x23, 0xe6, 0x87, 0x34, 0x63, 0x24, 0x4c, 0x24, 0xc1, + 0xf9, 0x18, 0xcc, 0xaf, 0xc8, 0x82, 0x06, 0xcf, 0x89, 0x9f, 0x22, 0x04, 0xf5, 0x88, 0x84, 0xd4, + 0x36, 0x06, 0xc6, 0x91, 0x89, 0xc5, 0x35, 0xea, 0x41, 0xe3, 0x25, 0x09, 0x72, 0x6a, 0xdf, 0x16, + 0xa0, 0x5c, 0x38, 0x07, 0xd0, 0x18, 0x93, 0xdc, 0xdb, 0xb8, 0xcd, 0x35, 0x86, 0xbe, 0xfd, 0xb3, + 0x01, 0xad, 0x07, 0x71, 0x1e, 0x31, 0x9a, 0x56, 0x33, 0xd0, 0x7d, 0x68, 0xd3, 0xef, 0x69, 0x98, + 0x04, 0x24, 0x15, 0x3b, 0x77, 0x3e, 0x3c, 0x1c, 0x56, 0xd5, 0x35, 0x3c, 0x53, 0x2c, 0x5c, 0xf0, + 0xd1, 0x18, 0xf6, 0x96, 0x29, 0x25, 0x8c, 0xae, 0xdc, 0xa2, 0x1c, 0xbb, 0x26, 0x36, 0xd9, 0x1f, + 0xca, 0x82, 0x87, 0xba, 0xe0, 0xe1, 0x4c, 0x33, 0xb0, 0xa5, 0x44, 0x05, 0xe2, 0x1c, 0x43, 0xfb, + 0xeb, 0x9c, 0x44, 0xcc, 0x0f, 0x28, 0xda, 0x87, 0xf6, 0x77, 0xea, 0x5a, 0x39, 0x2d, 0xd6, 0x57, + 0x33, 0x28, 0x8a, 0xfc, 0xdd, 0x80, 0xd6, 0x34, 0x0f, 0x43, 0x92, 0x5e, 0xa0, 0xb7, 0x61, 0x27, + 0x23, 0x61, 0x12, 0x50, 0x77, 0xc9, 0xcb, 0x16, 0x3b, 0xd4, 0x71, 0x47, 0x62, 0x22, 0x09, 0x74, + 0x00, 0xa0, 0x28, 0x59, 0x1e, 0xaa, 0x9d, 0x4c, 0x89, 0x4c, 0xf3, 0x10, 0x7d, 0xb1, 0x71, 0x7e, + 0x6d, 0x50, 0xdb, 0x1e, 0x88, 0x76, 0x7c, 0x5a, 0x7f, 0xf5, 0x47, 0xff, 0xd6, 0x86, 0xcb, 0xca, + 0x58, 0xea, 0xff, 0x21, 0x96, 0x3e, 0xb4, 0xe6, 0x11, 0xbb, 0x48, 0xe8, 0x6a, 0xcb, 0xe3, 0xfd, + 0xab, 0x01, 0xe6, 0x63, 0x3f, 0x63, 0xb1, 0x97, 0x92, 0xf0, 0xdf, 0xd4, 0xfe, 0x3e, 0xa0, 0x4d, + 0x8a, 0x7b, 0x1e, 0xc4, 0x84, 0x09, 0x6f, 0x06, 0xb6, 0x36, 0x88, 0x8f, 0x38, 0xfe, 0x4f, 0x49, + 0xdd, 0x87, 0xe6, 0x22, 0x5f, 0x7e, 0x4b, 0x99, 0xca, 0xe9, 0xad, 0xea, 0x9c, 0x4e, 0x05, 0x47, + 0xa5, 0xa4, 0x14, 0xd5, 0x19, 0xdd, 0xb9, 0x79, 0x46, 0xe8, 0x2e, 0x34, 0xb3, 0xe5, 0x9a, 0x86, + 0xc4, 0x6e, 0x0c, 0x8c, 0xa3, 0x3d, 0xac, 0x56, 0xe8, 0x1d, 0xd8, 0xfd, 0x81, 0xa6, 0xb1, 0xcb, + 0xd6, 0x29, 0xcd, 0xd6, 0x71, 0xb0, 0xb2, 0x9b, 0xc2, 0x7f, 0x97, 0xa3, 0x33, 0x0d, 0xf2, 0x12, + 0x05, 0x4d, 0x26, 0xd6, 0x12, 0x89, 0x99, 0x1c, 0x91, 0x79, 0x1d, 0x81, 0x55, 0xde, 0x56, 0x69, + 0xb5, 0xc5, 0x3e, 0xbb, 0x05, 0x49, 0x66, 0xf5, 0x04, 0xba, 0x11, 0xf5, 0x08, 0xf3, 0x5f, 0x52, + 0x37, 0x4b, 0x48, 0x64, 0x9b, 0x22, 0x93, 0xc1, 0x75, 0x99, 0x4c, 0x13, 0x12, 0xa9, 0x5c, 0x76, + 0xb4, 0x98, 0x63, 0xdc, 0x7c, 0xb1, 0xd9, 0x8a, 0x06, 0x8c, 0xd8, 0x30, 0xa8, 0x1d, 0x21, 0x5c, + 0x1c, 0xf1, 0x90, 0x83, 0x57, 0x68, 0xb2, 0x80, 0xce, 0xa0, 0xc6, 0x6b, 0xd4, 0xa8, 0x2c, 0xe2, + 0x09, 0x74, 0x93, 0x38, 0xf3, 0x4b, 0x6b, 0x3b, 0x37, 0xb3, 0xa6, 0xc5, 0xda, 0x5a, 0xb1, 0x99, + 0xb4, 0xd6, 0x95, 0xd6, 0x34, 0x5a, 0x58, 0x2b, 0x68, 0xd2, 0xda, 0xae, 0xb4, 0xa6, 0x51, 0x69, + 0xed, 0x18, 0x4c, 0xdd, 0x4d, 0x32, 0xdb, 0xba, 0xee, 0x6b, 0x2b, 0xda, 0x4f, 0x29, 0x70, 0x7e, + 0x35, 0xa0, 0x29, 0xed, 0xa2, 0x77, 0xc1, 0x5a, 0xe6, 0x61, 
0x1e, 0x6c, 0x86, 0x21, 0xdf, 0xff, + 0x3b, 0x25, 0x2e, 0xcf, 0xbc, 0x07, 0x77, 0x5f, 0xa7, 0x5e, 0xf9, 0x0e, 0x7a, 0xaf, 0x09, 0xe4, + 0xf3, 0xed, 0x43, 0x27, 0x4f, 0x12, 0x9a, 0xba, 0x8b, 0x38, 0x8f, 0x56, 0xea, 0x63, 0x00, 0x01, + 0x9d, 0x72, 0xe4, 0x4a, 0x23, 0xad, 0xdd, 0xac, 0x91, 0x3a, 0xc7, 0x00, 0x65, 0xec, 0xfc, 0x95, + 0x8e, 0xcf, 0xcf, 0x33, 0x2a, 0x2b, 0xd8, 0xc3, 0x6a, 0xc5, 0xf1, 0x80, 0x46, 0x1e, 0x5b, 0x8b, + 0xd3, 0xbb, 0x58, 0xad, 0x9c, 0x9f, 0x0c, 0x68, 0xeb, 0x4d, 0xd1, 0x67, 0xd0, 0x08, 0xf8, 0x1c, + 0xb1, 0x0d, 0x91, 0x66, 0xbf, 0xda, 0x43, 0x31, 0x6a, 0xd4, 0x33, 0x96, 0x9a, 0xea, 0xfe, 0x8a, + 0x3e, 0x05, 0xf3, 0x26, 0xed, 0xbd, 0x24, 0x3b, 0x3f, 0xd6, 0xa0, 0x39, 0x11, 0x33, 0xf3, 0xff, + 0xf9, 0xfa, 0x00, 0x1a, 0x1e, 0x9f, 0x72, 0x6a, 0x42, 0xbd, 0x59, 0x2d, 0x16, 0x83, 0x10, 0x4b, + 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x39, 0xf8, 0x94, 0xe5, 0x83, 0x6a, 0x91, 0x9a, 0x8e, 0x58, 0xb3, + 0xb9, 0x30, 0x93, 0xc3, 0x44, 0xf5, 0xec, 0x2d, 0x42, 0x35, 0x71, 0xb0, 0x66, 0x73, 0x61, 0x2e, + 0xbb, 0xb5, 0x68, 0x45, 0x5b, 0x85, 0xaa, 0xa5, 0x63, 0xcd, 0x46, 0x9f, 0x83, 0xb9, 0xd6, 0x4d, + 0x5c, 0xb4, 0xa0, 0xad, 0xf1, 0x14, 0xbd, 0x1e, 0x97, 0x0a, 0xde, 0xf6, 0x8b, 0xc4, 0xdd, 0x30, + 0x13, 0x7d, 0xae, 0x86, 0x3b, 0x05, 0x36, 0xc9, 0x9c, 0x5f, 0x0c, 0xd8, 0x91, 0xcf, 0xe1, 0x11, + 0x09, 0xfd, 0xe0, 0xa2, 0xf2, 0x07, 0x03, 0x41, 0x7d, 0x4d, 0x83, 0x44, 0xfd, 0x5f, 0x88, 0x6b, + 0x74, 0x0f, 0xea, 0xdc, 0xa3, 0x88, 0x70, 0x77, 0x5b, 0xc7, 0x90, 0x3b, 0xcf, 0x2e, 0x12, 0x8a, + 0x05, 0x9b, 0x0f, 0x06, 0xf9, 0xa7, 0x64, 0xd7, 0xaf, 0x1b, 0x0c, 0x52, 0xa7, 0x07, 0x83, 0x54, + 0x70, 0x17, 0x79, 0xe4, 0x33, 0x11, 0xa1, 0x89, 0xc5, 0xf5, 0x7b, 0x0b, 0x80, 0xf2, 0x0c, 0xd4, + 0x81, 0xd6, 0x83, 0x67, 0xf3, 0xa7, 0xb3, 0x33, 0x6c, 0xdd, 0x42, 0x26, 0x34, 0xc6, 0x27, 0xf3, + 0xf1, 0x99, 0x65, 0x70, 0x7c, 0x3a, 0x9f, 0x4c, 0x4e, 0xf0, 0x0b, 0xeb, 0x36, 0x5f, 0xcc, 0x9f, + 0xce, 0x5e, 0x3c, 0x3f, 0x7b, 0x68, 0xd5, 0x50, 0x17, 0xcc, 0xc7, 0x5f, 0x4e, 0x67, 0xcf, 0xc6, + 0xf8, 0x64, 0x62, 0xd5, 0xd1, 0x1b, 0x70, 0x47, 0x68, 0xdc, 0x12, 0x6c, 0x9c, 0x3a, 0xaf, 0x2e, + 0x0f, 0x8d, 0xdf, 0x2e, 0x0f, 0x8d, 0x3f, 0x2f, 0x0f, 0x8d, 0x6f, 0x7a, 0x7e, 0xec, 0x96, 0x86, + 0x5d, 0x69, 0x78, 0xd1, 0x14, 0x6f, 0xfb, 0x47, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xe1, + 0xcf, 0xb8, 0x1d, 0x0a, 0x00, 0x00, +} + +func (m *LabelPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Gauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Counter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Counter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Counter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Quantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Summary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Quantile) > 0 { + for iNdEx := len(m.Quantile) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Quantile[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
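+				// MarshalToSizedBuffer fills dAtA back to front: fields are
+				// written in descending field-number order and i walks down
+				// from len(dAtA), so each submessage's length is already known
+				// when its length-prefix varint is written in front of it.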
encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Untyped) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Untyped) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Untyped) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.CreatedTimestamp != nil { + { + size, err := m.CreatedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if len(m.PositiveCount) > 0 { + for iNdEx := len(m.PositiveCount) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.PositiveCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.PositiveCount)*8)) + i-- + dAtA[i] = 0x72 + } + if len(m.PositiveDelta) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.PositiveDelta)*10) + for _, num := range m.PositiveDelta { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintMetrics(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveSpan) > 0 { + for iNdEx := len(m.PositiveSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + if len(m.NegativeCount) > 0 { + for iNdEx := len(m.NegativeCount) - 1; iNdEx >= 0; iNdEx-- { + f9 := math.Float64bits(float64(m.NegativeCount[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f9)) + } + i = encodeVarintMetrics(dAtA, i, 
uint64(len(m.NegativeCount)*8)) + i-- + dAtA[i] = 0x5a + } + if len(m.NegativeDelta) > 0 { + var j10 int + dAtA12 := make([]byte, len(m.NegativeDelta)*10) + for _, num := range m.NegativeDelta { + x11 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x11 >= 1<<7 { + dAtA12[j10] = uint8(uint64(x11)&0x7f | 0x80) + j10++ + x11 >>= 7 + } + dAtA12[j10] = uint8(x11) + j10++ + } + i -= j10 + copy(dAtA[i:], dAtA12[:j10]) + i = encodeVarintMetrics(dAtA, i, uint64(j10)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeSpan) > 0 { + for iNdEx := len(m.NegativeSpan) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpan[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ZeroCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x41 + } + if m.ZeroCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x38 + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x31 + } + if m.Schema != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x28 + } + if m.SampleCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if len(m.Bucket) > 0 { + for iNdEx := len(m.Bucket) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bucket[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.SampleSum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.SampleSum)))) + i-- + dAtA[i] = 0x11 + } + if m.SampleCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.SampleCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bucket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CumulativeCountFloat != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CumulativeCountFloat)))) + i-- + dAtA[i] = 0x21 + } + if m.Exemplar != nil { + { + size, err := m.Exemplar.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UpperBound != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.UpperBound)))) + i-- + dAtA[i] = 0x11 + } + if m.CumulativeCount != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.CumulativeCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != nil { + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.TimestampMs != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.TimestampMs)) + i-- + dAtA[i] = 0x30 + } + if m.Untyped != nil { + { + size, err := m.Untyped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Summary != nil { + { + size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Counter != nil { + { + size, err := m.Counter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil 
{ + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Label) > 0 { + for iNdEx := len(m.Label) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Label[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricFamily) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricFamily) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Metric) > 0 { + for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metric[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Type != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LabelPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Counter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Quantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Quantile) > 0 { + for _, e := range m.Quantile { + l = e.Size() + n += 1 + l + 
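+			// Each embedded message costs 1 byte of field tag (for these small
+			// field numbers), sovMetrics(l) bytes for the length varint, and
+			// l bytes of payload.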
sovMetrics(uint64(l)) + } + } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Untyped) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SampleCount != 0 { + n += 1 + sovMetrics(uint64(m.SampleCount)) + } + if m.SampleSum != 0 { + n += 9 + } + if len(m.Bucket) > 0 { + for _, e := range m.Bucket { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.SampleCountFloat != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozMetrics(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != 0 { + n += 1 + sovMetrics(uint64(m.ZeroCount)) + } + if m.ZeroCountFloat != 0 { + n += 9 + } + if len(m.NegativeSpan) > 0 { + for _, e := range m.NegativeSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.NegativeDelta) > 0 { + l = 0 + for _, e := range m.NegativeDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.NegativeCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.NegativeCount)*8)) + len(m.NegativeCount)*8 + } + if len(m.PositiveSpan) > 0 { + for _, e := range m.PositiveSpan { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.PositiveDelta) > 0 { + l = 0 + for _, e := range m.PositiveDelta { + l += sozMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + if len(m.PositiveCount) > 0 { + n += 1 + sovMetrics(uint64(len(m.PositiveCount)*8)) + len(m.PositiveCount)*8 + } + if m.CreatedTimestamp != nil { + l = m.CreatedTimestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Bucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CumulativeCount != 0 { + n += 1 + sovMetrics(uint64(m.CumulativeCount)) + } + if m.UpperBound != 0 { + n += 9 + } + if m.Exemplar != nil { + l = m.Exemplar.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.CumulativeCountFloat != 0 { + n += 9 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozMetrics(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovMetrics(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Label) > 0 { + for _, e := range m.Label { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Gauge != nil { + l = m.Gauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Counter != 
nil { + l = m.Counter.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Summary != nil { + l = m.Summary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Untyped != nil { + l = m.Untyped.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.TimestampMs != 0 { + n += 1 + sovMetrics(uint64(m.TimestampMs)) + } + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *MetricFamily) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovMetrics(uint64(m.Type)) + } + if len(m.Metric) > 0 { + for _, e := range m.Metric { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LabelPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Counter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Counter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Counter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Quantile = append(m.Quantile, Quantile{}) + if err := m.Quantile[len(m.Quantile)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Untyped) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Untyped: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Untyped: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCount", wireType) + } + m.SampleCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SampleCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleSum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleSum = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = append(m.Bucket, Bucket{}) + if err := m.Bucket[len(m.Bucket)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SampleCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.SampleCountFloat = float64(math.Float64frombits(v)) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ZeroCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCountFloat = float64(math.Float64frombits(v)) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpan = append(m.NegativeSpan, BucketSpan{}) + if err := m.NegativeSpan[len(m.NegativeSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ 
+ } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDelta) == 0 { + m.NegativeDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDelta = append(m.NegativeDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDelta", wireType) + } + case 11: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCount) == 0 { + m.NegativeCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCount = append(m.NegativeCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCount", wireType) + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpan = append(m.PositiveSpan, BucketSpan{}) + if err := m.PositiveSpan[len(m.PositiveSpan)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var 
elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDelta) == 0 { + m.PositiveDelta = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDelta = append(m.PositiveDelta, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDelta", wireType) + } + case 14: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCount) == 0 { + m.PositiveCount = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCount = append(m.PositiveCount, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCount", wireType) + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedTimestamp == nil { + m.CreatedTimestamp = &types.Timestamp{} + } + if err := m.CreatedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCount", wireType) + } + m.CumulativeCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CumulativeCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field UpperBound", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.UpperBound = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exemplar == nil { + m.Exemplar = &Exemplar{} + } + if err := m.Exemplar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CumulativeCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.CumulativeCountFloat = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &types.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Label", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Label = append(m.Label, LabelPair{}) + if err := m.Label[len(m.Label)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Gauge == nil { + m.Gauge = &Gauge{} + } + if err := m.Gauge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Counter == nil { + m.Counter = &Counter{} + } + if err := m.Counter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Summary == nil { + m.Summary = &Summary{} + } + if err := 
m.Summary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Untyped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Untyped == nil { + m.Untyped = &Untyped{} + } + if err := m.Untyped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampMs", wireType) + } + m.TimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Histogram == nil { + m.Histogram = &Histogram{} + } + if err := m.Histogram.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricFamily) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricFamily: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricFamily: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metric = append(m.Metric, Metric{}) + if err := m.Metric[len(m.Metric)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.proto b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.proto new file mode 100644 index 000000000000..fe55638bb788 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/io/prometheus/client/metrics.proto @@ -0,0 +1,160 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// This is copied and lightly edited from +// github.com/prometheus/client_model/io/prometheus/client/metrics.proto +// and finally converted to proto3 syntax to make it usable for the +// gogo-protobuf approach taken within prometheus/prometheus. + +syntax = "proto3"; + +package io.prometheus.client; +option go_package = "io_prometheus_client"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +message LabelPair { + string name = 1; + string value = 2; +} + +enum MetricType { + // COUNTER must use the Metric field "counter". + COUNTER = 0; + // GAUGE must use the Metric field "gauge". + GAUGE = 1; + // SUMMARY must use the Metric field "summary". + SUMMARY = 2; + // UNTYPED must use the Metric field "untyped". + UNTYPED = 3; + // HISTOGRAM must use the Metric field "histogram". + HISTOGRAM = 4; + // GAUGE_HISTOGRAM must use the Metric field "histogram". + GAUGE_HISTOGRAM = 5; +} + +message Gauge { + double value = 1; +} + +message Counter { + double value = 1; + Exemplar exemplar = 2; + + google.protobuf.Timestamp created_timestamp = 3; +} + +message Quantile { + double quantile = 1; + double value = 2; +} + +message Summary { + uint64 sample_count = 1; + double sample_sum = 2; + repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; + + google.protobuf.Timestamp created_timestamp = 4; +} + +message Untyped { + double value = 1; +} + +message Histogram { + uint64 sample_count = 1; + double sample_count_float = 4; // Overrides sample_count if > 0. + double sample_sum = 2; + // Buckets for the classic histogram. + repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. + + google.protobuf.Timestamp created_timestamp = 15; + + // Everything below here is for native histograms (also known as sparse histograms). + // Native histograms are an experimental feature without stability guarantees. + + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + sint32 schema = 5; + double zero_threshold = 6; // Breadth of the zero bucket. + uint64 zero_count = 7; // Count in zero bucket. + double zero_count_float = 8; // Overrides sb_zero_count if > 0. + + // Negative buckets for the native histogram. + repeated BucketSpan negative_span = 9 [(gogoproto.nullable) = false]; + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + repeated sint64 negative_delta = 10; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double negative_count = 11; // Absolute count of each bucket. + + // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. + repeated BucketSpan positive_span = 12 [(gogoproto.nullable) = false]; + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. 
+ repeated sint64 positive_delta = 13; // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + repeated double positive_count = 14; // Absolute count of each bucket. + + // Only used for native histograms. These exemplars MUST have a timestamp. + repeated Exemplar exemplars = 16; +} + +message Bucket { + uint64 cumulative_count = 1; // Cumulative in increasing order. + double cumulative_count_float = 4; // Overrides cumulative_count if > 0. + double upper_bound = 2; // Inclusive. + Exemplar exemplar = 3; +} + +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +message BucketSpan { + sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative). + uint32 length = 2; // Length of consecutive buckets. +} + +message Exemplar { + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; + double value = 2; + google.protobuf.Timestamp timestamp = 3; // OpenMetrics-style. +} + +message Metric { + repeated LabelPair label = 1 [(gogoproto.nullable) = false]; + Gauge gauge = 2; + Counter counter = 3; + Summary summary = 4; + Untyped untyped = 5; + Histogram histogram = 7; + int64 timestamp_ms = 6; +} + +message MetricFamily { + string name = 1; + string help = 2; + MetricType type = 3; + repeated Metric metric = 4 [(gogoproto.nullable) = false]; + string unit = 5; +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.pb.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.pb.go new file mode 100644 index 000000000000..19318878d757 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.pb.go @@ -0,0 +1,1702 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: remote.proto + +package prompb + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type ReadRequest_ResponseType int32 + +const ( + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + ReadRequest_SAMPLES ReadRequest_ResponseType = 0 + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. 
+ // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + ReadRequest_STREAMED_XOR_CHUNKS ReadRequest_ResponseType = 1 +) + +var ReadRequest_ResponseType_name = map[int32]string{ + 0: "SAMPLES", + 1: "STREAMED_XOR_CHUNKS", +} + +var ReadRequest_ResponseType_value = map[string]int32{ + "SAMPLES": 0, + "STREAMED_XOR_CHUNKS": 1, +} + +func (x ReadRequest_ResponseType) String() string { + return proto.EnumName(ReadRequest_ResponseType_name, int32(x)) +} + +func (ReadRequest_ResponseType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{1, 0} +} + +type WriteRequest struct { + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` + Metadata []MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{0} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetTimeseries() []TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *WriteRequest) GetMetadata() []MetricMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +// ReadRequest represents a remote read request. +type ReadRequest struct { + Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. 
+ AcceptedResponseTypes []ReadRequest_ResponseType `protobuf:"varint,2,rep,packed,name=accepted_response_types,json=acceptedResponseTypes,proto3,enum=prometheus.ReadRequest_ResponseType" json:"accepted_response_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{1} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetQueries() []*Query { + if m != nil { + return m.Queries + } + return nil +} + +func (m *ReadRequest) GetAcceptedResponseTypes() []ReadRequest_ResponseType { + if m != nil { + return m.AcceptedResponseTypes + } + return nil +} + +// ReadResponse is a response when response_type equals SAMPLES. +type ReadResponse struct { + // In same order as the request's queries. + Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{2} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +type Query struct { + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` + Hints *ReadHints `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{3} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(m, src) +} +func (m *Query) XXX_Size() int { + return m.Size() +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo + +func (m *Query) GetStartTimestampMs() int64 { + if m != nil { + return m.StartTimestampMs + } + return 0 +} + +func (m *Query) GetEndTimestampMs() int64 { + if m != nil { + return m.EndTimestampMs + } + return 0 +} + +func (m *Query) GetMatchers() []*LabelMatcher { + if m != nil { + return m.Matchers + } + return nil +} + +func (m *Query) GetHints() *ReadHints { + if m != nil { + return m.Hints + } + return nil +} + +type QueryResult struct { + // Samples within a time series must be ordered by time. + Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{4} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(m, src) +} +func (m *QueryResult) XXX_Size() int { + return m.Size() +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResult proto.InternalMessageInfo + +func (m *QueryResult) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +type ChunkedReadResponse struct { + ChunkedSeries []*ChunkedSeries `protobuf:"bytes,1,rep,name=chunked_series,json=chunkedSeries,proto3" json:"chunked_series,omitempty"` + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. 
+ QueryIndex int64 `protobuf:"varint,2,opt,name=query_index,json=queryIndex,proto3" json:"query_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedReadResponse) Reset() { *m = ChunkedReadResponse{} } +func (m *ChunkedReadResponse) String() string { return proto.CompactTextString(m) } +func (*ChunkedReadResponse) ProtoMessage() {} +func (*ChunkedReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_eefc82927d57d89b, []int{5} +} +func (m *ChunkedReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedReadResponse.Merge(m, src) +} +func (m *ChunkedReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ChunkedReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedReadResponse proto.InternalMessageInfo + +func (m *ChunkedReadResponse) GetChunkedSeries() []*ChunkedSeries { + if m != nil { + return m.ChunkedSeries + } + return nil +} + +func (m *ChunkedReadResponse) GetQueryIndex() int64 { + if m != nil { + return m.QueryIndex + } + return 0 +} + +func init() { + proto.RegisterEnum("prometheus.ReadRequest_ResponseType", ReadRequest_ResponseType_name, ReadRequest_ResponseType_value) + proto.RegisterType((*WriteRequest)(nil), "prometheus.WriteRequest") + proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "prometheus.ReadResponse") + proto.RegisterType((*Query)(nil), "prometheus.Query") + proto.RegisterType((*QueryResult)(nil), "prometheus.QueryResult") + proto.RegisterType((*ChunkedReadResponse)(nil), "prometheus.ChunkedReadResponse") +} + +func init() { proto.RegisterFile("remote.proto", fileDescriptor_eefc82927d57d89b) } + +var fileDescriptor_eefc82927d57d89b = []byte{ + // 496 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xee, 0x26, 0x69, 0x13, 0x8d, 0x43, 0x14, 0xb6, 0x2d, 0x09, 0x39, 0xa4, 0x91, 0xc5, 0x21, + 0x52, 0x51, 0x10, 0xa1, 0xe2, 0xd4, 0x03, 0x69, 0x89, 0x54, 0xa0, 0xe6, 0x67, 0x13, 0x04, 0x42, + 0x48, 0xd6, 0xc6, 0x1e, 0x35, 0x16, 0xf5, 0x4f, 0x77, 0xd7, 0x52, 0xf3, 0x16, 0x3c, 0x13, 0xa7, + 0x9e, 0x10, 0x4f, 0x80, 0x50, 0x9e, 0x04, 0x79, 0x6d, 0x87, 0x2d, 0x5c, 0xb8, 0xad, 0xbf, 0x3f, + 0xcf, 0xcc, 0xce, 0x42, 0x53, 0x60, 0x18, 0x2b, 0x1c, 0x25, 0x22, 0x56, 0x31, 0x85, 0x44, 0xc4, + 0x21, 0xaa, 0x25, 0xa6, 0xb2, 0x67, 0xa9, 0x55, 0x82, 0x32, 0x27, 0x7a, 0x7b, 0x17, 0xf1, 0x45, + 0xac, 0x8f, 0x8f, 0xb2, 0x53, 0x8e, 0xda, 0x5f, 0x09, 0x34, 0x3f, 0x88, 0x40, 0x21, 0xc3, 0xab, + 0x14, 0xa5, 0xa2, 0xc7, 0x00, 0x2a, 0x08, 0x51, 0xa2, 0x08, 0x50, 0x76, 0xc9, 0xa0, 0x3a, 0xb4, + 0xc6, 0xf7, 0x46, 0x7f, 0x42, 0x47, 0xf3, 0x20, 0xc4, 0x99, 0x66, 0x4f, 0x6a, 0x37, 0x3f, 0x0f, + 0xb6, 0x98, 0xa1, 0xa7, 0xc7, 0xd0, 0x08, 0x51, 0x71, 0x9f, 0x2b, 0xde, 0xad, 0x6a, 0x6f, 0xcf, + 0xf4, 0x3a, 0xa8, 0x44, 0xe0, 0x39, 0x85, 0xa2, 0xf0, 0x6f, 0x1c, 0x2f, 0x6b, 0x8d, 0x4a, 0xbb, + 0x6a, 0x7f, 0x27, 0x60, 0x31, 0xe4, 0x7e, 0x59, 0xd1, 0x21, 
0xd4, 0xaf, 0x52, 0xb3, 0x9c, 0xbb, + 0x66, 0xe4, 0xbb, 0x14, 0xc5, 0x8a, 0x95, 0x0a, 0xfa, 0x19, 0x3a, 0xdc, 0xf3, 0x30, 0x51, 0xe8, + 0xbb, 0x02, 0x65, 0x12, 0x47, 0x12, 0x5d, 0x3d, 0x86, 0x6e, 0x65, 0x50, 0x1d, 0xb6, 0xc6, 0x0f, + 0x4c, 0xb3, 0xf1, 0x9b, 0x11, 0x2b, 0xd4, 0xf3, 0x55, 0x82, 0x6c, 0xbf, 0x0c, 0x31, 0x51, 0x69, + 0x1f, 0x41, 0xd3, 0x04, 0xa8, 0x05, 0xf5, 0xd9, 0xc4, 0x79, 0x7b, 0x3e, 0x9d, 0xb5, 0xb7, 0x68, + 0x07, 0x76, 0x67, 0x73, 0x36, 0x9d, 0x38, 0xd3, 0xe7, 0xee, 0xc7, 0x37, 0xcc, 0x3d, 0x3d, 0x7b, + 0xff, 0xfa, 0xd5, 0xac, 0x4d, 0xec, 0x49, 0xe6, 0xe2, 0x9b, 0x28, 0xfa, 0x18, 0xea, 0x02, 0x65, + 0x7a, 0xa9, 0xca, 0x86, 0x3a, 0xff, 0x36, 0xa4, 0x79, 0x56, 0xea, 0xec, 0x6f, 0x04, 0xb6, 0x35, + 0x41, 0x1f, 0x02, 0x95, 0x8a, 0x0b, 0xe5, 0xea, 0xa9, 0x2b, 0x1e, 0x26, 0x6e, 0x98, 0xe5, 0x90, + 0x61, 0x95, 0xb5, 0x35, 0x33, 0x2f, 0x09, 0x47, 0xd2, 0x21, 0xb4, 0x31, 0xf2, 0x6f, 0x6b, 0x2b, + 0x5a, 0xdb, 0xc2, 0xc8, 0x37, 0x95, 0x47, 0xd0, 0x08, 0xb9, 0xf2, 0x96, 0x28, 0x64, 0x71, 0x73, + 0x5d, 0xb3, 0xaa, 0x73, 0xbe, 0xc0, 0x4b, 0x27, 0x17, 0xb0, 0x8d, 0x92, 0x1e, 0xc2, 0xf6, 0x32, + 0x88, 0x94, 0xec, 0xd6, 0x06, 0x64, 0x68, 0x8d, 0xf7, 0xff, 0x1e, 0xee, 0x59, 0x46, 0xb2, 0x5c, + 0x63, 0x4f, 0xc1, 0x32, 0x9a, 0xa3, 0x4f, 0xff, 0x7f, 0xd3, 0xcc, 0x1d, 0xb3, 0xaf, 0x61, 0xf7, + 0x74, 0x99, 0x46, 0x5f, 0xb2, 0xcb, 0x31, 0xa6, 0xfa, 0x0c, 0x5a, 0x5e, 0x0e, 0xbb, 0xb7, 0x22, + 0xef, 0x9b, 0x91, 0x85, 0xb1, 0x48, 0xbd, 0xe3, 0x99, 0x9f, 0xf4, 0x00, 0xac, 0x6c, 0x8d, 0x56, + 0x6e, 0x10, 0xf9, 0x78, 0x5d, 0xcc, 0x09, 0x34, 0xf4, 0x22, 0x43, 0x4e, 0xf6, 0x6e, 0xd6, 0x7d, + 0xf2, 0x63, 0xdd, 0x27, 0xbf, 0xd6, 0x7d, 0xf2, 0x69, 0x27, 0xcb, 0x4d, 0x16, 0x8b, 0x1d, 0xfd, + 0x92, 0x9e, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x13, 0x18, 0x12, 0x0a, 0x88, 0x03, 0x00, 0x00, +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metadata) > 0 { + for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], 
m.XXX_unrecognized) + } + if len(m.AcceptedResponseTypes) > 0 { + dAtA2 := make([]byte, len(m.AcceptedResponseTypes)*10) + var j1 int + for _, num := range m.AcceptedResponseTypes { + for num >= 1<<7 { + dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA2[j1] = uint8(num) + j1++ + } + i -= j1 + copy(dAtA[i:], dAtA2[:j1]) + i = encodeVarintRemote(dAtA, i, uint64(j1)) + i-- + dAtA[i] = 0x12 + } + if len(m.Queries) > 0 { + for iNdEx := len(m.Queries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Queries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Results[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Query) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Hints != nil { + { + size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Matchers) > 0 { + for iNdEx := len(m.Matchers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Matchers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.EndTimestampMs != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.EndTimestampMs)) + i-- + dAtA[i] = 0x10 + } + if m.StartTimestampMs != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.StartTimestampMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Timeseries) > 0 { + 
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ChunkedReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedReadResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedReadResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.QueryIndex != 0 { + i = encodeVarintRemote(dAtA, i, uint64(m.QueryIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.ChunkedSeries) > 0 { + for iNdEx := len(m.ChunkedSeries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ChunkedSeries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { + offset -= sovRemote(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for _, e := range m.Metadata { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Queries) > 0 { + for _, e := range m.Queries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if len(m.AcceptedResponseTypes) > 0 { + l = 0 + for _, e := range m.AcceptedResponseTypes { + l += sovRemote(uint64(e)) + } + n += 1 + sovRemote(uint64(l)) + l + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.StartTimestampMs)) + } + if m.EndTimestampMs != 0 { + n += 1 + sovRemote(uint64(m.EndTimestampMs)) + } + if len(m.Matchers) > 0 { + for _, e := range m.Matchers { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.Hints != nil { + l = m.Hints.Size() + n += 1 + l + sovRemote(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *QueryResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + 
return n +} + +func (m *ChunkedReadResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ChunkedSeries) > 0 { + for _, e := range m.ChunkedSeries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + if m.QueryIndex != 0 { + n += 1 + sovRemote(uint64(m.QueryIndex)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRemote(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRemote(x uint64) (n int) { + return sovRemote(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *WriteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata, MetricMetadata{}) + if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Queries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Queries = append(m.Queries, &Query{}) + if err := m.Queries[len(m.Queries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.AcceptedResponseTypes) == 0 { + m.AcceptedResponseTypes = make([]ReadRequest_ResponseType, 0, elementCount) + } + for iNdEx < postIndex { + var v ReadRequest_ResponseType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= ReadRequest_ResponseType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AcceptedResponseTypes = append(m.AcceptedResponseTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptedResponseTypes", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &QueryResult{}) + if err := m.Results[len(m.Results)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Query) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Query: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Query: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestampMs", wireType) + } + m.StartTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimestampMs", wireType) + } + m.EndTimestampMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndTimestampMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Matchers = append(m.Matchers, &LabelMatcher{}) + if err := m.Matchers[len(m.Matchers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hints == nil { + m.Hints = &ReadHints{} + } + if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkedReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChunkedSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRemote + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRemote + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChunkedSeries = append(m.ChunkedSeries, &ChunkedSeries{}) + if err := m.ChunkedSeries[len(m.ChunkedSeries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QueryIndex", wireType) + } + m.QueryIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRemote + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.QueryIndex |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRemote(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRemote + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRemote(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRemote + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRemote + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRemote + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRemote + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRemote = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRemote = fmt.Errorf("proto: unexpected end of group") +) diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.proto b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.proto new file mode 100644 index 000000000000..b4f82f5f9d74 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/remote.proto @@ -0,0 +1,88 @@ +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "types.proto"; +import "gogoproto/gogo.proto"; + +message WriteRequest { + repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; + // Cortex uses this field to determine the source of the write request. + // We reserve it to avoid any compatibility issues. + reserved 2; + repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false]; +} + +// ReadRequest represents a remote read request. +message ReadRequest { + repeated Query queries = 1; + + enum ResponseType { + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. 
+ // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + SAMPLES = 0; + // Server will stream a delimited ChunkedReadResponse message that + // contains XOR or HISTOGRAM(!) encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian + // uint32 for CRC32 Castagnoli checksum. + // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + STREAMED_XOR_CHUNKS = 1; + } + + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. + repeated ResponseType accepted_response_types = 2; +} + +// ReadResponse is a response when response_type equals SAMPLES. +message ReadResponse { + // In same order as the request's queries. + repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated prometheus.LabelMatcher matchers = 3; + prometheus.ReadHints hints = 4; +} + +message QueryResult { + // Samples within a time series must be ordered by time. + repeated prometheus.TimeSeries timeseries = 1; +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +message ChunkedReadResponse { + repeated prometheus.ChunkedSeries chunked_series = 1; + + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. + int64 query_index = 2; +} diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.pb.go b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.pb.go new file mode 100644 index 000000000000..93883daa1330 --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.pb.go @@ -0,0 +1,4440 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types.proto + +package prompb + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MetricMetadata_MetricType int32 + +const ( + MetricMetadata_UNKNOWN MetricMetadata_MetricType = 0 + MetricMetadata_COUNTER MetricMetadata_MetricType = 1 + MetricMetadata_GAUGE MetricMetadata_MetricType = 2 + MetricMetadata_HISTOGRAM MetricMetadata_MetricType = 3 + MetricMetadata_GAUGEHISTOGRAM MetricMetadata_MetricType = 4 + MetricMetadata_SUMMARY MetricMetadata_MetricType = 5 + MetricMetadata_INFO MetricMetadata_MetricType = 6 + MetricMetadata_STATESET MetricMetadata_MetricType = 7 +) + +var MetricMetadata_MetricType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "COUNTER", + 2: "GAUGE", + 3: "HISTOGRAM", + 4: "GAUGEHISTOGRAM", + 5: "SUMMARY", + 6: "INFO", + 7: "STATESET", +} + +var MetricMetadata_MetricType_value = map[string]int32{ + "UNKNOWN": 0, + "COUNTER": 1, + "GAUGE": 2, + "HISTOGRAM": 3, + "GAUGEHISTOGRAM": 4, + "SUMMARY": 5, + "INFO": 6, + "STATESET": 7, +} + +func (x MetricMetadata_MetricType) String() string { + return proto.EnumName(MetricMetadata_MetricType_name, int32(x)) +} + +func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_UNKNOWN Histogram_ResetHint = 0 + Histogram_YES Histogram_ResetHint = 1 + Histogram_NO Histogram_ResetHint = 2 + Histogram_GAUGE Histogram_ResetHint = 3 +) + +var Histogram_ResetHint_name = map[int32]string{ + 0: "UNKNOWN", + 1: "YES", + 2: "NO", + 3: "GAUGE", +} + +var Histogram_ResetHint_value = map[string]int32{ + "UNKNOWN": 0, + "YES": 1, + "NO": 2, + "GAUGE": 3, +} + +func (x Histogram_ResetHint) String() string { + return proto.EnumName(Histogram_ResetHint_name, int32(x)) +} + +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3, 0} +} + +type LabelMatcher_Type int32 + +const ( + LabelMatcher_EQ LabelMatcher_Type = 0 + LabelMatcher_NEQ LabelMatcher_Type = 1 + LabelMatcher_RE LabelMatcher_Type = 2 + LabelMatcher_NRE LabelMatcher_Type = 3 +) + +var LabelMatcher_Type_name = map[int32]string{ + 0: "EQ", + 1: "NEQ", + 2: "RE", + 3: "NRE", +} + +var LabelMatcher_Type_value = map[string]int32{ + "EQ": 0, + "NEQ": 1, + "RE": 2, + "NRE": 3, +} + +func (x LabelMatcher_Type) String() string { + return proto.EnumName(LabelMatcher_Type_name, int32(x)) +} + +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{8, 0} +} + +// We require this to match chunkenc.Encoding. +type Chunk_Encoding int32 + +const ( + Chunk_UNKNOWN Chunk_Encoding = 0 + Chunk_XOR Chunk_Encoding = 1 + Chunk_HISTOGRAM Chunk_Encoding = 2 + Chunk_FLOAT_HISTOGRAM Chunk_Encoding = 3 +) + +var Chunk_Encoding_name = map[int32]string{ + 0: "UNKNOWN", + 1: "XOR", + 2: "HISTOGRAM", + 3: "FLOAT_HISTOGRAM", +} + +var Chunk_Encoding_value = map[string]int32{ + "UNKNOWN": 0, + "XOR": 1, + "HISTOGRAM": 2, + "FLOAT_HISTOGRAM": 3, +} + +func (x Chunk_Encoding) String() string { + return proto.EnumName(Chunk_Encoding_name, int32(x)) +} + +func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{10, 0} +} + +type MetricMetadata struct { + // Represents the metric type, these match the set from Prometheus. + // Refer to github.com/prometheus/common/model/metadata.go for details. 
+ Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.MetricMetadata_MetricType" json:"type,omitempty"` + MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"` + Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricMetadata) Reset() { *m = MetricMetadata{} } +func (m *MetricMetadata) String() string { return proto.CompactTextString(m) } +func (*MetricMetadata) ProtoMessage() {} +func (*MetricMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} +func (m *MetricMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricMetadata.Merge(m, src) +} +func (m *MetricMetadata) XXX_Size() int { + return m.Size() +} +func (m *MetricMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MetricMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo + +func (m *MetricMetadata) GetType() MetricMetadata_MetricType { + if m != nil { + return m.Type + } + return MetricMetadata_UNKNOWN +} + +func (m *MetricMetadata) GetMetricFamilyName() string { + if m != nil { + return m.MetricFamilyName + } + return "" +} + +func (m *MetricMetadata) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricMetadata) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{1} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(m, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo + +func (m *Sample) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Sample) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type Exemplar struct { + // Optional, can be empty. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +func (m *Exemplar) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *Exemplar) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. 
+type Histogram struct { + // Types that are valid to be assigned to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` + // The schema defines the bucket schema. Currently, valid numbers + // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n logarithmic buckets. Or in other words, each + // bucket boundary is the previous boundary times 2^(2^-n). In the + // future, more bucket schemas may be added using numbers < -4 or > + // 8. + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` + // Types that are valid to be assigned to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` + NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` + // Positive Buckets. + PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=prometheus.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. 
+ Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +type isHistogram_Count interface { + isHistogram_Count() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() + MarshalTo([]byte) (int, error) + Size() int +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof" json:"count_int,omitempty"` +} +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof" json:"count_float,omitempty"` +} +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof" json:"zero_count_int,omitempty"` +} +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof" json:"zero_count_float,omitempty"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} +func (*Histogram_CountFloat) isHistogram_Count() {} +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (m *Histogram) GetCountInt() uint64 { + if x, ok := m.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (m *Histogram) GetCountFloat() float64 { + if x, ok := m.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (m *Histogram) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *Histogram) GetSchema() int32 { + if m != nil { + return m.Schema + } + return 0 +} + +func (m *Histogram) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCountInt() uint64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (m *Histogram) GetZeroCountFloat() float64 { + if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (m *Histogram) GetNegativeSpans() []BucketSpan { + if m != nil { + return m.NegativeSpans + } + return nil +} + +func (m *Histogram) 
GetNegativeDeltas() []int64 { + if m != nil { + return m.NegativeDeltas + } + return nil +} + +func (m *Histogram) GetNegativeCounts() []float64 { + if m != nil { + return m.NegativeCounts + } + return nil +} + +func (m *Histogram) GetPositiveSpans() []BucketSpan { + if m != nil { + return m.PositiveSpans + } + return nil +} + +func (m *Histogram) GetPositiveDeltas() []int64 { + if m != nil { + return m.PositiveDeltas + } + return nil +} + +func (m *Histogram) GetPositiveCounts() []float64 { + if m != nil { + return m.PositiveCounts + } + return nil +} + +func (m *Histogram) GetResetHint() Histogram_ResetHint { + if m != nil { + return m.ResetHint + } + return Histogram_UNKNOWN +} + +func (m *Histogram) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Histogram) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } +} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSpan) Reset() { *m = BucketSpan{} } +func (m *BucketSpan) String() string { return proto.CompactTextString(m) } +func (*BucketSpan) ProtoMessage() {} +func (*BucketSpan) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{4} +} +func (m *BucketSpan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BucketSpan) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSpan.Merge(m, src) +} +func (m *BucketSpan) XXX_Size() int { + return m.Size() +} +func (m *BucketSpan) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSpan.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSpan proto.InternalMessageInfo + +func (m *BucketSpan) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *BucketSpan) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +// TimeSeries represents samples and labels for a single time series. +type TimeSeries struct { + // For a timeseries to be valid, and for the samples and exemplars + // to be ingested by the remote system properly, the labels field is required. 
+ Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"` + Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{5} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *TimeSeries) GetSamples() []Sample { + if m != nil { + return m.Samples + } + return nil +} + +func (m *TimeSeries) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *TimeSeries) GetHistograms() []Histogram { + if m != nil { + return m.Histograms + } + return nil +} + +type Label struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{6} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(m, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) +} + +var xxx_messageInfo_Label proto.InternalMessageInfo + +func (m *Label) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Label) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type Labels struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Labels) Reset() { *m = Labels{} } +func (m *Labels) String() string { return proto.CompactTextString(m) } 
+func (*Labels) ProtoMessage() {} +func (*Labels) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{7} +} +func (m *Labels) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Labels.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Labels) XXX_Merge(src proto.Message) { + xxx_messageInfo_Labels.Merge(m, src) +} +func (m *Labels) XXX_Size() int { + return m.Size() +} +func (m *Labels) XXX_DiscardUnknown() { + xxx_messageInfo_Labels.DiscardUnknown(m) +} + +var xxx_messageInfo_Labels proto.InternalMessageInfo + +func (m *Labels) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +// Matcher specifies a rule, which can match a set of labels or not. +type LabelMatcher struct { + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{8} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(m, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo + +func (m *LabelMatcher) GetType() LabelMatcher_Type { + if m != nil { + return m.Type + } + return LabelMatcher_EQ +} + +func (m *LabelMatcher) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *LabelMatcher) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +type ReadHints struct { + StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"` + Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"` + StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` + EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"` + Grouping []string `protobuf:"bytes,5,rep,name=grouping,proto3" json:"grouping,omitempty"` + By bool `protobuf:"varint,6,opt,name=by,proto3" json:"by,omitempty"` + RangeMs int64 `protobuf:"varint,7,opt,name=range_ms,json=rangeMs,proto3" json:"range_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadHints) Reset() { *m = ReadHints{} } +func (m *ReadHints)
String() string { return proto.CompactTextString(m) } +func (*ReadHints) ProtoMessage() {} +func (*ReadHints) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{9} +} +func (m *ReadHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReadHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadHints.Merge(m, src) +} +func (m *ReadHints) XXX_Size() int { + return m.Size() +} +func (m *ReadHints) XXX_DiscardUnknown() { + xxx_messageInfo_ReadHints.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadHints proto.InternalMessageInfo + +func (m *ReadHints) GetStepMs() int64 { + if m != nil { + return m.StepMs + } + return 0 +} + +func (m *ReadHints) GetFunc() string { + if m != nil { + return m.Func + } + return "" +} + +func (m *ReadHints) GetStartMs() int64 { + if m != nil { + return m.StartMs + } + return 0 +} + +func (m *ReadHints) GetEndMs() int64 { + if m != nil { + return m.EndMs + } + return 0 +} + +func (m *ReadHints) GetGrouping() []string { + if m != nil { + return m.Grouping + } + return nil +} + +func (m *ReadHints) GetBy() bool { + if m != nil { + return m.By + } + return false +} + +func (m *ReadHints) GetRangeMs() int64 { + if m != nil { + return m.RangeMs + } + return 0 +} + +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. +type Chunk struct { + MinTimeMs int64 `protobuf:"varint,1,opt,name=min_time_ms,json=minTimeMs,proto3" json:"min_time_ms,omitempty"` + MaxTimeMs int64 `protobuf:"varint,2,opt,name=max_time_ms,json=maxTimeMs,proto3" json:"max_time_ms,omitempty"` + Type Chunk_Encoding `protobuf:"varint,3,opt,name=type,proto3,enum=prometheus.Chunk_Encoding" json:"type,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (m *Chunk) String() string { return proto.CompactTextString(m) } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{10} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(m, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_Chunk proto.InternalMessageInfo + +func (m *Chunk) GetMinTimeMs() int64 { + if m != nil { + return m.MinTimeMs + } + return 0 +} + +func (m *Chunk) GetMaxTimeMs() int64 { + if m != nil { + return m.MaxTimeMs + } + return 0 +} + +func (m *Chunk) GetType() Chunk_Encoding { + if m != nil { + return m.Type + } + return Chunk_UNKNOWN +} + +func (m *Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// ChunkedSeries represents a single, encoded time series.
+type ChunkedSeries struct { + // Labels should be sorted. + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + // Chunks will be in start time order and may overlap. + Chunks []Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChunkedSeries) Reset() { *m = ChunkedSeries{} } +func (m *ChunkedSeries) String() string { return proto.CompactTextString(m) } +func (*ChunkedSeries) ProtoMessage() {} +func (*ChunkedSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{11} +} +func (m *ChunkedSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChunkedSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChunkedSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChunkedSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChunkedSeries.Merge(m, src) +} +func (m *ChunkedSeries) XXX_Size() int { + return m.Size() +} +func (m *ChunkedSeries) XXX_DiscardUnknown() { + xxx_messageInfo_ChunkedSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_ChunkedSeries proto.InternalMessageInfo + +func (m *ChunkedSeries) GetLabels() []Label { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ChunkedSeries) GetChunks() []Chunk { + if m != nil { + return m.Chunks + } + return nil +} + +func init() { + proto.RegisterEnum("prometheus.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value) + proto.RegisterEnum("prometheus.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value) + proto.RegisterEnum("prometheus.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) + proto.RegisterEnum("prometheus.Chunk_Encoding", Chunk_Encoding_name, Chunk_Encoding_value) + proto.RegisterType((*MetricMetadata)(nil), "prometheus.MetricMetadata") + proto.RegisterType((*Sample)(nil), "prometheus.Sample") + proto.RegisterType((*Exemplar)(nil), "prometheus.Exemplar") + proto.RegisterType((*Histogram)(nil), "prometheus.Histogram") + proto.RegisterType((*BucketSpan)(nil), "prometheus.BucketSpan") + proto.RegisterType((*TimeSeries)(nil), "prometheus.TimeSeries") + proto.RegisterType((*Label)(nil), "prometheus.Label") + proto.RegisterType((*Labels)(nil), "prometheus.Labels") + proto.RegisterType((*LabelMatcher)(nil), "prometheus.LabelMatcher") + proto.RegisterType((*ReadHints)(nil), "prometheus.ReadHints") + proto.RegisterType((*Chunk)(nil), "prometheus.Chunk") + proto.RegisterType((*ChunkedSeries)(nil), "prometheus.ChunkedSeries") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = []byte{ + // 1092 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdb, 0x6e, 0xdb, 0x46, + 0x13, 0x36, 0x49, 0x89, 0x12, 0x47, 0x87, 0xd0, 0xfb, 0x3b, 0xf9, 0x59, 0xa3, 0x71, 0x54, 0x02, + 0x69, 0x85, 0xa2, 0x90, 0x11, 0xb7, 0x17, 0x0d, 0x1a, 0x14, 0xb0, 0x1d, 0xf9, 0x80, 0x5a, 0x12, + 0xb2, 0x92, 0xd1, 0xa6, 0x37, 0xc2, 0x5a, 0x5a, 0x4b, 0x44, 0xc4, 0x43, 0xb9, 0xab, 0xc0, 0xea, + 0x7b, 0xf4, 0xae, 0x2f, 0xd1, 0xb7, 0x08, 0xd0, 0x9b, 0xf6, 0x05, 0x8a, 0xc2, 0x57, 0x7d, 0x8c, + 0x62, 0x87, 0xa4, 0x48, 0xc5, 0x29, 0xd0, 0xf4, 0x6e, 0xe7, 
0x9b, 0x6f, 0x76, 0x3e, 0xee, 0xce, + 0xcc, 0x12, 0x6a, 0x72, 0x15, 0x71, 0xd1, 0x89, 0xe2, 0x50, 0x86, 0x04, 0xa2, 0x38, 0xf4, 0xb9, + 0x9c, 0xf3, 0xa5, 0xd8, 0xdd, 0x99, 0x85, 0xb3, 0x10, 0xe1, 0x7d, 0xb5, 0x4a, 0x18, 0xee, 0xcf, + 0x3a, 0x34, 0x7b, 0x5c, 0xc6, 0xde, 0xa4, 0xc7, 0x25, 0x9b, 0x32, 0xc9, 0xc8, 0x53, 0x28, 0xa9, + 0x3d, 0x1c, 0xad, 0xa5, 0xb5, 0x9b, 0x07, 0x8f, 0x3b, 0xf9, 0x1e, 0x9d, 0x4d, 0x66, 0x6a, 0x8e, + 0x56, 0x11, 0xa7, 0x18, 0x42, 0x3e, 0x03, 0xe2, 0x23, 0x36, 0xbe, 0x66, 0xbe, 0xb7, 0x58, 0x8d, + 0x03, 0xe6, 0x73, 0x47, 0x6f, 0x69, 0x6d, 0x8b, 0xda, 0x89, 0xe7, 0x04, 0x1d, 0x7d, 0xe6, 0x73, + 0x42, 0xa0, 0x34, 0xe7, 0x8b, 0xc8, 0x29, 0xa1, 0x1f, 0xd7, 0x0a, 0x5b, 0x06, 0x9e, 0x74, 0xca, + 0x09, 0xa6, 0xd6, 0xee, 0x0a, 0x20, 0xcf, 0x44, 0x6a, 0x50, 0xb9, 0xec, 0x7f, 0xd3, 0x1f, 0x7c, + 0xdb, 0xb7, 0xb7, 0x94, 0x71, 0x3c, 0xb8, 0xec, 0x8f, 0xba, 0xd4, 0xd6, 0x88, 0x05, 0xe5, 0xd3, + 0xc3, 0xcb, 0xd3, 0xae, 0xad, 0x93, 0x06, 0x58, 0x67, 0xe7, 0xc3, 0xd1, 0xe0, 0x94, 0x1e, 0xf6, + 0x6c, 0x83, 0x10, 0x68, 0xa2, 0x27, 0xc7, 0x4a, 0x2a, 0x74, 0x78, 0xd9, 0xeb, 0x1d, 0xd2, 0x97, + 0x76, 0x99, 0x54, 0xa1, 0x74, 0xde, 0x3f, 0x19, 0xd8, 0x26, 0xa9, 0x43, 0x75, 0x38, 0x3a, 0x1c, + 0x75, 0x87, 0xdd, 0x91, 0x5d, 0x71, 0x9f, 0x81, 0x39, 0x64, 0x7e, 0xb4, 0xe0, 0x64, 0x07, 0xca, + 0xaf, 0xd9, 0x62, 0x99, 0x1c, 0x8b, 0x46, 0x13, 0x83, 0x7c, 0x08, 0x96, 0xf4, 0x7c, 0x2e, 0x24, + 0xf3, 0x23, 0xfc, 0x4e, 0x83, 0xe6, 0x80, 0x1b, 0x42, 0xb5, 0x7b, 0xc3, 0xfd, 0x68, 0xc1, 0x62, + 0xb2, 0x0f, 0xe6, 0x82, 0x5d, 0xf1, 0x85, 0x70, 0xb4, 0x96, 0xd1, 0xae, 0x1d, 0x6c, 0x17, 0xcf, + 0xf5, 0x42, 0x79, 0x8e, 0x4a, 0x6f, 0xfe, 0x78, 0xb4, 0x45, 0x53, 0x5a, 0x9e, 0x50, 0xff, 0xc7, + 0x84, 0xc6, 0xdb, 0x09, 0x7f, 0x2d, 0x83, 0x75, 0xe6, 0x09, 0x19, 0xce, 0x62, 0xe6, 0x93, 0x87, + 0x60, 0x4d, 0xc2, 0x65, 0x20, 0xc7, 0x5e, 0x20, 0x51, 0x76, 0xe9, 0x6c, 0x8b, 0x56, 0x11, 0x3a, + 0x0f, 0x24, 0xf9, 0x08, 0x6a, 0x89, 0xfb, 0x7a, 0x11, 0x32, 0x99, 0xa4, 0x39, 0xdb, 0xa2, 0x80, + 0xe0, 0x89, 0xc2, 0x88, 0x0d, 0x86, 0x58, 0xfa, 0x98, 0x47, 0xa3, 0x6a, 0x49, 0x1e, 0x80, 0x29, + 0x26, 0x73, 0xee, 0x33, 0xbc, 0xb5, 0x6d, 0x9a, 0x5a, 0xe4, 0x31, 0x34, 0x7f, 0xe4, 0x71, 0x38, + 0x96, 0xf3, 0x98, 0x8b, 0x79, 0xb8, 0x98, 0xe2, 0x0d, 0x6a, 0xb4, 0xa1, 0xd0, 0x51, 0x06, 0x92, + 0x8f, 0x53, 0x5a, 0xae, 0xcb, 0x44, 0x5d, 0x1a, 0xad, 0x2b, 0xfc, 0x38, 0xd3, 0xf6, 0x29, 0xd8, + 0x05, 0x5e, 0x22, 0xb0, 0x82, 0x02, 0x35, 0xda, 0x5c, 0x33, 0x13, 0x91, 0xc7, 0xd0, 0x0c, 0xf8, + 0x8c, 0x49, 0xef, 0x35, 0x1f, 0x8b, 0x88, 0x05, 0xc2, 0xa9, 0xe2, 0x09, 0x3f, 0x28, 0x9e, 0xf0, + 0xd1, 0x72, 0xf2, 0x8a, 0xcb, 0x61, 0xc4, 0x82, 0xf4, 0x98, 0x1b, 0x59, 0x8c, 0xc2, 0x04, 0xf9, + 0x04, 0xee, 0xad, 0x37, 0x99, 0xf2, 0x85, 0x64, 0xc2, 0xb1, 0x5a, 0x46, 0x9b, 0xd0, 0xf5, 0xde, + 0xcf, 0x11, 0xdd, 0x20, 0xa2, 0x3a, 0xe1, 0x40, 0xcb, 0x68, 0x6b, 0x39, 0x11, 0xa5, 0x09, 0x25, + 0x2b, 0x0a, 0x85, 0x57, 0x90, 0x55, 0xfb, 0x37, 0xb2, 0xb2, 0x98, 0xb5, 0xac, 0xf5, 0x26, 0xa9, + 0xac, 0x7a, 0x22, 0x2b, 0x83, 0x73, 0x59, 0x6b, 0x62, 0x2a, 0xab, 0x91, 0xc8, 0xca, 0xe0, 0x54, + 0xd6, 0xd7, 0x00, 0x31, 0x17, 0x5c, 0x8e, 0xe7, 0xea, 0xf4, 0x9b, 0xd8, 0xe3, 0x8f, 0x8a, 0x92, + 0xd6, 0xf5, 0xd3, 0xa1, 0x8a, 0x77, 0xe6, 0x05, 0x92, 0x5a, 0x71, 0xb6, 0xdc, 0x2c, 0xc0, 0x7b, + 0x6f, 0x17, 0xe0, 0x17, 0x60, 0xad, 0xa3, 0x36, 0x3b, 0xb5, 0x02, 0xc6, 0xcb, 0xee, 0xd0, 0xd6, + 0x88, 0x09, 0x7a, 0x7f, 0x60, 0xeb, 0x79, 0xb7, 0x1a, 0x47, 0x15, 0x28, 0xa3, 0xe6, 0xa3, 0x3a, + 0x40, 0x7e, 0xed, 0xee, 0x33, 0x80, 0xfc, 0x7c, 0x54, 0xe5, 0x85, 0xd7, 0xd7, 0x82, 
0x27, 0xa5, + 0xbc, 0x4d, 0x53, 0x4b, 0xe1, 0x0b, 0x1e, 0xcc, 0xe4, 0x1c, 0x2b, 0xb8, 0x41, 0x53, 0xcb, 0xfd, + 0x4b, 0x03, 0x18, 0x79, 0x3e, 0x1f, 0xf2, 0xd8, 0xe3, 0xe2, 0xfd, 0xfb, 0xef, 0x00, 0x2a, 0x02, + 0x5b, 0x5f, 0x38, 0x3a, 0x46, 0x90, 0x62, 0x44, 0x32, 0x15, 0xd2, 0x90, 0x8c, 0x48, 0xbe, 0x04, + 0x8b, 0xa7, 0x0d, 0x2f, 0x1c, 0x03, 0xa3, 0x76, 0x8a, 0x51, 0xd9, 0x34, 0x48, 0xe3, 0x72, 0x32, + 0xf9, 0x0a, 0x60, 0x9e, 0x1d, 0xbc, 0x70, 0x4a, 0x18, 0x7a, 0xff, 0x9d, 0xd7, 0x92, 0xc6, 0x16, + 0xe8, 0xee, 0x13, 0x28, 0xe3, 0x17, 0xa8, 0xe9, 0x89, 0x13, 0x57, 0x4b, 0xa6, 0xa7, 0x5a, 0x6f, + 0xce, 0x11, 0x2b, 0x9d, 0x23, 0xee, 0x53, 0x30, 0x2f, 0x92, 0xef, 0x7c, 0xdf, 0x83, 0x71, 0x7f, + 0xd2, 0xa0, 0x8e, 0x78, 0x8f, 0xc9, 0xc9, 0x9c, 0xc7, 0xe4, 0xc9, 0xc6, 0x83, 0xf1, 0xf0, 0x4e, + 0x7c, 0xca, 0xeb, 0x14, 0x1e, 0x8a, 0x4c, 0xa8, 0xfe, 0x2e, 0xa1, 0x46, 0x51, 0x68, 0x1b, 0x4a, + 0x38, 0xf6, 0x4d, 0xd0, 0xbb, 0x2f, 0x92, 0x3a, 0xea, 0x77, 0x5f, 0x24, 0x75, 0x44, 0xd5, 0xa8, + 0x57, 0x00, 0xed, 0xda, 0x86, 0xfb, 0x8b, 0xa6, 0x8a, 0x8f, 0x4d, 0x55, 0xed, 0x09, 0xf2, 0x7f, + 0xa8, 0x08, 0xc9, 0xa3, 0xb1, 0x2f, 0x50, 0x97, 0x41, 0x4d, 0x65, 0xf6, 0x84, 0x4a, 0x7d, 0xbd, + 0x0c, 0x26, 0x59, 0x6a, 0xb5, 0x26, 0x1f, 0x40, 0x55, 0x48, 0x16, 0x4b, 0xc5, 0x4e, 0x86, 0x6a, + 0x05, 0xed, 0x9e, 0x20, 0xf7, 0xc1, 0xe4, 0xc1, 0x74, 0x8c, 0x97, 0xa2, 0x1c, 0x65, 0x1e, 0x4c, + 0x7b, 0x82, 0xec, 0x42, 0x75, 0x16, 0x87, 0xcb, 0xc8, 0x0b, 0x66, 0x4e, 0xb9, 0x65, 0xb4, 0x2d, + 0xba, 0xb6, 0x49, 0x13, 0xf4, 0xab, 0x15, 0x0e, 0xb6, 0x2a, 0xd5, 0xaf, 0x56, 0x6a, 0xf7, 0x98, + 0x05, 0x33, 0xae, 0x36, 0xa9, 0x24, 0xbb, 0xa3, 0xdd, 0x13, 0xee, 0xef, 0x1a, 0x94, 0x8f, 0xe7, + 0xcb, 0xe0, 0x15, 0xd9, 0x83, 0x9a, 0xef, 0x05, 0x63, 0xd5, 0x4a, 0xb9, 0x66, 0xcb, 0xf7, 0x02, + 0x55, 0xc3, 0x3d, 0x81, 0x7e, 0x76, 0xb3, 0xf6, 0xa7, 0x6f, 0x8d, 0xcf, 0x6e, 0x52, 0x7f, 0x27, + 0xbd, 0x04, 0x03, 0x2f, 0x61, 0xb7, 0x78, 0x09, 0x98, 0xa0, 0xd3, 0x0d, 0x26, 0xe1, 0xd4, 0x0b, + 0x66, 0xf9, 0x0d, 0xa8, 0x37, 0x1c, 0xbf, 0xaa, 0x4e, 0x71, 0xed, 0x3e, 0x87, 0x6a, 0xc6, 0xba, + 0xd3, 0xbc, 0xdf, 0x0d, 0xd4, 0x13, 0xbb, 0xf1, 0xae, 0xea, 0xe4, 0x7f, 0x70, 0xef, 0xe4, 0x62, + 0x70, 0x38, 0x1a, 0x17, 0x1e, 0x5b, 0xf7, 0x07, 0x68, 0x60, 0x46, 0x3e, 0xfd, 0xaf, 0xad, 0xb7, + 0x0f, 0xe6, 0x44, 0xed, 0x90, 0x75, 0xde, 0xf6, 0x9d, 0xaf, 0xc9, 0x02, 0x12, 0xda, 0xd1, 0xce, + 0x9b, 0xdb, 0x3d, 0xed, 0xb7, 0xdb, 0x3d, 0xed, 0xcf, 0xdb, 0x3d, 0xed, 0x7b, 0x53, 0xb1, 0xa3, + 0xab, 0x2b, 0x13, 0x7f, 0x71, 0x3e, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x5f, 0xf2, 0x4d, + 0x13, 0x09, 0x00, 0x00, +} + +func (m *MetricMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x22 + } + if len(m.MetricFamilyName) > 0 { + i -= len(m.MetricFamilyName) + copy(dAtA[i:], 
m.MetricFamilyName) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MetricFamilyName))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var j2 int + dAtA4 := make([]byte, len(m.PositiveDeltas)*10) + for _, num := range m.PositiveDeltas { + x3 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x3 >= 1<<7 { + dAtA4[j2] = 
uint8(uint64(x3)&0x7f | 0x80) + j2++ + x3 >>= 7 + } + dAtA4[j2] = uint8(x3) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA4[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5)) + } + i = encodeVarintTypes(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var j6 int + dAtA8 := make([]byte, len(m.NegativeDeltas)*10) + for _, num := range m.NegativeDeltas { + x7 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x7 >= 1<<7 { + dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80) + j6++ + x7 >>= 7 + } + dAtA8[j6] = uint8(x7) + j6++ + } + i -= j6 + copy(dAtA[i:], dAtA8[:j6]) + i = encodeVarintTypes(dAtA, i, uint64(j6)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroCount != nil { + { + size := m.ZeroCount.Size() + i -= size + if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + if m.Count != nil { + { + size := m.Count.Size() + i -= size + if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintTypes(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Length != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = encodeVarintTypes(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Labels) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Labels) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelMatcher) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelMatcher) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadHints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadHints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadHints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.RangeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RangeMs)) + i-- + dAtA[i] = 0x38 + } + if m.By { + i-- + if m.By { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.Grouping) > 0 { + for iNdEx := len(m.Grouping) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Grouping[iNdEx]) + copy(dAtA[i:], m.Grouping[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Grouping[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.EndMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.EndMs)) + i-- + dAtA[i] = 0x20 + } + if m.StartMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.StartMs)) + i-- + dAtA[i] = 0x18 + } + if len(m.Func) > 0 { + i -= len(m.Func) + copy(dAtA[i:], m.Func) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Func))) + i-- + dAtA[i] = 0x12 + } + if m.StepMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.StepMs)) + i-- + 
dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Chunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x22 + } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x18 + } + if m.MaxTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTimeMs)) + i-- + dAtA[i] = 0x10 + } + if m.MinTimeMs != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MinTimeMs)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChunkedSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChunkedSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChunkedSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MetricMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.MetricFamilyName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Help) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += m.Count.Size() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + sozTypes(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if m.ZeroCount != nil { + n += m.ZeroCount.Size() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += sozTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + sovTypes(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + sovTypes(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Histogram_CountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovTypes(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozTypes(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovTypes(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Labels) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *LabelMatcher) Size() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadHints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StepMs != 0 { + n += 1 + sovTypes(uint64(m.StepMs)) + } + l = len(m.Func) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.StartMs != 0 { + n += 1 + sovTypes(uint64(m.StartMs)) + } + if m.EndMs != 0 { + n += 1 + sovTypes(uint64(m.EndMs)) + } + if len(m.Grouping) > 0 { + for _, s := range m.Grouping { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.By { + n += 2 + } + if m.RangeMs != 0 { + n += 1 + sovTypes(uint64(m.RangeMs)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Chunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MinTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MinTimeMs)) + } + if m.MaxTimeMs != 0 { + n += 1 + sovTypes(uint64(m.MaxTimeMs)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ChunkedSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MetricMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= MetricMetadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricFamilyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricFamilyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + 
if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
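The expressions of the form (v >> 1) ^ uint64((int64(v&1)<<63)>>63) above undo ZigZag encoding, which sint64 fields such as positive_deltas (and, in 32-bit form, the BucketSpan Offset field just above) use so that negative values of small magnitude still encode to short varints. A hedged round-trip sketch continuing the helper file from the earlier aside:

// zigzagEncode64 maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ...; the
// arithmetic right shift smears the sign bit across all 64 positions.
func zigzagEncode64(v int64) uint64 {
	return uint64((v << 1) ^ (v >> 63))
}

// zigzagDecode64 inverts the mapping; it is the same expression the
// generated code applies after reading the raw varint.
func zigzagDecode64(u uint64) int64 {
	return int64((u >> 1) ^ uint64((int64(u&1)<<63)>>63))
}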
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, Sample{}) + if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Label) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Label: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Label: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
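Wire type 2 is shared by strings, bytes, embedded messages, and packed repeated fields: a varint length followed by that many payload bytes. The Name and Value cases above slice the payload straight into a string, while message fields such as TimeSeries.Labels recursively unmarshal the same slice. A sketch building on the decodeUvarint helper from the earlier aside:

// readLengthDelimited returns the payload of one wire-type-2 field starting
// at off, along with the offset of the first byte after the field.
func readLengthDelimited(buf []byte, off int) ([]byte, int, error) {
	n, used, err := decodeUvarint(buf[off:])
	if err != nil {
		return nil, 0, err
	}
	if n > uint64(len(buf)-off-used) {
		return nil, 0, io.ErrUnexpectedEOF
	}
	start := off + used
	return buf[start : start+int(n)], start + int(n), nil
}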
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Labels) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Labels: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Labels: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelMatcher) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelMatcher: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelMatcher: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= LabelMatcher_Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
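Enum fields such as LabelMatcher.Type have no dedicated wire format; they travel as ordinary varints and the decoder simply converts the value, which is why out-of-range values from newer schema versions pass through unharmed. A small hedged sketch reusing the same helper:

// decodeMatcherType reads one enum value; only the Go-side conversion to
// the named type distinguishes it from any other varint field.
func decodeMatcherType(buf []byte) (LabelMatcher_Type, int, error) {
	raw, n, err := decodeUvarint(buf)
	if err != nil {
		return 0, 0, err
	}
	return LabelMatcher_Type(raw), n, nil // EQ=0, NEQ=1, RE=2, NRE=3
}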
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadHints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadHints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadHints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StepMs", wireType) + } + m.StepMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StepMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Func", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Func = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartMs", wireType) + } + m.StartMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndMs", wireType) + } + m.EndMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Grouping", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Grouping = append(m.Grouping, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field By", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + m.By = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeMs", wireType) + } + m.RangeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RangeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Chunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Chunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Chunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinTimeMs", wireType) + } + m.MinTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTimeMs", wireType) + } + m.MaxTimeMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTimeMs |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Chunk_Encoding(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
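One subtlety in the Data case here: append(m.Data[:0], dAtA[iNdEx:postIndex]...) copies the chunk payload rather than re-slicing the input, so the decoded message never aliases the caller's buffer. An illustrative (non-vendored) program showing why that matters:

package main

import "fmt"

func main() {
	src := []byte{1, 2, 3}
	aliased := src[:2]                        // shares src's backing array
	copied := append([]byte(nil), src[:2]...) // independent copy
	src[0] = 9
	fmt.Println(aliased[0], copied[0]) // prints 9 1: only the alias sees the write
}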
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChunkedSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChunkedSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChunkedSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, Label{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, Chunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
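Every default branch above preserves forward compatibility: skipTypes (defined just below) measures the unknown field, and its raw bytes are appended to XXX_unrecognized so they survive a re-marshal. Condensed to its essentials, per-wire-type skipping looks roughly like this sketch (with "fmt" added to the sketch's imports; group wire types 3 and 4, which the real helper also tracks, are omitted):

// skipField advances past one field's payload given its wire type.
func skipField(buf []byte, off, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until one has its high bit clear
		_, n, err := decodeUvarint(buf[off:])
		return off + n, err
	case 1: // fixed64
		return off + 8, nil
	case 2: // length-delimited: a length varint, then the payload
		_, end, err := readLengthDelimited(buf, off)
		return end, err
	case 5: // fixed32
		return off + 4, nil
	default:
		return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
	}
}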
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.proto b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.proto new file mode 100644 index 000000000000..61fc1e0143ea --- /dev/null +++ b/_vendor/github.com/prometheus/prometheus@v0.54.1/prompb/types.proto @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompb"; + +import "gogoproto/gogo.proto"; + +message MetricMetadata { + enum MetricType { + UNKNOWN = 0; + COUNTER = 1; + GAUGE = 2; + HISTOGRAM = 3; + GAUGEHISTOGRAM = 4; + SUMMARY = 5; + INFO = 6; + STATESET = 7; + } + + // Represents the metric type, these match the set from Prometheus. + // Refer to github.com/prometheus/common/model/metadata.go for details. + MetricType type = 1; + string metric_family_name = 2; + string help = 4; + string unit = 5; +} + +message Sample { + double value = 1; + // timestamp is in ms format, see model/timestamp/timestamp.go for + // conversion from time.Time to Prometheus timestamp. + int64 timestamp = 2; +} + +message Exemplar { + // Optional, can be empty. 
+  repeated Label labels = 1 [(gogoproto.nullable) = false];
+  double value = 2;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 3;
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both the usual
+// integer histogram and a float histogram.
+message Histogram {
+  enum ResetHint {
+    UNKNOWN = 0; // Need to test for a counter reset explicitly.
+    YES = 1;     // This is the 1st histogram after a counter reset.
+    NO = 2;      // There was no counter reset between this and the previous Histogram.
+    GAUGE = 3;   // This is a gauge histogram where counter resets don't happen.
+  }
+
+  oneof count { // Count of observations in the histogram.
+    uint64 count_int = 1;
+    double count_float = 2;
+  }
+  double sum = 3; // Sum of observations in the histogram.
+  // The schema defines the bucket schema. Currently, valid numbers
+  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+  // is a bucket boundary in each case, and then each power of two is
+  // divided into 2^n logarithmic buckets. Or in other words, each
+  // bucket boundary is the previous boundary times 2^(2^-n). In the
+  // future, more bucket schemas may be added using numbers < -4 or >
+  // 8.
+  sint32 schema = 4;
+  double zero_threshold = 5; // Breadth of the zero bucket.
+  oneof zero_count { // Count in zero bucket.
+    uint64 zero_count_int = 6;
+    double zero_count_float = 7;
+  }
+
+  // Negative Buckets.
+  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
+  // Use either "negative_deltas" or "negative_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double negative_counts = 10; // Absolute count of each bucket.
+
+  // Positive Buckets.
+  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
+  // Use either "positive_deltas" or "positive_counts", the former for
+  // regular histograms with integer counts, the latter for float
+  // histograms.
+  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+  repeated double positive_counts = 13; // Absolute count of each bucket.
+
+  ResetHint reset_hint = 14;
+  // timestamp is in ms format, see model/timestamp/timestamp.go for
+  // conversion from time.Time to Prometheus timestamp.
+  int64 timestamp = 15;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+  uint32 length = 2; // Length of consecutive buckets.
+}
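The schema comment above compresses a lot of math: for schema n, every power of two is divided into 2^n logarithmic buckets, or equivalently each bucket boundary is the previous one times 2^(2^-n). A hedged helper, not part of the vendored code, making the boundary formula concrete:

package main

import (
	"fmt"
	"math"
)

// bucketUpperBound returns the upper boundary of bucket index i for a given
// schema: base^i with base = 2^(2^-schema). Schema 0 yields the plain powers
// of two; schema 1 splits each power of two into 2 buckets, schema 2 into 4.
func bucketUpperBound(schema int32, i int) float64 {
	base := math.Pow(2, math.Pow(2, -float64(schema)))
	return math.Pow(base, float64(i))
}

func main() {
	fmt.Println(bucketUpperBound(0, 3)) // 8: schema 0 boundaries are 1, 2, 4, 8, ...
	fmt.Println(bucketUpperBound(1, 3)) // ~2.83: schema 1 inserts sqrt(2) steps
}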
+// TimeSeries represents samples and labels for a single time series.
+message TimeSeries {
+  // For a timeseries to be valid, and for the samples and exemplars
+  // to be ingested by the remote system properly, the labels field is required.
+  repeated Label labels = 1 [(gogoproto.nullable) = false];
+  repeated Sample samples = 2 [(gogoproto.nullable) = false];
+  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
+  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
+}
+
+message Label {
+  string name = 1;
+  string value = 2;
+}
+
+message Labels {
+  repeated Label labels = 1 [(gogoproto.nullable) = false];
+}
+
+// Matcher specifies a rule, which can match a set of labels or not.
+message LabelMatcher {
+  enum Type {
+    EQ = 0;
+    NEQ = 1;
+    RE = 2;
+    NRE = 3;
+  }
+  Type type = 1;
+  string name = 2;
+  string value = 3;
+}
+
+message ReadHints {
+  int64 step_ms = 1; // Query step size in milliseconds.
+  string func = 2; // String representation of surrounding function or aggregation.
+  int64 start_ms = 3; // Start time in milliseconds.
+  int64 end_ms = 4; // End time in milliseconds.
+  repeated string grouping = 5; // List of label names used in aggregation.
+  bool by = 6; // Indicates whether the aggregation is 'by' (true) or 'without' (false).
+  int64 range_ms = 7; // Range vector selector range in milliseconds.
+}
+
+// Chunk represents a TSDB chunk.
+// Time range [min, max] is inclusive.
+message Chunk {
+  int64 min_time_ms = 1;
+  int64 max_time_ms = 2;
+
+  // We require this to match chunkenc.Encoding.
+  enum Encoding {
+    UNKNOWN = 0;
+    XOR = 1;
+    HISTOGRAM = 2;
+    FLOAT_HISTOGRAM = 3;
+  }
+  Encoding type = 3;
+  bytes data = 4;
+}
+
+// ChunkedSeries represents a single, encoded time series.
+message ChunkedSeries {
+  // Labels should be sorted.
+  repeated Label labels = 1 [(gogoproto.nullable) = false];
+  // Chunks will be in start time order and may overlap.
+ repeated Chunk chunks = 2 [(gogoproto.nullable) = false]; +} diff --git a/go.mod b/go.mod index 30f5522581b2..4d5fc2a3c043 100644 --- a/go.mod +++ b/go.mod @@ -208,7 +208,6 @@ require ( github.com/otiai10/copy v1.12.0 github.com/pierrec/lz4/v4 v4.1.21 github.com/pkg/xattr v0.4.9 - github.com/prometheus/prometheus v0.54.1 github.com/shirou/gopsutil/v4 v4.24.10 github.com/teambition/rrule-go v1.8.2 github.com/tklauser/go-sysconf v0.3.12 @@ -481,6 +480,7 @@ require ( github.com/dlclark/regexp2 v1.4.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/moby/term v0.5.0 // indirect + github.com/prometheus/prometheus/v2 v2.54.1 github.com/yuin/gopher-lua v1.1.1 // indirect ) @@ -494,3 +494,5 @@ replace ( github.com/insomniacslk/dhcp => github.com/elastic/dhcp v0.0.0-20200227161230-57ec251c7eb3 // indirect github.com/meraki/dashboard-api-go/v3 => github.com/tommyers-elastic/dashboard-api-go/v3 v3.0.0-20240913150833-a945473a8f25 ) + +replace github.com/prometheus/prometheus/v2 => ./_vendor/github.com/prometheus/prometheus@v0.54.1 diff --git a/go.sum b/go.sum index 5d1702535981..fa2e806f5887 100644 --- a/go.sum +++ b/go.sum @@ -440,8 +440,6 @@ github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkPro github.com/go-faker/faker/v4 v4.2.0 h1:dGebOupKwssrODV51E0zbMrv5e2gO9VWSLNC1WDCpWg= github.com/go-faker/faker/v4 v4.2.0/go.mod h1:F/bBy8GH9NxOxMInug5Gx4WYeG6fHJZ8Ol/dhcpRub4= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -778,7 +776,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid/v2 v2.0.2 h1:r4fFzBm+bv0wNKNh5eXTwU7i85y5x+uwkxCUTNVQqLc= github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= @@ -849,8 +846,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= -github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-charset 
v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= @@ -1120,9 +1115,8 @@ go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQD go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/metricbeat/helper/prometheus/textparse.go b/metricbeat/helper/prometheus/textparse.go index d9f2f9c8a7c2..a92fd0af6045 100644 --- a/metricbeat/helper/prometheus/textparse.go +++ b/metricbeat/helper/prometheus/textparse.go @@ -28,10 +28,10 @@ import ( "time" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/exemplar" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/textparse" - "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/v2/model/exemplar" + "github.com/prometheus/prometheus/v2/model/labels" + "github.com/prometheus/prometheus/v2/model/textparse" + "github.com/prometheus/prometheus/v2/model/timestamp" "github.com/elastic/elastic-agent-libs/logp" ) diff --git a/metricbeat/helper/prometheus/textparse_test.go b/metricbeat/helper/prometheus/textparse_test.go index 18baf4b4b758..328d3ae7d9cc 100644 --- a/metricbeat/helper/prometheus/textparse_test.go +++ b/metricbeat/helper/prometheus/textparse_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/v2/model/labels" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/logp" diff --git a/metricbeat/module/openmetrics/collector/collector.go b/metricbeat/module/openmetrics/collector/collector.go index 30119d2df9e1..8cb9841d3f9f 100644 --- a/metricbeat/module/openmetrics/collector/collector.go +++ b/metricbeat/module/openmetrics/collector/collector.go @@ -22,7 +22,7 @@ import ( "regexp" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/v2/model/labels" p "github.com/elastic/beats/v7/metricbeat/helper/openmetrics" "github.com/elastic/beats/v7/metricbeat/helper/prometheus" diff --git a/metricbeat/module/openmetrics/collector/collector_test.go b/metricbeat/module/openmetrics/collector/collector_test.go index 708d0d0e92fc..6093c37480d6 100644 --- a/metricbeat/module/openmetrics/collector/collector_test.go +++ b/metricbeat/module/openmetrics/collector/collector_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/prometheus/common/model" - prometheuslabels "github.com/prometheus/prometheus/model/labels" + prometheuslabels "github.com/prometheus/prometheus/v2/model/labels" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" diff --git a/metricbeat/module/prometheus/collector/collector.go 
b/metricbeat/module/prometheus/collector/collector.go index 8c110ee19db9..72d4f83964f0 100644 --- a/metricbeat/module/prometheus/collector/collector.go +++ b/metricbeat/module/prometheus/collector/collector.go @@ -22,7 +22,7 @@ import ( "regexp" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/v2/model/labels" p "github.com/elastic/beats/v7/metricbeat/helper/prometheus" "github.com/elastic/beats/v7/metricbeat/mb" diff --git a/metricbeat/module/prometheus/collector/collector_test.go b/metricbeat/module/prometheus/collector/collector_test.go index baec45115263..ce5ac130a3d8 100644 --- a/metricbeat/module/prometheus/collector/collector_test.go +++ b/metricbeat/module/prometheus/collector/collector_test.go @@ -27,7 +27,7 @@ import ( "testing" "github.com/prometheus/common/model" - pl "github.com/prometheus/prometheus/model/labels" + pl "github.com/prometheus/prometheus/v2/model/labels" "github.com/stretchr/testify/assert" "google.golang.org/protobuf/proto" diff --git a/metricbeat/module/prometheus/remote_write/remote_write.go b/metricbeat/module/prometheus/remote_write/remote_write.go index 95048f05263b..fbf6a9f0cf4f 100644 --- a/metricbeat/module/prometheus/remote_write/remote_write.go +++ b/metricbeat/module/prometheus/remote_write/remote_write.go @@ -24,7 +24,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/prompb" + "github.com/prometheus/prometheus/v2/prompb" serverhelper "github.com/elastic/beats/v7/metricbeat/helper/server" httpserver "github.com/elastic/beats/v7/metricbeat/helper/server/http" From 0ae4a021ced2d3ba90631ec782d170a2015f35c8 Mon Sep 17 00:00:00 2001 From: tommyers-elastic <106530686+tommyers-elastic@users.noreply.github.com> Date: Tue, 4 Mar 2025 15:47:43 +0000 Subject: [PATCH 2/3] do not run license checker on _vendor dir --- magefile.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/magefile.go b/magefile.go index 0e47301e7cca..22b2d830eea8 100644 --- a/magefile.go +++ b/magefile.go @@ -115,6 +115,7 @@ func AddLicenseHeaders() error { licenser.Exclude("x-pack"), licenser.Exclude("generator/_templates/beat/{beat}"), licenser.Exclude("generator/_templates/metricbeat/{beat}"), + licenser.Exclude("_vendor"), ), licenser( licenser.License("Elastic"), @@ -139,6 +140,7 @@ func CheckLicenseHeaders() error { licenser.Exclude("x-pack"), licenser.Exclude("generator/_templates/beat/{beat}"), licenser.Exclude("generator/_templates/metricbeat/{beat}"), + licenser.Exclude("_vendor"), ), licenser( licenser.Check(), From 837fd0b5a98f744000298586f67b571ebea4af25 Mon Sep 17 00:00:00 2001 From: tommyers-elastic <106530686+tommyers-elastic@users.noreply.github.com> Date: Tue, 4 Mar 2025 20:03:45 +0000 Subject: [PATCH 3/3] update NOTICE.txt --- NOTICE.txt | 252 ++--------------------------------------------------- 1 file changed, 5 insertions(+), 247 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 069f419fce6e..1d9af8a7ebe6 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -22393,12 +22393,12 @@ Contents of probable licence file $GOMODCACHE/github.com/prometheus/procfs@v0.15 -------------------------------------------------------------------------------- -Dependency : github.com/prometheus/prometheus -Version: v0.54.1 +Dependency : github.com/prometheus/prometheus/v2 +Version: v2.54.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of 
probable licence file $GOMODCACHE/github.com/prometheus/prometheus@v0.54.1/LICENSE: +Contents of probable licence file LICENSE: Apache License Version 2.0, January 2004 @@ -44295,37 +44295,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/go-kit/log -Version: v0.2.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/go-kit/log@v0.2.1/LICENSE: - -MIT License - -Copyright (c) 2021 Go kit - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/go-logfmt/logfmt Version: v0.6.0 @@ -55093,217 +55062,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/oklog/ulid -Version: v1.3.1 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/oklog/ulid@v1.3.1/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -------------------------------------------------------------------------------- Dependency : github.com/oklog/ulid/v2 Version: v2.0.2 @@ -73101,11 +72859,11 @@ Contents of probable licence file $GOMODCACHE/go.opentelemetry.io/proto/otlp@v1. -------------------------------------------------------------------------------- Dependency : go.uber.org/atomic -Version: v1.11.0 +Version: v1.7.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.11.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/go.uber.org/atomic@v1.7.0/LICENSE.txt: Copyright (c) 2016 Uber Technologies, Inc.
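To close the loop on the import-path rewrite shown in the metricbeat hunks earlier, here is a hedged sketch, with handler and variable names that are the editor's own, of how a consumer decodes a Remote Write payload through the vendored /v2 path, assuming the prompb.WriteRequest message that accompanies these types in remote.pb.go:

package main

import (
	"fmt"
	"io"
	"net/http"

	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/v2/prompb"
)

// handleRemoteWrite decodes one Prometheus Remote Write request: the body
// is a snappy-compressed, protobuf-encoded prompb.WriteRequest.
func handleRemoteWrite(w http.ResponseWriter, r *http.Request) {
	compressed, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	raw, err := snappy.Decode(nil, compressed)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var req prompb.WriteRequest
	if err := req.Unmarshal(raw); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	for _, ts := range req.Timeseries {
		fmt.Printf("series with %d labels, %d samples, %d histograms\n",
			len(ts.Labels), len(ts.Samples), len(ts.Histograms))
	}
}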