Update go dependencies
This commit is contained in:
parent
432f534383
commit
f4a4daed84
1299 changed files with 71186 additions and 91183 deletions
19
vendor/github.com/ncabatoff/go-seq/.travis.yml
generated
vendored
Normal file
19
vendor/github.com/ncabatoff/go-seq/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
sudo: false
|
||||
language: go
|
||||
before_install:
|
||||
- go get github.com/mattn/goveralls
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.6.x
|
||||
script: go test -v -race ./...
|
||||
- go: 1.x
|
||||
script:
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go install -v ./...
|
||||
- go vet -v ./...
|
||||
- $GOPATH/bin/goveralls -v -race -service=travis-ci
|
||||
- go: master
|
||||
script: go test -v -race ./...
|
||||
allow_failures:
|
||||
- go: master
|
||||
fast_finish: true
|
||||
21
vendor/github.com/ncabatoff/go-seq/LICENSE
generated
vendored
Normal file
21
vendor/github.com/ncabatoff/go-seq/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2018 ncabatoff
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
44
vendor/github.com/ncabatoff/go-seq/README.md
generated
vendored
Normal file
44
vendor/github.com/ncabatoff/go-seq/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
# Package for ordering of Go values
|
||||
|
||||
[][godoc]
|
||||
[][travis]
|
||||
[](https://coveralls.io/github/ncabatoff/go-seq?branch=master)
|
||||
|
||||
This package is intended to allow ordering (most) values via reflection,
|
||||
much like go-cmp allows comparing values for equality.
|
||||
|
||||
This is helpful when trying to write `Less()` methods for sorting structures.
|
||||
Provided all the types in question are supported, you can simply define it
|
||||
as follows:
|
||||
|
||||
```go
|
||||
import "github.com/ncabatoff/go-seq/seq"
|
||||
|
||||
type (
|
||||
MyType struct {
|
||||
a int
|
||||
b string
|
||||
}
|
||||
MyTypeSlice []MyType
|
||||
)
|
||||
func (m MyTypeSlice) Less(i, j int) bool { return seq.Compare(m[i], m[j]) < 0 }
|
||||
```
|
||||
|
||||
Noteable unsupported types include the builtin `complex` type, channels,
|
||||
functions, and maps with non-string keys. Pointers can be ordered if their
|
||||
underlying types are orderable.
|
||||
|
||||
[godoc]: https://godoc.org/github.com/ncabatoff/go-seq/seq
|
||||
[travis]: https://travis-ci.org/ncabatoff/go-seq
|
||||
|
||||
## Install
|
||||
|
||||
```
|
||||
go get -u github.com/ncabatoff/go-seq/seq
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT - See [LICENSE][license] file
|
||||
|
||||
[license]: https://github.com/ncabatoff/go-seq/blob/master/LICENSE
|
||||
176
vendor/github.com/ncabatoff/go-seq/seq/compare.go
generated
vendored
Normal file
176
vendor/github.com/ncabatoff/go-seq/seq/compare.go
generated
vendored
Normal file
|
|
@ -0,0 +1,176 @@
|
|||
package seq
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Compare returns 0 if a and b are equal, -1 if a < b, or 1 if a > b.
|
||||
// Panics if a and b are not of the same type, or are of a type not listed here.
|
||||
// * Bools are compared assuming false < true.
|
||||
// * Strings, integer and float values are compared as Go compares them.
|
||||
// * Two nil pointers are equal; one nil pointer is treated as smaller than a non-nil pointer.
|
||||
// * Non-nil pointers are compared by comparing the values they point to.
|
||||
// * Structures are compared by comparing their fields in order.
|
||||
// * Slices are compared by comparing elements sequentially. If the slices are of different
|
||||
// length and all elements are the same up to the shorter length, the shorter slice is treated as
|
||||
// smaller.
|
||||
// * Maps can only be compared if they have string keys, in which case the ordered list of
|
||||
// keys are first compared as string slices, and if they're equal then the values are compared
|
||||
// sequentially in key order.
|
||||
func Compare(a, b interface{}) int {
|
||||
return compareValue(reflect.ValueOf(a), reflect.ValueOf(b))
|
||||
}
|
||||
|
||||
func boolToInt(b bool) int {
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func compareValue(ir1, ir2 reflect.Value) int {
|
||||
var zerovalue reflect.Value
|
||||
r1, r2 := reflect.Indirect(ir1), reflect.Indirect(ir2)
|
||||
|
||||
if r1 == zerovalue {
|
||||
if r2 == zerovalue {
|
||||
return 0
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if r2 == zerovalue {
|
||||
return 1
|
||||
}
|
||||
|
||||
switch r1.Kind() {
|
||||
case reflect.Bool:
|
||||
v1, v2 := boolToInt(r1.Bool()), boolToInt(r2.Bool())
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
v1, v2 := r1.Int(), r2.Int()
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
v1, v2 := r1.Uint(), r2.Uint()
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v1, v2 := r1.Float(), r2.Float()
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
case reflect.Map:
|
||||
return compareMap(r1, r2)
|
||||
case reflect.Struct:
|
||||
return compareStruct(r1, r2)
|
||||
case reflect.Slice:
|
||||
if r1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// Not using bytes.Compare because that fails on unexported fields:
|
||||
// return bytes.Compare(r1.Interface().([]byte), r2.Interface().([]byte))
|
||||
var s string
|
||||
strtype := reflect.TypeOf(s)
|
||||
v1, v2 := r1.Convert(strtype).String(), r2.Convert(strtype).String()
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return compareSlice(r1, r2)
|
||||
case reflect.String:
|
||||
v1, v2 := r1.String(), r2.String()
|
||||
if v1 < v2 {
|
||||
return -1
|
||||
}
|
||||
if v1 > v2 {
|
||||
return 1
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("don't know how to compare values of type %v", r1.Type()))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func compareStruct(r1, r2 reflect.Value) int {
|
||||
if r1.Type() != r2.Type() {
|
||||
panic(fmt.Sprintf("s1 and s2 are not of the same type: %v, %v", r1.Type(), r2.Type()))
|
||||
}
|
||||
|
||||
n := r1.NumField()
|
||||
for i := 0; i < n; i++ {
|
||||
c := compareValue(r1.Field(i), r2.Field(i))
|
||||
if c != 0 {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func compareSlice(r1, r2 reflect.Value) int {
|
||||
maxlen := r1.Len()
|
||||
if r2.Len() < maxlen {
|
||||
maxlen = r2.Len()
|
||||
}
|
||||
for i := 0; i < maxlen; i++ {
|
||||
c := compareValue(r1.Index(i), r2.Index(i))
|
||||
if c != 0 {
|
||||
return c
|
||||
}
|
||||
}
|
||||
if r1.Len() > maxlen {
|
||||
return 1
|
||||
}
|
||||
if r2.Len() > maxlen {
|
||||
return -1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func sortedKeys(r1 reflect.Value) []string {
|
||||
keys := make([]string, 0, r1.Len())
|
||||
for _, k := range r1.MapKeys() {
|
||||
keys = append(keys, k.String())
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func compareMap(r1, r2 reflect.Value) int {
|
||||
if r1.Type().Key().Kind() != reflect.String {
|
||||
panic("can only compare maps with keys of type string")
|
||||
}
|
||||
|
||||
s1, s2 := sortedKeys(r1), sortedKeys(r2)
|
||||
c := compareSlice(reflect.ValueOf(s1), reflect.ValueOf(s2))
|
||||
if c != 0 {
|
||||
return c
|
||||
}
|
||||
|
||||
for _, k := range s1 {
|
||||
vk := reflect.ValueOf(k)
|
||||
c := compareValue(r1.MapIndex(vk), r2.MapIndex(vk))
|
||||
if c != 0 {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
5
vendor/github.com/ncabatoff/process-exporter/.gitignore
generated
vendored
5
vendor/github.com/ncabatoff/process-exporter/.gitignore
generated
vendored
|
|
@ -1,4 +1,5 @@
|
|||
.*.sw?
|
||||
process-exporter
|
||||
.tarballs
|
||||
process-exporter-*.tar.gz
|
||||
load-generator
|
||||
integration-tester
|
||||
dist
|
||||
|
|
|
|||
39
vendor/github.com/ncabatoff/process-exporter/.goreleaser.yml
generated
vendored
Normal file
39
vendor/github.com/ncabatoff/process-exporter/.goreleaser.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
builds:
|
||||
- main: cmd/process-exporter/main.go
|
||||
binary: process-exporter
|
||||
flags: -tags netgo
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- 386
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64
|
||||
- ppc64le
|
||||
archive:
|
||||
name_template: "process-exporter-{{ .Version }}.{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
wrap_in_directory: true
|
||||
nfpm:
|
||||
homepage: https://github.com/ncabatoff/process-exporter
|
||||
maintainer: nick.cabatoff+procexp@gmail.com
|
||||
description: Prometheus exporter to report on processes running
|
||||
license: MIT
|
||||
formats:
|
||||
- deb
|
||||
- rpm
|
||||
bindir: /usr/bin
|
||||
files:
|
||||
"packaging/process-exporter.service": "/lib/systemd/system/process-exporter.service"
|
||||
config_files:
|
||||
"packaging/conf/all.yaml": "/etc/process-exporter/all.yaml"
|
||||
scripts:
|
||||
postinstall: "packaging/scripts/postinstall.sh"
|
||||
postremove: "packaging/scripts/postremove.sh"
|
||||
preremove: "packaging/scripts/preremove.sh"
|
||||
release:
|
||||
github:
|
||||
owner: ncabatoff
|
||||
name: process-exporter
|
||||
draft: false
|
||||
prerelease: true
|
||||
35
vendor/github.com/ncabatoff/process-exporter/.promu.yml
generated
vendored
35
vendor/github.com/ncabatoff/process-exporter/.promu.yml
generated
vendored
|
|
@ -1,35 +0,0 @@
|
|||
repository:
|
||||
path: github.com/ncabatoff/process-exporter
|
||||
build:
|
||||
binaries:
|
||||
- name: process-exporter
|
||||
path: ./cmd/process-exporter
|
||||
flags: -a -tags netgo
|
||||
tarball:
|
||||
files:
|
||||
- LICENSE
|
||||
crossbuild:
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/386
|
||||
- darwin/amd64
|
||||
- darwin/386
|
||||
- freebsd/amd64
|
||||
- freebsd/386
|
||||
- openbsd/amd64
|
||||
- openbsd/386
|
||||
- netbsd/amd64
|
||||
- netbsd/386
|
||||
- dragonfly/amd64
|
||||
- linux/arm
|
||||
- linux/arm64
|
||||
- freebsd/arm
|
||||
# Temporarily deactivated as golang.org/x/sys does not have syscalls
|
||||
# implemented for that os/platform combination.
|
||||
#- openbsd/arm
|
||||
#- linux/mips64
|
||||
#- linux/mips64le
|
||||
- netbsd/arm
|
||||
- linux/ppc64
|
||||
- linux/ppc64le
|
||||
|
||||
29
vendor/github.com/ncabatoff/process-exporter/.travis.yml
generated
vendored
Normal file
29
vendor/github.com/ncabatoff/process-exporter/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
services:
|
||||
- docker
|
||||
|
||||
language: go
|
||||
|
||||
env:
|
||||
- IMAGE_TAG=`echo $TRAVIS_TAG|sed s/v//`
|
||||
|
||||
go:
|
||||
- 1.10.x
|
||||
|
||||
before_install:
|
||||
- sudo apt-get -qq update
|
||||
- sudo apt-get install -y rpm
|
||||
|
||||
go_import_path: github.com/ncabatoff/process-exporter
|
||||
|
||||
script:
|
||||
- make style vet test build smoke docker
|
||||
- if [ -n "$IMAGE_TAG" ]; then make docker DOCKER_IMAGE_TAG=$IMAGE_TAG; fi
|
||||
|
||||
after_success:
|
||||
- docker login -u $DOCKER_USER -p "$DOCKER_PASSWORD"
|
||||
- >
|
||||
test -n "$TRAVIS_TAG" &&
|
||||
docker tag ncabatoff/process-exporter:$IMAGE_TAG ncabatoff/process-exporter:latest &&
|
||||
docker push ncabatoff/process-exporter:$IMAGE_TAG &&
|
||||
docker push ncabatoff/process-exporter:latest &&
|
||||
curl -sL http://git.io/goreleaser | bash
|
||||
18
vendor/github.com/ncabatoff/process-exporter/Dockerfile
generated
vendored
18
vendor/github.com/ncabatoff/process-exporter/Dockerfile
generated
vendored
|
|
@ -1,17 +1,21 @@
|
|||
# Start from a Debian image with the latest version of Go installed
|
||||
# and a workspace (GOPATH) configured at /go.
|
||||
FROM golang
|
||||
|
||||
# Copy the local package files to the container's workspace.
|
||||
ADD . /go/src/github.com/ncabatoff/process-exporter
|
||||
FROM golang:1.10 AS build
|
||||
#RUN curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $GOPATH/bin/dep
|
||||
#RUN chmod +x $GOPATH/bin/dep
|
||||
WORKDIR /go/src/github.com/ncabatoff/process-exporter
|
||||
ADD . .
|
||||
#RUN dep ensure
|
||||
|
||||
# Build the process-exporter command inside the container.
|
||||
RUN make -C /go/src/github.com/ncabatoff/process-exporter
|
||||
RUN make
|
||||
|
||||
USER root
|
||||
FROM scratch
|
||||
|
||||
COPY --from=build /go/src/github.com/ncabatoff/process-exporter/process-exporter /bin/process-exporter
|
||||
|
||||
# Run the process-exporter command by default when the container starts.
|
||||
ENTRYPOINT ["/go/src/github.com/ncabatoff/process-exporter/process-exporter"]
|
||||
ENTRYPOINT ["/bin/process-exporter"]
|
||||
|
||||
# Document that the service listens on port 9256.
|
||||
EXPOSE 9256
|
||||
|
|
|
|||
4
vendor/github.com/ncabatoff/process-exporter/Dockerfile.cloudbuild
generated
vendored
Normal file
4
vendor/github.com/ncabatoff/process-exporter/Dockerfile.cloudbuild
generated
vendored
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
FROM scratch
|
||||
COPY gopath/bin/process-exporter /process-exporter
|
||||
ENTRYPOINT ["/process-exporter"]
|
||||
EXPOSE 9256
|
||||
119
vendor/github.com/ncabatoff/process-exporter/Gopkg.lock
generated
vendored
119
vendor/github.com/ncabatoff/process-exporter/Gopkg.lock
generated
vendored
|
|
@ -3,73 +3,156 @@
|
|||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
pruneopts = "UT"
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:15042ad3498153684d09f393bbaec6b216c8eec6d61f63dff711de7d64ed8861"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "17ce1425424ab154092bbb43af630bd647f3bb0d"
|
||||
pruneopts = "UT"
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
packages = ["diff","pretty"]
|
||||
revision = "d65d576e9348f5982d7f6d83682b694e731a45c6"
|
||||
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/cmpopts",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ca955a9cd5b50b0f43d2cc3aeb35c951473eeca41b34eb67507f1dbcc0542394"
|
||||
name = "github.com/kr/pretty"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:15b5cc79aad436d47019f814fde81a10221c740dc8ddf769221a65097fb6c2e9"
|
||||
name = "github.com/kr/text"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
pruneopts = "UT"
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:71520363c3acc43c35a2a53f79f6c61f110a026326c8b16dbdd351164765feac"
|
||||
name = "github.com/ncabatoff/fakescraper"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "15938421d91a82d197de7fc59aebcac65c43407d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:9e33629d4ec9e9344715a54fa0a107f23ce800deb13999b0190df04c3540ccb5"
|
||||
name = "github.com/ncabatoff/go-seq"
|
||||
packages = ["seq"]
|
||||
pruneopts = "UT"
|
||||
revision = "b08ef85ed83364cba413c98a94bbd4169a0ce70b"
|
||||
|
||||
[[projects]]
|
||||
branch = "add-proc-status"
|
||||
digest = "1:df5079557e0fa0fe9fb973f84fffd52e32ef26ada655900fdeea9b0848766c74"
|
||||
name = "github.com/ncabatoff/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "e1a38cb53622f65e073c5e750e6498a44ebfbd2a"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b6221ec0f8903b556e127c449e7106b63e6867170c2d10a7c058623d086f2081"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = ["prometheus"]
|
||||
pruneopts = "UT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "6f3806018612930941127f2a7c6c453ba2c527d2"
|
||||
pruneopts = "UT"
|
||||
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"]
|
||||
revision = "2f17f4a9d485bf34b4bfaccc273805040e4f86c8"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [".","xfs"]
|
||||
revision = "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
|
||||
|
||||
[[projects]]
|
||||
branch = "v1"
|
||||
digest = "1:af715ae33cc1f5695c4b2a4e4b21d008add8802a99e15bb467ac7c32edb5000d"
|
||||
name = "gopkg.in/check.v1"
|
||||
packages = ["."]
|
||||
revision = "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec"
|
||||
pruneopts = "UT"
|
||||
revision = "788fd78401277ebd861206a03c884797c6ec5541"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
pruneopts = "UT"
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "abd920f891c3e5fe2ee27ce40acbdde66e0799704d160b01f22530df003adfe1"
|
||||
input-imports = [
|
||||
"github.com/google/go-cmp/cmp",
|
||||
"github.com/google/go-cmp/cmp/cmpopts",
|
||||
"github.com/ncabatoff/fakescraper",
|
||||
"github.com/ncabatoff/go-seq/seq",
|
||||
"github.com/ncabatoff/procfs",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"gopkg.in/check.v1",
|
||||
"gopkg.in/yaml.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
|||
28
vendor/github.com/ncabatoff/process-exporter/Gopkg.toml
generated
vendored
28
vendor/github.com/ncabatoff/process-exporter/Gopkg.toml
generated
vendored
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
|
|
@ -17,30 +16,39 @@
|
|||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/kylelemons/godebug"
|
||||
name = "github.com/google/go-cmp"
|
||||
version = "0.2.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/ncabatoff/fakescraper"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
branch = "add-proc-status"
|
||||
name = "github.com/ncabatoff/procfs"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
version = "0.8.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "v1"
|
||||
name = "gopkg.in/check.v1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
version = "2.2.1"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
|
|
|
|||
71
vendor/github.com/ncabatoff/process-exporter/Makefile
generated
vendored
71
vendor/github.com/ncabatoff/process-exporter/Makefile
generated
vendored
|
|
@ -1,32 +1,12 @@
|
|||
# Copyright 2015 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
GO := GO15VENDOREXPERIMENT=1 go
|
||||
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
|
||||
pkgs = $(shell go list ./... | grep -v /vendor/)
|
||||
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
DOCKER_IMAGE_NAME ?= process-exporter
|
||||
DOCKER_IMAGE_NAME ?= ncabatoff/process-exporter
|
||||
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
|
||||
SMOKE_TEST = -config.path packaging/conf/all.yaml -once-to-stdout-delay 1s |grep -q 'namedprocess_namegroup_memory_bytes{groupname="process-exporte",memtype="virtual"}'
|
||||
|
||||
ifdef DEBUG
|
||||
bindata_flags = -debug
|
||||
endif
|
||||
|
||||
|
||||
all: format vet build test
|
||||
all: format vet test build smoke
|
||||
|
||||
style:
|
||||
@echo ">> checking code style"
|
||||
|
|
@ -34,38 +14,37 @@ style:
|
|||
|
||||
test:
|
||||
@echo ">> running short tests"
|
||||
@$(GO) test -short $(pkgs)
|
||||
go test -short $(pkgs)
|
||||
|
||||
format:
|
||||
@echo ">> formatting code"
|
||||
@$(GO) fmt $(pkgs)
|
||||
go fmt $(pkgs)
|
||||
|
||||
vet:
|
||||
@echo ">> vetting code"
|
||||
@$(GO) vet $(pkgs)
|
||||
go vet $(pkgs)
|
||||
|
||||
build: promu
|
||||
@echo ">> building binaries"
|
||||
@$(PROMU) build --prefix $(PREFIX)
|
||||
build:
|
||||
@echo ">> building code"
|
||||
cd cmd/process-exporter; CGO_ENABLED=0 go build -o ../../process-exporter -a -tags netgo
|
||||
|
||||
tarball: promu
|
||||
@echo ">> building release tarball"
|
||||
@$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||
smoke:
|
||||
@echo ">> smoke testing process-exporter"
|
||||
./process-exporter $(SMOKE_TEST)
|
||||
|
||||
crossbuild: promu
|
||||
@echo ">> cross-building"
|
||||
@$(PROMU) crossbuild
|
||||
@$(PROMU) crossbuild tarballs
|
||||
integ:
|
||||
@echo ">> integration testing process-exporter"
|
||||
go build -o integration-tester cmd/integration-tester/main.go
|
||||
go build -o load-generator cmd/load-generator/main.go
|
||||
./integration-tester -write-size-bytes 65536
|
||||
|
||||
install:
|
||||
@echo ">> installing binary"
|
||||
cd cmd/process-exporter; CGO_ENABLED=0 go install -a -tags netgo
|
||||
|
||||
docker:
|
||||
@echo ">> building docker image"
|
||||
@docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
|
||||
docker build -t "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
|
||||
docker run --rm -v `pwd`/packaging:/packaging "$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(SMOKE_TEST)
|
||||
|
||||
promu:
|
||||
@echo ">> fetching promu"
|
||||
@GOOS=$(shell uname -s | tr A-Z a-z) \
|
||||
GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(patsubst arm%,arm,$(shell uname -m)))) \
|
||||
$(GO) get -u github.com/prometheus/promu
|
||||
|
||||
|
||||
.PHONY: all style format build test vet tarball crossbuild docker promu
|
||||
.PHONY: all style format test vet build integ docker
|
||||
|
|
|
|||
345
vendor/github.com/ncabatoff/process-exporter/README.md
generated
vendored
345
vendor/github.com/ncabatoff/process-exporter/README.md
generated
vendored
|
|
@ -1,64 +1,111 @@
|
|||
# process-exporter
|
||||
Prometheus exporter that mines /proc to report on selected processes.
|
||||
|
||||
The premise for this exporter is that sometimes you have apps that are
|
||||
impractical to instrument directly, either because you don't control the code
|
||||
or they're written in a language that isn't easy to instrument with Prometheus.
|
||||
A fair bit of information can be gleaned from /proc, especially for
|
||||
long-running programs.
|
||||
[release]: https://github.com/ncabatoff/process-exporter/releases/latest
|
||||
|
||||
For most systems it won't be beneficial to create metrics for every process by
|
||||
name: there are just too many of them and most don't do enough to merit it.
|
||||
Various command-line options are provided to control how processes are grouped
|
||||
and the groups are named. Run "process-exporter -man" to see a help page
|
||||
giving details.
|
||||
[][release]
|
||||
[](https://travis-ci.org/ncabatoff/process-exporter)
|
||||
[](https://github.com/goreleaser)
|
||||
|
||||
Metrics available currently include CPU usage, bytes written and read, and
|
||||
number of processes in each group.
|
||||
Some apps are impractical to instrument directly, either because you
|
||||
don't control the code or they're written in a language that isn't easy to
|
||||
instrument with Prometheus. We must instead resort to mining /proc.
|
||||
|
||||
Bytes read and written come from /proc/[pid]/io in recent enough kernels.
|
||||
These correspond to the fields `read_bytes` and `write_bytes` respectively.
|
||||
These IO stats come with plenty of caveats, see either the Linux kernel
|
||||
documentation or man 5 proc.
|
||||
## Installation
|
||||
|
||||
CPU usage comes from /proc/[pid]/stat fields utime (user time) and stime (system
|
||||
time.) It has been translated into fractional seconds of CPU consumed. Since
|
||||
it is a counter, using rate() will tell you how many fractional cores were running
|
||||
code from this process during the interval given.
|
||||
Either grab a package for your OS from the [Releases][release] page, or
|
||||
install via [docker](https://hub.docker.com/r/ncabatoff/process-exporter/).
|
||||
|
||||
An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249
|
||||
## Running
|
||||
|
||||
## Instrumentation cost
|
||||
Usage:
|
||||
|
||||
process-exporter will consume CPU in proportion to the number of processes in
|
||||
the system and the rate at which new ones are created. The most expensive
|
||||
parts - applying regexps and executing templates - are only applied once per
|
||||
process seen. If you have mostly long-running processes process-exporter
|
||||
should be lightweight: each time a scrape occurs, parsing of /proc/$pid/stat
|
||||
and /proc/$pid/cmdline for every process being monitored and adding a few
|
||||
numbers.
|
||||
```
|
||||
process-exporter [options] -config.path filename.yml
|
||||
```
|
||||
|
||||
## Config
|
||||
or via docker:
|
||||
|
||||
```
|
||||
docker run -d --rm -p 9256:9256 --privileged -v /proc:/host/proc -v `pwd`:/config ncabatoff/process-exporter --procfs /host/proc -config.path /config/filename.yml
|
||||
|
||||
```
|
||||
|
||||
Important options (run process-exporter --help for full list):
|
||||
|
||||
-children (default:true) makes it so that any process that otherwise
|
||||
isn't part of its own group becomes part of the first group found (if any) when
|
||||
walking the process tree upwards. In other words, resource usage of
|
||||
subprocesses is added to their parent's usage unless the subprocess identifies
|
||||
as a different group name.
|
||||
|
||||
-recheck (default:false) means that on each scrape the process names are
|
||||
re-evaluated. This is disabled by default as an optimization, but since
|
||||
processes can choose to change their names, this may result in a process
|
||||
falling into the wrong group if we happen to see it for the first time before
|
||||
it's assumed its proper name.
|
||||
|
||||
-procnames is intended as a quick alternative to using a config file. Details
|
||||
in the following section.
|
||||
|
||||
## Configuration and group naming
|
||||
|
||||
To select and group the processes to monitor, either provide command-line
|
||||
arguments or use a YAML configuration file.
|
||||
|
||||
To avoid confusion with the cmdline YAML element, we'll refer to the
|
||||
null-delimited contents of `/proc/<pid>/cmdline` as the array `argv[]`.
|
||||
The recommended option is to use a config file via -config.path, but for
|
||||
convenience and backwards compatability the -procnames/-namemapping options
|
||||
exist as an alternative.
|
||||
|
||||
### Using a config file
|
||||
|
||||
The general format of the -config.path YAML file is a top-level
|
||||
`process_names` section, containing a list of name matchers:
|
||||
|
||||
```
|
||||
process_names:
|
||||
- matcher1
|
||||
- matcher2
|
||||
...
|
||||
- matcherN
|
||||
```
|
||||
|
||||
The default config shipped with the deb/rpm packages is:
|
||||
|
||||
```
|
||||
process_names:
|
||||
- name: "{{.Comm}}"
|
||||
cmdline:
|
||||
- '.+'
|
||||
```
|
||||
|
||||
A process may only belong to one group: even if multiple items would match, the
|
||||
first one listed in the file wins.
|
||||
|
||||
(Side note: to avoid confusion with the cmdline YAML element, we'll refer to
|
||||
the command-line arguments of a process `/proc/<pid>/cmdline` as the array
|
||||
`argv[]`.)
|
||||
|
||||
#### Using a config file: group name
|
||||
|
||||
Each item in `process_names` gives a recipe for identifying and naming
|
||||
processes. The optional `name` tag defines a template to use to name
|
||||
matching processes; if not specified, `name` defaults to `{{.ExeBase}}`.
|
||||
|
||||
Template variables available:
|
||||
- `{{.Comm}}` contains the basename of the original executable, i.e. 2nd field in `/proc/<pid>/stat`
|
||||
- `{{.ExeBase}}` contains the basename of the executable
|
||||
- `{{.ExeFull}}` contains the fully qualified path of the executable
|
||||
- `{{.Username}}` contains the username of the effective user
|
||||
- `{{.Matches}}` map contains all the matches resulting from applying cmdline regexps
|
||||
|
||||
#### Using a config file: process selectors
|
||||
|
||||
Each item in `process_names` must contain one or more selectors (`comm`, `exe`
|
||||
or `cmdline`); if more than one selector is present, they must all match. Each
|
||||
selector is a list of strings to match against a process's `comm`, `argv[0]`,
|
||||
or in the case of `cmdline`, a regexp to apply to the command line.
|
||||
or in the case of `cmdline`, a regexp to apply to the command line. The cmdline
|
||||
regexp uses the [Go syntax](https://golang.org/pkg/regexp).
|
||||
|
||||
For `comm` and `exe`, the list of strings is an OR, meaning any process
|
||||
matching any of the strings will be added to the item's group.
|
||||
|
|
@ -67,10 +114,7 @@ For `cmdline`, the list of regexes is an AND, meaning they all must match. Any
|
|||
capturing groups in a regexp must use the `?P<name>` option to assign a name to
|
||||
the capture, which is used to populate `.Matches`.
|
||||
|
||||
A process may only belong to one group: even if multiple items would match, the
|
||||
first one listed in the file wins.
|
||||
|
||||
Other performance tips: give an exe or comm clause in addition to any cmdline
|
||||
Performance tip: give an exe or comm clause in addition to any cmdline
|
||||
clause, so you avoid executing the regexp when the executable name doesn't
|
||||
match.
|
||||
|
||||
|
|
@ -95,8 +139,7 @@ process_names:
|
|||
exe:
|
||||
- /usr/local/bin/process-exporter
|
||||
cmdline:
|
||||
- -config.path\\s+(?P<Cfgfile>\\S+)
|
||||
|
||||
- -config.path\s+(?P<Cfgfile>\S+)
|
||||
|
||||
```
|
||||
|
||||
|
|
@ -118,43 +161,195 @@ process_names:
|
|||
|
||||
```
|
||||
|
||||
## Docker
|
||||
### Using -procnames/-namemapping instead of config.path
|
||||
|
||||
A docker image can be created with
|
||||
Every name in the procnames list becomes a process group. The default name of
|
||||
a process is the value found in the second field of /proc/<pid>/stat
|
||||
("comm"), which is truncated at 15 chars. Usually this is the same as the
|
||||
name of the executable.
|
||||
|
||||
If -namemapping isn't provided, every process with a comm value present
|
||||
in -procnames is assigned to a group based on that name, and any other
|
||||
processes are ignored.
|
||||
|
||||
The -namemapping option is a comma-separated list of alternating
|
||||
name,regexp values. It allows assigning a name to a process based on a
|
||||
combination of the process name and command line. For example, using
|
||||
|
||||
-namemapping "python2,([^/]+)\.py,java,-jar\s+([^/]+).jar"
|
||||
|
||||
will make it so that each different python2 and java -jar invocation will be
|
||||
tracked with distinct metrics. Processes whose remapped name is absent from
|
||||
the procnames list will be ignored. On a Ubuntu Xenian machine being used as
|
||||
a workstation, here's a good way of tracking resource usage for a few
|
||||
different key user apps:
|
||||
|
||||
process-exporter -namemapping "upstart,(--user)" \
|
||||
-procnames chromium-browse,bash,gvim,prometheus,process-exporter,upstart:-user
|
||||
|
||||
Since upstart --user is the parent process of the X11 session, this will
|
||||
make all apps started by the user fall into the group named "upstart:-user",
|
||||
unless they're one of the others named explicitly with -procnames, like gvim.
|
||||
|
||||
## Group Metrics
|
||||
|
||||
There's no meaningful way to name a process that will only ever name a single process, so process-exporter assumes that every metric will be attached
|
||||
to a group of processes - not a
|
||||
[process group](https://en.wikipedia.org/wiki/Process_group) in the technical
|
||||
sense, just one or more processes that meet a configuration's specification
|
||||
of what should be monitored and how to name it.
|
||||
|
||||
All these metrics start with `namedprocess_namegroup_` and have at minimum
|
||||
the label `groupname`.
|
||||
|
||||
### num_procs gauge
|
||||
|
||||
Number of processes in this group.
|
||||
|
||||
### cpu_user_seconds_total counter
|
||||
|
||||
CPU usage based on /proc/[pid]/stat field utime(14) i.e. user time.
|
||||
A value of 1 indicates that the processes in this group have been scheduled
|
||||
in user mode for a total of 1 second on a single virtual CPU.
|
||||
|
||||
### cpu_system_seconds_total counter
|
||||
|
||||
CPU usage based on /proc/[pid]/stat field stime(15) i.e. system time.
|
||||
|
||||
### read_bytes_total counter
|
||||
|
||||
Bytes read based on /proc/[pid]/io field read_bytes. The man page
|
||||
says
|
||||
|
||||
> Attempt to count the number of bytes which this process really did cause to be fetched from the storage layer. This is accurate for block-backed filesystems.
|
||||
|
||||
but I would take it with a grain of salt.
|
||||
|
||||
### write_bytes_total counter
|
||||
|
||||
Bytes written based on /proc/[pid]/io field write_bytes. As with
|
||||
read_bytes, somewhat dubious. May be useful for isolating which processes
|
||||
are doing the most I/O, but probably not measuring just how much I/O is happening.
|
||||
|
||||
### major_page_faults_total counter
|
||||
|
||||
Number of major page faults based on /proc/[pid]/stat field majflt(12).
|
||||
|
||||
### minor_page_faults_total counter
|
||||
|
||||
Number of minor page faults based on /proc/[pid]/stat field minflt(10).
|
||||
|
||||
### context_switches_total counter
|
||||
|
||||
Number of context switches based on /proc/[pid]/status fields voluntary_ctxt_switches
|
||||
and nonvoluntary_ctxt_switches. The extra label `ctxswitchtype` can have two values:
|
||||
`voluntary` and `nonvoluntary`.
|
||||
|
||||
### memory_bytes gauge
|
||||
|
||||
Number of bytes of memory used. The extra label `memtype` can have two values:
|
||||
|
||||
*resident*: Field rss(24) from /proc/[pid]/stat, whose doc says:
|
||||
|
||||
> This is just the pages which count toward text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out.
|
||||
|
||||
*virtual*: Field vsize(23) from /proc/[pid]/stat, virtual memory size.
|
||||
|
||||
*swapped*: Field VmSwap from /proc/[pid]/status, translated from KB to bytes.
|
||||
|
||||
### open_filedesc gauge
|
||||
|
||||
Number of file descriptors, based on counting how many entries are in the directory
|
||||
/proc/[pid]/fd.
|
||||
|
||||
### worst_fd_ratio gauge
|
||||
|
||||
Worst ratio of open filedescs to filedesc limit, amongst all the procs in the
|
||||
group. The limit is the fd soft limit based on /proc/[pid]/limits.
|
||||
|
||||
Normally Prometheus metrics ought to be as "basic" as possible (i.e. the raw
|
||||
values rather than a derived ratio), but we use a ratio here because nothing
|
||||
else makes sense. Suppose there are 10 procs in a given group, each with a
|
||||
soft limit of 4096, and one of them has 4000 open fds and the others all have
|
||||
40, their total fdcount is 4360 and total soft limit is 40960, so the ratio
|
||||
is 1:10, but in fact one of the procs is about to run out of fds. With
|
||||
worst_fd_ratio we're able to know this: in the above example it would be
|
||||
0.97, rather than the 0.10 you'd see if you computed sum(open_filedesc) /
|
||||
sum(limit_filedesc).
|
||||
|
||||
### oldest_start_time_seconds gauge
|
||||
|
||||
Epoch time (seconds since 1970/1/1) at which the oldest process in the group
|
||||
started. This is derived from field starttime(22) from /proc/[pid]/stat, added
|
||||
to boot time to make it relative to epoch.
|
||||
|
||||
### num_threads gauge
|
||||
|
||||
Sum of number of threads of all process in the group. Based on field num_threads(20)
|
||||
from /proc/[pid]/stat.
|
||||
|
||||
### states gauge
|
||||
|
||||
Number of threads in the group in each of various states, based on the field
|
||||
state(3) from /proc/[pid]/stat.
|
||||
|
||||
The extra label `state` can have these values: `Running`, `Sleeping`, `Waiting`, `Zombie`, `Other`.
|
||||
|
||||
## Group Thread Metrics
|
||||
|
||||
All these metrics start with `namedprocess_namegroup_` and have at minimum
|
||||
the labels `groupname` and `threadname`. `threadname` is field comm(2) from
|
||||
/proc/[pid]/stat. Just as groupname breaks the set of processes down into
|
||||
groups, threadname breaks a given process group down into subgroups.
|
||||
|
||||
### thread_count gauge
|
||||
|
||||
Number of threads in this thread subgroup.
|
||||
|
||||
### thread_cpu_seconds_total counter
|
||||
|
||||
Same as cpu_user_seconds_total and cpu_system_seconds_total, but broken down
|
||||
per-thread subgroup. Unlike cpu_user_seconds_total/cpu_system_seconds_total,
|
||||
the label `cpumode` is used to distinguish between `user` and `system` time.
|
||||
|
||||
### thread_io_bytes_total counter
|
||||
|
||||
Same as read_bytes_total and write_bytes_total, but broken down
|
||||
per-thread subgroup. Unlike read_bytes_total/write_bytes_total,
|
||||
the label `iomode` is used to distinguish between `read` and `write` bytes.
|
||||
|
||||
### thread_major_page_faults_total counter
|
||||
|
||||
Same as major_page_faults_total, but broken down per-thread subgroup.
|
||||
|
||||
### thread_minor_page_faults_total counter
|
||||
|
||||
Same as minor_page_faults_total, but broken down per-thread subgroup.
|
||||
|
||||
### thread_context_switches_total counter
|
||||
|
||||
Same as context_switches_total, but broken down per-thread subgroup.
|
||||
|
||||
## Instrumentation cost
|
||||
|
||||
process-exporter will consume CPU in proportion to the number of processes in
|
||||
the system and the rate at which new ones are created. The most expensive
|
||||
parts - applying regexps and executing templates - are only applied once per
|
||||
process seen, unless the command-line option -recheck is provided.
|
||||
|
||||
If you have mostly long-running processes process-exporter overhead should be
|
||||
minimal: each time a scrape occurs, it will parse of /proc/$pid/stat and
|
||||
/proc/$pid/cmdline for every process being monitored and add a few numbers.
|
||||
|
||||
## Dashboards
|
||||
|
||||
An example Grafana dashboard to view the metrics is available at https://grafana.net/dashboards/249
|
||||
|
||||
## Building
|
||||
|
||||
Install [dep](https://github.com/golang/dep), then:
|
||||
|
||||
```
|
||||
make docker
|
||||
dep ensure
|
||||
make
|
||||
```
|
||||
|
||||
Then run the docker, e.g.
|
||||
|
||||
```
|
||||
docker run --privileged --name pexporter -d -v /proc:/host/proc -p 127.0.0.1:9256:9256 process-exporter:master -procfs /host/proc -procnames chromium-browse,bash,prometheus,gvim,upstart:-user -namemapping "upstart,(-user)"
|
||||
```
|
||||
|
||||
This will expose metrics on http://localhost:9256/metrics. Leave off the
|
||||
`127.0.0.1:` to publish on all interfaces. Leave off the --priviliged and
|
||||
add the --user docker run argument if you only need to monitor processes
|
||||
belonging to a single user.
|
||||
|
||||
## History
|
||||
|
||||
An earlier version of this exporter had options to enable auto-discovery of
|
||||
which processes were consuming resources. This functionality has been removed.
|
||||
These options were based on a percentage of resource usage, e.g. if an
|
||||
untracked process consumed X% of CPU during a scrape, start tracking processes
|
||||
with that name. However during any given scrape it's likely that most
|
||||
processes are idle, so we could add a process that consumes minimal resources
|
||||
but which happened to be active during the interval preceding the current
|
||||
scrape. Over time this means that a great many processes wind up being
|
||||
scraped, which becomes unmanageable to visualize. This could be mitigated by
|
||||
looking at resource usage over longer intervals, but ultimately I didn't feel
|
||||
this feature was important enough to invest more time in at this point. It may
|
||||
re-appear at some point in the future, but no promises.
|
||||
|
||||
Another lost feature: the "other" group was used to count usage by non-tracked
|
||||
procs. This was useful to get an idea of what wasn't being monitored. But it
|
||||
comes at a high cost: if you know what processes you care about, you're wasting
|
||||
a lot of CPU to compute the usage of everything else that you don't care about.
|
||||
The new approach is to minimize resources expended on non-tracked processes and
|
||||
to require the user to whitelist the processes to track.
|
||||
|
|
|
|||
1
vendor/github.com/ncabatoff/process-exporter/VERSION
generated
vendored
1
vendor/github.com/ncabatoff/process-exporter/VERSION
generated
vendored
|
|
@ -1 +0,0 @@
|
|||
0.1.0
|
||||
49
vendor/github.com/ncabatoff/process-exporter/cloudbuild.release.yaml
generated
vendored
Normal file
49
vendor/github.com/ncabatoff/process-exporter/cloudbuild.release.yaml
generated
vendored
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
steps:
|
||||
# - name: string
|
||||
# args: string
|
||||
# env: string
|
||||
# dir: string
|
||||
# id: string
|
||||
# waitFor: string
|
||||
# entrypoint: string
|
||||
# secretEnv: string
|
||||
|
||||
# Setup the workspace
|
||||
- name: gcr.io/cloud-builders/go
|
||||
env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter']
|
||||
args: ['env']
|
||||
|
||||
# Build project
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
entrypoint: 'bash'
|
||||
args: ['-c', 'docker build -t ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//` .']
|
||||
|
||||
# Login to docker hub
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
entrypoint: 'bash'
|
||||
args: ['-c', 'docker login --username=ncabatoff --password=$$DOCKER_PASSWORD']
|
||||
secretEnv: ['DOCKER_PASSWORD']
|
||||
|
||||
# Push to docker hub
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
entrypoint: 'bash'
|
||||
args: ['-c', 'docker push ncabatoff/process-exporter:`echo $TAG_NAME|sed s/^v//`']
|
||||
|
||||
# Create github release
|
||||
- name: goreleaser/goreleaser
|
||||
entrypoint: /bin/sh
|
||||
dir: gopath/src/github.com
|
||||
env: ['GOPATH=/workspace/gopath']
|
||||
args: ['-c', 'cd ncabatoff/process-exporter && git tag $TAG_NAME && /goreleaser' ]
|
||||
secretEnv: ['GITHUB_TOKEN']
|
||||
|
||||
secrets:
|
||||
- kmsKeyName: projects/process-exporter/locations/global/keyRings/cloudbuild/cryptoKeys/mykey
|
||||
secretEnv:
|
||||
DOCKER_PASSWORD: |
|
||||
CiQAeHUuEinm1h2j9mp8r0NjPw1l1bBwzDG+JHPUPf3GvtmdjXESMAD3wUauaxWrxid/zPunG67x
|
||||
5+1CYedV5exh0XwQ32eu4UkniS7HHJNWBudklaG0JA==
|
||||
GITHUB_TOKEN: |
|
||||
CiQAeHUuEhEKAvfIHlUZrCgHNScm0mDKI8Z1w/N3OzDk8Ql6kAUSUQD3wUau7qRc+H7OnTUo6b2Z
|
||||
DKA1eMKHNg729KfHj2ZMqZXinrJloYMbZcZRXP9xv91xCq6QJB5UoFoyYDnXGdvgXC08YUstR6UB
|
||||
H0bwHhe1GQ==
|
||||
32
vendor/github.com/ncabatoff/process-exporter/cloudbuild.yaml
generated
vendored
Normal file
32
vendor/github.com/ncabatoff/process-exporter/cloudbuild.yaml
generated
vendored
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
steps:
|
||||
# - name: string
|
||||
# args: string
|
||||
# env: string
|
||||
# dir: string
|
||||
# id: string
|
||||
# waitFor: string
|
||||
# entrypoint: string
|
||||
# secretEnv: string
|
||||
# - name: gcr.io/cloud-builders/curl
|
||||
# args: ['-L', '-s', '-o', 'dep', 'https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64']
|
||||
# - name: ubuntu
|
||||
# args: ['chmod', '+x', 'dep']
|
||||
# Setup the workspace
|
||||
- name: gcr.io/cloud-builders/go
|
||||
env: ['PROJECT_ROOT=github.com/ncabatoff/process-exporter']
|
||||
args: ['env']
|
||||
# Run dep in the workspace created in previous step
|
||||
# - name: gcr.io/cloud-builders/go
|
||||
# entrypoint: /bin/sh
|
||||
# dir: gopath/src/github.com
|
||||
# env: ['GOPATH=/workspace/gopath']
|
||||
# args: ['-c', 'cd ncabatoff/process-exporter && /workspace/dep ensure -vendor-only' ]
|
||||
- name: gcr.io/cloud-builders/go
|
||||
entrypoint: /bin/sh
|
||||
dir: gopath/src/github.com
|
||||
env: ['GOPATH=/workspace/gopath']
|
||||
args: ['-c', 'make -C ncabatoff/process-exporter style vet test build integ install' ]
|
||||
- name: gcr.io/cloud-builders/docker
|
||||
args: ['build', '--tag=gcr.io/$PROJECT_ID/process-exporter', '.', '-f', 'Dockerfile.cloudbuild']
|
||||
images: ['gcr.io/$PROJECT_ID/process-exporter']
|
||||
|
||||
12
vendor/github.com/ncabatoff/process-exporter/common.go
generated
vendored
12
vendor/github.com/ncabatoff/process-exporter/common.go
generated
vendored
|
|
@ -1,14 +1,18 @@
|
|||
package common
|
||||
|
||||
import "fmt"
|
||||
|
||||
type (
|
||||
NameAndCmdline struct {
|
||||
Name string
|
||||
Cmdline []string
|
||||
ProcAttributes struct {
|
||||
Name string
|
||||
Cmdline []string
|
||||
Username string
|
||||
}
|
||||
|
||||
MatchNamer interface {
|
||||
// MatchAndName returns false if the match failed, otherwise
|
||||
// true and the resulting name.
|
||||
MatchAndName(NameAndCmdline) (bool, string)
|
||||
MatchAndName(ProcAttributes) (bool, string)
|
||||
fmt.Stringer
|
||||
}
|
||||
)
|
||||
|
|
|
|||
268
vendor/github.com/ncabatoff/process-exporter/proc/grouper.go
generated
vendored
268
vendor/github.com/ncabatoff/process-exporter/proc/grouper.go
generated
vendored
|
|
@ -1,173 +1,179 @@
|
|||
package proc
|
||||
|
||||
import (
|
||||
common "github.com/ncabatoff/process-exporter"
|
||||
"time"
|
||||
|
||||
seq "github.com/ncabatoff/go-seq/seq"
|
||||
common "github.com/ncabatoff/process-exporter"
|
||||
)
|
||||
|
||||
type (
|
||||
// Grouper is the top-level interface to the process metrics. All tracked
|
||||
// procs sharing the same group name are aggregated.
|
||||
Grouper struct {
|
||||
namer common.MatchNamer
|
||||
trackChildren bool
|
||||
// track how much was seen last time so we can report the delta
|
||||
GroupStats map[string]Counts
|
||||
tracker *Tracker
|
||||
// groupAccum records the historical accumulation of a group so that
|
||||
// we can avoid ever decreasing the counts we return.
|
||||
groupAccum map[string]Counts
|
||||
tracker *Tracker
|
||||
threadAccum map[string]map[string]Threads
|
||||
debug bool
|
||||
}
|
||||
|
||||
GroupCountMap map[string]GroupCounts
|
||||
// GroupByName maps group name to group metrics.
|
||||
GroupByName map[string]Group
|
||||
|
||||
GroupCounts struct {
|
||||
// Threads collects metrics for threads in a group sharing a thread name.
|
||||
Threads struct {
|
||||
Name string
|
||||
NumThreads int
|
||||
Counts
|
||||
Procs int
|
||||
Memresident uint64
|
||||
Memvirtual uint64
|
||||
}
|
||||
|
||||
// Group describes the metrics of a single group.
|
||||
Group struct {
|
||||
Counts
|
||||
States
|
||||
Wchans map[string]int
|
||||
Procs int
|
||||
Memory
|
||||
OldestStartTime time.Time
|
||||
OpenFDs uint64
|
||||
WorstFDratio float64
|
||||
NumThreads uint64
|
||||
Threads []Threads
|
||||
}
|
||||
)
|
||||
|
||||
func NewGrouper(trackChildren bool, namer common.MatchNamer) *Grouper {
|
||||
// Returns true if x < y. Test designers should ensure they always have
|
||||
// a unique name/numthreads combination for each group.
|
||||
func lessThreads(x, y Threads) bool { return seq.Compare(x, y) < 0 }
|
||||
|
||||
// NewGrouper creates a grouper.
|
||||
func NewGrouper(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Grouper {
|
||||
g := Grouper{
|
||||
trackChildren: trackChildren,
|
||||
namer: namer,
|
||||
GroupStats: make(map[string]Counts),
|
||||
tracker: NewTracker(),
|
||||
groupAccum: make(map[string]Counts),
|
||||
threadAccum: make(map[string]map[string]Threads),
|
||||
tracker: NewTracker(namer, trackChildren, alwaysRecheck, debug),
|
||||
debug: debug,
|
||||
}
|
||||
return &g
|
||||
}
|
||||
|
||||
func (g *Grouper) checkAncestry(idinfo ProcIdInfo, newprocs map[ProcId]ProcIdInfo) string {
|
||||
ppid := idinfo.ParentPid
|
||||
pProcId := g.tracker.ProcIds[ppid]
|
||||
if pProcId.Pid < 1 {
|
||||
// Reached root of process tree without finding a tracked parent.
|
||||
g.tracker.Ignore(idinfo.ProcId)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Is the parent already known to the tracker?
|
||||
if ptproc, ok := g.tracker.Tracked[pProcId]; ok {
|
||||
if ptproc != nil {
|
||||
// We've found a tracked parent.
|
||||
g.tracker.Track(ptproc.GroupName, idinfo)
|
||||
return ptproc.GroupName
|
||||
} else {
|
||||
// We've found an untracked parent.
|
||||
g.tracker.Ignore(idinfo.ProcId)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Is the parent another new process?
|
||||
if pinfoid, ok := newprocs[pProcId]; ok {
|
||||
if name := g.checkAncestry(pinfoid, newprocs); name != "" {
|
||||
// We've found a tracked parent, which implies this entire lineage should be tracked.
|
||||
g.tracker.Track(name, idinfo)
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry.
|
||||
g.tracker.Ignore(idinfo.ProcId)
|
||||
return ""
|
||||
|
||||
}
|
||||
|
||||
// Update tracks any new procs that should be according to policy, and updates
|
||||
// the metrics for already tracked procs. Permission errors are returned as a
|
||||
// count, and will not affect the error return value.
|
||||
func (g *Grouper) Update(iter ProcIter) (int, error) {
|
||||
newProcs, permErrs, err := g.tracker.Update(iter)
|
||||
if err != nil {
|
||||
return permErrs, err
|
||||
}
|
||||
|
||||
// Step 1: track any new proc that should be tracked based on its name and cmdline.
|
||||
untracked := make(map[ProcId]ProcIdInfo)
|
||||
for _, idinfo := range newProcs {
|
||||
wanted, gname := g.namer.MatchAndName(common.NameAndCmdline{Name: idinfo.Name, Cmdline: idinfo.Cmdline})
|
||||
if !wanted {
|
||||
untracked[idinfo.ProcId] = idinfo
|
||||
continue
|
||||
}
|
||||
|
||||
g.tracker.Track(gname, idinfo)
|
||||
}
|
||||
|
||||
// Step 2: track any untracked new proc that should be tracked because its parent is tracked.
|
||||
if !g.trackChildren {
|
||||
return permErrs, nil
|
||||
}
|
||||
|
||||
for _, idinfo := range untracked {
|
||||
if _, ok := g.tracker.Tracked[idinfo.ProcId]; ok {
|
||||
// Already tracked or ignored
|
||||
continue
|
||||
}
|
||||
|
||||
g.checkAncestry(idinfo, untracked)
|
||||
}
|
||||
return permErrs, nil
|
||||
}
|
||||
|
||||
// groups returns the aggregate metrics for all groups tracked. This reflects
|
||||
// solely what's currently running.
|
||||
func (g *Grouper) groups() GroupCountMap {
|
||||
gcounts := make(GroupCountMap)
|
||||
|
||||
func groupadd(grp Group, ts Update) Group {
|
||||
var zeroTime time.Time
|
||||
for _, tinfo := range g.tracker.Tracked {
|
||||
if tinfo == nil {
|
||||
continue
|
||||
}
|
||||
cur := gcounts[tinfo.GroupName]
|
||||
cur.Procs++
|
||||
tstats := tinfo.GetStats()
|
||||
cur.Memresident += tstats.Memory.Resident
|
||||
cur.Memvirtual += tstats.Memory.Virtual
|
||||
cur.OpenFDs += tstats.Filedesc.Open
|
||||
openratio := float64(tstats.Filedesc.Open) / float64(tstats.Filedesc.Limit)
|
||||
if cur.WorstFDratio < openratio {
|
||||
cur.WorstFDratio = openratio
|
||||
}
|
||||
cur.Counts.Cpu += tstats.latest.Cpu
|
||||
cur.Counts.ReadBytes += tstats.latest.ReadBytes
|
||||
cur.Counts.WriteBytes += tstats.latest.WriteBytes
|
||||
if cur.OldestStartTime == zeroTime || tstats.start.Before(cur.OldestStartTime) {
|
||||
cur.OldestStartTime = tstats.start
|
||||
}
|
||||
gcounts[tinfo.GroupName] = cur
|
||||
|
||||
grp.Procs++
|
||||
grp.Memory.ResidentBytes += ts.Memory.ResidentBytes
|
||||
grp.Memory.VirtualBytes += ts.Memory.VirtualBytes
|
||||
grp.Memory.VmSwapBytes += ts.Memory.VmSwapBytes
|
||||
if ts.Filedesc.Open != -1 {
|
||||
grp.OpenFDs += uint64(ts.Filedesc.Open)
|
||||
}
|
||||
openratio := float64(ts.Filedesc.Open) / float64(ts.Filedesc.Limit)
|
||||
if grp.WorstFDratio < openratio {
|
||||
grp.WorstFDratio = openratio
|
||||
}
|
||||
grp.NumThreads += ts.NumThreads
|
||||
grp.Counts.Add(ts.Latest)
|
||||
grp.States.Add(ts.States)
|
||||
if grp.OldestStartTime == zeroTime || ts.Start.Before(grp.OldestStartTime) {
|
||||
grp.OldestStartTime = ts.Start
|
||||
}
|
||||
|
||||
return gcounts
|
||||
if grp.Wchans == nil {
|
||||
grp.Wchans = make(map[string]int)
|
||||
}
|
||||
for wchan, count := range ts.Wchans {
|
||||
grp.Wchans[wchan] += count
|
||||
}
|
||||
|
||||
return grp
|
||||
}
|
||||
|
||||
// Groups returns GroupCounts with Counts that never decrease in value from one
|
||||
// call to the next. Even if processes exit, their CPU and IO contributions up
|
||||
// to that point are included in the results. Even if no processes remain
|
||||
// in a group it will still be included in the results.
|
||||
func (g *Grouper) Groups() GroupCountMap {
|
||||
groups := g.groups()
|
||||
// Update asks the tracker to report on each tracked process by name.
|
||||
// These are aggregated by groupname, augmented by accumulated counts
|
||||
// from the past, and returned. Note that while the Tracker reports
|
||||
// only what counts have changed since last cycle, Grouper.Update
|
||||
// returns counts that never decrease. Even once the last process
|
||||
// with name X disappears, name X will still appear in the results
|
||||
// with the same counts as before; of course, all non-count metrics
|
||||
// will be zero.
|
||||
func (g *Grouper) Update(iter Iter) (CollectErrors, GroupByName, error) {
|
||||
cerrs, tracked, err := g.tracker.Update(iter)
|
||||
if err != nil {
|
||||
return cerrs, nil, err
|
||||
}
|
||||
return cerrs, g.groups(tracked), nil
|
||||
}
|
||||
|
||||
// First add any accumulated counts to what was just observed,
|
||||
// Translate the updates into a new GroupByName and update internal history.
|
||||
func (g *Grouper) groups(tracked []Update) GroupByName {
|
||||
groups := make(GroupByName)
|
||||
threadsByGroup := make(map[string][]ThreadUpdate)
|
||||
|
||||
for _, update := range tracked {
|
||||
groups[update.GroupName] = groupadd(groups[update.GroupName], update)
|
||||
if update.Threads != nil {
|
||||
threadsByGroup[update.GroupName] =
|
||||
append(threadsByGroup[update.GroupName], update.Threads...)
|
||||
}
|
||||
}
|
||||
|
||||
// Add any accumulated counts to what was just observed,
|
||||
// and update the accumulators.
|
||||
for gname, group := range groups {
|
||||
if oldcounts, ok := g.GroupStats[gname]; ok {
|
||||
group.Counts.Cpu += oldcounts.Cpu
|
||||
group.Counts.ReadBytes += oldcounts.ReadBytes
|
||||
group.Counts.WriteBytes += oldcounts.WriteBytes
|
||||
if oldcounts, ok := g.groupAccum[gname]; ok {
|
||||
group.Counts.Add(Delta(oldcounts))
|
||||
}
|
||||
g.GroupStats[gname] = group.Counts
|
||||
g.groupAccum[gname] = group.Counts
|
||||
group.Threads = g.threads(gname, threadsByGroup[gname])
|
||||
groups[gname] = group
|
||||
}
|
||||
|
||||
// Now add any groups that were observed in the past but aren't running now.
|
||||
for gname, gcounts := range g.GroupStats {
|
||||
for gname, gcounts := range g.groupAccum {
|
||||
if _, ok := groups[gname]; !ok {
|
||||
groups[gname] = GroupCounts{Counts: gcounts}
|
||||
groups[gname] = Group{Counts: gcounts}
|
||||
}
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
||||
|
||||
func (g *Grouper) threads(gname string, tracked []ThreadUpdate) []Threads {
|
||||
if len(tracked) == 0 {
|
||||
delete(g.threadAccum, gname)
|
||||
return nil
|
||||
}
|
||||
|
||||
ret := make([]Threads, 0, len(tracked))
|
||||
threads := make(map[string]Threads)
|
||||
|
||||
// First aggregate the thread metrics by thread name.
|
||||
for _, nc := range tracked {
|
||||
curthr := threads[nc.ThreadName]
|
||||
curthr.NumThreads++
|
||||
curthr.Counts.Add(nc.Latest)
|
||||
curthr.Name = nc.ThreadName
|
||||
threads[nc.ThreadName] = curthr
|
||||
}
|
||||
|
||||
// Add any accumulated counts to what was just observed,
|
||||
// and update the accumulators.
|
||||
if history := g.threadAccum[gname]; history != nil {
|
||||
for tname := range threads {
|
||||
if oldcounts, ok := history[tname]; ok {
|
||||
counts := threads[tname]
|
||||
counts.Add(Delta(oldcounts.Counts))
|
||||
threads[tname] = counts
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
g.threadAccum[gname] = threads
|
||||
|
||||
for _, thr := range threads {
|
||||
ret = append(ret, thr)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
|
|
|||
539
vendor/github.com/ncabatoff/process-exporter/proc/read.go
generated
vendored
539
vendor/github.com/ncabatoff/process-exporter/proc/read.go
generated
vendored
|
|
@ -2,18 +2,21 @@ package proc
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/procfs"
|
||||
"github.com/ncabatoff/procfs"
|
||||
)
|
||||
|
||||
func newProcIdStatic(pid, ppid int, startTime uint64, name string, cmdline []string) ProcIdStatic {
|
||||
return ProcIdStatic{ProcId{pid, startTime}, ProcStatic{name, cmdline, ppid, time.Time{}}}
|
||||
}
|
||||
// ErrProcNotExist indicates a process couldn't be read because it doesn't exist,
|
||||
// typically because it disappeared while we were reading it.
|
||||
var ErrProcNotExist = fmt.Errorf("process does not exist")
|
||||
|
||||
type (
|
||||
// ProcId uniquely identifies a process.
|
||||
ProcId struct {
|
||||
// ID uniquely identifies a process.
|
||||
ID struct {
|
||||
// UNIX process id
|
||||
Pid int
|
||||
// The time the process started after system boot, the value is expressed
|
||||
|
|
@ -21,82 +24,138 @@ type (
|
|||
StartTimeRel uint64
|
||||
}
|
||||
|
||||
// ProcStatic contains data read from /proc/pid/*
|
||||
ProcStatic struct {
|
||||
Name string
|
||||
Cmdline []string
|
||||
ParentPid int
|
||||
StartTime time.Time
|
||||
ThreadID ID
|
||||
|
||||
// Static contains data read from /proc/pid/*
|
||||
Static struct {
|
||||
Name string
|
||||
Cmdline []string
|
||||
ParentPid int
|
||||
StartTime time.Time
|
||||
EffectiveUID int
|
||||
}
|
||||
|
||||
// ProcMetrics contains data read from /proc/pid/*
|
||||
ProcMetrics struct {
|
||||
CpuTime float64
|
||||
ReadBytes uint64
|
||||
WriteBytes uint64
|
||||
// Counts are metric counters common to threads and processes and groups.
|
||||
Counts struct {
|
||||
CPUUserTime float64
|
||||
CPUSystemTime float64
|
||||
ReadBytes uint64
|
||||
WriteBytes uint64
|
||||
MajorPageFaults uint64
|
||||
MinorPageFaults uint64
|
||||
CtxSwitchVoluntary uint64
|
||||
CtxSwitchNonvoluntary uint64
|
||||
}
|
||||
|
||||
// Memory describes a proc's memory usage.
|
||||
Memory struct {
|
||||
ResidentBytes uint64
|
||||
VirtualBytes uint64
|
||||
OpenFDs uint64
|
||||
MaxFDs uint64
|
||||
VmSwapBytes uint64
|
||||
}
|
||||
|
||||
ProcIdStatic struct {
|
||||
ProcId
|
||||
ProcStatic
|
||||
// Filedesc describes a proc's file descriptor usage and soft limit.
|
||||
Filedesc struct {
|
||||
// Open is the count of open file descriptors, -1 if unknown.
|
||||
Open int64
|
||||
// Limit is the fd soft limit for the process.
|
||||
Limit uint64
|
||||
}
|
||||
|
||||
ProcInfo struct {
|
||||
ProcStatic
|
||||
ProcMetrics
|
||||
// States counts how many threads are in each state.
|
||||
States struct {
|
||||
Running int
|
||||
Sleeping int
|
||||
Waiting int
|
||||
Zombie int
|
||||
Other int
|
||||
}
|
||||
|
||||
ProcIdInfo struct {
|
||||
ProcId
|
||||
ProcStatic
|
||||
ProcMetrics
|
||||
// Metrics contains data read from /proc/pid/*
|
||||
Metrics struct {
|
||||
Counts
|
||||
Memory
|
||||
Filedesc
|
||||
NumThreads uint64
|
||||
States
|
||||
Wchan string
|
||||
}
|
||||
|
||||
// Thread contains per-thread data.
|
||||
Thread struct {
|
||||
ThreadID
|
||||
ThreadName string
|
||||
Counts
|
||||
Wchan string
|
||||
States
|
||||
}
|
||||
|
||||
// IDInfo groups all info for a single process.
|
||||
IDInfo struct {
|
||||
ID
|
||||
Static
|
||||
Metrics
|
||||
Threads []Thread
|
||||
}
|
||||
|
||||
// ProcIdInfoThreads struct {
|
||||
// ProcIdInfo
|
||||
// Threads []ProcThread
|
||||
// }
|
||||
|
||||
// Proc wraps the details of the underlying procfs-reading library.
|
||||
// Any of these methods may fail if the process has disapeared.
|
||||
// We try to return as much as possible rather than an error, e.g.
|
||||
// if some /proc files are unreadable.
|
||||
Proc interface {
|
||||
// GetPid() returns the POSIX PID (process id). They may be reused over time.
|
||||
GetPid() int
|
||||
// GetProcId() returns (pid,starttime), which can be considered a unique process id.
|
||||
// It may fail if the caller doesn't have permission to read /proc/<pid>/stat, or if
|
||||
// the process has disapeared.
|
||||
GetProcId() (ProcId, error)
|
||||
// GetProcID() returns (pid,starttime), which can be considered a unique process id.
|
||||
GetProcID() (ID, error)
|
||||
// GetStatic() returns various details read from files under /proc/<pid>/. Technically
|
||||
// name may not be static, but we'll pretend it is.
|
||||
// It may fail if the caller doesn't have permission to read those files, or if
|
||||
// the process has disapeared.
|
||||
GetStatic() (ProcStatic, error)
|
||||
GetStatic() (Static, error)
|
||||
// GetMetrics() returns various metrics read from files under /proc/<pid>/.
|
||||
// It may fail if the caller doesn't have permission to read those files, or if
|
||||
// the process has disapeared.
|
||||
GetMetrics() (ProcMetrics, error)
|
||||
// It returns an error on complete failure. Otherwise, it returns metrics
|
||||
// and 0 on complete success, 1 if some (like I/O) couldn't be read.
|
||||
GetMetrics() (Metrics, int, error)
|
||||
GetStates() (States, error)
|
||||
GetWchan() (string, error)
|
||||
GetCounts() (Counts, int, error)
|
||||
GetThreads() ([]Thread, error)
|
||||
}
|
||||
|
||||
// proc is a wrapper for procfs.Proc that caches results of some reads and implements Proc.
|
||||
proc struct {
|
||||
// proccache implements the Proc interface by acting as wrapper for procfs.Proc
|
||||
// that caches results of some reads.
|
||||
proccache struct {
|
||||
procfs.Proc
|
||||
procid *ProcId
|
||||
stat *procfs.ProcStat
|
||||
cmdline []string
|
||||
io *procfs.ProcIO
|
||||
bootTime uint64
|
||||
procid *ID
|
||||
stat *procfs.ProcStat
|
||||
status *procfs.ProcStatus
|
||||
cmdline []string
|
||||
io *procfs.ProcIO
|
||||
fs *FS
|
||||
wchan *string
|
||||
}
|
||||
|
||||
proc struct {
|
||||
proccache
|
||||
}
|
||||
|
||||
// procs is a fancier []Proc that saves on some copying.
|
||||
procs interface {
|
||||
get(int) Proc
|
||||
length() int
|
||||
}
|
||||
|
||||
// procfsprocs implements procs using procfs.
|
||||
procfsprocs struct {
|
||||
Procs []procfs.Proc
|
||||
bootTime uint64
|
||||
Procs []procfs.Proc
|
||||
fs *FS
|
||||
}
|
||||
|
||||
// ProcIter is an iterator over a sequence of procs.
|
||||
ProcIter interface {
|
||||
// Iter is an iterator over a sequence of procs.
|
||||
Iter interface {
|
||||
// Next returns true if the iterator is not exhausted.
|
||||
Next() bool
|
||||
// Close releases any resources the iterator uses.
|
||||
|
|
@ -105,7 +164,7 @@ type (
|
|||
Proc
|
||||
}
|
||||
|
||||
// procIterator implements the ProcIter interface using procfs.
|
||||
// procIterator implements the Iter interface
|
||||
procIterator struct {
|
||||
// procs is the list of Proc we're iterating over.
|
||||
procs
|
||||
|
|
@ -119,66 +178,101 @@ type (
|
|||
Proc
|
||||
}
|
||||
|
||||
procIdInfos []ProcIdInfo
|
||||
// Source is a source of procs.
|
||||
Source interface {
|
||||
// AllProcs returns all the processes in this source at this moment in time.
|
||||
AllProcs() Iter
|
||||
}
|
||||
|
||||
// FS implements Source.
|
||||
FS struct {
|
||||
procfs.FS
|
||||
BootTime uint64
|
||||
MountPoint string
|
||||
debug bool
|
||||
}
|
||||
)
|
||||
|
||||
func procInfoIter(ps ...ProcIdInfo) ProcIter {
|
||||
return &procIterator{procs: procIdInfos(ps), idx: -1}
|
||||
func (ii IDInfo) String() string {
|
||||
return fmt.Sprintf("%+v:%+v", ii.ID, ii.Static)
|
||||
}
|
||||
|
||||
func Info(p Proc) (ProcIdInfo, error) {
|
||||
id, err := p.GetProcId()
|
||||
if err != nil {
|
||||
return ProcIdInfo{}, err
|
||||
}
|
||||
static, err := p.GetStatic()
|
||||
if err != nil {
|
||||
return ProcIdInfo{}, err
|
||||
}
|
||||
metrics, err := p.GetMetrics()
|
||||
if err != nil {
|
||||
return ProcIdInfo{}, err
|
||||
}
|
||||
return ProcIdInfo{id, static, metrics}, nil
|
||||
// Add adds c2 to the counts.
|
||||
func (c *Counts) Add(c2 Delta) {
|
||||
c.CPUUserTime += c2.CPUUserTime
|
||||
c.CPUSystemTime += c2.CPUSystemTime
|
||||
c.ReadBytes += c2.ReadBytes
|
||||
c.WriteBytes += c2.WriteBytes
|
||||
c.MajorPageFaults += c2.MajorPageFaults
|
||||
c.MinorPageFaults += c2.MinorPageFaults
|
||||
c.CtxSwitchVoluntary += c2.CtxSwitchVoluntary
|
||||
c.CtxSwitchNonvoluntary += c2.CtxSwitchNonvoluntary
|
||||
}
|
||||
|
||||
func (p procIdInfos) get(i int) Proc {
|
||||
return &p[i]
|
||||
// Sub subtracts c2 from the counts.
|
||||
func (c Counts) Sub(c2 Counts) Delta {
|
||||
c.CPUUserTime -= c2.CPUUserTime
|
||||
c.CPUSystemTime -= c2.CPUSystemTime
|
||||
c.ReadBytes -= c2.ReadBytes
|
||||
c.WriteBytes -= c2.WriteBytes
|
||||
c.MajorPageFaults -= c2.MajorPageFaults
|
||||
c.MinorPageFaults -= c2.MinorPageFaults
|
||||
c.CtxSwitchVoluntary -= c2.CtxSwitchVoluntary
|
||||
c.CtxSwitchNonvoluntary -= c2.CtxSwitchNonvoluntary
|
||||
return Delta(c)
|
||||
}
|
||||
|
||||
func (p procIdInfos) length() int {
|
||||
return len(p)
|
||||
func (s *States) Add(s2 States) {
|
||||
s.Other += s2.Other
|
||||
s.Running += s2.Running
|
||||
s.Sleeping += s2.Sleeping
|
||||
s.Waiting += s2.Waiting
|
||||
s.Zombie += s2.Zombie
|
||||
}
|
||||
|
||||
func (p ProcIdInfo) GetPid() int {
|
||||
return p.ProcId.Pid
|
||||
func (p IDInfo) GetThreads() ([]Thread, error) {
|
||||
return p.Threads, nil
|
||||
}
|
||||
|
||||
func (p ProcIdInfo) GetProcId() (ProcId, error) {
|
||||
return p.ProcId, nil
|
||||
// GetPid implements Proc.
|
||||
func (p IDInfo) GetPid() int {
|
||||
return p.ID.Pid
|
||||
}
|
||||
|
||||
func (p ProcIdInfo) GetStatic() (ProcStatic, error) {
|
||||
return p.ProcStatic, nil
|
||||
// GetProcID implements Proc.
|
||||
func (p IDInfo) GetProcID() (ID, error) {
|
||||
return p.ID, nil
|
||||
}
|
||||
|
||||
func (p ProcIdInfo) GetMetrics() (ProcMetrics, error) {
|
||||
return p.ProcMetrics, nil
|
||||
// GetStatic implements Proc.
|
||||
func (p IDInfo) GetStatic() (Static, error) {
|
||||
return p.Static, nil
|
||||
}
|
||||
|
||||
func (p procfsprocs) get(i int) Proc {
|
||||
return &proc{Proc: p.Procs[i], bootTime: p.bootTime}
|
||||
// GetCounts implements Proc.
|
||||
func (p IDInfo) GetCounts() (Counts, int, error) {
|
||||
return p.Metrics.Counts, 0, nil
|
||||
}
|
||||
|
||||
func (p procfsprocs) length() int {
|
||||
return len(p.Procs)
|
||||
// GetMetrics implements Proc.
|
||||
func (p IDInfo) GetMetrics() (Metrics, int, error) {
|
||||
return p.Metrics, 0, nil
|
||||
}
|
||||
|
||||
func (p *proc) GetPid() int {
|
||||
// GetStates implements Proc.
|
||||
func (p IDInfo) GetStates() (States, error) {
|
||||
return p.States, nil
|
||||
}
|
||||
|
||||
func (p IDInfo) GetWchan() (string, error) {
|
||||
return p.Wchan, nil
|
||||
}
|
||||
|
||||
func (p *proccache) GetPid() int {
|
||||
return p.Proc.PID
|
||||
}
|
||||
|
||||
func (p *proc) GetStat() (procfs.ProcStat, error) {
|
||||
func (p *proccache) getStat() (procfs.ProcStat, error) {
|
||||
if p.stat == nil {
|
||||
stat, err := p.Proc.NewStat()
|
||||
if err != nil {
|
||||
|
|
@ -190,19 +284,32 @@ func (p *proc) GetStat() (procfs.ProcStat, error) {
|
|||
return *p.stat, nil
|
||||
}
|
||||
|
||||
func (p *proc) GetProcId() (ProcId, error) {
|
||||
if p.procid == nil {
|
||||
stat, err := p.GetStat()
|
||||
func (p *proccache) getStatus() (procfs.ProcStatus, error) {
|
||||
if p.status == nil {
|
||||
status, err := p.Proc.NewStatus()
|
||||
if err != nil {
|
||||
return ProcId{}, err
|
||||
return procfs.ProcStatus{}, err
|
||||
}
|
||||
p.procid = &ProcId{Pid: p.GetPid(), StartTimeRel: stat.Starttime}
|
||||
p.status = &status
|
||||
}
|
||||
|
||||
return *p.status, nil
|
||||
}
|
||||
|
||||
// GetProcID implements Proc.
|
||||
func (p *proccache) GetProcID() (ID, error) {
|
||||
if p.procid == nil {
|
||||
stat, err := p.getStat()
|
||||
if err != nil {
|
||||
return ID{}, err
|
||||
}
|
||||
p.procid = &ID{Pid: p.GetPid(), StartTimeRel: stat.Starttime}
|
||||
}
|
||||
|
||||
return *p.procid, nil
|
||||
}
|
||||
|
||||
func (p *proc) GetCmdLine() ([]string, error) {
|
||||
func (p *proccache) getCmdLine() ([]string, error) {
|
||||
if p.cmdline == nil {
|
||||
cmdline, err := p.Proc.CmdLine()
|
||||
if err != nil {
|
||||
|
|
@ -213,7 +320,18 @@ func (p *proc) GetCmdLine() ([]string, error) {
|
|||
return p.cmdline, nil
|
||||
}
|
||||
|
||||
func (p *proc) GetIo() (procfs.ProcIO, error) {
|
||||
func (p *proccache) getWchan() (string, error) {
|
||||
if p.wchan == nil {
|
||||
wchan, err := p.Proc.Wchan()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
p.wchan = &wchan
|
||||
}
|
||||
return *p.wchan, nil
|
||||
}
|
||||
|
||||
func (p *proccache) getIo() (procfs.ProcIO, error) {
|
||||
if p.io == nil {
|
||||
io, err := p.Proc.NewIO()
|
||||
if err != nil {
|
||||
|
|
@ -224,56 +342,199 @@ func (p *proc) GetIo() (procfs.ProcIO, error) {
|
|||
return *p.io, nil
|
||||
}
|
||||
|
||||
func (p proc) GetStatic() (ProcStatic, error) {
|
||||
cmdline, err := p.GetCmdLine()
|
||||
// GetStatic returns the ProcStatic corresponding to this proc.
|
||||
func (p *proccache) GetStatic() (Static, error) {
|
||||
// /proc/<pid>/cmdline is normally world-readable.
|
||||
cmdline, err := p.getCmdLine()
|
||||
if err != nil {
|
||||
return ProcStatic{}, err
|
||||
return Static{}, err
|
||||
}
|
||||
stat, err := p.GetStat()
|
||||
|
||||
// /proc/<pid>/stat is normally world-readable.
|
||||
stat, err := p.getStat()
|
||||
if err != nil {
|
||||
return ProcStatic{}, err
|
||||
return Static{}, err
|
||||
}
|
||||
startTime := time.Unix(int64(p.bootTime), 0)
|
||||
startTime := time.Unix(int64(p.fs.BootTime), 0).UTC()
|
||||
startTime = startTime.Add(time.Second / userHZ * time.Duration(stat.Starttime))
|
||||
return ProcStatic{
|
||||
Name: stat.Comm,
|
||||
Cmdline: cmdline,
|
||||
ParentPid: stat.PPID,
|
||||
StartTime: startTime,
|
||||
|
||||
// /proc/<pid>/status is normally world-readable.
|
||||
status, err := p.getStatus()
|
||||
if err != nil {
|
||||
return Static{}, err
|
||||
}
|
||||
|
||||
return Static{
|
||||
Name: stat.Comm,
|
||||
Cmdline: cmdline,
|
||||
ParentPid: stat.PPID,
|
||||
StartTime: startTime,
|
||||
EffectiveUID: status.UIDEffective,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p proc) GetMetrics() (ProcMetrics, error) {
|
||||
io, err := p.GetIo()
|
||||
func (p proc) GetCounts() (Counts, int, error) {
|
||||
stat, err := p.getStat()
|
||||
if err != nil {
|
||||
return ProcMetrics{}, err
|
||||
if err == os.ErrNotExist {
|
||||
err = ErrProcNotExist
|
||||
}
|
||||
return Counts{}, 0, err
|
||||
}
|
||||
stat, err := p.GetStat()
|
||||
|
||||
status, err := p.getStatus()
|
||||
if err != nil {
|
||||
return ProcMetrics{}, err
|
||||
if err == os.ErrNotExist {
|
||||
err = ErrProcNotExist
|
||||
}
|
||||
return Counts{}, 0, err
|
||||
}
|
||||
|
||||
io, err := p.getIo()
|
||||
softerrors := 0
|
||||
if err != nil {
|
||||
softerrors++
|
||||
}
|
||||
return Counts{
|
||||
CPUUserTime: float64(stat.UTime) / userHZ,
|
||||
CPUSystemTime: float64(stat.STime) / userHZ,
|
||||
ReadBytes: io.ReadBytes,
|
||||
WriteBytes: io.WriteBytes,
|
||||
MajorPageFaults: uint64(stat.MajFlt),
|
||||
MinorPageFaults: uint64(stat.MinFlt),
|
||||
CtxSwitchVoluntary: uint64(status.VoluntaryCtxtSwitches),
|
||||
CtxSwitchNonvoluntary: uint64(status.NonvoluntaryCtxtSwitches),
|
||||
}, softerrors, nil
|
||||
}
|
||||
|
||||
func (p proc) GetWchan() (string, error) {
|
||||
return p.getWchan()
|
||||
}
|
||||
|
||||
func (p proc) GetStates() (States, error) {
|
||||
stat, err := p.getStat()
|
||||
if err != nil {
|
||||
return States{}, err
|
||||
}
|
||||
|
||||
var s States
|
||||
switch stat.State {
|
||||
case "R":
|
||||
s.Running++
|
||||
case "S":
|
||||
s.Sleeping++
|
||||
case "D":
|
||||
s.Waiting++
|
||||
case "Z":
|
||||
s.Zombie++
|
||||
default:
|
||||
s.Other++
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// GetMetrics returns the current metrics for the proc. The results are
|
||||
// not cached.
|
||||
func (p proc) GetMetrics() (Metrics, int, error) {
|
||||
counts, softerrors, err := p.GetCounts()
|
||||
if err != nil {
|
||||
return Metrics{}, 0, err
|
||||
}
|
||||
|
||||
// We don't need to check for error here because p will have cached
|
||||
// the successful result of calling getStat in GetCounts.
|
||||
// Since GetMetrics isn't a pointer receiver method, our callers
|
||||
// won't see the effect of the caching between calls.
|
||||
stat, _ := p.getStat()
|
||||
|
||||
// Ditto for states
|
||||
states, _ := p.GetStates()
|
||||
|
||||
status, err := p.getStatus()
|
||||
if err != nil {
|
||||
return Metrics{}, 0, err
|
||||
}
|
||||
|
||||
numfds, err := p.Proc.FileDescriptorsLen()
|
||||
if err != nil {
|
||||
return ProcMetrics{}, err
|
||||
numfds = -1
|
||||
softerrors |= 1
|
||||
}
|
||||
limits, err := p.NewLimits()
|
||||
|
||||
limits, err := p.Proc.NewLimits()
|
||||
if err != nil {
|
||||
return ProcMetrics{}, err
|
||||
return Metrics{}, 0, err
|
||||
}
|
||||
return ProcMetrics{
|
||||
CpuTime: stat.CPUTime(),
|
||||
ReadBytes: io.ReadBytes,
|
||||
WriteBytes: io.WriteBytes,
|
||||
ResidentBytes: uint64(stat.ResidentMemory()),
|
||||
VirtualBytes: uint64(stat.VirtualMemory()),
|
||||
OpenFDs: uint64(numfds),
|
||||
MaxFDs: uint64(limits.OpenFiles),
|
||||
}, nil
|
||||
|
||||
wchan, err := p.getWchan()
|
||||
if err != nil {
|
||||
softerrors |= 1
|
||||
}
|
||||
|
||||
return Metrics{
|
||||
Counts: counts,
|
||||
Memory: Memory{
|
||||
ResidentBytes: uint64(stat.ResidentMemory()),
|
||||
VirtualBytes: uint64(stat.VirtualMemory()),
|
||||
VmSwapBytes: uint64(status.VmSwapKB * 1024),
|
||||
},
|
||||
Filedesc: Filedesc{
|
||||
Open: int64(numfds),
|
||||
Limit: uint64(limits.OpenFiles),
|
||||
},
|
||||
NumThreads: uint64(stat.NumThreads),
|
||||
States: states,
|
||||
Wchan: wchan,
|
||||
}, softerrors, nil
|
||||
}
|
||||
|
||||
type FS struct {
|
||||
procfs.FS
|
||||
BootTime uint64
|
||||
func (p proc) GetThreads() ([]Thread, error) {
|
||||
fs, err := p.fs.threadFs(p.PID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
threads := []Thread{}
|
||||
iter := fs.AllProcs()
|
||||
for iter.Next() {
|
||||
var id ID
|
||||
id, err = iter.GetProcID()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var static Static
|
||||
static, err = iter.GetStatic()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var counts Counts
|
||||
counts, _, err = iter.GetCounts()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
wchan, _ := iter.GetWchan()
|
||||
states, _ := iter.GetStates()
|
||||
|
||||
threads = append(threads, Thread{
|
||||
ThreadID: ThreadID(id),
|
||||
ThreadName: static.Name,
|
||||
Counts: counts,
|
||||
Wchan: wchan,
|
||||
States: states,
|
||||
})
|
||||
}
|
||||
err = iter.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(threads) < 2 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return threads, nil
|
||||
}
|
||||
|
||||
// See https://github.com/prometheus/procfs/blob/master/proc_stat.go for details on userHZ.
|
||||
|
|
@ -281,7 +542,7 @@ const userHZ = 100
|
|||
|
||||
// NewFS returns a new FS mounted under the given mountPoint. It will error
|
||||
// if the mount point can't be read.
|
||||
func NewFS(mountPoint string) (*FS, error) {
|
||||
func NewFS(mountPoint string, debug bool) (*FS, error) {
|
||||
fs, err := procfs.NewFS(mountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -290,17 +551,38 @@ func NewFS(mountPoint string) (*FS, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FS{fs, stat.BootTime}, nil
|
||||
return &FS{fs, stat.BootTime, mountPoint, debug}, nil
|
||||
}
|
||||
|
||||
func (fs *FS) AllProcs() ProcIter {
|
||||
func (fs *FS) threadFs(pid int) (*FS, error) {
|
||||
mountPoint := filepath.Join(fs.MountPoint, strconv.Itoa(pid), "task")
|
||||
tfs, err := procfs.NewFS(mountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FS{tfs, fs.BootTime, mountPoint, false}, nil
|
||||
}
|
||||
|
||||
// AllProcs implements Source.
|
||||
func (fs *FS) AllProcs() Iter {
|
||||
procs, err := fs.FS.AllProcs()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error reading procs: %v", err)
|
||||
}
|
||||
return &procIterator{procs: procfsprocs{procs, fs.BootTime}, err: err, idx: -1}
|
||||
return &procIterator{procs: procfsprocs{procs, fs}, err: err, idx: -1}
|
||||
}
|
||||
|
||||
// get implements procs.
|
||||
func (p procfsprocs) get(i int) Proc {
|
||||
return &proc{proccache{Proc: p.Procs[i], fs: p.fs}}
|
||||
}
|
||||
|
||||
// length implements procs.
|
||||
func (p procfsprocs) length() int {
|
||||
return len(p.Procs)
|
||||
}
|
||||
|
||||
// Next implements Iter.
|
||||
func (pi *procIterator) Next() bool {
|
||||
pi.idx++
|
||||
if pi.idx < pi.procs.length() {
|
||||
|
|
@ -311,6 +593,7 @@ func (pi *procIterator) Next() bool {
|
|||
return pi.idx < pi.procs.length()
|
||||
}
|
||||
|
||||
// Close implements Iter.
|
||||
func (pi *procIterator) Close() error {
|
||||
pi.Next()
|
||||
pi.procs = nil
|
||||
|
|
|
|||
499
vendor/github.com/ncabatoff/process-exporter/proc/tracker.go
generated
vendored
499
vendor/github.com/ncabatoff/process-exporter/proc/tracker.go
generated
vendored
|
|
@ -2,179 +2,432 @@ package proc
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"log"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
seq "github.com/ncabatoff/go-seq/seq"
|
||||
common "github.com/ncabatoff/process-exporter"
|
||||
)
|
||||
|
||||
type (
|
||||
Counts struct {
|
||||
Cpu float64
|
||||
ReadBytes uint64
|
||||
WriteBytes uint64
|
||||
}
|
||||
|
||||
Memory struct {
|
||||
Resident uint64
|
||||
Virtual uint64
|
||||
}
|
||||
|
||||
Filedesc struct {
|
||||
Open uint64
|
||||
Limit uint64
|
||||
}
|
||||
|
||||
// Tracker tracks processes and records metrics.
|
||||
Tracker struct {
|
||||
// Tracked holds the processes are being monitored. Processes
|
||||
// namer determines what processes to track and names them
|
||||
namer common.MatchNamer
|
||||
// tracked holds the processes are being monitored. Processes
|
||||
// may be blacklisted such that they no longer get tracked by
|
||||
// setting their value in the Tracked map to nil.
|
||||
Tracked map[ProcId]*TrackedProc
|
||||
// ProcIds is a map from pid to ProcId. This is a convenience
|
||||
// setting their value in the tracked map to nil.
|
||||
tracked map[ID]*trackedProc
|
||||
// procIds is a map from pid to ProcId. This is a convenience
|
||||
// to allow finding the Tracked entry of a parent process.
|
||||
ProcIds map[int]ProcId
|
||||
procIds map[int]ID
|
||||
// trackChildren makes Tracker track descendants of procs the
|
||||
// namer wanted tracked.
|
||||
trackChildren bool
|
||||
// never ignore processes, i.e. always re-check untracked processes in case comm has changed
|
||||
alwaysRecheck bool
|
||||
username map[int]string
|
||||
debug bool
|
||||
}
|
||||
|
||||
// TrackedProc accumulates metrics for a process, as well as
|
||||
// Delta is an alias of Counts used to signal that its contents are not
|
||||
// totals, but rather the result of subtracting two totals.
|
||||
Delta Counts
|
||||
|
||||
trackedThread struct {
|
||||
name string
|
||||
accum Counts
|
||||
latest Delta
|
||||
lastUpdate time.Time
|
||||
wchan string
|
||||
}
|
||||
|
||||
// trackedProc accumulates metrics for a process, as well as
|
||||
// remembering an optional GroupName tag associated with it.
|
||||
TrackedProc struct {
|
||||
trackedProc struct {
|
||||
// lastUpdate is used internally during the update cycle to find which procs have exited
|
||||
lastUpdate time.Time
|
||||
// info is the most recently obtained info for this proc
|
||||
info ProcInfo
|
||||
// accum is the total CPU and IO accrued since we started tracking this proc
|
||||
accum Counts
|
||||
// lastaccum is the CPU and IO accrued in the last Update()
|
||||
lastaccum Counts
|
||||
// GroupName is an optional tag for this proc.
|
||||
GroupName string
|
||||
// static
|
||||
static Static
|
||||
metrics Metrics
|
||||
// lastaccum is the increment to the counters seen in the last update.
|
||||
lastaccum Delta
|
||||
// groupName is the tag for this proc given by the namer.
|
||||
groupName string
|
||||
threads map[ThreadID]trackedThread
|
||||
}
|
||||
|
||||
trackedStats struct {
|
||||
aggregate, latest Counts
|
||||
// ThreadUpdate describes what's changed for a thread since the last cycle.
|
||||
ThreadUpdate struct {
|
||||
// ThreadName is the name of the thread based on field of stat.
|
||||
ThreadName string
|
||||
// Latest is how much the counts increased since last cycle.
|
||||
Latest Delta
|
||||
}
|
||||
|
||||
// Update reports on the latest stats for a process.
|
||||
Update struct {
|
||||
// GroupName is the name given by the namer to the process.
|
||||
GroupName string
|
||||
// Latest is how much the counts increased since last cycle.
|
||||
Latest Delta
|
||||
// Memory is the current memory usage.
|
||||
Memory
|
||||
// Filedesc is the current fd usage/limit.
|
||||
Filedesc
|
||||
start time.Time
|
||||
// Start is the time the process started.
|
||||
Start time.Time
|
||||
// NumThreads is the number of threads.
|
||||
NumThreads uint64
|
||||
// States is how many processes are in which run state.
|
||||
States
|
||||
// Wchans is how many threads are in each non-zero wchan.
|
||||
Wchans map[string]int
|
||||
// Threads are the thread updates for this process.
|
||||
Threads []ThreadUpdate
|
||||
}
|
||||
|
||||
// CollectErrors describes non-fatal errors found while collecting proc
|
||||
// metrics.
|
||||
CollectErrors struct {
|
||||
// Read is incremented every time GetMetrics() returns an error.
|
||||
// This means we failed to load even the basics for the process,
|
||||
// and not just because it disappeared on us.
|
||||
Read int
|
||||
// Partial is incremented every time we're unable to collect
|
||||
// some metrics (e.g. I/O) for a tracked proc, but we're still able
|
||||
// to get the basic stuff like cmdline and core stats.
|
||||
Partial int
|
||||
}
|
||||
)
|
||||
|
||||
func (tp *TrackedProc) GetName() string {
|
||||
return tp.info.Name
|
||||
func lessUpdateGroupName(x, y Update) bool { return x.GroupName < y.GroupName }
|
||||
|
||||
func lessThreadUpdate(x, y ThreadUpdate) bool { return seq.Compare(x, y) < 0 }
|
||||
|
||||
func lessCounts(x, y Counts) bool { return seq.Compare(x, y) < 0 }
|
||||
|
||||
func (tp *trackedProc) getUpdate() Update {
|
||||
u := Update{
|
||||
GroupName: tp.groupName,
|
||||
Latest: tp.lastaccum,
|
||||
Memory: tp.metrics.Memory,
|
||||
Filedesc: tp.metrics.Filedesc,
|
||||
Start: tp.static.StartTime,
|
||||
NumThreads: tp.metrics.NumThreads,
|
||||
States: tp.metrics.States,
|
||||
Wchans: make(map[string]int),
|
||||
}
|
||||
if tp.metrics.Wchan != "" {
|
||||
u.Wchans[tp.metrics.Wchan] = 1
|
||||
}
|
||||
if len(tp.threads) > 1 {
|
||||
for _, tt := range tp.threads {
|
||||
u.Threads = append(u.Threads, ThreadUpdate{tt.name, tt.latest})
|
||||
if tt.wchan != "" {
|
||||
u.Wchans[tt.wchan]++
|
||||
}
|
||||
}
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (tp *TrackedProc) GetCmdLine() []string {
|
||||
return tp.info.Cmdline
|
||||
}
|
||||
|
||||
func (tp *TrackedProc) GetStats() trackedStats {
|
||||
mem := Memory{Resident: tp.info.ResidentBytes, Virtual: tp.info.VirtualBytes}
|
||||
fd := Filedesc{Open: tp.info.OpenFDs, Limit: tp.info.MaxFDs}
|
||||
return trackedStats{
|
||||
aggregate: tp.accum,
|
||||
latest: tp.lastaccum,
|
||||
Memory: mem,
|
||||
Filedesc: fd,
|
||||
start: tp.info.StartTime,
|
||||
// NewTracker creates a Tracker.
|
||||
func NewTracker(namer common.MatchNamer, trackChildren, alwaysRecheck, debug bool) *Tracker {
|
||||
return &Tracker{
|
||||
namer: namer,
|
||||
tracked: make(map[ID]*trackedProc),
|
||||
procIds: make(map[int]ID),
|
||||
trackChildren: trackChildren,
|
||||
alwaysRecheck: alwaysRecheck,
|
||||
username: make(map[int]string),
|
||||
debug: debug,
|
||||
}
|
||||
}
|
||||
|
||||
func NewTracker() *Tracker {
|
||||
return &Tracker{Tracked: make(map[ProcId]*TrackedProc), ProcIds: make(map[int]ProcId)}
|
||||
func (t *Tracker) track(groupName string, idinfo IDInfo) {
|
||||
tproc := trackedProc{
|
||||
groupName: groupName,
|
||||
static: idinfo.Static,
|
||||
metrics: idinfo.Metrics,
|
||||
}
|
||||
if len(idinfo.Threads) > 0 {
|
||||
tproc.threads = make(map[ThreadID]trackedThread)
|
||||
for _, thr := range idinfo.Threads {
|
||||
tproc.threads[thr.ThreadID] = trackedThread{
|
||||
thr.ThreadName, thr.Counts, Delta{}, time.Time{}, thr.Wchan}
|
||||
}
|
||||
}
|
||||
t.tracked[idinfo.ID] = &tproc
|
||||
}
|
||||
|
||||
func (t *Tracker) Track(groupName string, idinfo ProcIdInfo) {
|
||||
info := ProcInfo{idinfo.ProcStatic, idinfo.ProcMetrics}
|
||||
t.Tracked[idinfo.ProcId] = &TrackedProc{GroupName: groupName, info: info}
|
||||
func (t *Tracker) ignore(id ID) {
|
||||
// only ignore ID if we didn't set recheck to true
|
||||
if t.alwaysRecheck == false {
|
||||
t.tracked[id] = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tracker) Ignore(id ProcId) {
|
||||
t.Tracked[id] = nil
|
||||
func (tp *trackedProc) update(metrics Metrics, now time.Time, cerrs *CollectErrors, threads []Thread) {
|
||||
// newcounts: resource consumption since last cycle
|
||||
newcounts := metrics.Counts
|
||||
tp.lastaccum = newcounts.Sub(tp.metrics.Counts)
|
||||
tp.metrics = metrics
|
||||
tp.lastUpdate = now
|
||||
if len(threads) > 1 {
|
||||
if tp.threads == nil {
|
||||
tp.threads = make(map[ThreadID]trackedThread)
|
||||
}
|
||||
for _, thr := range threads {
|
||||
tt := trackedThread{thr.ThreadName, thr.Counts, Delta{}, now, thr.Wchan}
|
||||
if old, ok := tp.threads[thr.ThreadID]; ok {
|
||||
tt.latest, tt.accum = thr.Counts.Sub(old.accum), thr.Counts
|
||||
}
|
||||
tp.threads[thr.ThreadID] = tt
|
||||
}
|
||||
for id, tt := range tp.threads {
|
||||
if tt.lastUpdate != now {
|
||||
delete(tp.threads, id)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tp.threads = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Scan procs and update metrics for those which are tracked. Processes that have gone
|
||||
// away get removed from the Tracked map. New processes are returned, along with the count
|
||||
// of permission errors.
|
||||
func (t *Tracker) Update(procs ProcIter) ([]ProcIdInfo, int, error) {
|
||||
now := time.Now()
|
||||
var newProcs []ProcIdInfo
|
||||
var permissionErrors int
|
||||
// handleProc updates the tracker if it's a known and not ignored proc.
|
||||
// If it's neither known nor ignored, newProc will be non-nil.
|
||||
// It is not an error if the process disappears while we are reading
|
||||
// its info out of /proc, it just means nothing will be returned and
|
||||
// the tracker will be unchanged.
|
||||
func (t *Tracker) handleProc(proc Proc, updateTime time.Time) (*IDInfo, CollectErrors) {
|
||||
var cerrs CollectErrors
|
||||
procID, err := proc.GetProcID()
|
||||
if err != nil {
|
||||
return nil, cerrs
|
||||
}
|
||||
|
||||
// Do nothing if we're ignoring this proc.
|
||||
last, known := t.tracked[procID]
|
||||
if known && last == nil {
|
||||
return nil, cerrs
|
||||
}
|
||||
|
||||
metrics, softerrors, err := proc.GetMetrics()
|
||||
if err != nil {
|
||||
if t.debug {
|
||||
log.Printf("error reading metrics for %+v: %v", procID, err)
|
||||
}
|
||||
// This usually happens due to the proc having exited, i.e.
|
||||
// we lost the race. We don't count that as an error.
|
||||
if err != ErrProcNotExist {
|
||||
cerrs.Read++
|
||||
}
|
||||
return nil, cerrs
|
||||
}
|
||||
|
||||
var threads []Thread
|
||||
threads, err = proc.GetThreads()
|
||||
if err != nil {
|
||||
softerrors |= 1
|
||||
}
|
||||
cerrs.Partial += softerrors
|
||||
|
||||
if len(threads) > 0 {
|
||||
metrics.Counts.CtxSwitchNonvoluntary, metrics.Counts.CtxSwitchVoluntary = 0, 0
|
||||
for _, thread := range threads {
|
||||
metrics.Counts.CtxSwitchNonvoluntary += thread.Counts.CtxSwitchNonvoluntary
|
||||
metrics.Counts.CtxSwitchVoluntary += thread.Counts.CtxSwitchVoluntary
|
||||
metrics.States.Add(thread.States)
|
||||
}
|
||||
}
|
||||
|
||||
var newProc *IDInfo
|
||||
if known {
|
||||
last.update(metrics, updateTime, &cerrs, threads)
|
||||
} else {
|
||||
static, err := proc.GetStatic()
|
||||
if err != nil {
|
||||
if t.debug {
|
||||
log.Printf("error reading static details for %+v: %v", procID, err)
|
||||
}
|
||||
return nil, cerrs
|
||||
}
|
||||
newProc = &IDInfo{procID, static, metrics, threads}
|
||||
if t.debug {
|
||||
log.Printf("found new proc: %s", newProc)
|
||||
}
|
||||
|
||||
// Is this a new process with the same pid as one we already know?
|
||||
// Then delete it from the known map, otherwise the cleanup in Update()
|
||||
// will remove the ProcIds entry we're creating here.
|
||||
if oldProcID, ok := t.procIds[procID.Pid]; ok {
|
||||
delete(t.tracked, oldProcID)
|
||||
}
|
||||
t.procIds[procID.Pid] = procID
|
||||
}
|
||||
return newProc, cerrs
|
||||
}
|
||||
|
||||
// update scans procs and updates metrics for those which are tracked. Processes
|
||||
// that have gone away get removed from the Tracked map. New processes are
|
||||
// returned, along with the count of nonfatal errors.
|
||||
func (t *Tracker) update(procs Iter) ([]IDInfo, CollectErrors, error) {
|
||||
var newProcs []IDInfo
|
||||
var colErrs CollectErrors
|
||||
var now = time.Now()
|
||||
|
||||
for procs.Next() {
|
||||
procId, err := procs.GetProcId()
|
||||
if err != nil {
|
||||
continue
|
||||
newProc, cerrs := t.handleProc(procs, now)
|
||||
if newProc != nil {
|
||||
newProcs = append(newProcs, *newProc)
|
||||
}
|
||||
|
||||
last, known := t.Tracked[procId]
|
||||
|
||||
// Are we ignoring this proc?
|
||||
if known && last == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO if just the io file is unreadable, should we still return the other metrics?
|
||||
metrics, err := procs.GetMetrics()
|
||||
if err != nil {
|
||||
if os.IsPermission(err) {
|
||||
permissionErrors++
|
||||
t.Ignore(procId)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if known {
|
||||
var newaccum, lastaccum Counts
|
||||
dcpu := metrics.CpuTime - last.info.CpuTime
|
||||
drbytes := metrics.ReadBytes - last.info.ReadBytes
|
||||
dwbytes := metrics.WriteBytes - last.info.WriteBytes
|
||||
|
||||
lastaccum = Counts{Cpu: dcpu, ReadBytes: drbytes, WriteBytes: dwbytes}
|
||||
newaccum = Counts{
|
||||
Cpu: last.accum.Cpu + lastaccum.Cpu,
|
||||
ReadBytes: last.accum.ReadBytes + lastaccum.ReadBytes,
|
||||
WriteBytes: last.accum.WriteBytes + lastaccum.WriteBytes,
|
||||
}
|
||||
|
||||
last.info.ProcMetrics = metrics
|
||||
last.lastUpdate = now
|
||||
last.accum = newaccum
|
||||
last.lastaccum = lastaccum
|
||||
} else {
|
||||
static, err := procs.GetStatic()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
newProcs = append(newProcs, ProcIdInfo{procId, static, metrics})
|
||||
|
||||
// Is this a new process with the same pid as one we already know?
|
||||
if oldProcId, ok := t.ProcIds[procId.Pid]; ok {
|
||||
// Delete it from known, otherwise the cleanup below will remove the
|
||||
// ProcIds entry we're about to create
|
||||
delete(t.Tracked, oldProcId)
|
||||
}
|
||||
t.ProcIds[procId.Pid] = procId
|
||||
}
|
||||
|
||||
colErrs.Read += cerrs.Read
|
||||
colErrs.Partial += cerrs.Partial
|
||||
}
|
||||
|
||||
err := procs.Close()
|
||||
if err != nil {
|
||||
return nil, permissionErrors, fmt.Errorf("Error reading procs: %v", err)
|
||||
return nil, colErrs, fmt.Errorf("Error reading procs: %v", err)
|
||||
}
|
||||
|
||||
// Rather than allocating a new map each time to detect procs that have
|
||||
// disappeared, we bump the last update time on those that are still
|
||||
// present. Then as a second pass we traverse the map looking for
|
||||
// stale procs and removing them.
|
||||
for procId, pinfo := range t.Tracked {
|
||||
for procID, pinfo := range t.tracked {
|
||||
if pinfo == nil {
|
||||
// TODO is this a bug? we're not tracking the proc so we don't see it go away so ProcIds
|
||||
// and Tracked are leaking?
|
||||
continue
|
||||
}
|
||||
if pinfo.lastUpdate != now {
|
||||
delete(t.Tracked, procId)
|
||||
delete(t.ProcIds, procId.Pid)
|
||||
delete(t.tracked, procID)
|
||||
delete(t.procIds, procID.Pid)
|
||||
}
|
||||
}
|
||||
|
||||
return newProcs, permissionErrors, nil
|
||||
return newProcs, colErrs, nil
|
||||
}
|
||||
|
||||
// checkAncestry walks the process tree recursively towards the root,
|
||||
// stopping at pid 1 or upon finding a parent that's already tracked
|
||||
// or ignored. If we find a tracked parent track this one too; if not,
|
||||
// ignore this one.
|
||||
func (t *Tracker) checkAncestry(idinfo IDInfo, newprocs map[ID]IDInfo) string {
|
||||
ppid := idinfo.ParentPid
|
||||
pProcID := t.procIds[ppid]
|
||||
if pProcID.Pid < 1 {
|
||||
if t.debug {
|
||||
log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo)
|
||||
}
|
||||
// Reached root of process tree without finding a tracked parent.
|
||||
t.ignore(idinfo.ID)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Is the parent already known to the tracker?
|
||||
if ptproc, ok := t.tracked[pProcID]; ok {
|
||||
if ptproc != nil {
|
||||
if t.debug {
|
||||
log.Printf("matched as %q because child of %+v: %+v",
|
||||
ptproc.groupName, pProcID, idinfo)
|
||||
}
|
||||
// We've found a tracked parent.
|
||||
t.track(ptproc.groupName, idinfo)
|
||||
return ptproc.groupName
|
||||
}
|
||||
// We've found an untracked parent.
|
||||
t.ignore(idinfo.ID)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Is the parent another new process?
|
||||
if pinfoid, ok := newprocs[pProcID]; ok {
|
||||
if name := t.checkAncestry(pinfoid, newprocs); name != "" {
|
||||
if t.debug {
|
||||
log.Printf("matched as %q because child of %+v: %+v",
|
||||
name, pProcID, idinfo)
|
||||
}
|
||||
// We've found a tracked parent, which implies this entire lineage should be tracked.
|
||||
t.track(name, idinfo)
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
// Parent is dead, i.e. we never saw it, or there's no tracked proc in our ancestry.
|
||||
if t.debug {
|
||||
log.Printf("ignoring unmatched proc with no matched parent: %+v", idinfo)
|
||||
}
|
||||
t.ignore(idinfo.ID)
|
||||
return ""
|
||||
}
|
||||
|
||||
func (t *Tracker) lookupUid(uid int) string {
|
||||
if name, ok := t.username[uid]; ok {
|
||||
return name
|
||||
}
|
||||
|
||||
var name string
|
||||
uidstr := strconv.Itoa(uid)
|
||||
u, err := user.LookupId(uidstr)
|
||||
if err != nil {
|
||||
name = uidstr
|
||||
} else {
|
||||
name = u.Username
|
||||
}
|
||||
t.username[uid] = name
|
||||
return name
|
||||
}
|
||||
|
||||
// Update modifies the tracker's internal state based on what it reads from
|
||||
// iter. Tracks any new procs the namer wants tracked, and updates
|
||||
// its metrics for existing tracked procs. Returns nonfatal errors
|
||||
// and the status of all tracked procs, or an error if fatal.
|
||||
func (t *Tracker) Update(iter Iter) (CollectErrors, []Update, error) {
|
||||
newProcs, colErrs, err := t.update(iter)
|
||||
if err != nil {
|
||||
return colErrs, nil, err
|
||||
}
|
||||
|
||||
// Step 1: track any new proc that should be tracked based on its name and cmdline.
|
||||
untracked := make(map[ID]IDInfo)
|
||||
for _, idinfo := range newProcs {
|
||||
nacl := common.ProcAttributes{
|
||||
Name: idinfo.Name,
|
||||
Cmdline: idinfo.Cmdline,
|
||||
Username: t.lookupUid(idinfo.EffectiveUID),
|
||||
}
|
||||
wanted, gname := t.namer.MatchAndName(nacl)
|
||||
if wanted {
|
||||
if t.debug {
|
||||
log.Printf("matched as %q: %+v", gname, idinfo)
|
||||
}
|
||||
t.track(gname, idinfo)
|
||||
} else {
|
||||
untracked[idinfo.ID] = idinfo
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: track any untracked new proc that should be tracked because its parent is tracked.
|
||||
if t.trackChildren {
|
||||
for _, idinfo := range untracked {
|
||||
if _, ok := t.tracked[idinfo.ID]; ok {
|
||||
// Already tracked or ignored in an earlier iteration
|
||||
continue
|
||||
}
|
||||
|
||||
t.checkAncestry(idinfo, untracked)
|
||||
}
|
||||
}
|
||||
|
||||
tp := []Update{}
|
||||
for _, tproc := range t.tracked {
|
||||
if tproc != nil {
|
||||
tp = append(tp, tproc.getUpdate())
|
||||
}
|
||||
}
|
||||
return colErrs, tp, nil
|
||||
}
|
||||
|
|
|
|||
1
vendor/github.com/ncabatoff/procfs/.gitignore
generated
vendored
Normal file
1
vendor/github.com/ncabatoff/procfs/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
/fixtures/
|
||||
12
vendor/github.com/ncabatoff/procfs/.travis.yml
generated
vendored
Normal file
12
vendor/github.com/ncabatoff/procfs/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
|
||||
go_import_path: github.com/prometheus/procfs
|
||||
|
||||
script:
|
||||
- make style check_license vet test staticcheck
|
||||
18
vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md
generated
vendored
Normal file
18
vendor/github.com/ncabatoff/procfs/CONTRIBUTING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Contributing
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull request,
|
||||
addressing (with `@...`) the maintainer of this repository (see
|
||||
[MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
This will avoid unnecessary work and surely give you and us a good deal
|
||||
of inspiration.
|
||||
|
||||
* Relevant coding style guidelines are the [Go Code Review
|
||||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
201
vendor/github.com/ncabatoff/procfs/LICENSE
generated
vendored
Normal file
201
vendor/github.com/ncabatoff/procfs/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
1
vendor/github.com/ncabatoff/procfs/MAINTAINERS.md
generated
vendored
Normal file
1
vendor/github.com/ncabatoff/procfs/MAINTAINERS.md
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
* Tobias Schmidt <tobidt@gmail.com>
|
||||
77
vendor/github.com/ncabatoff/procfs/Makefile
generated
vendored
Normal file
77
vendor/github.com/ncabatoff/procfs/Makefile
generated
vendored
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Ensure GOBIN is not set during build so that promu is installed to the correct path
|
||||
unexport GOBIN
|
||||
|
||||
GO ?= go
|
||||
GOFMT ?= $(GO)fmt
|
||||
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
|
||||
pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
|
||||
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
|
||||
ifdef DEBUG
|
||||
bindata_flags = -debug
|
||||
endif
|
||||
|
||||
STATICCHECK_IGNORE =
|
||||
|
||||
all: format staticcheck build test
|
||||
|
||||
style:
|
||||
@echo ">> checking code style"
|
||||
@! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
|
||||
|
||||
check_license:
|
||||
@echo ">> checking license header"
|
||||
@./scripts/check_license.sh
|
||||
|
||||
test: fixtures/.unpacked sysfs/fixtures/.unpacked
|
||||
@echo ">> running all tests"
|
||||
@$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples)
|
||||
|
||||
format:
|
||||
@echo ">> formatting code"
|
||||
@$(GO) fmt $(pkgs)
|
||||
|
||||
vet:
|
||||
@echo ">> vetting code"
|
||||
@$(GO) vet $(pkgs)
|
||||
|
||||
staticcheck: $(STATICCHECK)
|
||||
@echo ">> running staticcheck"
|
||||
@$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
|
||||
|
||||
%/.unpacked: %.ttar
|
||||
./ttar -C $(dir $*) -x -f $*.ttar
|
||||
touch $@
|
||||
|
||||
update_fixtures: fixtures.ttar sysfs/fixtures.ttar
|
||||
|
||||
%fixtures.ttar: %/fixtures
|
||||
rm -v $(dir $*)fixtures/.unpacked
|
||||
./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
|
||||
|
||||
$(FIRST_GOPATH)/bin/staticcheck:
|
||||
@GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
|
||||
|
||||
.PHONY: all style check_license format test vet staticcheck
|
||||
|
||||
# Declaring the binaries at their default locations as PHONY targets is a hack
|
||||
# to ensure the latest version is downloaded on every make execution.
|
||||
# If this is not desired, copy/symlink these binaries to a different path and
|
||||
# set the respective environment variables.
|
||||
.PHONY: $(GOPATH)/bin/staticcheck
|
||||
7
vendor/github.com/ncabatoff/procfs/NOTICE
generated
vendored
Normal file
7
vendor/github.com/ncabatoff/procfs/NOTICE
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
procfs provides functions to retrieve system, kernel and process
|
||||
metrics from the pseudo-filesystem proc.
|
||||
|
||||
Copyright 2014-2015 The Prometheus Authors
|
||||
|
||||
This product includes software developed at
|
||||
SoundCloud Ltd. (http://soundcloud.com/).
|
||||
11
vendor/github.com/ncabatoff/procfs/README.md
generated
vendored
Normal file
11
vendor/github.com/ncabatoff/procfs/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# procfs
|
||||
|
||||
This procfs package provides functions to retrieve system, kernel and process
|
||||
metrics from the pseudo-filesystem proc.
|
||||
|
||||
*WARNING*: This package is a work in progress. Its API may still break in
|
||||
backwards-incompatible ways without warnings. Use it at your own risk.
|
||||
|
||||
[](https://godoc.org/github.com/prometheus/procfs)
|
||||
[](https://travis-ci.org/prometheus/procfs)
|
||||
[](https://goreportcard.com/report/github.com/prometheus/procfs)
|
||||
95
vendor/github.com/ncabatoff/procfs/buddyinfo.go
generated
vendored
Normal file
95
vendor/github.com/ncabatoff/procfs/buddyinfo.go
generated
vendored
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A BuddyInfo is the details parsed from /proc/buddyinfo.
|
||||
// The data is comprised of an array of free fragments of each size.
|
||||
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
|
||||
type BuddyInfo struct {
|
||||
Node string
|
||||
Zone string
|
||||
Sizes []float64
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics.
|
||||
func NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return fs.NewBuddyInfo()
|
||||
}
|
||||
|
||||
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
|
||||
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
|
||||
file, err := os.Open(fs.Path("buddyinfo"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseBuddyInfo(file)
|
||||
}
|
||||
|
||||
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
|
||||
var (
|
||||
buddyInfo = []BuddyInfo{}
|
||||
scanner = bufio.NewScanner(r)
|
||||
bucketCount = -1
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
var err error
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(line)
|
||||
|
||||
if len(parts) < 4 {
|
||||
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
|
||||
}
|
||||
|
||||
node := strings.TrimRight(parts[1], ",")
|
||||
zone := strings.TrimRight(parts[3], ",")
|
||||
arraySize := len(parts[4:])
|
||||
|
||||
if bucketCount == -1 {
|
||||
bucketCount = arraySize
|
||||
} else {
|
||||
if bucketCount != arraySize {
|
||||
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
|
||||
}
|
||||
}
|
||||
|
||||
sizes := make([]float64, arraySize)
|
||||
for i := 0; i < arraySize; i++ {
|
||||
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
|
||||
}
|
||||
|
||||
return buddyInfo, scanner.Err()
|
||||
}
|
||||
45
vendor/github.com/ncabatoff/procfs/doc.go
generated
vendored
Normal file
45
vendor/github.com/ncabatoff/procfs/doc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
// Copyright 2014 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package procfs provides functions to retrieve system, kernel and process
|
||||
// metrics from the pseudo-filesystem proc.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "fmt"
|
||||
// "log"
|
||||
//
|
||||
// "github.com/prometheus/procfs"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// p, err := procfs.Self()
|
||||
// if err != nil {
|
||||
// log.Fatalf("could not get process: %s", err)
|
||||
// }
|
||||
//
|
||||
// stat, err := p.NewStat()
|
||||
// if err != nil {
|
||||
// log.Fatalf("could not get process stat: %s", err)
|
||||
// }
|
||||
//
|
||||
// fmt.Printf("command: %s\n", stat.Comm)
|
||||
// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
|
||||
// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
|
||||
// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
|
||||
// }
|
||||
//
|
||||
package procfs
|
||||
500
vendor/github.com/ncabatoff/procfs/fixtures.ttar
generated
vendored
Normal file
500
vendor/github.com/ncabatoff/procfs/fixtures.ttar
generated
vendored
Normal file
|
|
@ -0,0 +1,500 @@
|
|||
# Archive created by ttar -c -f fixtures.ttar fixtures/
|
||||
Directory: fixtures
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/cmdline
|
||||
Lines: 1
|
||||
vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/comm
|
||||
Lines: 1
|
||||
vim
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/exe
|
||||
SymlinkTo: /usr/bin/vim
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/fd
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/0
|
||||
SymlinkTo: ../../symlinktargets/abc
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/1
|
||||
SymlinkTo: ../../symlinktargets/def
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/10
|
||||
SymlinkTo: ../../symlinktargets/xyz
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/2
|
||||
SymlinkTo: ../../symlinktargets/ghi
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/fd/3
|
||||
SymlinkTo: ../../symlinktargets/uvw
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/io
|
||||
Lines: 7
|
||||
rchar: 750339
|
||||
wchar: 818609
|
||||
syscr: 7405
|
||||
syscw: 5245
|
||||
read_bytes: 1024
|
||||
write_bytes: 2048
|
||||
cancelled_write_bytes: -1024
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/limits
|
||||
Lines: 17
|
||||
Limit Soft Limit Hard Limit Units
|
||||
Max cpu time unlimited unlimited seconds
|
||||
Max file size unlimited unlimited bytes
|
||||
Max data size unlimited unlimited bytes
|
||||
Max stack size 8388608 unlimited bytes
|
||||
Max core file size 0 unlimited bytes
|
||||
Max resident set unlimited unlimited bytes
|
||||
Max processes 62898 62898 processes
|
||||
Max open files 2048 4096 files
|
||||
Max locked memory 65536 65536 bytes
|
||||
Max address space 8589934592 unlimited bytes
|
||||
Max file locks unlimited unlimited locks
|
||||
Max pending signals 62898 62898 signals
|
||||
Max msgqueue size 819200 819200 bytes
|
||||
Max nice priority 0 0
|
||||
Max realtime priority 0 0
|
||||
Max realtime timeout unlimited unlimited us
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/mountstats
|
||||
Lines: 19
|
||||
device rootfs mounted on / with fstype rootfs
|
||||
device sysfs mounted on /sys with fstype sysfs
|
||||
device proc mounted on /proc with fstype proc
|
||||
device /dev/sda1 mounted on / with fstype ext4
|
||||
device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
|
||||
opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
|
||||
age: 13968
|
||||
caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
|
||||
nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
|
||||
sec: flavor=1,pseudoflavor=1
|
||||
events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
|
||||
bytes: 1207640230 0 0 0 1210214218 0 295483 0
|
||||
RPC iostats version: 1.0 p/v: 100003/4 (nfs)
|
||||
xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
|
||||
per-op statistics
|
||||
NULL: 0 0 0 0 0 0 0 0
|
||||
READ: 1298 1298 0 207680 1210292152 6 79386 79407
|
||||
WRITE: 0 0 0 0 0 0 0 0
|
||||
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/net
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/net/dev
|
||||
Lines: 4
|
||||
Inter-| Receive | Transmit
|
||||
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||
lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26231/ns
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/ns/mnt
|
||||
SymlinkTo: mnt:[4026531840]
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/ns/net
|
||||
SymlinkTo: net:[4026531993]
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/stat
|
||||
Lines: 1
|
||||
26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26231/status
|
||||
Lines: 50
|
||||
Name: gvim
|
||||
State: S (sleeping)
|
||||
Tgid: 26231
|
||||
Ngid: 0
|
||||
Pid: 26231
|
||||
PPid: 6429
|
||||
TracerPid: 0
|
||||
Uid: 1000 1000 1000 1000
|
||||
Gid: 1000 1000 1000 1000
|
||||
FDSize: 64
|
||||
Groups: 999 1000 1001
|
||||
NStgid: 26231
|
||||
NSpid: 26231
|
||||
NSpgid: 26231
|
||||
NSsid: 26231
|
||||
VmPeak: 768040 kB
|
||||
VmSize: 713068 kB
|
||||
VmLck: 0 kB
|
||||
VmPin: 0 kB
|
||||
VmHWM: 30652 kB
|
||||
VmRSS: 30632 kB
|
||||
VmData: 377304 kB
|
||||
VmStk: 132 kB
|
||||
VmExe: 2712 kB
|
||||
VmLib: 69672 kB
|
||||
VmPTE: 632 kB
|
||||
VmPMD: 16 kB
|
||||
VmSwap: 0 kB
|
||||
HugetlbPages: 0 kB
|
||||
Threads: 4
|
||||
SigQ: 1/128288
|
||||
SigPnd: 0000000000000000
|
||||
ShdPnd: 0000000000000000
|
||||
SigBlk: 0000000000000000
|
||||
SigIgn: 0000000000003001
|
||||
SigCgt: 00000001ed824efe
|
||||
CapInh: 0000000000000000
|
||||
CapPrm: 0000000000000000
|
||||
CapEff: 0000000000000000
|
||||
CapBnd: 0000003fffffffff
|
||||
CapAmb: 0000000000000000
|
||||
Seccomp: 0
|
||||
Speculation_Store_Bypass: thread vulnerable
|
||||
Cpus_allowed: ffff
|
||||
Cpus_allowed_list: 0-15
|
||||
Mems_allowed: 00000000,00000001
|
||||
Mems_allowed_list: 0
|
||||
voluntary_ctxt_switches: 6340
|
||||
nonvoluntary_ctxt_switches: 361
|
||||
|
||||
Mode: 444
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26232
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/cmdline
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/comm
|
||||
Lines: 1
|
||||
ata_sff
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26232/fd
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/0
|
||||
SymlinkTo: ../../symlinktargets/abc
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/1
|
||||
SymlinkTo: ../../symlinktargets/def
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/2
|
||||
SymlinkTo: ../../symlinktargets/ghi
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/3
|
||||
SymlinkTo: ../../symlinktargets/uvw
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/fd/4
|
||||
SymlinkTo: ../../symlinktargets/xyz
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/limits
|
||||
Lines: 17
|
||||
Limit Soft Limit Hard Limit Units
|
||||
Max cpu time unlimited unlimited seconds
|
||||
Max file size unlimited unlimited bytes
|
||||
Max data size unlimited unlimited bytes
|
||||
Max stack size 8388608 unlimited bytes
|
||||
Max core file size 0 unlimited bytes
|
||||
Max resident set unlimited unlimited bytes
|
||||
Max processes 29436 29436 processes
|
||||
Max open files 1024 4096 files
|
||||
Max locked memory 65536 65536 bytes
|
||||
Max address space unlimited unlimited bytes
|
||||
Max file locks unlimited unlimited locks
|
||||
Max pending signals 29436 29436 signals
|
||||
Max msgqueue size 819200 819200 bytes
|
||||
Max nice priority 0 0
|
||||
Max realtime priority 0 0
|
||||
Max realtime timeout unlimited unlimited us
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26232/stat
|
||||
Lines: 1
|
||||
33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/26233
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/26233/cmdline
|
||||
Lines: 1
|
||||
com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/584
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/584/stat
|
||||
Lines: 2
|
||||
1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
|
||||
#!/bin/cat /proc/self/stat
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/short
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/short/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
Node 0, zone
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/sizemismatch
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/sizemismatch/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/buddyinfo/valid
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/buddyinfo/valid/buddyinfo
|
||||
Lines: 3
|
||||
Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
|
||||
Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
|
||||
Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/fs
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/fs/xfs
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/fs/xfs/stat
|
||||
Lines: 23
|
||||
extent_alloc 92447 97589 92448 93751
|
||||
abt 0 0 0 0
|
||||
blk_map 1767055 188820 184891 92447 92448 2140766 0
|
||||
bmbt 0 0 0 0
|
||||
dir 185039 92447 92444 136422
|
||||
trans 706 944304 0
|
||||
ig 185045 58807 0 126238 0 33637 22
|
||||
log 2883 113448 9 17360 739
|
||||
push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
|
||||
xstrat 92447 0
|
||||
rw 107739 94045
|
||||
attr 4 0 0 0
|
||||
icluster 8677 7849 135802
|
||||
vnodes 92601 0 0 0 92444 92444 92444 0
|
||||
buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
|
||||
abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
|
||||
abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
|
||||
bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
qm 0 0 0 0 0 0 0 0
|
||||
xpc 399724544 92823103 86219234
|
||||
debug 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/mdstat
|
||||
Lines: 26
|
||||
Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
|
||||
md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
|
||||
5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
|
||||
|
||||
md127 : active raid1 sdi2[0] sdj2[1]
|
||||
312319552 blocks [2/2] [UU]
|
||||
|
||||
md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
|
||||
248896 blocks [2/2] [UU]
|
||||
|
||||
md4 : inactive raid1 sda3[0] sdb3[1]
|
||||
4883648 blocks [2/2] [UU]
|
||||
|
||||
md6 : active raid1 sdb2[2] sda2[0]
|
||||
195310144 blocks [2/1] [U_]
|
||||
[=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||
|
||||
md8 : active raid1 sdb1[1] sda1[0]
|
||||
195310144 blocks [2/2] [UU]
|
||||
[=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
|
||||
|
||||
md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
|
||||
7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
|
||||
bitmap: 0/30 pages [0KB], 65536KB chunk
|
||||
|
||||
unused devices: <none>
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/net
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/dev
|
||||
Lines: 6
|
||||
Inter-| Receive | Transmit
|
||||
face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
|
||||
vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||
lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
|
||||
docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
|
||||
eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/ip_vs
|
||||
Lines: 21
|
||||
IP Virtual Server version 1.2.1 (size=4096)
|
||||
Prot LocalAddress:Port Scheduler Flags
|
||||
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
|
||||
TCP C0A80016:0CEA wlc
|
||||
-> C0A85216:0CEA Tunnel 100 248 2
|
||||
-> C0A85318:0CEA Tunnel 100 248 2
|
||||
-> C0A85315:0CEA Tunnel 100 248 1
|
||||
TCP C0A80039:0CEA wlc
|
||||
-> C0A85416:0CEA Tunnel 0 0 0
|
||||
-> C0A85215:0CEA Tunnel 100 1499 0
|
||||
-> C0A83215:0CEA Tunnel 100 1498 0
|
||||
TCP C0A80037:0CEA wlc
|
||||
-> C0A8321A:0CEA Tunnel 0 0 0
|
||||
-> C0A83120:0CEA Tunnel 100 0 0
|
||||
TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
|
||||
-> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
|
||||
FWM 10001000 wlc
|
||||
-> C0A8321A:0CEA Route 0 0 1
|
||||
-> C0A83215:0CEA Route 0 0 2
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/ip_vs_stats
|
||||
Lines: 6
|
||||
Total Incoming Outgoing Incoming Outgoing
|
||||
Conns Packets Packets Bytes Bytes
|
||||
16AA370 E33656E5 0 51D8C8883AB3 0
|
||||
|
||||
Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
|
||||
4 1FB3C 0 1282A8F 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/net/rpc
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/rpc/nfs
|
||||
Lines: 5
|
||||
net 18628 0 18628 6
|
||||
rpc 4329785 0 4338291
|
||||
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||
proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
|
||||
proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/rpc/nfsd
|
||||
Lines: 11
|
||||
rc 0 6 18622
|
||||
fh 0 0 0 0 0
|
||||
io 157286400 0
|
||||
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
|
||||
ra 32 0 0 0 0 0 0 0 0 0 0 0
|
||||
net 18628 0 18628 6
|
||||
rpc 18628 0 0 0 0
|
||||
proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
|
||||
proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
|
||||
proc4 2 2 10853
|
||||
proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/net/xfrm_stat
|
||||
Lines: 28
|
||||
XfrmInError 1
|
||||
XfrmInBufferError 2
|
||||
XfrmInHdrError 4
|
||||
XfrmInNoStates 3
|
||||
XfrmInStateProtoError 40
|
||||
XfrmInStateModeError 100
|
||||
XfrmInStateSeqError 6000
|
||||
XfrmInStateExpired 4
|
||||
XfrmInStateMismatch 23451
|
||||
XfrmInStateInvalid 55555
|
||||
XfrmInTmplMismatch 51
|
||||
XfrmInNoPols 65432
|
||||
XfrmInPolBlock 100
|
||||
XfrmInPolError 10000
|
||||
XfrmOutError 1000000
|
||||
XfrmOutBundleGenError 43321
|
||||
XfrmOutBundleCheckError 555
|
||||
XfrmOutNoStates 869
|
||||
XfrmOutStateProtoError 4542
|
||||
XfrmOutStateModeError 4
|
||||
XfrmOutStateSeqError 543
|
||||
XfrmOutStateExpired 565
|
||||
XfrmOutPolBlock 43456
|
||||
XfrmOutPolDead 7656
|
||||
XfrmOutPolError 1454
|
||||
XfrmFwdHdrError 6654
|
||||
XfrmOutStateInvalid 28765
|
||||
XfrmAcquireError 24532
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/self
|
||||
SymlinkTo: 26231
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/stat
|
||||
Lines: 16
|
||||
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
|
||||
cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
|
||||
cpu1 47869 23 16474 1110787 591 0 46 0 0 0
|
||||
cpu2 46504 36 15916 1112321 441 0 326 0 0 0
|
||||
cpu3 47054 102 15683 1113230 533 0 60 0 0 0
|
||||
cpu4 28413 25 10776 1140321 217 0 8 0 0 0
|
||||
cpu5 29271 101 11586 1136270 672 0 30 0 0 0
|
||||
cpu6 29152 36 10276 1139721 319 0 29 0 0 0
|
||||
cpu7 29098 268 10164 1139282 555 0 31 0 0 0
|
||||
intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
ctxt 38014093
|
||||
btime 1418183276
|
||||
processes 26442
|
||||
procs_running 2
|
||||
procs_blocked 1
|
||||
softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Directory: fixtures/symlinktargets
|
||||
Mode: 755
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/README
|
||||
Lines: 2
|
||||
This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
|
||||
They are otherwise ignored by the tests
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/abc
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/def
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/ghi
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/uvw
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
Path: fixtures/symlinktargets/xyz
|
||||
Lines: 0
|
||||
Mode: 644
|
||||
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|
||||
82
vendor/github.com/ncabatoff/procfs/fs.go
generated
vendored
Normal file
82
vendor/github.com/ncabatoff/procfs/fs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/ncabatoff/procfs/nfs"
|
||||
"github.com/ncabatoff/procfs/xfs"
|
||||
)
|
||||
|
||||
// FS represents the pseudo-filesystem proc, which provides an interface to
|
||||
// kernel data structures.
|
||||
type FS string
|
||||
|
||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||
const DefaultMountPoint = "/proc"
|
||||
|
||||
// NewFS returns a new FS mounted under the given mountPoint. It will error
|
||||
// if the mount point can't be read.
|
||||
func NewFS(mountPoint string) (FS, error) {
|
||||
info, err := os.Stat(mountPoint)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
|
||||
}
|
||||
if !info.IsDir() {
|
||||
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
|
||||
}
|
||||
|
||||
return FS(mountPoint), nil
|
||||
}
|
||||
|
||||
// Path returns the path of the given subsystem relative to the procfs root.
|
||||
func (fs FS) Path(p ...string) string {
|
||||
return path.Join(append([]string{string(fs)}, p...)...)
|
||||
}
|
||||
|
||||
// XFSStats retrieves XFS filesystem runtime statistics.
|
||||
func (fs FS) XFSStats() (*xfs.Stats, error) {
|
||||
f, err := os.Open(fs.Path("fs/xfs/stat"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return xfs.ParseStats(f)
|
||||
}
|
||||
|
||||
// NFSClientRPCStats retrieves NFS client RPC statistics.
|
||||
func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
|
||||
f, err := os.Open(fs.Path("net/rpc/nfs"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return nfs.ParseClientRPCStats(f)
|
||||
}
|
||||
|
||||
// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
|
||||
func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
|
||||
f, err := os.Open(fs.Path("net/rpc/nfsd"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return nfs.ParseServerRPCStats(f)
|
||||
}
|
||||
46
vendor/github.com/ncabatoff/procfs/internal/util/parse.go
generated
vendored
Normal file
46
vendor/github.com/ncabatoff/procfs/internal/util/parse.go
generated
vendored
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package util
|
||||
|
||||
import "strconv"
|
||||
|
||||
// ParseUint32s parses a slice of strings into a slice of uint32s.
|
||||
func ParseUint32s(ss []string) ([]uint32, error) {
|
||||
us := make([]uint32, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
u, err := strconv.ParseUint(s, 10, 32)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us = append(us, uint32(u))
|
||||
}
|
||||
|
||||
return us, nil
|
||||
}
|
||||
|
||||
// ParseUint64s parses a slice of strings into a slice of uint64s.
|
||||
func ParseUint64s(ss []string) ([]uint64, error) {
|
||||
us := make([]uint64, 0, len(ss))
|
||||
for _, s := range ss {
|
||||
u, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
us = append(us, u)
|
||||
}
|
||||
|
||||
return us, nil
|
||||
}
|
||||
259
vendor/github.com/ncabatoff/procfs/ipvs.go
generated
vendored
Normal file
259
vendor/github.com/ncabatoff/procfs/ipvs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
|
||||
type IPVSStats struct {
|
||||
// Total count of connections.
|
||||
Connections uint64
|
||||
// Total incoming packages processed.
|
||||
IncomingPackets uint64
|
||||
// Total outgoing packages processed.
|
||||
OutgoingPackets uint64
|
||||
// Total incoming traffic.
|
||||
IncomingBytes uint64
|
||||
// Total outgoing traffic.
|
||||
OutgoingBytes uint64
|
||||
}
|
||||
|
||||
// IPVSBackendStatus holds current metrics of one virtual / real address pair.
|
||||
type IPVSBackendStatus struct {
|
||||
// The local (virtual) IP address.
|
||||
LocalAddress net.IP
|
||||
// The remote (real) IP address.
|
||||
RemoteAddress net.IP
|
||||
// The local (virtual) port.
|
||||
LocalPort uint16
|
||||
// The remote (real) port.
|
||||
RemotePort uint16
|
||||
// The local firewall mark
|
||||
LocalMark string
|
||||
// The transport protocol (TCP, UDP).
|
||||
Proto string
|
||||
// The current number of active connections for this virtual/real address pair.
|
||||
ActiveConn uint64
|
||||
// The current number of inactive connections for this virtual/real address pair.
|
||||
InactConn uint64
|
||||
// The current weight of this virtual/real address pair.
|
||||
Weight uint64
|
||||
}
|
||||
|
||||
// NewIPVSStats reads the IPVS statistics.
|
||||
func NewIPVSStats() (IPVSStats, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
|
||||
return fs.NewIPVSStats()
|
||||
}
|
||||
|
||||
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
|
||||
func (fs FS) NewIPVSStats() (IPVSStats, error) {
|
||||
file, err := os.Open(fs.Path("net/ip_vs_stats"))
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseIPVSStats(file)
|
||||
}
|
||||
|
||||
// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
|
||||
func parseIPVSStats(file io.Reader) (IPVSStats, error) {
|
||||
var (
|
||||
statContent []byte
|
||||
statLines []string
|
||||
statFields []string
|
||||
stats IPVSStats
|
||||
)
|
||||
|
||||
statContent, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
|
||||
statLines = strings.SplitN(string(statContent), "\n", 4)
|
||||
if len(statLines) != 4 {
|
||||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
|
||||
}
|
||||
|
||||
statFields = strings.Fields(statLines[2])
|
||||
if len(statFields) != 5 {
|
||||
return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
|
||||
}
|
||||
|
||||
stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
|
||||
if err != nil {
|
||||
return IPVSStats{}, err
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
|
||||
func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return []IPVSBackendStatus{}, err
|
||||
}
|
||||
|
||||
return fs.NewIPVSBackendStatus()
|
||||
}
|
||||
|
||||
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
|
||||
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
|
||||
file, err := os.Open(fs.Path("net/ip_vs"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return parseIPVSBackendStatus(file)
|
||||
}
|
||||
|
||||
func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
|
||||
var (
|
||||
status []IPVSBackendStatus
|
||||
scanner = bufio.NewScanner(file)
|
||||
proto string
|
||||
localMark string
|
||||
localAddress net.IP
|
||||
localPort uint16
|
||||
err error
|
||||
)
|
||||
|
||||
for scanner.Scan() {
|
||||
fields := strings.Fields(scanner.Text())
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port":
|
||||
continue
|
||||
case fields[0] == "TCP" || fields[0] == "UDP":
|
||||
if len(fields) < 2 {
|
||||
continue
|
||||
}
|
||||
proto = fields[0]
|
||||
localMark = ""
|
||||
localAddress, localPort, err = parseIPPort(fields[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case fields[0] == "FWM":
|
||||
if len(fields) < 2 {
|
||||
continue
|
||||
}
|
||||
proto = fields[0]
|
||||
localMark = fields[1]
|
||||
localAddress = nil
|
||||
localPort = 0
|
||||
case fields[0] == "->":
|
||||
if len(fields) < 6 {
|
||||
continue
|
||||
}
|
||||
remoteAddress, remotePort, err := parseIPPort(fields[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
weight, err := strconv.ParseUint(fields[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
activeConn, err := strconv.ParseUint(fields[4], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inactConn, err := strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
status = append(status, IPVSBackendStatus{
|
||||
LocalAddress: localAddress,
|
||||
LocalPort: localPort,
|
||||
LocalMark: localMark,
|
||||
RemoteAddress: remoteAddress,
|
||||
RemotePort: remotePort,
|
||||
Proto: proto,
|
||||
Weight: weight,
|
||||
ActiveConn: activeConn,
|
||||
InactConn: inactConn,
|
||||
})
|
||||
}
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// parseIPPort decodes an IPVS "address:port" token. The address is either
// 8 hexadecimal characters (IPv4) for a total token length of 13, or a
// bracketed IPv6 literal for a total length of 46; the port is always the
// last 4 hexadecimal characters.
func parseIPPort(s string) (net.IP, uint16, error) {
	var ip net.IP

	switch len(s) {
	case 13:
		// IPv4: 8 hex digits, then ":pppp".
		decoded, err := hex.DecodeString(s[0:8])
		if err != nil {
			return nil, 0, err
		}
		ip = decoded
	case 46:
		// IPv6: "[addr]:pppp" with a fixed-width address field.
		if ip = net.ParseIP(s[1:40]); ip == nil {
			return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
		}
	default:
		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
	}

	portString := s[len(s)-4:]
	if len(portString) != 4 {
		return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
	}
	port, err := strconv.ParseUint(portString, 16, 16)
	if err != nil {
		return nil, 0, err
	}

	return ip, uint16(port), nil
}
|
||||
151
vendor/github.com/ncabatoff/procfs/mdstat.go
generated
vendored
Normal file
151
vendor/github.com/ncabatoff/procfs/mdstat.go
generated
vendored
Normal file
|
|
@ -0,0 +1,151 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
	// statuslineRE matches an md status line such as
	// "<blocks> blocks ... [<total>/<active>] [UU]"; capture groups are
	// blocks, disks-total, disks-active (in the order consumed by
	// evalStatusline).
	statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
	// buildlineRE captures the first number of a "(synced/total)" pair on a
	// recovery/resync progress line.
	buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
)

// MDStat holds info parsed from /proc/mdstat.
type MDStat struct {
	// Name of the device.
	Name string
	// activity-state of the device (third field of the device line).
	ActivityState string
	// Number of active disks.
	DisksActive int64
	// Total number of disks the device consists of.
	DisksTotal int64
	// Number of blocks the device holds.
	BlocksTotal int64
	// Number of blocks on the device that are in sync. Equals BlocksTotal
	// when the device is not currently recovering or resyncing.
	BlocksSynced int64
}
|
||||
|
||||
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
//
// Each md device occupies a small group of lines: a device line (name,
// state, ...), a status line at i+1, an optional bitmap line, and an
// optional sync-progress line. The loop keys off the device line and reads
// the following lines by offset, which is why the index i is needed.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
	mdStatusFilePath := fs.Path("mdstat")
	content, err := ioutil.ReadFile(mdStatusFilePath)
	if err != nil {
		return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
	}

	mdStates := []MDStat{}
	lines := strings.Split(string(content), "\n")
	for i, l := range lines {
		if l == "" {
			continue
		}
		// Continuation lines of a device group are indented; only device
		// lines start at column 0.
		if l[0] == ' ' {
			continue
		}
		// Skip the global header and trailer lines.
		if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
			continue
		}

		mainLine := strings.Split(l, " ")
		if len(mainLine) < 3 {
			return mdStates, fmt.Errorf("error parsing mdline: %s", l)
		}
		mdName := mainLine[0]
		activityState := mainLine[2]

		// The offsets below read up to lines[i+3]; ensure they exist.
		if len(lines) <= i+3 {
			return mdStates, fmt.Errorf(
				"error parsing %s: too few lines for md device %s",
				mdStatusFilePath,
				mdName,
			)
		}

		active, total, size, err := evalStatusline(lines[i+1])
		if err != nil {
			return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
		}

		// j is the line number of the syncing-line.
		j := i + 2
		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
			j = i + 3
		}

		// If device is syncing at the moment, get the number of currently
		// synced bytes, otherwise that number equals the size of the device.
		syncedBlocks := size
		if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
			syncedBlocks, err = evalBuildline(lines[j])
			if err != nil {
				return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
			}
		}

		mdStates = append(mdStates, MDStat{
			Name:          mdName,
			ActivityState: activityState,
			DisksActive:   active,
			DisksTotal:    total,
			BlocksTotal:   size,
			BlocksSynced:  syncedBlocks,
		})
	}

	return mdStates, nil
}
|
||||
|
||||
func evalStatusline(statusline string) (active, total, size int64, err error) {
|
||||
matches := statuslineRE.FindStringSubmatch(statusline)
|
||||
if len(matches) != 4 {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
|
||||
}
|
||||
|
||||
size, err = strconv.ParseInt(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
total, err = strconv.ParseInt(matches[2], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
active, err = strconv.ParseInt(matches[3], 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
|
||||
}
|
||||
|
||||
return active, total, size, nil
|
||||
}
|
||||
|
||||
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
|
||||
matches := buildlineRE.FindStringSubmatch(buildline)
|
||||
if len(matches) != 2 {
|
||||
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
|
||||
}
|
||||
|
||||
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
|
||||
}
|
||||
|
||||
return syncedBlocks, nil
|
||||
}
|
||||
606
vendor/github.com/ncabatoff/procfs/mountstats.go
generated
vendored
Normal file
606
vendor/github.com/ncabatoff/procfs/mountstats.go
generated
vendored
Normal file
|
|
@ -0,0 +1,606 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
// While implementing parsing of /proc/[pid]/mountstats, this blog was used
|
||||
// heavily as a reference:
|
||||
// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
|
||||
//
|
||||
// Special thanks to Chris Siebenmann for all of his posts explaining the
|
||||
// various statistics available for NFS.
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Constants shared between multiple functions.
const (
	// Number of fields in a "device ... mounted on ... with fstype ..." entry.
	deviceEntryLen = 8

	// Expected field counts for the "bytes:" and "events:" lines.
	fieldBytesLen  = 8
	fieldEventsLen = 27

	// Known values of the "statvers=" mount statistics version.
	statVersion10 = "1.0"
	statVersion11 = "1.1"

	// Expected "xprt:" field counts per stats version and transport protocol
	// (UDP lines lack the connection-related fields).
	fieldTransport10TCPLen = 10
	fieldTransport10UDPLen = 7

	fieldTransport11TCPLen = 13
	fieldTransport11UDPLen = 10
)
|
||||
|
||||
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
type Mount struct {
	// Name of the device.
	Device string
	// The mount point of the device.
	Mount string
	// The filesystem type used by the device.
	Type string
	// If available additional statistics related to this Mount.
	// Use a type assertion to determine if additional statistics are available.
	Stats MountStats
}

// A MountStats is a type which contains detailed statistics for a specific
// type of Mount. It is a marker interface; concrete statistics are accessed
// via type assertion (currently only *MountStatsNFS is produced).
type MountStats interface {
	mountStats()
}

// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
type MountStatsNFS struct {
	// The version of statistics provided ("1.0" or "1.1").
	StatVersion string
	// The age of the NFS mount.
	Age time.Duration
	// Statistics related to byte counters for various operations.
	Bytes NFSBytesStats
	// Statistics related to various NFS event occurrences.
	Events NFSEventsStats
	// Statistics broken down by filesystem operation.
	Operations []NFSOperationStats
	// Statistics about the NFS RPC transport.
	Transport NFSTransportStats
}

// mountStats implements MountStats.
func (m MountStatsNFS) mountStats() {}
|
||||
|
||||
// A NFSBytesStats contains statistics about the number of bytes read and written
// by an NFS client to and from an NFS server. Field order mirrors the
// "bytes:" line of /proc/[pid]/mountstats.
type NFSBytesStats struct {
	// Number of bytes read using the read() syscall.
	Read uint64
	// Number of bytes written using the write() syscall.
	Write uint64
	// Number of bytes read using the read() syscall in O_DIRECT mode.
	DirectRead uint64
	// Number of bytes written using the write() syscall in O_DIRECT mode.
	DirectWrite uint64
	// Number of bytes read from the NFS server, in total.
	ReadTotal uint64
	// Number of bytes written to the NFS server, in total.
	WriteTotal uint64
	// Number of pages read directly via mmap()'d files.
	ReadPages uint64
	// Number of pages written directly via mmap()'d files.
	WritePages uint64
}

// A NFSEventsStats contains statistics about NFS event occurrences. Field
// order mirrors the "events:" line of /proc/[pid]/mountstats.
type NFSEventsStats struct {
	// Number of times cached inode attributes are re-validated from the server.
	InodeRevalidate uint64
	// Number of times cached dentry nodes are re-validated from the server.
	DnodeRevalidate uint64
	// Number of times an inode cache is cleared.
	DataInvalidate uint64
	// Number of times cached inode attributes are invalidated.
	AttributeInvalidate uint64
	// Number of times files or directories have been open()'d.
	VFSOpen uint64
	// Number of times a directory lookup has occurred.
	VFSLookup uint64
	// Number of times permissions have been checked.
	VFSAccess uint64
	// Number of updates (and potential writes) to pages.
	VFSUpdatePage uint64
	// Number of pages read directly via mmap()'d files.
	VFSReadPage uint64
	// Number of times a group of pages have been read.
	VFSReadPages uint64
	// Number of pages written directly via mmap()'d files.
	VFSWritePage uint64
	// Number of times a group of pages have been written.
	VFSWritePages uint64
	// Number of times directory entries have been read with getdents().
	VFSGetdents uint64
	// Number of times attributes have been set on inodes.
	VFSSetattr uint64
	// Number of pending writes that have been forcefully flushed to the server.
	VFSFlush uint64
	// Number of times fsync() has been called on directories and files.
	VFSFsync uint64
	// Number of times locking has been attempted on a file.
	VFSLock uint64
	// Number of times files have been closed and released.
	VFSFileRelease uint64
	// Unknown. Possibly unused.
	CongestionWait uint64
	// Number of times files have been truncated.
	Truncation uint64
	// Number of times a file has been grown due to writes beyond its existing end.
	WriteExtension uint64
	// Number of times a file was removed while still open by another process.
	SillyRename uint64
	// Number of times the NFS server gave less data than expected while reading.
	ShortRead uint64
	// Number of times the NFS server wrote less data than expected while writing.
	ShortWrite uint64
	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
	// offline storage.
	JukeboxDelay uint64
	// Number of NFS v4.1+ pNFS reads.
	PNFSRead uint64
	// Number of NFS v4.1+ pNFS writes.
	PNFSWrite uint64
}
|
||||
|
||||
// A NFSOperationStats contains statistics for a single operation, parsed from
// one line of the per-op statistics section of /proc/[pid]/mountstats.
type NFSOperationStats struct {
	// The name of the operation.
	Operation string
	// Number of requests performed for this operation.
	Requests uint64
	// Number of times an actual RPC request has been transmitted for this operation.
	Transmissions uint64
	// Number of times a request has had a major timeout.
	MajorTimeouts uint64
	// Number of bytes sent for this operation, including RPC headers and payload.
	BytesSent uint64
	// Number of bytes received for this operation, including RPC headers and payload.
	BytesReceived uint64
	// Duration all requests spent queued for transmission before they were sent.
	CumulativeQueueTime time.Duration
	// Duration it took to get a reply back after the request was transmitted.
	CumulativeTotalResponseTime time.Duration
	// Duration from when a request was enqueued to when it was completely handled.
	CumulativeTotalRequestTime time.Duration
}

// A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses, parsed from the "xprt:" line of /proc/[pid]/mountstats.
type NFSTransportStats struct {
	// The transport protocol used for the NFS mount.
	Protocol string
	// The local port used for the NFS mount.
	Port uint64
	// Number of times the client has had to establish a connection from scratch
	// to the NFS server.
	Bind uint64
	// Number of times the client has made a TCP connection to the NFS server.
	Connect uint64
	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
	// spent waiting for connections to the server to be established.
	ConnectIdleTime uint64
	// Duration since the NFS mount last saw any RPC traffic.
	IdleTime time.Duration
	// Number of RPC requests for this mount sent to the NFS server.
	Sends uint64
	// Number of RPC responses for this mount received from the NFS server.
	Receives uint64
	// Number of times the NFS server sent a response with a transaction ID
	// unknown to this client.
	BadTransactionIDs uint64
	// A running counter, incremented on each request as the current difference
	// between sends and receives.
	CumulativeActiveRequests uint64
	// A running counter, incremented on each request by the current backlog
	// queue size.
	CumulativeBacklog uint64

	// Stats below only available with stat version 1.1; they remain zero for
	// version 1.0 mounts.

	// Maximum number of simultaneously active RPC requests ever used.
	MaximumRPCSlotsUsed uint64
	// A running counter, incremented on each request as the current size of the
	// sending queue.
	CumulativeSendingQueue uint64
	// A running counter, incremented on each request as the current size of the
	// pending queue.
	CumulativePendingQueue uint64
}
|
||||
|
||||
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
// of Mount structures containing detailed information about each mount.
// If available, statistics for each mount are parsed as well.
func parseMountStats(r io.Reader) ([]*Mount, error) {
	const (
		device            = "device"
		statVersionPrefix = "statvers="

		nfs3Type = "nfs"
		nfs4Type = "nfs4"
	)

	var mounts []*Mount

	s := bufio.NewScanner(r)
	for s.Scan() {
		// Only look for device entries in this function; the statistics
		// lines that follow a device entry are consumed by
		// parseMountStatsNFS, which shares the same scanner.
		ss := strings.Fields(string(s.Bytes()))
		if len(ss) == 0 || ss[0] != device {
			continue
		}

		m, err := parseMount(ss)
		if err != nil {
			return nil, err
		}

		// Does this mount also possess statistics information?
		if len(ss) > deviceEntryLen {
			// Only NFSv3 and v4 are supported for parsing statistics
			if m.Type != nfs3Type && m.Type != nfs4Type {
				return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
			}

			// Field 8 carries e.g. "statvers=1.1"; strip the prefix to get
			// the bare version string.
			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)

			stats, err := parseMountStatsNFS(s, statVersion)
			if err != nil {
				return nil, err
			}

			m.Stats = stats
		}

		mounts = append(mounts, m)
	}

	return mounts, s.Err()
}
|
||||
|
||||
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
|
||||
// device [device] mounted on [mount] with fstype [type]
|
||||
func parseMount(ss []string) (*Mount, error) {
|
||||
if len(ss) < deviceEntryLen {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
|
||||
// Check for specific words appearing at specific indices to ensure
|
||||
// the format is consistent with what we expect
|
||||
format := []struct {
|
||||
i int
|
||||
s string
|
||||
}{
|
||||
{i: 0, s: "device"},
|
||||
{i: 2, s: "mounted"},
|
||||
{i: 3, s: "on"},
|
||||
{i: 5, s: "with"},
|
||||
{i: 6, s: "fstype"},
|
||||
}
|
||||
|
||||
for _, f := range format {
|
||||
if ss[f.i] != f.s {
|
||||
return nil, fmt.Errorf("invalid device entry: %v", ss)
|
||||
}
|
||||
}
|
||||
|
||||
return &Mount{
|
||||
Device: ss[1],
|
||||
Mount: ss[4],
|
||||
Type: ss[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
// related to NFS statistics. It consumes lines from the shared scanner until
// it reaches the per-op section (which it hands off to
// parseNFSOperationStats) or a blank line.
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
	// Field indicators for parsing specific types of data
	const (
		fieldAge        = "age:"
		fieldBytes      = "bytes:"
		fieldEvents     = "events:"
		fieldPerOpStats = "per-op"
		fieldTransport  = "xprt:"
	)

	stats := &MountStatsNFS{
		StatVersion: statVersion,
	}

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		if len(ss) == 0 {
			// Blank line terminates this mount's statistics.
			break
		}
		if len(ss) < 2 {
			return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
		}

		switch ss[0] {
		case fieldAge:
			// Age integer is in seconds
			d, err := time.ParseDuration(ss[1] + "s")
			if err != nil {
				return nil, err
			}

			stats.Age = d
		case fieldBytes:
			bstats, err := parseNFSBytesStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Bytes = *bstats
		case fieldEvents:
			estats, err := parseNFSEventsStats(ss[1:])
			if err != nil {
				return nil, err
			}

			stats.Events = *estats
		case fieldTransport:
			if len(ss) < 3 {
				return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
			}

			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
			if err != nil {
				return nil, err
			}

			stats.Transport = *tstats
		}

		// When encountering "per-operation statistics", we must break this
		// loop and parse them separately to ensure we can terminate parsing
		// before reaching another device entry; hence why this 'if' statement
		// is not just another switch case
		if ss[0] == fieldPerOpStats {
			break
		}
	}

	if err := s.Err(); err != nil {
		return nil, err
	}

	// NFS per-operation stats appear last before the next device entry
	perOpStats, err := parseNFSOperationStats(s)
	if err != nil {
		return nil, err
	}

	stats.Operations = perOpStats

	return stats, nil
}
|
||||
|
||||
// parseNFSBytesStats parses a NFSBytesStats line using an input set of
|
||||
// integer fields.
|
||||
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
|
||||
if len(ss) != fieldBytesLen {
|
||||
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
|
||||
}
|
||||
|
||||
ns := make([]uint64, 0, fieldBytesLen)
|
||||
for _, s := range ss {
|
||||
n, err := strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns = append(ns, n)
|
||||
}
|
||||
|
||||
return &NFSBytesStats{
|
||||
Read: ns[0],
|
||||
Write: ns[1],
|
||||
DirectRead: ns[2],
|
||||
DirectWrite: ns[3],
|
||||
ReadTotal: ns[4],
|
||||
WriteTotal: ns[5],
|
||||
ReadPages: ns[6],
|
||||
WritePages: ns[7],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseNFSEventsStats parses a NFSEventsStats line using an input set of
// integer fields. The 27 counters are mapped to struct members strictly by
// position, matching the kernel's "events:" line ordering.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
	if len(ss) != fieldEventsLen {
		return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
	}

	ns := make([]uint64, 0, fieldEventsLen)
	for _, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns = append(ns, n)
	}

	return &NFSEventsStats{
		InodeRevalidate:     ns[0],
		DnodeRevalidate:     ns[1],
		DataInvalidate:      ns[2],
		AttributeInvalidate: ns[3],
		VFSOpen:             ns[4],
		VFSLookup:           ns[5],
		VFSAccess:           ns[6],
		VFSUpdatePage:       ns[7],
		VFSReadPage:         ns[8],
		VFSReadPages:        ns[9],
		VFSWritePage:        ns[10],
		VFSWritePages:       ns[11],
		VFSGetdents:         ns[12],
		VFSSetattr:          ns[13],
		VFSFlush:            ns[14],
		VFSFsync:            ns[15],
		VFSLock:             ns[16],
		VFSFileRelease:      ns[17],
		CongestionWait:      ns[18],
		Truncation:          ns[19],
		WriteExtension:      ns[20],
		SillyRename:         ns[21],
		ShortRead:           ns[22],
		ShortWrite:          ns[23],
		JukeboxDelay:        ns[24],
		PNFSRead:            ns[25],
		PNFSWrite:           ns[26],
	}, nil
}
|
||||
|
||||
// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
// additional information about per-operation statistics until an empty
// line is reached.
func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
	const (
		// Number of expected fields in each per-operation statistics set
		numFields = 9
	)

	var ops []NFSOperationStats

	for s.Scan() {
		ss := strings.Fields(string(s.Bytes()))
		if len(ss) == 0 {
			// Must break when reading a blank line after per-operation stats to
			// enable top-level function to parse the next device entry
			break
		}

		if len(ss) != numFields {
			return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
		}

		// Skip string operation name for integers
		ns := make([]uint64, 0, numFields-1)
		for _, st := range ss[1:] {
			n, err := strconv.ParseUint(st, 10, 64)
			if err != nil {
				return nil, err
			}

			ns = append(ns, n)
		}

		// The trailing three counters are millisecond totals; convert them to
		// time.Duration here.
		ops = append(ops, NFSOperationStats{
			Operation:                   strings.TrimSuffix(ss[0], ":"),
			Requests:                    ns[0],
			Transmissions:               ns[1],
			MajorTimeouts:               ns[2],
			BytesSent:                   ns[3],
			BytesReceived:               ns[4],
			CumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,
			CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
			CumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,
		})
	}

	return ops, s.Err()
}
|
||||
|
||||
// parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
	// Extract the protocol field. It is the only string value in the line
	protocol := ss[0]
	ss = ss[1:]

	// Validate the field count against the expected length for this
	// protocol/version combination before parsing any numbers.
	switch statVersion {
	case statVersion10:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport10TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport10UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
		}
	case statVersion11:
		var expectedLength int
		if protocol == "tcp" {
			expectedLength = fieldTransport11TCPLen
		} else if protocol == "udp" {
			expectedLength = fieldTransport11UDPLen
		} else {
			return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
		}
		if len(ss) != expectedLength {
			return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
		}
	default:
		return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
	}

	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
	// in a v1.0 response. Since the stat length is bigger for TCP stats, we use
	// the TCP length here.
	//
	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
	// only v1.0 stats are present.
	// See: https://github.com/prometheus/node_exporter/issues/571.
	ns := make([]uint64, fieldTransport11TCPLen)
	for i, s := range ss {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return nil, err
		}

		ns[i] = n
	}

	// The fields differ depending on the transport protocol (TCP or UDP)
	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
	//
	// For the udp RPC transport there is no connection count, connect idle time,
	// or idle time (fields #3, #4, and #5); all other fields are the same. So
	// we set them to 0 here.
	if protocol == "udp" {
		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
	}

	return &NFSTransportStats{
		Protocol:                 protocol,
		Port:                     ns[0],
		Bind:                     ns[1],
		Connect:                  ns[2],
		ConnectIdleTime:          ns[3],
		IdleTime:                 time.Duration(ns[4]) * time.Second,
		Sends:                    ns[5],
		Receives:                 ns[6],
		BadTransactionIDs:        ns[7],
		CumulativeActiveRequests: ns[8],
		CumulativeBacklog:        ns[9],
		MaximumRPCSlotsUsed:      ns[10],
		CumulativeSendingQueue:   ns[11],
		CumulativePendingQueue:   ns[12],
	}, nil
}
|
||||
216
vendor/github.com/ncabatoff/procfs/net_dev.go
generated
vendored
Normal file
216
vendor/github.com/ncabatoff/procfs/net_dev.go
generated
vendored
Normal file
|
|
@ -0,0 +1,216 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
|
||||
type NetDevLine struct {
|
||||
Name string `json:"name"` // The name of the interface.
|
||||
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
|
||||
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
|
||||
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
|
||||
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
|
||||
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
|
||||
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
|
||||
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
|
||||
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
|
||||
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
|
||||
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
|
||||
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
|
||||
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
|
||||
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
|
||||
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
|
||||
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
|
||||
}
|
||||
|
||||
// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
|
||||
// are interface names.
|
||||
type NetDev map[string]NetDevLine
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
// It builds an FS rooted at the package's DefaultMountPoint and delegates
// to FS.NewNetDev; use FS.NewNetDev directly for a non-default mount.
func NewNetDev() (NetDev, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return nil, err
	}

	return fs.NewNetDev()
}
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/net/dev.
// The file path is resolved relative to this FS's mount point.
func (fs FS) NewNetDev() (NetDev, error) {
	return newNetDev(fs.Path("net/dev"))
}
|
||||
|
||||
// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev,
// i.e. the per-process (network-namespace) view of the interface counters.
func (p Proc) NewNetDev() (NetDev, error) {
	return newNetDev(p.path("net/dev"))
}
|
||||
|
||||
// newNetDev creates a new NetDev from the contents of the given file.
|
||||
func newNetDev(file string) (NetDev, error) {
|
||||
f, err := os.Open(file)
|
||||
if err != nil {
|
||||
return NetDev{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
nd := NetDev{}
|
||||
s := bufio.NewScanner(f)
|
||||
for n := 0; s.Scan(); n++ {
|
||||
// Skip the 2 header lines.
|
||||
if n < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
line, err := nd.parseLine(s.Text())
|
||||
if err != nil {
|
||||
return nd, err
|
||||
}
|
||||
|
||||
nd[line.Name] = *line
|
||||
}
|
||||
|
||||
return nd, s.Err()
|
||||
}
|
||||
|
||||
// parseLine parses a single line from the /proc/net/dev file. Header lines
|
||||
// must be filtered prior to calling this method.
|
||||
func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
|
||||
parts := strings.SplitN(rawLine, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, errors.New("invalid net/dev line, missing colon")
|
||||
}
|
||||
fields := strings.Fields(strings.TrimSpace(parts[1]))
|
||||
|
||||
var err error
|
||||
line := &NetDevLine{}
|
||||
|
||||
// Interface Name
|
||||
line.Name = strings.TrimSpace(parts[0])
|
||||
if line.Name == "" {
|
||||
return nil, errors.New("invalid net/dev line, empty interface name")
|
||||
}
|
||||
|
||||
// RX
|
||||
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TX
|
||||
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return line, nil
|
||||
}
|
||||
|
||||
// Total aggregates the values across interfaces and returns a new NetDevLine.
|
||||
// The Name field will be a sorted comma separated list of interface names.
|
||||
func (nd NetDev) Total() NetDevLine {
|
||||
total := NetDevLine{}
|
||||
|
||||
names := make([]string, 0, len(nd))
|
||||
for _, ifc := range nd {
|
||||
names = append(names, ifc.Name)
|
||||
total.RxBytes += ifc.RxBytes
|
||||
total.RxPackets += ifc.RxPackets
|
||||
total.RxPackets += ifc.RxPackets
|
||||
total.RxErrors += ifc.RxErrors
|
||||
total.RxDropped += ifc.RxDropped
|
||||
total.RxFIFO += ifc.RxFIFO
|
||||
total.RxFrame += ifc.RxFrame
|
||||
total.RxCompressed += ifc.RxCompressed
|
||||
total.RxMulticast += ifc.RxMulticast
|
||||
total.TxBytes += ifc.TxBytes
|
||||
total.TxPackets += ifc.TxPackets
|
||||
total.TxErrors += ifc.TxErrors
|
||||
total.TxDropped += ifc.TxDropped
|
||||
total.TxFIFO += ifc.TxFIFO
|
||||
total.TxCollisions += ifc.TxCollisions
|
||||
total.TxCarrier += ifc.TxCarrier
|
||||
total.TxCompressed += ifc.TxCompressed
|
||||
}
|
||||
sort.Strings(names)
|
||||
total.Name = strings.Join(names, ", ")
|
||||
|
||||
return total
|
||||
}
|
||||
263
vendor/github.com/ncabatoff/procfs/nfs/nfs.go
generated
vendored
Normal file
263
vendor/github.com/ncabatoff/procfs/nfs/nfs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,263 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package nfs implements parsing of /proc/net/rpc/nfsd.
|
||||
// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
|
||||
package nfs
|
||||
|
||||
// ReplyCache models the "rc" line.
|
||||
type ReplyCache struct {
|
||||
Hits uint64
|
||||
Misses uint64
|
||||
NoCache uint64
|
||||
}
|
||||
|
||||
// FileHandles models the "fh" line.
|
||||
type FileHandles struct {
|
||||
Stale uint64
|
||||
TotalLookups uint64
|
||||
AnonLookups uint64
|
||||
DirNoCache uint64
|
||||
NoDirNoCache uint64
|
||||
}
|
||||
|
||||
// InputOutput models the "io" line.
|
||||
type InputOutput struct {
|
||||
Read uint64
|
||||
Write uint64
|
||||
}
|
||||
|
||||
// Threads models the "th" line.
|
||||
type Threads struct {
|
||||
Threads uint64
|
||||
FullCnt uint64
|
||||
}
|
||||
|
||||
// ReadAheadCache models the "ra" line.
|
||||
type ReadAheadCache struct {
|
||||
CacheSize uint64
|
||||
CacheHistogram []uint64
|
||||
NotFound uint64
|
||||
}
|
||||
|
||||
// Network models the "net" line.
|
||||
type Network struct {
|
||||
NetCount uint64
|
||||
UDPCount uint64
|
||||
TCPCount uint64
|
||||
TCPConnect uint64
|
||||
}
|
||||
|
||||
// ClientRPC models the nfs "rpc" line.
|
||||
type ClientRPC struct {
|
||||
RPCCount uint64
|
||||
Retransmissions uint64
|
||||
AuthRefreshes uint64
|
||||
}
|
||||
|
||||
// ServerRPC models the nfsd "rpc" line.
|
||||
type ServerRPC struct {
|
||||
RPCCount uint64
|
||||
BadCnt uint64
|
||||
BadFmt uint64
|
||||
BadAuth uint64
|
||||
BadcInt uint64
|
||||
}
|
||||
|
||||
// V2Stats models the "proc2" line.
|
||||
type V2Stats struct {
|
||||
Null uint64
|
||||
GetAttr uint64
|
||||
SetAttr uint64
|
||||
Root uint64
|
||||
Lookup uint64
|
||||
ReadLink uint64
|
||||
Read uint64
|
||||
WrCache uint64
|
||||
Write uint64
|
||||
Create uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
SymLink uint64
|
||||
MkDir uint64
|
||||
RmDir uint64
|
||||
ReadDir uint64
|
||||
FsStat uint64
|
||||
}
|
||||
|
||||
// V3Stats models the "proc3" line.
|
||||
type V3Stats struct {
|
||||
Null uint64
|
||||
GetAttr uint64
|
||||
SetAttr uint64
|
||||
Lookup uint64
|
||||
Access uint64
|
||||
ReadLink uint64
|
||||
Read uint64
|
||||
Write uint64
|
||||
Create uint64
|
||||
MkDir uint64
|
||||
SymLink uint64
|
||||
MkNod uint64
|
||||
Remove uint64
|
||||
RmDir uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
ReadDir uint64
|
||||
ReadDirPlus uint64
|
||||
FsStat uint64
|
||||
FsInfo uint64
|
||||
PathConf uint64
|
||||
Commit uint64
|
||||
}
|
||||
|
||||
// ClientV4Stats models the nfs "proc4" line.
|
||||
type ClientV4Stats struct {
|
||||
Null uint64
|
||||
Read uint64
|
||||
Write uint64
|
||||
Commit uint64
|
||||
Open uint64
|
||||
OpenConfirm uint64
|
||||
OpenNoattr uint64
|
||||
OpenDowngrade uint64
|
||||
Close uint64
|
||||
Setattr uint64
|
||||
FsInfo uint64
|
||||
Renew uint64
|
||||
SetClientID uint64
|
||||
SetClientIDConfirm uint64
|
||||
Lock uint64
|
||||
Lockt uint64
|
||||
Locku uint64
|
||||
Access uint64
|
||||
Getattr uint64
|
||||
Lookup uint64
|
||||
LookupRoot uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Link uint64
|
||||
Symlink uint64
|
||||
Create uint64
|
||||
Pathconf uint64
|
||||
StatFs uint64
|
||||
ReadLink uint64
|
||||
ReadDir uint64
|
||||
ServerCaps uint64
|
||||
DelegReturn uint64
|
||||
GetACL uint64
|
||||
SetACL uint64
|
||||
FsLocations uint64
|
||||
ReleaseLockowner uint64
|
||||
Secinfo uint64
|
||||
FsidPresent uint64
|
||||
ExchangeID uint64
|
||||
CreateSession uint64
|
||||
DestroySession uint64
|
||||
Sequence uint64
|
||||
GetLeaseTime uint64
|
||||
ReclaimComplete uint64
|
||||
LayoutGet uint64
|
||||
GetDeviceInfo uint64
|
||||
LayoutCommit uint64
|
||||
LayoutReturn uint64
|
||||
SecinfoNoName uint64
|
||||
TestStateID uint64
|
||||
FreeStateID uint64
|
||||
GetDeviceList uint64
|
||||
BindConnToSession uint64
|
||||
DestroyClientID uint64
|
||||
Seek uint64
|
||||
Allocate uint64
|
||||
DeAllocate uint64
|
||||
LayoutStats uint64
|
||||
Clone uint64
|
||||
}
|
||||
|
||||
// ServerV4Stats models the nfsd "proc4" line.
|
||||
type ServerV4Stats struct {
|
||||
Null uint64
|
||||
Compound uint64
|
||||
}
|
||||
|
||||
// V4Ops models the "proc4ops" line: NFSv4 operations
|
||||
// Variable list, see:
|
||||
// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
|
||||
// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
|
||||
// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
|
||||
type V4Ops struct {
|
||||
//Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
|
||||
Op0Unused uint64
|
||||
Op1Unused uint64
|
||||
Op2Future uint64
|
||||
Access uint64
|
||||
Close uint64
|
||||
Commit uint64
|
||||
Create uint64
|
||||
DelegPurge uint64
|
||||
DelegReturn uint64
|
||||
GetAttr uint64
|
||||
GetFH uint64
|
||||
Link uint64
|
||||
Lock uint64
|
||||
Lockt uint64
|
||||
Locku uint64
|
||||
Lookup uint64
|
||||
LookupRoot uint64
|
||||
Nverify uint64
|
||||
Open uint64
|
||||
OpenAttr uint64
|
||||
OpenConfirm uint64
|
||||
OpenDgrd uint64
|
||||
PutFH uint64
|
||||
PutPubFH uint64
|
||||
PutRootFH uint64
|
||||
Read uint64
|
||||
ReadDir uint64
|
||||
ReadLink uint64
|
||||
Remove uint64
|
||||
Rename uint64
|
||||
Renew uint64
|
||||
RestoreFH uint64
|
||||
SaveFH uint64
|
||||
SecInfo uint64
|
||||
SetAttr uint64
|
||||
Verify uint64
|
||||
Write uint64
|
||||
RelLockOwner uint64
|
||||
}
|
||||
|
||||
// ClientRPCStats models all stats from /proc/net/rpc/nfs.
|
||||
type ClientRPCStats struct {
|
||||
Network Network
|
||||
ClientRPC ClientRPC
|
||||
V2Stats V2Stats
|
||||
V3Stats V3Stats
|
||||
ClientV4Stats ClientV4Stats
|
||||
}
|
||||
|
||||
// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
|
||||
type ServerRPCStats struct {
|
||||
ReplyCache ReplyCache
|
||||
FileHandles FileHandles
|
||||
InputOutput InputOutput
|
||||
Threads Threads
|
||||
ReadAheadCache ReadAheadCache
|
||||
Network Network
|
||||
ServerRPC ServerRPC
|
||||
V2Stats V2Stats
|
||||
V3Stats V3Stats
|
||||
ServerV4Stats ServerV4Stats
|
||||
V4Ops V4Ops
|
||||
}
|
||||
317
vendor/github.com/ncabatoff/procfs/nfs/parse.go
generated
vendored
Normal file
317
vendor/github.com/ncabatoff/procfs/nfs/parse.go
generated
vendored
Normal file
|
|
@ -0,0 +1,317 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func parseReplyCache(v []uint64) (ReplyCache, error) {
|
||||
if len(v) != 3 {
|
||||
return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
|
||||
}
|
||||
|
||||
return ReplyCache{
|
||||
Hits: v[0],
|
||||
Misses: v[1],
|
||||
NoCache: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseFileHandles(v []uint64) (FileHandles, error) {
|
||||
if len(v) != 5 {
|
||||
return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v)
|
||||
}
|
||||
|
||||
return FileHandles{
|
||||
Stale: v[0],
|
||||
TotalLookups: v[1],
|
||||
AnonLookups: v[2],
|
||||
DirNoCache: v[3],
|
||||
NoDirNoCache: v[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseInputOutput(v []uint64) (InputOutput, error) {
|
||||
if len(v) != 2 {
|
||||
return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
|
||||
}
|
||||
|
||||
return InputOutput{
|
||||
Read: v[0],
|
||||
Write: v[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseThreads(v []uint64) (Threads, error) {
|
||||
if len(v) != 2 {
|
||||
return Threads{}, fmt.Errorf("invalid Threads line %q", v)
|
||||
}
|
||||
|
||||
return Threads{
|
||||
Threads: v[0],
|
||||
FullCnt: v[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseReadAheadCache parses the "ra" values into a ReadAheadCache:
// the cache size, a 10-bucket depth histogram, and the not-found count.
// Exactly twelve values are expected.
func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
	if len(v) != 12 {
		return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
	}

	return ReadAheadCache{
		CacheSize: v[0],
		// NOTE(review): this sub-slice aliases the caller's slice rather
		// than copying the 10 histogram buckets — fine as long as callers
		// pass a freshly parsed slice; confirm before reusing buffers.
		CacheHistogram: v[1:11],
		NotFound:       v[11],
	}, nil
}
|
||||
|
||||
func parseNetwork(v []uint64) (Network, error) {
|
||||
if len(v) != 4 {
|
||||
return Network{}, fmt.Errorf("invalid Network line %q", v)
|
||||
}
|
||||
|
||||
return Network{
|
||||
NetCount: v[0],
|
||||
UDPCount: v[1],
|
||||
TCPCount: v[2],
|
||||
TCPConnect: v[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parseServerRPC parses the nfsd "rpc" values into a ServerRPC: the total
// call count plus four bad-call counters. Exactly five values are expected.
// NOTE(review): BadcInt presumably corresponds to the kernel's "badclnt"
// counter — confirm against the nfsd stats source.
func parseServerRPC(v []uint64) (ServerRPC, error) {
	if len(v) != 5 {
		return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
	}

	return ServerRPC{
		RPCCount: v[0],
		BadCnt:   v[1],
		BadFmt:   v[2],
		BadAuth:  v[3],
		BadcInt:  v[4],
	}, nil
}
|
||||
|
||||
func parseClientRPC(v []uint64) (ClientRPC, error) {
|
||||
if len(v) != 3 {
|
||||
return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
|
||||
}
|
||||
|
||||
return ClientRPC{
|
||||
RPCCount: v[0],
|
||||
Retransmissions: v[1],
|
||||
AuthRefreshes: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV2Stats(v []uint64) (V2Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 18 {
|
||||
return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
|
||||
}
|
||||
|
||||
return V2Stats{
|
||||
Null: v[1],
|
||||
GetAttr: v[2],
|
||||
SetAttr: v[3],
|
||||
Root: v[4],
|
||||
Lookup: v[5],
|
||||
ReadLink: v[6],
|
||||
Read: v[7],
|
||||
WrCache: v[8],
|
||||
Write: v[9],
|
||||
Create: v[10],
|
||||
Remove: v[11],
|
||||
Rename: v[12],
|
||||
Link: v[13],
|
||||
SymLink: v[14],
|
||||
MkDir: v[15],
|
||||
RmDir: v[16],
|
||||
ReadDir: v[17],
|
||||
FsStat: v[18],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV3Stats(v []uint64) (V3Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 22 {
|
||||
return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
|
||||
}
|
||||
|
||||
return V3Stats{
|
||||
Null: v[1],
|
||||
GetAttr: v[2],
|
||||
SetAttr: v[3],
|
||||
Lookup: v[4],
|
||||
Access: v[5],
|
||||
ReadLink: v[6],
|
||||
Read: v[7],
|
||||
Write: v[8],
|
||||
Create: v[9],
|
||||
MkDir: v[10],
|
||||
SymLink: v[11],
|
||||
MkNod: v[12],
|
||||
Remove: v[13],
|
||||
RmDir: v[14],
|
||||
Rename: v[15],
|
||||
Link: v[16],
|
||||
ReadDir: v[17],
|
||||
ReadDirPlus: v[18],
|
||||
FsStat: v[19],
|
||||
FsInfo: v[20],
|
||||
PathConf: v[21],
|
||||
Commit: v[22],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values {
|
||||
return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
|
||||
}
|
||||
|
||||
// This function currently supports mapping 59 NFS v4 client stats. Older
|
||||
// kernels may emit fewer stats, so we must detect this and pad out the
|
||||
// values to match the expected slice size.
|
||||
if values < 59 {
|
||||
newValues := make([]uint64, 60)
|
||||
copy(newValues, v)
|
||||
v = newValues
|
||||
}
|
||||
|
||||
return ClientV4Stats{
|
||||
Null: v[1],
|
||||
Read: v[2],
|
||||
Write: v[3],
|
||||
Commit: v[4],
|
||||
Open: v[5],
|
||||
OpenConfirm: v[6],
|
||||
OpenNoattr: v[7],
|
||||
OpenDowngrade: v[8],
|
||||
Close: v[9],
|
||||
Setattr: v[10],
|
||||
FsInfo: v[11],
|
||||
Renew: v[12],
|
||||
SetClientID: v[13],
|
||||
SetClientIDConfirm: v[14],
|
||||
Lock: v[15],
|
||||
Lockt: v[16],
|
||||
Locku: v[17],
|
||||
Access: v[18],
|
||||
Getattr: v[19],
|
||||
Lookup: v[20],
|
||||
LookupRoot: v[21],
|
||||
Remove: v[22],
|
||||
Rename: v[23],
|
||||
Link: v[24],
|
||||
Symlink: v[25],
|
||||
Create: v[26],
|
||||
Pathconf: v[27],
|
||||
StatFs: v[28],
|
||||
ReadLink: v[29],
|
||||
ReadDir: v[30],
|
||||
ServerCaps: v[31],
|
||||
DelegReturn: v[32],
|
||||
GetACL: v[33],
|
||||
SetACL: v[34],
|
||||
FsLocations: v[35],
|
||||
ReleaseLockowner: v[36],
|
||||
Secinfo: v[37],
|
||||
FsidPresent: v[38],
|
||||
ExchangeID: v[39],
|
||||
CreateSession: v[40],
|
||||
DestroySession: v[41],
|
||||
Sequence: v[42],
|
||||
GetLeaseTime: v[43],
|
||||
ReclaimComplete: v[44],
|
||||
LayoutGet: v[45],
|
||||
GetDeviceInfo: v[46],
|
||||
LayoutCommit: v[47],
|
||||
LayoutReturn: v[48],
|
||||
SecinfoNoName: v[49],
|
||||
TestStateID: v[50],
|
||||
FreeStateID: v[51],
|
||||
GetDeviceList: v[52],
|
||||
BindConnToSession: v[53],
|
||||
DestroyClientID: v[54],
|
||||
Seek: v[55],
|
||||
Allocate: v[56],
|
||||
DeAllocate: v[57],
|
||||
LayoutStats: v[58],
|
||||
Clone: v[59],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values != 2 {
|
||||
return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
|
||||
}
|
||||
|
||||
return ServerV4Stats{
|
||||
Null: v[1],
|
||||
Compound: v[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseV4Ops(v []uint64) (V4Ops, error) {
|
||||
values := int(v[0])
|
||||
if len(v[1:]) != values || values < 39 {
|
||||
return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
|
||||
}
|
||||
|
||||
stats := V4Ops{
|
||||
Op0Unused: v[1],
|
||||
Op1Unused: v[2],
|
||||
Op2Future: v[3],
|
||||
Access: v[4],
|
||||
Close: v[5],
|
||||
Commit: v[6],
|
||||
Create: v[7],
|
||||
DelegPurge: v[8],
|
||||
DelegReturn: v[9],
|
||||
GetAttr: v[10],
|
||||
GetFH: v[11],
|
||||
Link: v[12],
|
||||
Lock: v[13],
|
||||
Lockt: v[14],
|
||||
Locku: v[15],
|
||||
Lookup: v[16],
|
||||
LookupRoot: v[17],
|
||||
Nverify: v[18],
|
||||
Open: v[19],
|
||||
OpenAttr: v[20],
|
||||
OpenConfirm: v[21],
|
||||
OpenDgrd: v[22],
|
||||
PutFH: v[23],
|
||||
PutPubFH: v[24],
|
||||
PutRootFH: v[25],
|
||||
Read: v[26],
|
||||
ReadDir: v[27],
|
||||
ReadLink: v[28],
|
||||
Remove: v[29],
|
||||
Rename: v[30],
|
||||
Renew: v[31],
|
||||
RestoreFH: v[32],
|
||||
SaveFH: v[33],
|
||||
SecInfo: v[34],
|
||||
SetAttr: v[35],
|
||||
Verify: v[36],
|
||||
Write: v[37],
|
||||
RelLockOwner: v[38],
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
67
vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go
generated
vendored
Normal file
67
vendor/github.com/ncabatoff/procfs/nfs/parse_nfs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ncabatoff/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
|
||||
func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
|
||||
stats := &ClientRPCStats{}
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(scanner.Text())
|
||||
// require at least <key> <value>
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("invalid NFS metric line %q", line)
|
||||
}
|
||||
|
||||
values, err := util.ParseUint64s(parts[1:])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
|
||||
}
|
||||
|
||||
switch metricLine := parts[0]; metricLine {
|
||||
case "net":
|
||||
stats.Network, err = parseNetwork(values)
|
||||
case "rpc":
|
||||
stats.ClientRPC, err = parseClientRPC(values)
|
||||
case "proc2":
|
||||
stats.V2Stats, err = parseV2Stats(values)
|
||||
case "proc3":
|
||||
stats.V3Stats, err = parseV3Stats(values)
|
||||
case "proc4":
|
||||
stats.ClientV4Stats, err = parseClientV4Stats(values)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error scanning NFS file: %s", err)
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
89
vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go
generated
vendored
Normal file
89
vendor/github.com/ncabatoff/procfs/nfs/parse_nfsd.go
generated
vendored
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package nfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ncabatoff/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
	stats := &ServerRPCStats{}

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := scanner.Text()
		parts := strings.Fields(scanner.Text())
		// require at least <key> <value>
		if len(parts) < 2 {
			return nil, fmt.Errorf("invalid NFSd metric line %q", line)
		}
		label := parts[0]

		var values []uint64
		var err error
		if label == "th" {
			if len(parts) < 3 {
				return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
			}
			// Only the first two "th" values (thread count, full count)
			// are parsed as integers; the remaining fields are dropped.
			// NOTE(review): presumably because they are fractional
			// histogram values that ParseUint64s would reject — confirm.
			values, err = util.ParseUint64s(parts[1:3])
		} else {
			values, err = util.ParseUint64s(parts[1:])
		}
		if err != nil {
			return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
		}

		// Dispatch on the line label; each label owns one stats section.
		switch metricLine := parts[0]; metricLine {
		case "rc":
			stats.ReplyCache, err = parseReplyCache(values)
		case "fh":
			stats.FileHandles, err = parseFileHandles(values)
		case "io":
			stats.InputOutput, err = parseInputOutput(values)
		case "th":
			stats.Threads, err = parseThreads(values)
		case "ra":
			stats.ReadAheadCache, err = parseReadAheadCache(values)
		case "net":
			stats.Network, err = parseNetwork(values)
		case "rpc":
			stats.ServerRPC, err = parseServerRPC(values)
		case "proc2":
			stats.V2Stats, err = parseV2Stats(values)
		case "proc3":
			stats.V3Stats, err = parseV3Stats(values)
		case "proc4":
			stats.ServerV4Stats, err = parseServerV4Stats(values)
		case "proc4ops":
			stats.V4Ops, err = parseV4Ops(values)
		default:
			return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
		}
		if err != nil {
			return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
		}
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error scanning NFSd file: %s", err)
	}

	return stats, nil
}
|
||||
259
vendor/github.com/ncabatoff/procfs/proc.go
generated
vendored
Normal file
259
vendor/github.com/ncabatoff/procfs/proc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,259 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Proc provides information about a running process.
type Proc struct {
	// The process ID.
	PID int

	// fs is the procfs root this process was read from; presumably used
	// to resolve per-process paths such as /proc/<pid>/cmdline.
	fs FS
}
|
||||
|
||||
// Procs represents a list of Proc structs.
|
||||
type Procs []Proc
|
||||
|
||||
func (p Procs) Len() int { return len(p) }
|
||||
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
|
||||
|
||||
// Self returns a process for the current process read via /proc/self.
// It builds an FS at the default mount point and delegates to FS.Self.
func Self() (Proc, error) {
	fs, err := NewFS(DefaultMountPoint)
	if err != nil {
		return Proc{}, err
	}
	return fs.Self()
}
|
||||
|
||||
// NewProc returns a process for the given pid under /proc.
|
||||
func NewProc(pid int) (Proc, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
return fs.NewProc(pid)
|
||||
}
|
||||
|
||||
// AllProcs returns a list of all currently available processes under /proc.
|
||||
func AllProcs() (Procs, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return Procs{}, err
|
||||
}
|
||||
return fs.AllProcs()
|
||||
}
|
||||
|
||||
// Self returns a process for the current process.
|
||||
func (fs FS) Self() (Proc, error) {
|
||||
p, err := os.Readlink(fs.Path("self"))
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
return fs.NewProc(pid)
|
||||
}
|
||||
|
||||
// NewProc returns a process for the given pid.
|
||||
func (fs FS) NewProc(pid int) (Proc, error) {
|
||||
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
return Proc{PID: pid, fs: fs}, nil
|
||||
}
|
||||
|
||||
// AllProcs returns a list of all currently available processes.
|
||||
func (fs FS) AllProcs() (Procs, error) {
|
||||
d, err := os.Open(fs.Path())
|
||||
if err != nil {
|
||||
return Procs{}, err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
|
||||
}
|
||||
|
||||
p := Procs{}
|
||||
for _, n := range names {
|
||||
pid, err := strconv.ParseInt(n, 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p = append(p, Proc{PID: int(pid), fs: fs})
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// CmdLine returns the command line of a process.
|
||||
func (p Proc) CmdLine() ([]string, error) {
|
||||
f, err := os.Open(p.path("cmdline"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(data) < 1 {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
|
||||
}
|
||||
|
||||
// Wchan returns the wchan (wait channel) of a process.
|
||||
func (p Proc) Wchan() (string, error) {
|
||||
f, err := os.Open(p.path("wchan"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
wchan := string(data)
|
||||
if wchan == "" || wchan == "0" {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return wchan, nil
|
||||
}
|
||||
|
||||
// Comm returns the command name of a process.
|
||||
func (p Proc) Comm() (string, error) {
|
||||
f, err := os.Open(p.path("comm"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(data)), nil
|
||||
}
|
||||
|
||||
// Executable returns the absolute path of the executable command of a process.
|
||||
func (p Proc) Executable() (string, error) {
|
||||
exe, err := os.Readlink(p.path("exe"))
|
||||
if os.IsNotExist(err) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return exe, err
|
||||
}
|
||||
|
||||
// FileDescriptors returns the currently open file descriptors of a process.
|
||||
func (p Proc) FileDescriptors() ([]uintptr, error) {
|
||||
names, err := p.fileDescriptors()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fds := make([]uintptr, len(names))
|
||||
for i, n := range names {
|
||||
fd, err := strconv.ParseInt(n, 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
|
||||
}
|
||||
fds[i] = uintptr(fd)
|
||||
}
|
||||
|
||||
return fds, nil
|
||||
}
|
||||
|
||||
// FileDescriptorTargets returns the targets of all file descriptors of a process.
|
||||
// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
|
||||
func (p Proc) FileDescriptorTargets() ([]string, error) {
|
||||
names, err := p.fileDescriptors()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
targets := make([]string, len(names))
|
||||
|
||||
for i, name := range names {
|
||||
target, err := os.Readlink(p.path("fd", name))
|
||||
if err == nil {
|
||||
targets[i] = target
|
||||
}
|
||||
}
|
||||
|
||||
return targets, nil
|
||||
}
|
||||
|
||||
// FileDescriptorsLen returns the number of currently open file descriptors of
|
||||
// a process.
|
||||
func (p Proc) FileDescriptorsLen() (int, error) {
|
||||
fds, err := p.fileDescriptors()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(fds), nil
|
||||
}
|
||||
|
||||
// MountStats retrieves statistics and configuration for mount points in a
|
||||
// process's namespace.
|
||||
func (p Proc) MountStats() ([]*Mount, error) {
|
||||
f, err := os.Open(p.path("mountstats"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseMountStats(f)
|
||||
}
|
||||
|
||||
func (p Proc) fileDescriptors() ([]string, error) {
|
||||
d, err := os.Open(p.path("fd"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
|
||||
}
|
||||
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (p Proc) path(pa ...string) string {
|
||||
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
||||
}
|
||||
65
vendor/github.com/ncabatoff/procfs/proc_io.go
generated
vendored
Normal file
65
vendor/github.com/ncabatoff/procfs/proc_io.go
generated
vendored
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ProcIO models the content of /proc/<pid>/io.
// Field order mirrors the file's key order; see NewIO for the scan format.
type ProcIO struct {
	// Chars read.
	RChar uint64
	// Chars written.
	WChar uint64
	// Read syscalls.
	SyscR uint64
	// Write syscalls.
	SyscW uint64
	// Bytes read.
	ReadBytes uint64
	// Bytes written.
	WriteBytes uint64
	// Bytes written, but taking into account truncation. See
	// Documentation/filesystems/proc.txt in the kernel sources for
	// detailed explanation.
	CancelledWriteBytes int64
}
|
||||
|
||||
// NewIO creates a new ProcIO instance from a given Proc instance.
|
||||
func (p Proc) NewIO() (ProcIO, error) {
|
||||
pio := ProcIO{}
|
||||
|
||||
f, err := os.Open(p.path("io"))
|
||||
if err != nil {
|
||||
return pio, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
data, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return pio, err
|
||||
}
|
||||
|
||||
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
|
||||
"read_bytes: %d\nwrite_bytes: %d\n" +
|
||||
"cancelled_write_bytes: %d\n"
|
||||
|
||||
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
|
||||
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
|
||||
|
||||
return pio, err
|
||||
}
|
||||
150
vendor/github.com/ncabatoff/procfs/proc_limits.go
generated
vendored
Normal file
150
vendor/github.com/ncabatoff/procfs/proc_limits.go
generated
vendored
Normal file
|
|
@ -0,0 +1,150 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ProcLimits represents the soft limits for each of the process's resource
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
//
// A field value of -1 means the resource is unlimited (see parseInt).
type ProcLimits struct {
	// CPU time limit in seconds.
	CPUTime int64
	// Maximum size of files that the process may create.
	FileSize int64
	// Maximum size of the process's data segment (initialized data,
	// uninitialized data, and heap).
	DataSize int64
	// Maximum size of the process stack in bytes.
	StackSize int64
	// Maximum size of a core file.
	CoreFileSize int64
	// Limit of the process's resident set in pages.
	ResidentSet int64
	// Maximum number of processes that can be created for the real user ID of
	// the calling process.
	Processes int64
	// Value one greater than the maximum file descriptor number that can be
	// opened by this process.
	OpenFiles int64
	// Maximum number of bytes of memory that may be locked into RAM.
	LockedMemory int64
	// Maximum size of the process's virtual memory address space in bytes.
	AddressSpace int64
	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
	// this process may establish.
	FileLocks int64
	// Limit of signals that may be queued for the real user ID of the calling
	// process.
	PendingSignals int64
	// Limit on the number of bytes that can be allocated for POSIX message
	// queues for the real user ID of the calling process.
	MsqqueueSize int64
	// Limit of the nice priority set using setpriority(2) or nice(2).
	NicePriority int64
	// Limit of the real-time priority set using sched_setscheduler(2) or
	// sched_setparam(2).
	RealtimePriority int64
	// Limit (in microseconds) on the amount of CPU time that a process
	// scheduled under a real-time scheduling policy may consume without making
	// a blocking system call.
	RealtimeTimeout int64
}
|
||||
|
||||
const (
	// limitsFields is the maximum number of columns a limits line is split
	// into: resource name, soft limit, and the remainder of the line.
	limitsFields = 3
	// limitsUnlimited is the literal the kernel prints for an unbounded
	// resource; parseInt maps it to -1.
	limitsUnlimited = "unlimited"
)
|
||||
|
||||
var (
	// limitsDelimiter splits a limits line into columns. Columns are
	// separated by runs of two or more spaces, so that multi-word resource
	// names such as "Max cpu time" (single internal spaces) stay intact in
	// the first field. A single-space pattern would split the name apart and
	// make every switch case in NewLimits miss, leaving all limits zero.
	limitsDelimiter = regexp.MustCompile("  +")
)
|
||||
|
||||
// NewLimits returns the current soft limits of the process.
func (p Proc) NewLimits() (ProcLimits, error) {
	f, err := os.Open(p.path("limits"))
	if err != nil {
		return ProcLimits{}, err
	}
	defer f.Close()

	var (
		l = ProcLimits{}
		s = bufio.NewScanner(f)
	)
	for s.Scan() {
		// Split each line into at most limitsFields columns so everything
		// after the soft limit stays lumped into the last field.
		// NOTE(review): this relies on limitsDelimiter leaving multi-word
		// resource names like "Max cpu time" intact in fields[0] — verify
		// the delimiter does not split on the single spaces inside names.
		fields := limitsDelimiter.Split(s.Text(), limitsFields)
		if len(fields) != limitsFields {
			return ProcLimits{}, fmt.Errorf(
				"couldn't parse %s line %s", f.Name(), s.Text())
		}

		// The header line and any unrecognized resources match no case and
		// are silently skipped.
		switch fields[0] {
		case "Max cpu time":
			l.CPUTime, err = parseInt(fields[1])
		case "Max file size":
			l.FileSize, err = parseInt(fields[1])
		case "Max data size":
			l.DataSize, err = parseInt(fields[1])
		case "Max stack size":
			l.StackSize, err = parseInt(fields[1])
		case "Max core file size":
			l.CoreFileSize, err = parseInt(fields[1])
		case "Max resident set":
			l.ResidentSet, err = parseInt(fields[1])
		case "Max processes":
			l.Processes, err = parseInt(fields[1])
		case "Max open files":
			l.OpenFiles, err = parseInt(fields[1])
		case "Max locked memory":
			l.LockedMemory, err = parseInt(fields[1])
		case "Max address space":
			l.AddressSpace, err = parseInt(fields[1])
		case "Max file locks":
			l.FileLocks, err = parseInt(fields[1])
		case "Max pending signals":
			l.PendingSignals, err = parseInt(fields[1])
		case "Max msgqueue size":
			l.MsqqueueSize, err = parseInt(fields[1])
		case "Max nice priority":
			l.NicePriority, err = parseInt(fields[1])
		case "Max realtime priority":
			l.RealtimePriority, err = parseInt(fields[1])
		case "Max realtime timeout":
			l.RealtimeTimeout, err = parseInt(fields[1])
		}
		// A parse failure from any case above aborts the whole read.
		if err != nil {
			return ProcLimits{}, err
		}
	}

	return l, s.Err()
}
|
||||
|
||||
func parseInt(s string) (int64, error) {
|
||||
if s == limitsUnlimited {
|
||||
return -1, nil
|
||||
}
|
||||
i, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
68
vendor/github.com/ncabatoff/procfs/proc_ns.go
generated
vendored
Normal file
68
vendor/github.com/ncabatoff/procfs/proc_ns.go
generated
vendored
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Namespace represents a single namespace of a process.
type Namespace struct {
	Type  string // Namespace type.
	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
}

// Namespaces contains all of the namespaces that the process is contained in.
// Keys are the entry names under /proc/[pid]/ns (see NewNamespaces).
type Namespaces map[string]Namespace
|
||||
|
||||
// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the
|
||||
// process is a member.
|
||||
func (p Proc) NewNamespaces() (Namespaces, error) {
|
||||
d, err := os.Open(p.path("ns"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer d.Close()
|
||||
|
||||
names, err := d.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
|
||||
}
|
||||
|
||||
ns := make(Namespaces, len(names))
|
||||
for _, name := range names {
|
||||
target, err := os.Readlink(p.path("ns", name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fields := strings.SplitN(target, ":", 2)
|
||||
if len(fields) != 2 {
|
||||
return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
|
||||
}
|
||||
|
||||
typ := fields[0]
|
||||
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
|
||||
}
|
||||
|
||||
ns[name] = Namespace{typ, uint32(inode)}
|
||||
}
|
||||
|
||||
return ns, nil
|
||||
}
|
||||
188
vendor/github.com/ncabatoff/procfs/proc_stat.go
generated
vendored
Normal file
188
vendor/github.com/ncabatoff/procfs/proc_stat.go
generated
vendored
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// userHZ is the assumed USER_HZ value used to convert clock ticks to seconds.
//
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic. After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
// - https://github.com/prometheus/node_exporter/issues/52
// - https://github.com/prometheus/procfs/pull/2
// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
const userHZ = 100
|
||||
|
||||
// ProcStat provides status information about the process,
// read from /proc/[pid]/stat.
type ProcStat struct {
	// The process ID.
	PID int
	// The filename of the executable.
	Comm string
	// The process state.
	State string
	// The PID of the parent of this process.
	PPID int
	// The process group ID of the process.
	PGRP int
	// The session ID of the process.
	Session int
	// The controlling terminal of the process.
	TTY int
	// The ID of the foreground process group of the controlling terminal of
	// the process.
	TPGID int
	// The kernel flags word of the process.
	Flags uint
	// The number of minor faults the process has made which have not required
	// loading a memory page from disk.
	MinFlt uint
	// The number of minor faults that the process's waited-for children have
	// made.
	CMinFlt uint
	// The number of major faults the process has made which have required
	// loading a memory page from disk.
	MajFlt uint
	// The number of major faults that the process's waited-for children have
	// made.
	CMajFlt uint
	// Amount of time that this process has been scheduled in user mode,
	// measured in clock ticks.
	UTime uint
	// Amount of time that this process has been scheduled in kernel mode,
	// measured in clock ticks.
	STime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in user mode, measured in clock ticks.
	CUTime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in kernel mode, measured in clock ticks.
	CSTime uint
	// For processes running a real-time scheduling policy, this is the negated
	// scheduling priority, minus one.
	Priority int
	// The nice value, a value in the range 19 (low priority) to -20 (high
	// priority).
	Nice int
	// Number of threads in this process.
	NumThreads int
	// The time the process started after system boot, the value is expressed
	// in clock ticks.
	Starttime uint64
	// Virtual memory size in bytes.
	VSize int
	// Resident set size in pages.
	RSS int

	// fs is the procfs mount this stat was read from; StartTime uses it to
	// look up the system boot time.
	fs FS
}
|
||||
|
||||
// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
	f, err := os.Open(p.path("stat"))
	if err != nil {
		return ProcStat{}, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return ProcStat{}, err
	}

	var (
		// ignore receives the one numeric field between NumThreads and
		// Starttime that this struct does not expose (presumably itrealvalue
		// per proc(5) — TODO confirm).
		ignore int

		s = ProcStat{PID: p.PID, fs: p.fs}
		// The comm field is wrapped in parentheses and may itself contain
		// spaces, so it is delimited by the first '(' and the LAST ')'
		// rather than being scanned with Fscan below.
		l = bytes.Index(data, []byte("("))
		r = bytes.LastIndex(data, []byte(")"))
	)

	if l < 0 || r < 0 {
		return ProcStat{}, fmt.Errorf(
			"unexpected format, couldn't extract comm: %s",
			data,
		)
	}

	s.Comm = string(data[l+1 : r])
	// Scan the whitespace-separated fields that follow ") "; the order here
	// must match the field order of /proc/[pid]/stat exactly.
	_, err = fmt.Fscan(
		bytes.NewBuffer(data[r+2:]),
		&s.State,
		&s.PPID,
		&s.PGRP,
		&s.Session,
		&s.TTY,
		&s.TPGID,
		&s.Flags,
		&s.MinFlt,
		&s.CMinFlt,
		&s.MajFlt,
		&s.CMajFlt,
		&s.UTime,
		&s.STime,
		&s.CUTime,
		&s.CSTime,
		&s.Priority,
		&s.Nice,
		&s.NumThreads,
		&ignore,
		&s.Starttime,
		&s.VSize,
		&s.RSS,
	)
	if err != nil {
		return ProcStat{}, err
	}

	return s, nil
}
|
||||
|
||||
// VirtualMemory returns the virtual memory size in bytes
// (the VSize field as parsed from /proc/[pid]/stat).
func (s ProcStat) VirtualMemory() int {
	return s.VSize
}
|
||||
|
||||
// ResidentMemory returns the resident memory size in bytes.
// RSS is reported in pages, so it is scaled by the system page size.
func (s ProcStat) ResidentMemory() int {
	return s.RSS * os.Getpagesize()
}
|
||||
|
||||
// StartTime returns the unix timestamp of the process in seconds.
|
||||
func (s ProcStat) StartTime() (float64, error) {
|
||||
stat, err := s.fs.NewStat()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
|
||||
}
|
||||
|
||||
// CPUTime returns the total CPU user and system time in seconds.
// UTime and STime are clock ticks; dividing by userHZ converts to seconds.
func (s ProcStat) CPUTime() float64 {
	return float64(s.UTime+s.STime) / userHZ
}
|
||||
203
vendor/github.com/ncabatoff/procfs/proc_status.go
generated
vendored
Normal file
203
vendor/github.com/ncabatoff/procfs/proc_status.go
generated
vendored
Normal file
|
|
@ -0,0 +1,203 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ProcStatus provides status information about the process,
// read from /proc/[pid]/status.
type (
	// ProcStatus holds the subset of status-file fields for which
	// newProcStatusBuilder registers scanners. The Vm* values are in kB
	// (scanned with a "%d kB" format).
	ProcStatus struct {
		TID                      int
		TracerPid                int
		UIDReal                  int
		UIDEffective             int
		UIDSavedSet              int
		UIDFileSystem            int
		GIDReal                  int
		GIDEffective             int
		GIDSavedSet              int
		GIDFileSystem            int
		FDSize                   int
		VmPeakKB                 int
		VmSizeKB                 int
		VmLckKB                  int
		VmHWMKB                  int
		VmRSSKB                  int
		VmDataKB                 int
		VmStkKB                  int
		VmExeKB                  int
		VmLibKB                  int
		VmPTEKB                  int
		VmSwapKB                 int
		VoluntaryCtxtSwitches    int
		NonvoluntaryCtxtSwitches int
	}

	// procStatusFiller parses one status-file value string into ps.
	procStatusFiller func(*ProcStatus, string) error

	// procStatusBuilder maps status-file field names to their fillers.
	procStatusBuilder struct {
		scanners map[string]procStatusFiller
	}
)
|
||||
|
||||
// The ref* helpers below each return the list of ProcStatus field pointers
// that one status-file line scans into; newProcStatusBuilder pairs them with
// the matching Sscanf format strings.
func (ps *ProcStatus) refTID() []interface{} {
	return []interface{}{&ps.TID}
}
func (ps *ProcStatus) refTracerPid() []interface{} {
	return []interface{}{&ps.TracerPid}
}
func (ps *ProcStatus) refUID() []interface{} {
	return []interface{}{&ps.UIDReal, &ps.UIDEffective, &ps.UIDSavedSet, &ps.UIDFileSystem}
}
func (ps *ProcStatus) refGID() []interface{} {
	return []interface{}{&ps.GIDReal, &ps.GIDEffective, &ps.GIDSavedSet, &ps.GIDFileSystem}
}
func (ps *ProcStatus) refFDSize() []interface{} {
	return []interface{}{&ps.FDSize}
}
func (ps *ProcStatus) refVmPeakKB() []interface{} {
	return []interface{}{&ps.VmPeakKB}
}
func (ps *ProcStatus) refVmSizeKB() []interface{} {
	return []interface{}{&ps.VmSizeKB}
}
func (ps *ProcStatus) refVmLckKB() []interface{} {
	return []interface{}{&ps.VmLckKB}
}
func (ps *ProcStatus) refVmHWMKB() []interface{} {
	return []interface{}{&ps.VmHWMKB}
}
func (ps *ProcStatus) refVmRSSKB() []interface{} {
	return []interface{}{&ps.VmRSSKB}
}
func (ps *ProcStatus) refVmDataKB() []interface{} {
	return []interface{}{&ps.VmDataKB}
}
func (ps *ProcStatus) refVmStkKB() []interface{} {
	return []interface{}{&ps.VmStkKB}
}
func (ps *ProcStatus) refVmExeKB() []interface{} {
	return []interface{}{&ps.VmExeKB}
}
func (ps *ProcStatus) refVmLibKB() []interface{} {
	return []interface{}{&ps.VmLibKB}
}
func (ps *ProcStatus) refVmPTEKB() []interface{} {
	return []interface{}{&ps.VmPTEKB}
}
func (ps *ProcStatus) refVmSwapKB() []interface{} {
	return []interface{}{&ps.VmSwapKB}
}
func (ps *ProcStatus) refVoluntaryCtxtSwitches() []interface{} {
	return []interface{}{&ps.VoluntaryCtxtSwitches}
}
func (ps *ProcStatus) refNonvoluntaryCtxtSwitches() []interface{} {
	return []interface{}{&ps.NonvoluntaryCtxtSwitches}
}
|
||||
|
||||
func newFiller(format string, ref func(ps *ProcStatus) []interface{}) procStatusFiller {
|
||||
return procStatusFiller(func(ps *ProcStatus, s string) error {
|
||||
_, err := fmt.Sscanf(s, format, ref(ps)...)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
// newProcStatusBuilder registers one filler per supported status-file field,
// pairing each field name with its scan format and destination members.
func newProcStatusBuilder() *procStatusBuilder {
	return &procStatusBuilder{
		scanners: map[string]procStatusFiller{
			"Pid":                        newFiller("%d", (*ProcStatus).refTID),
			"TracerPid":                  newFiller("%d", (*ProcStatus).refTracerPid),
			"Uid":                        newFiller("%d %d %d %d", (*ProcStatus).refUID),
			"Gid":                        newFiller("%d %d %d %d", (*ProcStatus).refGID),
			"FDSize":                     newFiller("%d", (*ProcStatus).refFDSize),
			"VmPeak":                     newFiller("%d kB", (*ProcStatus).refVmPeakKB),
			"VmSize":                     newFiller("%d kB", (*ProcStatus).refVmSizeKB),
			"VmLck":                      newFiller("%d kB", (*ProcStatus).refVmLckKB),
			"VmHWM":                      newFiller("%d kB", (*ProcStatus).refVmHWMKB),
			"VmRSS":                      newFiller("%d kB", (*ProcStatus).refVmRSSKB),
			"VmData":                     newFiller("%d kB", (*ProcStatus).refVmDataKB),
			"VmStk":                      newFiller("%d kB", (*ProcStatus).refVmStkKB),
			"VmExe":                      newFiller("%d kB", (*ProcStatus).refVmExeKB),
			"VmLib":                      newFiller("%d kB", (*ProcStatus).refVmLibKB),
			"VmPTE":                      newFiller("%d kB", (*ProcStatus).refVmPTEKB),
			"VmSwap":                     newFiller("%d kB", (*ProcStatus).refVmSwapKB),
			"voluntary_ctxt_switches":    newFiller("%d", (*ProcStatus).refVoluntaryCtxtSwitches),
			"nonvoluntary_ctxt_switches": newFiller("%d", (*ProcStatus).refNonvoluntaryCtxtSwitches),
		},
	}
}
|
||||
|
||||
// readStatus parses a /proc/[pid]/status stream into a ProcStatus using the
// builder's registered scanners. Unknown fields and unparsable values are
// skipped rather than treated as errors.
func (b *procStatusBuilder) readStatus(r io.Reader) (ProcStatus, error) {
	contents, err := ioutil.ReadAll(r)
	if err != nil {
		return ProcStatus{}, err
	}
	s := string(contents)

	var ps ProcStatus
	// Walk the buffer one line at a time by re-slicing s, without building a
	// slice of lines.
	for lineno := 0; s != ""; lineno++ {
		crpos := strings.IndexByte(s, '\n')
		if crpos == -1 {
			return ProcStatus{}, fmt.Errorf("line %d from status file without newline: %s", lineno, s)
		}
		line := strings.TrimSpace(s[:crpos])
		s = s[crpos+1:]
		if line == "" {
			// A blank line at the very end terminates the file; interior
			// blank lines are skipped.
			if s == "" {
				break
			}
			continue
		}

		pos := strings.IndexByte(line, ':')
		if pos == -1 {
			return ProcStatus{}, fmt.Errorf("line %d from status file without ':': %s", lineno, line)
		}

		// Only fields with a registered scanner are parsed; everything else
		// is ignored.
		field := line[:pos]
		scanner, ok := b.scanners[field]
		if !ok {
			continue
		}

		err = scanner(&ps, line[pos+1:])
		// TODO: flag parse errors with some kind of "warning" error.
		if err != nil {
			// Be lenient about parse errors, because otherwise we miss out on some interesting
			// procs. For example, my Ubuntu kernel (4.4.0-130-generic #156-Ubuntu) is showing
			// a Chromium status file with "VmLib: 18446744073709442944 kB".
			continue
		}
	}
	return ps, nil
}
|
||||
|
||||
// psb is the package-wide builder shared by every (Proc).NewStatus call.
var psb = newProcStatusBuilder()
|
||||
// NewStatus returns the current status information of the process.
|
||||
func (p Proc) NewStatus() (ProcStatus, error) {
|
||||
f, err := os.Open(p.path("status"))
|
||||
if err != nil {
|
||||
return ProcStatus{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return psb.readStatus(f)
|
||||
}
|
||||
232
vendor/github.com/ncabatoff/procfs/stat.go
generated
vendored
Normal file
232
vendor/github.com/ncabatoff/procfs/stat.go
generated
vendored
Normal file
|
|
@ -0,0 +1,232 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CPUStat shows how much time the cpu spend in various stages.
// Values are in seconds: parseCPUStat converts the raw USER_HZ ticks.
type CPUStat struct {
	User      float64
	Nice      float64
	System    float64
	Idle      float64
	Iowait    float64
	IRQ       float64
	SoftIRQ   float64
	Steal     float64
	Guest     float64
	GuestNice float64
}
|
||||
|
||||
// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
// It is possible to get per-cpu stats by reading /proc/softirqs
// Fields are filled in file order by parseSoftIRQStat.
type SoftIRQStat struct {
	Hi          uint64
	Timer       uint64
	NetTx       uint64
	NetRx       uint64
	Block       uint64
	BlockIoPoll uint64
	Tasklet     uint64
	Sched       uint64
	Hrtimer     uint64
	Rcu         uint64
}
|
||||
|
||||
// Stat represents kernel/system statistics, as read from /proc/stat
// (see NewStat).
type Stat struct {
	// Boot time in seconds since the Epoch.
	BootTime uint64
	// Summed up cpu statistics.
	CPUTotal CPUStat
	// Per-CPU statistics.
	CPU []CPUStat
	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
	IRQTotal uint64
	// Number of times a numbered IRQ was triggered.
	IRQ []uint64
	// Number of times a context switch happened.
	ContextSwitches uint64
	// Number of times a process was created.
	ProcessCreated uint64
	// Number of processes currently running.
	ProcessesRunning uint64
	// Number of processes currently blocked (waiting for IO).
	ProcessesBlocked uint64
	// Number of times a softirq was scheduled.
	SoftIRQTotal uint64
	// Detailed softirq statistics.
	SoftIRQ SoftIRQStat
}
|
||||
|
||||
// NewStat returns kernel/system statistics read from /proc/stat.
|
||||
func NewStat() (Stat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
|
||||
return fs.NewStat()
|
||||
}
|
||||
|
||||
// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
|
||||
func parseCPUStat(line string) (CPUStat, int64, error) {
|
||||
cpuStat := CPUStat{}
|
||||
var cpu string
|
||||
|
||||
count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
|
||||
&cpu,
|
||||
&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
|
||||
&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
|
||||
&cpuStat.Guest, &cpuStat.GuestNice)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
|
||||
}
|
||||
if count == 0 {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
|
||||
}
|
||||
|
||||
cpuStat.User /= userHZ
|
||||
cpuStat.Nice /= userHZ
|
||||
cpuStat.System /= userHZ
|
||||
cpuStat.Idle /= userHZ
|
||||
cpuStat.Iowait /= userHZ
|
||||
cpuStat.IRQ /= userHZ
|
||||
cpuStat.SoftIRQ /= userHZ
|
||||
cpuStat.Steal /= userHZ
|
||||
cpuStat.Guest /= userHZ
|
||||
cpuStat.GuestNice /= userHZ
|
||||
|
||||
if cpu == "cpu" {
|
||||
return cpuStat, -1, nil
|
||||
}
|
||||
|
||||
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
|
||||
if err != nil {
|
||||
return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
|
||||
}
|
||||
|
||||
return cpuStat, cpuID, nil
|
||||
}
|
||||
|
||||
// Parse a softirq line.
|
||||
func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
|
||||
softIRQStat := SoftIRQStat{}
|
||||
var total uint64
|
||||
var prefix string
|
||||
|
||||
_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
|
||||
&prefix, &total,
|
||||
&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
|
||||
&softIRQStat.Block, &softIRQStat.BlockIoPoll,
|
||||
&softIRQStat.Tasklet, &softIRQStat.Sched,
|
||||
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
|
||||
|
||||
if err != nil {
|
||||
return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
|
||||
}
|
||||
|
||||
return softIRQStat, total, nil
|
||||
}
|
||||
|
||||
// NewStat returns an information about current kernel/system statistics.
|
||||
func (fs FS) NewStat() (Stat, error) {
|
||||
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||
|
||||
f, err := os.Open(fs.Path("stat"))
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
stat := Stat{}
|
||||
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
parts := strings.Fields(scanner.Text())
|
||||
// require at least <key> <value>
|
||||
if len(parts) < 2 {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case parts[0] == "btime":
|
||||
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "intr":
|
||||
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
|
||||
}
|
||||
numberedIRQs := parts[2:]
|
||||
stat.IRQ = make([]uint64, len(numberedIRQs))
|
||||
for i, count := range numberedIRQs {
|
||||
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
|
||||
}
|
||||
}
|
||||
case parts[0] == "ctxt":
|
||||
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "processes":
|
||||
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_running":
|
||||
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "procs_blocked":
|
||||
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
|
||||
}
|
||||
case parts[0] == "softirq":
|
||||
softIRQStats, total, err := parseSoftIRQStat(line)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
stat.SoftIRQTotal = total
|
||||
stat.SoftIRQ = softIRQStats
|
||||
case strings.HasPrefix(parts[0], "cpu"):
|
||||
cpuStat, cpuID, err := parseCPUStat(line)
|
||||
if err != nil {
|
||||
return Stat{}, err
|
||||
}
|
||||
if cpuID == -1 {
|
||||
stat.CPUTotal = cpuStat
|
||||
} else {
|
||||
for int64(len(stat.CPU)) <= cpuID {
|
||||
stat.CPU = append(stat.CPU, CPUStat{})
|
||||
}
|
||||
stat.CPU[cpuID] = cpuStat
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
|
||||
}
|
||||
|
||||
return stat, nil
|
||||
}
|
||||
389
vendor/github.com/ncabatoff/procfs/ttar
generated
vendored
Executable file
389
vendor/github.com/ncabatoff/procfs/ttar
generated
vendored
Executable file
|
|
@ -0,0 +1,389 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Purpose: plain text tar format
|
||||
# Limitations: - only suitable for text files, directories, and symlinks
|
||||
# - stores only filename, content, and mode
|
||||
# - not designed for untrusted input
|
||||
#
|
||||
# Note: must work with bash version 3.2 (macOS)
|
||||
|
||||
# Copyright 2017 Roger Luethi
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit -o nounset
|
||||
|
||||
# Sanitize environment (for instance, standard sorting of glob matches)
|
||||
export LC_ALL=C
|
||||
|
||||
path=""
|
||||
CMD=""
|
||||
ARG_STRING="$*"
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Not all sed implementations can work on null bytes. In order to make ttar
|
||||
# work out of the box on macOS, use Python as a stream editor.
|
||||
|
||||
USE_PYTHON=0
|
||||
|
||||
PYTHON_CREATE_FILTER=$(cat << 'PCF'
|
||||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
for line in sys.stdin:
|
||||
line = re.sub(r'EOF', r'\EOF', line)
|
||||
line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
|
||||
line = re.sub('\x00', r'NULLBYTE', line)
|
||||
sys.stdout.write(line)
|
||||
PCF
|
||||
)
|
||||
|
||||
PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
|
||||
#!/usr/bin/env python
|
||||
|
||||
import re
|
||||
import sys
|
||||
|
||||
for line in sys.stdin:
|
||||
line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
|
||||
line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
|
||||
line = re.sub(r'([^\\])EOF', r'\1', line)
|
||||
line = re.sub(r'\\EOF', 'EOF', line)
|
||||
sys.stdout.write(line)
|
||||
PEF
|
||||
)
|
||||
|
||||
function test_environment {
|
||||
if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
|
||||
echo "WARNING sed unable to handle null bytes, using Python (slow)."
|
||||
if ! which python >/dev/null; then
|
||||
echo "ERROR Python not found. Aborting."
|
||||
exit 2
|
||||
fi
|
||||
USE_PYTHON=1
|
||||
fi
|
||||
}
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
function usage {
|
||||
bname=$(basename "$0")
|
||||
cat << USAGE
|
||||
Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
|
||||
$bname -t -f <ARCHIVE> (list archive contents)
|
||||
$bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
|
||||
|
||||
Options:
|
||||
-C <DIR> (change directory)
|
||||
-v (verbose)
|
||||
|
||||
Example: Change to sysfs directory, create ttar file from fixtures directory
|
||||
$bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
|
||||
USAGE
|
||||
exit "$1"
|
||||
}
|
||||
|
||||
function vecho {
|
||||
if [ "${VERBOSE:-}" == "yes" ]; then
|
||||
echo >&7 "$@"
|
||||
fi
|
||||
}
|
||||
|
||||
function set_cmd {
|
||||
if [ -n "$CMD" ]; then
|
||||
echo "ERROR: more than one command given"
|
||||
echo
|
||||
usage 2
|
||||
fi
|
||||
CMD=$1
|
||||
}
|
||||
|
||||
unset VERBOSE
|
||||
|
||||
while getopts :cf:htxvC: opt; do
|
||||
case $opt in
|
||||
c)
|
||||
set_cmd "create"
|
||||
;;
|
||||
f)
|
||||
ARCHIVE=$OPTARG
|
||||
;;
|
||||
h)
|
||||
usage 0
|
||||
;;
|
||||
t)
|
||||
set_cmd "list"
|
||||
;;
|
||||
x)
|
||||
set_cmd "extract"
|
||||
;;
|
||||
v)
|
||||
VERBOSE=yes
|
||||
exec 7>&1
|
||||
;;
|
||||
C)
|
||||
CDIR=$OPTARG
|
||||
;;
|
||||
*)
|
||||
echo >&2 "ERROR: invalid option -$OPTARG"
|
||||
echo
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Remove processed options from arguments
|
||||
shift $(( OPTIND - 1 ));
|
||||
|
||||
if [ "${CMD:-}" == "" ]; then
|
||||
echo >&2 "ERROR: no command given"
|
||||
echo
|
||||
usage 1
|
||||
elif [ "${ARCHIVE:-}" == "" ]; then
|
||||
echo >&2 "ERROR: no archive name given"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
|
||||
function list {
|
||||
local path=""
|
||||
local size=0
|
||||
local line_no=0
|
||||
local ttar_file=$1
|
||||
if [ -n "${2:-}" ]; then
|
||||
echo >&2 "ERROR: too many arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ ! -e "$ttar_file" ]; then
|
||||
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
while read -r line; do
|
||||
line_no=$(( line_no + 1 ))
|
||||
if [ $size -gt 0 ]; then
|
||||
size=$(( size - 1 ))
|
||||
continue
|
||||
fi
|
||||
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||
size=${BASH_REMATCH[1]}
|
||||
echo "$path"
|
||||
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
echo "$path/"
|
||||
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||
echo "$path -> ${BASH_REMATCH[1]}"
|
||||
fi
|
||||
done < "$ttar_file"
|
||||
}
|
||||
|
||||
function extract {
|
||||
local path=""
|
||||
local size=0
|
||||
local line_no=0
|
||||
local ttar_file=$1
|
||||
if [ -n "${2:-}" ]; then
|
||||
echo >&2 "ERROR: too many arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ ! -e "$ttar_file" ]; then
|
||||
echo >&2 "ERROR: file not found ($ttar_file)"
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
while IFS= read -r line; do
|
||||
line_no=$(( line_no + 1 ))
|
||||
local eof_without_newline
|
||||
if [ "$size" -gt 0 ]; then
|
||||
if [[ "$line" =~ [^\\]EOF ]]; then
|
||||
# An EOF not preceeded by a backslash indicates that the line
|
||||
# does not end with a newline
|
||||
eof_without_newline=1
|
||||
else
|
||||
eof_without_newline=0
|
||||
fi
|
||||
# Replace NULLBYTE with null byte if at beginning of line
|
||||
# Replace NULLBYTE with null byte unless preceeded by backslash
|
||||
# Remove one backslash in front of NULLBYTE (if any)
|
||||
# Remove EOF unless preceeded by backslash
|
||||
# Remove one backslash in front of EOF
|
||||
if [ $USE_PYTHON -eq 1 ]; then
|
||||
echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
|
||||
else
|
||||
# The repeated pattern makes up for sed's lack of negative
|
||||
# lookbehind assertions (for consecutive null bytes).
|
||||
echo -n "$line" | \
|
||||
sed -e 's/^NULLBYTE/\x0/g;
|
||||
s/\([^\\]\)NULLBYTE/\1\x0/g;
|
||||
s/\([^\\]\)NULLBYTE/\1\x0/g;
|
||||
s/\\NULLBYTE/NULLBYTE/g;
|
||||
s/\([^\\]\)EOF/\1/g;
|
||||
s/\\EOF/EOF/g;
|
||||
' >> "$path"
|
||||
fi
|
||||
if [[ "$eof_without_newline" -eq 0 ]]; then
|
||||
echo >> "$path"
|
||||
fi
|
||||
size=$(( size - 1 ))
|
||||
continue
|
||||
fi
|
||||
if [[ $line =~ ^Path:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
if [ -e "$path" ] || [ -L "$path" ]; then
|
||||
rm "$path"
|
||||
fi
|
||||
elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
|
||||
size=${BASH_REMATCH[1]}
|
||||
# Create file even if it is zero-length.
|
||||
touch "$path"
|
||||
vecho " $path"
|
||||
elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
|
||||
mode=${BASH_REMATCH[1]}
|
||||
chmod "$mode" "$path"
|
||||
vecho "$mode"
|
||||
elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
|
||||
path=${BASH_REMATCH[1]}
|
||||
mkdir -p "$path"
|
||||
vecho " $path/"
|
||||
elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
|
||||
ln -s "${BASH_REMATCH[1]}" "$path"
|
||||
vecho " $path -> ${BASH_REMATCH[1]}"
|
||||
elif [[ $line =~ ^# ]]; then
|
||||
# Ignore comments between files
|
||||
continue
|
||||
else
|
||||
echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
|
||||
exit 1
|
||||
fi
|
||||
done < "$ttar_file"
|
||||
}
|
||||
|
||||
function div {
|
||||
echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
|
||||
"- - - - - -"
|
||||
}
|
||||
|
||||
function get_mode {
|
||||
local mfile=$1
|
||||
if [ -z "${STAT_OPTION:-}" ]; then
|
||||
if stat -c '%a' "$mfile" >/dev/null 2>&1; then
|
||||
# GNU stat
|
||||
STAT_OPTION='-c'
|
||||
STAT_FORMAT='%a'
|
||||
else
|
||||
# BSD stat
|
||||
STAT_OPTION='-f'
|
||||
# Octal output, user/group/other (omit file type, sticky bit)
|
||||
STAT_FORMAT='%OLp'
|
||||
fi
|
||||
fi
|
||||
stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
|
||||
}
|
||||
|
||||
function _create {
|
||||
shopt -s nullglob
|
||||
local mode
|
||||
local eof_without_newline
|
||||
while (( "$#" )); do
|
||||
file=$1
|
||||
if [ -L "$file" ]; then
|
||||
echo "Path: $file"
|
||||
symlinkTo=$(readlink "$file")
|
||||
echo "SymlinkTo: $symlinkTo"
|
||||
vecho " $file -> $symlinkTo"
|
||||
div
|
||||
elif [ -d "$file" ]; then
|
||||
# Strip trailing slash (if there is one)
|
||||
file=${file%/}
|
||||
echo "Directory: $file"
|
||||
mode=$(get_mode "$file")
|
||||
echo "Mode: $mode"
|
||||
vecho "$mode $file/"
|
||||
div
|
||||
# Find all files and dirs, including hidden/dot files
|
||||
for x in "$file/"{*,.[^.]*}; do
|
||||
_create "$x"
|
||||
done
|
||||
elif [ -f "$file" ]; then
|
||||
echo "Path: $file"
|
||||
lines=$(wc -l "$file"|awk '{print $1}')
|
||||
eof_without_newline=0
|
||||
if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
|
||||
[[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
|
||||
eof_without_newline=1
|
||||
lines=$((lines+1))
|
||||
fi
|
||||
echo "Lines: $lines"
|
||||
# Add backslash in front of EOF
|
||||
# Add backslash in front of NULLBYTE
|
||||
# Replace null byte with NULLBYTE
|
||||
if [ $USE_PYTHON -eq 1 ]; then
|
||||
< "$file" python -c "$PYTHON_CREATE_FILTER"
|
||||
else
|
||||
< "$file" \
|
||||
sed 's/EOF/\\EOF/g;
|
||||
s/NULLBYTE/\\NULLBYTE/g;
|
||||
s/\x0/NULLBYTE/g;
|
||||
'
|
||||
fi
|
||||
if [[ "$eof_without_newline" -eq 1 ]]; then
|
||||
# Finish line with EOF to indicate that the original line did
|
||||
# not end with a linefeed
|
||||
echo "EOF"
|
||||
fi
|
||||
mode=$(get_mode "$file")
|
||||
echo "Mode: $mode"
|
||||
vecho "$mode $file"
|
||||
div
|
||||
else
|
||||
echo >&2 "ERROR: file not found ($file in $(pwd))"
|
||||
exit 2
|
||||
fi
|
||||
shift
|
||||
done
|
||||
}
|
||||
|
||||
function create {
|
||||
ttar_file=$1
|
||||
shift
|
||||
if [ -z "${1:-}" ]; then
|
||||
echo >&2 "ERROR: missing arguments."
|
||||
echo
|
||||
usage 1
|
||||
fi
|
||||
if [ -e "$ttar_file" ]; then
|
||||
rm "$ttar_file"
|
||||
fi
|
||||
exec > "$ttar_file"
|
||||
echo "# Archive created by ttar $ARG_STRING"
|
||||
_create "$@"
|
||||
}
|
||||
|
||||
test_environment
|
||||
|
||||
if [ -n "${CDIR:-}" ]; then
|
||||
if [[ "$ARCHIVE" != /* ]]; then
|
||||
# Relative path: preserve the archive's location before changing
|
||||
# directory
|
||||
ARCHIVE="$(pwd)/$ARCHIVE"
|
||||
fi
|
||||
cd "$CDIR"
|
||||
fi
|
||||
|
||||
"$CMD" "$ARCHIVE" "$@"
|
||||
187
vendor/github.com/ncabatoff/procfs/xfrm.go
generated
vendored
Normal file
187
vendor/github.com/ncabatoff/procfs/xfrm.go
generated
vendored
Normal file
|
|
@ -0,0 +1,187 @@
|
|||
// Copyright 2017 Prometheus Team
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package procfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// XfrmStat models the contents of /proc/net/xfrm_stat.
|
||||
type XfrmStat struct {
|
||||
// All errors which are not matched by other
|
||||
XfrmInError int
|
||||
// No buffer is left
|
||||
XfrmInBufferError int
|
||||
// Header Error
|
||||
XfrmInHdrError int
|
||||
// No state found
|
||||
// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
|
||||
XfrmInNoStates int
|
||||
// Transformation protocol specific error
|
||||
// e.g. SA Key is wrong
|
||||
XfrmInStateProtoError int
|
||||
// Transformation mode specific error
|
||||
XfrmInStateModeError int
|
||||
// Sequence error
|
||||
// e.g. sequence number is out of window
|
||||
XfrmInStateSeqError int
|
||||
// State is expired
|
||||
XfrmInStateExpired int
|
||||
// State has mismatch option
|
||||
// e.g. UDP encapsulation type is mismatched
|
||||
XfrmInStateMismatch int
|
||||
// State is invalid
|
||||
XfrmInStateInvalid int
|
||||
// No matching template for states
|
||||
// e.g. Inbound SAs are correct but SP rule is wrong
|
||||
XfrmInTmplMismatch int
|
||||
// No policy is found for states
|
||||
// e.g. Inbound SAs are correct but no SP is found
|
||||
XfrmInNoPols int
|
||||
// Policy discards
|
||||
XfrmInPolBlock int
|
||||
// Policy error
|
||||
XfrmInPolError int
|
||||
// All errors which are not matched by others
|
||||
XfrmOutError int
|
||||
// Bundle generation error
|
||||
XfrmOutBundleGenError int
|
||||
// Bundle check error
|
||||
XfrmOutBundleCheckError int
|
||||
// No state was found
|
||||
XfrmOutNoStates int
|
||||
// Transformation protocol specific error
|
||||
XfrmOutStateProtoError int
|
||||
// Transportation mode specific error
|
||||
XfrmOutStateModeError int
|
||||
// Sequence error
|
||||
// i.e sequence number overflow
|
||||
XfrmOutStateSeqError int
|
||||
// State is expired
|
||||
XfrmOutStateExpired int
|
||||
// Policy discads
|
||||
XfrmOutPolBlock int
|
||||
// Policy is dead
|
||||
XfrmOutPolDead int
|
||||
// Policy Error
|
||||
XfrmOutPolError int
|
||||
XfrmFwdHdrError int
|
||||
XfrmOutStateInvalid int
|
||||
XfrmAcquireError int
|
||||
}
|
||||
|
||||
// NewXfrmStat reads the xfrm_stat statistics.
|
||||
func NewXfrmStat() (XfrmStat, error) {
|
||||
fs, err := NewFS(DefaultMountPoint)
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
|
||||
return fs.NewXfrmStat()
|
||||
}
|
||||
|
||||
// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
|
||||
func (fs FS) NewXfrmStat() (XfrmStat, error) {
|
||||
file, err := os.Open(fs.Path("net/xfrm_stat"))
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var (
|
||||
x = XfrmStat{}
|
||||
s = bufio.NewScanner(file)
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
fields := strings.Fields(s.Text())
|
||||
|
||||
if len(fields) != 2 {
|
||||
return XfrmStat{}, fmt.Errorf(
|
||||
"couldn't parse %s line %s", file.Name(), s.Text())
|
||||
}
|
||||
|
||||
name := fields[0]
|
||||
value, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return XfrmStat{}, err
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "XfrmInError":
|
||||
x.XfrmInError = value
|
||||
case "XfrmInBufferError":
|
||||
x.XfrmInBufferError = value
|
||||
case "XfrmInHdrError":
|
||||
x.XfrmInHdrError = value
|
||||
case "XfrmInNoStates":
|
||||
x.XfrmInNoStates = value
|
||||
case "XfrmInStateProtoError":
|
||||
x.XfrmInStateProtoError = value
|
||||
case "XfrmInStateModeError":
|
||||
x.XfrmInStateModeError = value
|
||||
case "XfrmInStateSeqError":
|
||||
x.XfrmInStateSeqError = value
|
||||
case "XfrmInStateExpired":
|
||||
x.XfrmInStateExpired = value
|
||||
case "XfrmInStateInvalid":
|
||||
x.XfrmInStateInvalid = value
|
||||
case "XfrmInTmplMismatch":
|
||||
x.XfrmInTmplMismatch = value
|
||||
case "XfrmInNoPols":
|
||||
x.XfrmInNoPols = value
|
||||
case "XfrmInPolBlock":
|
||||
x.XfrmInPolBlock = value
|
||||
case "XfrmInPolError":
|
||||
x.XfrmInPolError = value
|
||||
case "XfrmOutError":
|
||||
x.XfrmOutError = value
|
||||
case "XfrmInStateMismatch":
|
||||
x.XfrmInStateMismatch = value
|
||||
case "XfrmOutBundleGenError":
|
||||
x.XfrmOutBundleGenError = value
|
||||
case "XfrmOutBundleCheckError":
|
||||
x.XfrmOutBundleCheckError = value
|
||||
case "XfrmOutNoStates":
|
||||
x.XfrmOutNoStates = value
|
||||
case "XfrmOutStateProtoError":
|
||||
x.XfrmOutStateProtoError = value
|
||||
case "XfrmOutStateModeError":
|
||||
x.XfrmOutStateModeError = value
|
||||
case "XfrmOutStateSeqError":
|
||||
x.XfrmOutStateSeqError = value
|
||||
case "XfrmOutStateExpired":
|
||||
x.XfrmOutStateExpired = value
|
||||
case "XfrmOutPolBlock":
|
||||
x.XfrmOutPolBlock = value
|
||||
case "XfrmOutPolDead":
|
||||
x.XfrmOutPolDead = value
|
||||
case "XfrmOutPolError":
|
||||
x.XfrmOutPolError = value
|
||||
case "XfrmFwdHdrError":
|
||||
x.XfrmFwdHdrError = value
|
||||
case "XfrmOutStateInvalid":
|
||||
x.XfrmOutStateInvalid = value
|
||||
case "XfrmAcquireError":
|
||||
x.XfrmAcquireError = value
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return x, s.Err()
|
||||
}
|
||||
330
vendor/github.com/ncabatoff/procfs/xfs/parse.go
generated
vendored
Normal file
330
vendor/github.com/ncabatoff/procfs/xfs/parse.go
generated
vendored
Normal file
|
|
@ -0,0 +1,330 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package xfs
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ncabatoff/procfs/internal/util"
|
||||
)
|
||||
|
||||
// ParseStats parses a Stats from an input io.Reader, using the format
|
||||
// found in /proc/fs/xfs/stat.
|
||||
func ParseStats(r io.Reader) (*Stats, error) {
|
||||
const (
|
||||
// Fields parsed into stats structures.
|
||||
fieldExtentAlloc = "extent_alloc"
|
||||
fieldAbt = "abt"
|
||||
fieldBlkMap = "blk_map"
|
||||
fieldBmbt = "bmbt"
|
||||
fieldDir = "dir"
|
||||
fieldTrans = "trans"
|
||||
fieldIg = "ig"
|
||||
fieldLog = "log"
|
||||
fieldRw = "rw"
|
||||
fieldAttr = "attr"
|
||||
fieldIcluster = "icluster"
|
||||
fieldVnodes = "vnodes"
|
||||
fieldBuf = "buf"
|
||||
fieldXpc = "xpc"
|
||||
|
||||
// Unimplemented at this time due to lack of documentation.
|
||||
fieldPushAil = "push_ail"
|
||||
fieldXstrat = "xstrat"
|
||||
fieldAbtb2 = "abtb2"
|
||||
fieldAbtc2 = "abtc2"
|
||||
fieldBmbt2 = "bmbt2"
|
||||
fieldIbt2 = "ibt2"
|
||||
fieldFibt2 = "fibt2"
|
||||
fieldQm = "qm"
|
||||
fieldDebug = "debug"
|
||||
)
|
||||
|
||||
var xfss Stats
|
||||
|
||||
s := bufio.NewScanner(r)
|
||||
for s.Scan() {
|
||||
// Expect at least a string label and a single integer value, ex:
|
||||
// - abt 0
|
||||
// - rw 1 2
|
||||
ss := strings.Fields(string(s.Bytes()))
|
||||
if len(ss) < 2 {
|
||||
continue
|
||||
}
|
||||
label := ss[0]
|
||||
|
||||
// Extended precision counters are uint64 values.
|
||||
if label == fieldXpc {
|
||||
us, err := util.ParseUint64s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
// All other counters are uint32 values.
|
||||
us, err := util.ParseUint32s(ss[1:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch label {
|
||||
case fieldExtentAlloc:
|
||||
xfss.ExtentAllocation, err = extentAllocationStats(us)
|
||||
case fieldAbt:
|
||||
xfss.AllocationBTree, err = btreeStats(us)
|
||||
case fieldBlkMap:
|
||||
xfss.BlockMapping, err = blockMappingStats(us)
|
||||
case fieldBmbt:
|
||||
xfss.BlockMapBTree, err = btreeStats(us)
|
||||
case fieldDir:
|
||||
xfss.DirectoryOperation, err = directoryOperationStats(us)
|
||||
case fieldTrans:
|
||||
xfss.Transaction, err = transactionStats(us)
|
||||
case fieldIg:
|
||||
xfss.InodeOperation, err = inodeOperationStats(us)
|
||||
case fieldLog:
|
||||
xfss.LogOperation, err = logOperationStats(us)
|
||||
case fieldRw:
|
||||
xfss.ReadWrite, err = readWriteStats(us)
|
||||
case fieldAttr:
|
||||
xfss.AttributeOperation, err = attributeOperationStats(us)
|
||||
case fieldIcluster:
|
||||
xfss.InodeClustering, err = inodeClusteringStats(us)
|
||||
case fieldVnodes:
|
||||
xfss.Vnode, err = vnodeStats(us)
|
||||
case fieldBuf:
|
||||
xfss.Buffer, err = bufferStats(us)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &xfss, s.Err()
|
||||
}
|
||||
|
||||
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
|
||||
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtentAllocationStats{
|
||||
ExtentsAllocated: us[0],
|
||||
BlocksAllocated: us[1],
|
||||
ExtentsFreed: us[2],
|
||||
BlocksFreed: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// btreeStats builds a BTreeStats from a slice of uint32s.
|
||||
func btreeStats(us []uint32) (BTreeStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
|
||||
}
|
||||
|
||||
return BTreeStats{
|
||||
Lookups: us[0],
|
||||
Compares: us[1],
|
||||
RecordsInserted: us[2],
|
||||
RecordsDeleted: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
|
||||
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
|
||||
}
|
||||
|
||||
return BlockMappingStats{
|
||||
Reads: us[0],
|
||||
Writes: us[1],
|
||||
Unmaps: us[2],
|
||||
ExtentListInsertions: us[3],
|
||||
ExtentListDeletions: us[4],
|
||||
ExtentListLookups: us[5],
|
||||
ExtentListCompares: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
|
||||
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
|
||||
}
|
||||
|
||||
return DirectoryOperationStats{
|
||||
Lookups: us[0],
|
||||
Creates: us[1],
|
||||
Removes: us[2],
|
||||
Getdents: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// TransactionStats builds a TransactionStats from a slice of uint32s.
|
||||
func transactionStats(us []uint32) (TransactionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
|
||||
}
|
||||
|
||||
return TransactionStats{
|
||||
Sync: us[0],
|
||||
Async: us[1],
|
||||
Empty: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
|
||||
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
|
||||
if l := len(us); l != 7 {
|
||||
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeOperationStats{
|
||||
Attempts: us[0],
|
||||
Found: us[1],
|
||||
Recycle: us[2],
|
||||
Missed: us[3],
|
||||
Duplicate: us[4],
|
||||
Reclaims: us[5],
|
||||
AttributeChange: us[6],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
|
||||
func logOperationStats(us []uint32) (LogOperationStats, error) {
|
||||
if l := len(us); l != 5 {
|
||||
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
|
||||
}
|
||||
|
||||
return LogOperationStats{
|
||||
Writes: us[0],
|
||||
Blocks: us[1],
|
||||
NoInternalBuffers: us[2],
|
||||
Force: us[3],
|
||||
ForceSleep: us[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
|
||||
func readWriteStats(us []uint32) (ReadWriteStats, error) {
|
||||
if l := len(us); l != 2 {
|
||||
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
|
||||
}
|
||||
|
||||
return ReadWriteStats{
|
||||
Read: us[0],
|
||||
Write: us[1],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
|
||||
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
|
||||
if l := len(us); l != 4 {
|
||||
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
|
||||
}
|
||||
|
||||
return AttributeOperationStats{
|
||||
Get: us[0],
|
||||
Set: us[1],
|
||||
Remove: us[2],
|
||||
List: us[3],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
|
||||
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
|
||||
}
|
||||
|
||||
return InodeClusteringStats{
|
||||
Iflush: us[0],
|
||||
Flush: us[1],
|
||||
FlushInode: us[2],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// VnodeStats builds a VnodeStats from a slice of uint32s.
|
||||
func vnodeStats(us []uint32) (VnodeStats, error) {
|
||||
// The attribute "Free" appears to not be available on older XFS
|
||||
// stats versions. Therefore, 7 or 8 elements may appear in
|
||||
// this slice.
|
||||
l := len(us)
|
||||
if l != 7 && l != 8 {
|
||||
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
|
||||
}
|
||||
|
||||
s := VnodeStats{
|
||||
Active: us[0],
|
||||
Allocate: us[1],
|
||||
Get: us[2],
|
||||
Hold: us[3],
|
||||
Release: us[4],
|
||||
Reclaim: us[5],
|
||||
Remove: us[6],
|
||||
}
|
||||
|
||||
// Skip adding free, unless it is present. The zero value will
|
||||
// be used in place of an actual count.
|
||||
if l == 7 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
s.Free = us[7]
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// BufferStats builds a BufferStats from a slice of uint32s.
|
||||
func bufferStats(us []uint32) (BufferStats, error) {
|
||||
if l := len(us); l != 9 {
|
||||
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
|
||||
}
|
||||
|
||||
return BufferStats{
|
||||
Get: us[0],
|
||||
Create: us[1],
|
||||
GetLocked: us[2],
|
||||
GetLockedWaited: us[3],
|
||||
BusyLocked: us[4],
|
||||
MissLocked: us[5],
|
||||
PageRetries: us[6],
|
||||
PageFound: us[7],
|
||||
GetRead: us[8],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
|
||||
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
|
||||
if l := len(us); l != 3 {
|
||||
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
|
||||
}
|
||||
|
||||
return ExtendedPrecisionStats{
|
||||
FlushBytes: us[0],
|
||||
WriteBytes: us[1],
|
||||
ReadBytes: us[2],
|
||||
}, nil
|
||||
}
|
||||
163
vendor/github.com/ncabatoff/procfs/xfs/xfs.go
generated
vendored
Normal file
163
vendor/github.com/ncabatoff/procfs/xfs/xfs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package xfs provides access to statistics exposed by the XFS filesystem.
|
||||
package xfs
|
||||
|
||||
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
	// The name of the filesystem used to source these statistics.
	// If empty, this indicates aggregated statistics for all XFS
	// filesystems on the host.
	Name string

	// One nested struct per group of counters; see the corresponding
	// *Stats types for the meaning of each field.
	ExtentAllocation   ExtentAllocationStats
	AllocationBTree    BTreeStats
	BlockMapping       BlockMappingStats
	BlockMapBTree      BTreeStats
	DirectoryOperation DirectoryOperationStats
	Transaction        TransactionStats
	InodeOperation     InodeOperationStats
	LogOperation       LogOperationStats
	ReadWrite          ReadWriteStats
	AttributeOperation AttributeOperationStats
	InodeClustering    InodeClusteringStats
	Vnode              VnodeStats
	Buffer             BufferStats
	ExtendedPrecision  ExtendedPrecisionStats
}
|
||||
|
||||
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
// It is embedded in Stats as the ExtentAllocation field.
type ExtentAllocationStats struct {
	ExtentsAllocated uint32
	BlocksAllocated  uint32
	ExtentsFreed     uint32
	BlocksFreed      uint32
}
|
||||
|
||||
// BTreeStats contains statistics regarding an XFS internal B-tree.
// It is embedded in Stats twice: once for the allocation B-tree
// (AllocationBTree) and once for the block map B-tree (BlockMapBTree).
type BTreeStats struct {
	Lookups         uint32
	Compares        uint32
	RecordsInserted uint32
	RecordsDeleted  uint32
}
|
||||
|
||||
// BlockMappingStats contains statistics regarding XFS block maps.
// It is embedded in Stats as the BlockMapping field.
type BlockMappingStats struct {
	Reads                uint32
	Writes               uint32
	Unmaps               uint32
	ExtentListInsertions uint32
	ExtentListDeletions  uint32
	ExtentListLookups    uint32
	ExtentListCompares   uint32
}
|
||||
|
||||
// DirectoryOperationStats contains statistics regarding XFS directory entries.
// It is embedded in Stats as the DirectoryOperation field.
type DirectoryOperationStats struct {
	Lookups  uint32
	Creates  uint32
	Removes  uint32
	Getdents uint32
}
|
||||
|
||||
// TransactionStats contains statistics regarding XFS metadata transactions.
// It is embedded in Stats as the Transaction field.
type TransactionStats struct {
	Sync  uint32
	Async uint32
	Empty uint32
}
|
||||
|
||||
// InodeOperationStats contains statistics regarding XFS inode operations.
// It is embedded in Stats as the InodeOperation field.
type InodeOperationStats struct {
	Attempts        uint32
	Found           uint32
	Recycle         uint32
	Missed          uint32
	Duplicate       uint32
	Reclaims        uint32
	AttributeChange uint32
}
|
||||
|
||||
// LogOperationStats contains statistics regarding the XFS log buffer.
// It is embedded in Stats as the LogOperation field.
type LogOperationStats struct {
	Writes            uint32
	Blocks            uint32
	NoInternalBuffers uint32
	Force             uint32
	ForceSleep        uint32
}
|
||||
|
||||
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
// It is embedded in Stats as the ReadWrite field.
type ReadWriteStats struct {
	Read  uint32
	Write uint32
}
|
||||
|
||||
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
// It is embedded in Stats as the AttributeOperation field.
type AttributeOperationStats struct {
	Get    uint32
	Set    uint32
	Remove uint32
	List   uint32
}
|
||||
|
||||
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
// It is embedded in Stats as the InodeClustering field.
type InodeClusteringStats struct {
	Iflush     uint32
	Flush      uint32
	FlushInode uint32
}
|
||||
|
||||
// VnodeStats contains statistics regarding XFS vnode operations.
// It is embedded in Stats as the Vnode field.
type VnodeStats struct {
	Active   uint32
	Allocate uint32
	Get      uint32
	Hold     uint32
	Release  uint32
	Reclaim  uint32
	Remove   uint32
	// Free is not available on older XFS stats versions; in that case it
	// remains at its zero value (see vnodeStats).
	Free uint32
}
|
||||
|
||||
// BufferStats contains statistics regarding XFS read/write I/O buffers.
// It is embedded in Stats as the Buffer field.
type BufferStats struct {
	Get             uint32
	Create          uint32
	GetLocked       uint32
	GetLockedWaited uint32
	BusyLocked      uint32
	MissLocked      uint32
	PageRetries     uint32
	PageFound       uint32
	GetRead         uint32
}
|
||||
|
||||
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
// Unlike the other stat groups, these counters are uint64s.
// It is embedded in Stats as the ExtendedPrecision field.
type ExtendedPrecisionStats struct {
	FlushBytes uint64
	WriteBytes uint64
	ReadBytes  uint64
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue