Update dependencies
Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>

parent 0e1fa0018b
commit a80b900ee9

49 changed files with 1801 additions and 611 deletions
go.mod (16 lines changed)

@@ -28,15 +28,15 @@ require (
 	github.com/prometheus/client_golang v1.22.0
 	github.com/spf13/cobra v1.9.1
 	github.com/stretchr/testify v1.10.0
-	golang.org/x/crypto v0.37.0
-	golang.org/x/oauth2 v0.29.0
-	golang.org/x/sync v0.13.0
+	golang.org/x/crypto v0.38.0
+	golang.org/x/oauth2 v0.30.0
+	golang.org/x/sync v0.14.0
 	gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gorm.io/datatypes v1.2.5
 	gorm.io/driver/mysql v1.5.7
 	gorm.io/driver/sqlite v1.5.7
-	gorm.io/gorm v1.25.12
+	gorm.io/gorm v1.26.0
 )

 require (
@@ -76,7 +76,7 @@ require (
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
 	github.com/prometheus/common v0.63.0 // indirect
-	github.com/prometheus/procfs v0.16.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
@@ -86,9 +86,9 @@ require (
 	go.opentelemetry.io/otel v1.35.0 // indirect
 	go.opentelemetry.io/otel/metric v1.35.0 // indirect
 	go.opentelemetry.io/otel/trace v1.35.0 // indirect
-	golang.org/x/net v0.39.0 // indirect
-	golang.org/x/sys v0.32.0 // indirect
-	golang.org/x/text v0.24.0 // indirect
+	golang.org/x/net v0.40.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/text v0.25.0 // indirect
 	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum (32 lines changed)

@@ -155,8 +155,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
 github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
 github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
 github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
-github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
-github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -188,21 +188,21 @@ go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucg
 go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
 go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
 go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
-golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
-golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
-golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
-golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
-golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
-golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
-golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
-golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
-golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
-golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
+golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
 google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
@@ -229,5 +229,5 @@ gorm.io/driver/sqlite v1.5.7/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa
 gorm.io/driver/sqlserver v1.5.4 h1:xA+Y1KDNspv79q43bPyjDMUgHoYHLhXYmdFcYPobg8g=
 gorm.io/driver/sqlserver v1.5.4/go.mod h1:+frZ/qYmuna11zHPlh5oc2O6ZA/lS88Keb0XSH1Zh/g=
 gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
-gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
-gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
+gorm.io/gorm v1.26.0 h1:9lqQVPG5aNNS6AyHdRiwScAVnXHg/L/Srzx55G5fOgs=
+gorm.io/gorm v1.26.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored; 72 lines changed)

@@ -1,31 +1,45 @@
 ---
+version: "2"
 linters:
   enable:
-    - errcheck
-    - forbidigo
-    - godot
-    - gofmt
-    - goimports
-    - gosimple
-    - govet
-    - ineffassign
-    - misspell
-    - revive
-    - staticcheck
-    - testifylint
-    - unused
-
-linters-settings:
-  forbidigo:
-    forbid:
-      - p: ^fmt\.Print.*$
-        msg: Do not commit print statements.
-  godot:
-    capital: true
-    exclude:
-      # Ignore "See: URL"
-      - 'See:'
-  goimports:
-    local-prefixes: github.com/prometheus/procfs
-  misspell:
-    locale: US
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored; 4 lines changed)

@@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
 GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)

 GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')

 PROMU := $(FIRST_GOPATH)/bin/promu
@@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.60.2
+GOLANGCI_LINT_VERSION ?= v2.0.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (generated, vendored; 20 lines changed)

@@ -20,6 +20,8 @@ package util
 import (
 	"bytes"
 	"os"
+	"strconv"
+	"strings"
 	"syscall"
 )

@@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {

 	return string(bytes.TrimSpace(b[:n])), nil
 }
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
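The two helpers added above wrap SysReadFile with integer parsing of a single-value sysfs attribute. A minimal sketch of the same pattern, assuming nothing beyond the standard library (readUintFromFile and the /sys/class/net/lo/mtu path are illustrative, not part of this commit; procfs keeps its helpers in an internal package that outside code cannot import):

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "strings"
    )

    // readUintFromFile mirrors the added SysReadUintFromFile helper, using
    // os.ReadFile instead of the syscall-based SysReadFile.
    func readUintFromFile(path string) (uint64, error) {
        data, err := os.ReadFile(path)
        if err != nil {
            return 0, err
        }
        return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
    }

    func main() {
        v, err := readUintFromFile("/sys/class/net/lo/mtu")
        if err != nil {
            fmt.Println("read failed:", err)
            return
        }
        fmt.Println("value:", v)
    }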
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored; 23 lines changed)

@@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	switch statVersion {
 	case statVersion10:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport10TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport10UDPLen
-		} else {
+		default:
 			return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
 		}
 		if len(ss) != expectedLength {
@@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 		}
 	case statVersion11:
 		var expectedLength int
-		if protocol == "tcp" {
+		switch protocol {
+		case "tcp":
 			expectedLength = fieldTransport11TCPLen
-		} else if protocol == "udp" {
+		case "udp":
 			expectedLength = fieldTransport11UDPLen
-		} else if protocol == "rdma" {
+		case "rdma":
 			expectedLength = fieldTransport11RDMAMinLen
-		} else {
+		default:
 			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
 		}
 		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
@@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
 	// For the udp RPC transport there is no connection count, connect idle time,
 	// or idle time (fields #3, #4, and #5); all other fields are the same. So
 	// we set them to 0 here.
-	if protocol == "udp" {
+	switch protocol {
+	case "udp":
 		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
-	} else if protocol == "tcp" {
+	case "tcp":
 		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
-	} else if protocol == "rdma" {
+	case "rdma":
 		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
 	}

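The mountstats.go change above, and the net_protocols.go and softirqs.go changes below, are the same mechanical refactor: an if/else-if chain over one value becomes a switch, with no change in behavior. A minimal illustrative sketch (the function names and placeholder lengths are made up, not code from this commit):

    package main

    import "fmt"

    // lengthIf is the old shape: an if/else-if chain over one value.
    func lengthIf(protocol string) (int, error) {
        if protocol == "tcp" {
            return 10, nil
        } else if protocol == "udp" {
            return 7, nil
        } else {
            return 0, fmt.Errorf("invalid protocol %q", protocol)
        }
    }

    // lengthSwitch is the new shape: the equivalent switch statement.
    func lengthSwitch(protocol string) (int, error) {
        switch protocol {
        case "tcp":
            return 10, nil
        case "udp":
            return 7, nil
        default:
            return 0, fmt.Errorf("invalid protocol %q", protocol)
        }
    }

    func main() {
        for _, p := range []string{"tcp", "udp", "rdma"} {
            a, errA := lengthIf(p)
            b, errB := lengthSwitch(p)
            fmt.Println(p, a, errA, b, errB) // both forms agree for every input
        }
    }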
vendor/github.com/prometheus/procfs/net_protocols.go (generated, vendored; 21 lines changed)

@@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
 	if err != nil {
 		return nil, err
 	}
-	if fields[4] == enabled {
+	switch fields[4] {
+	case enabled:
 		line.Pressure = 1
-	} else if fields[4] == disabled {
+	case disabled:
 		line.Pressure = 0
-	} else {
+	default:
 		line.Pressure = -1
 	}
 	line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
 	if err != nil {
 		return nil, err
 	}
-	if fields[6] == enabled {
+	switch fields[6] {
+	case enabled:
 		line.Slab = true
-	} else if fields[6] == disabled {
+	case disabled:
 		line.Slab = false
-	} else {
+	default:
 		return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
 	}
 	line.ModuleName = fields[7]
@@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
 	}

 	for i := 0; i < len(capabilities); i++ {
-		if capabilities[i] == "y" {
+		switch capabilities[i] {
+		case "y":
 			*capabilityFields[i] = true
-		} else if capabilities[i] == "n" {
+		case "n":
 			*capabilityFields[i] = false
-		} else {
+		default:
 			return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
 		}
 	}
vendor/github.com/prometheus/procfs/proc.go (generated, vendored; 8 lines changed)

@@ -37,9 +37,9 @@ type Proc struct {
 type Procs []Proc

 var (
-	ErrFileParse  = errors.New("Error Parsing File")
-	ErrFileRead   = errors.New("Error Reading File")
-	ErrMountPoint = errors.New("Error Accessing Mount point")
+	ErrFileParse  = errors.New("error parsing file")
+	ErrFileRead   = errors.New("error reading file")
+	ErrMountPoint = errors.New("error accessing mount point")
 )

 func (p Procs) Len() int { return len(p) }
@@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
 	if err != nil {
 		return Proc{}, err
 	}
-	pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+	pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
 	if err != nil {
 		return Proc{}, err
 	}
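The three sentinel variables keep their identities; only the message text is lowercased to follow Go error-string style, so identity checks with errors.Is keep working and only the printed message changes. A minimal sketch of a hypothetical caller (the program itself is illustrative; procfs.NewDefaultFS, FS.Self and procfs.ErrFileParse are existing procfs API):

    package main

    import (
        "errors"
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewDefaultFS()
        if err != nil {
            fmt.Println("procfs not available:", err)
            return
        }
        if _, err := fs.Self(); errors.Is(err, procfs.ErrFileParse) {
            // Matches before and after this commit; only err.Error() differs.
            fmt.Println("parse error:", err)
        }
    }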
vendor/github.com/prometheus/procfs/proc_netstat.go (generated, vendored; 224 lines changed)

@@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {

Every assignment under the "TcpExt" and "IpExt" cases drops the embedded-struct qualifier (TcpExt and IpExt are embedded in ProcNetstat, so the shorter, promoted form refers to the same fields). The first entries show the repeated one-line change:

 		case "TcpExt":
 			switch key {
 			case "SyncookiesSent":
-				procNetstat.TcpExt.SyncookiesSent = &value
+				procNetstat.SyncookiesSent = &value
 			case "SyncookiesRecv":
-				procNetstat.TcpExt.SyncookiesRecv = &value
+				procNetstat.SyncookiesRecv = &value

The identical rewrite (procNetstat.TcpExt.<Field> = &value becomes procNetstat.<Field> = &value) is applied for the remaining TcpExt keys: SyncookiesFailed, EmbryonicRsts, PruneCalled, RcvPruned, OfoPruned, OutOfWindowIcmps, LockDroppedIcmps, ArpFilter, TW, TWRecycled, TWKilled, PAWSActive, PAWSEstab, DelayedACKs, DelayedACKLocked, DelayedACKLost, ListenOverflows, ListenDrops, TCPHPHits, TCPPureAcks, TCPHPAcks, TCPRenoRecovery, TCPSackRecovery, TCPSACKReneging, TCPSACKReorder, TCPRenoReorder, TCPTSReorder, TCPFullUndo, TCPPartialUndo, TCPDSACKUndo, TCPLossUndo, TCPLostRetransmit, TCPRenoFailures, TCPSackFailures, TCPLossFailures, TCPFastRetrans, TCPSlowStartRetrans, TCPTimeouts, TCPLossProbes, TCPLossProbeRecovery, TCPRenoRecoveryFail, TCPSackRecoveryFail, TCPRcvCollapsed, TCPDSACKOldSent, TCPDSACKOfoSent, TCPDSACKRecv, TCPDSACKOfoRecv, TCPAbortOnData, TCPAbortOnClose, TCPDeferAcceptDrop, IPReversePathFilter, TCPTimeWaitOverflow, TCPReqQFullDoCookies, TCPReqQFullDrop, TCPRetransFail, TCPRcvCoalesce, TCPRcvQDrop, TCPOFOQueue, TCPOFODrop, TCPOFOMerge, TCPChallengeACK, TCPSYNChallenge, TCPFastOpenActive, TCPFastOpenActiveFail, TCPFastOpenPassive, TCPFastOpenPassiveFail, TCPFastOpenListenOverflow, TCPFastOpenCookieReqd, TCPFastOpenBlackhole, TCPSpuriousRtxHostQueues, BusyPollRxPackets, TCPAutoCorking, TCPFromZeroWindowAdv, TCPToZeroWindowAdv, TCPWantZeroWindowAdv, TCPSynRetrans, TCPOrigDataSent, TCPHystartTrainDetect, TCPHystartTrainCwnd, TCPHystartDelayDetect, TCPHystartDelayCwnd, TCPACKSkippedSynRecv, TCPACKSkippedPAWS, TCPACKSkippedSeq, TCPACKSkippedFinWait2, TCPACKSkippedTimeWait, TCPACKSkippedChallenge, TCPWinProbe, TCPKeepAlive, TCPMTUPFail, TCPMTUPSuccess, TCPWqueueTooBig; and likewise (procNetstat.IpExt.<Field> becomes procNetstat.<Field>) for the IpExt keys: InNoRoutes, InTruncatedPkts, InMcastPkts, OutMcastPkts, InBcastPkts, OutBcastPkts, InOctets, OutOctets, InMcastOctets, OutMcastOctets, InBcastOctets, OutBcastOctets, InCsumErrors, InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts, ReasmOverlaps.
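This large but mechanical change is behavior-preserving because Go promotes the fields of an embedded struct, so they can be addressed with or without the qualifier. A minimal sketch of field promotion (TcpStats and NetStats are illustrative stand-ins, not the real procfs types):

    package main

    import "fmt"

    // TcpStats stands in for an embedded stats struct such as TcpExt.
    type TcpStats struct {
        SyncookiesSent *float64
    }

    // NetStats stands in for ProcNetstat, which embeds its stats structs.
    type NetStats struct {
        TcpStats
    }

    func main() {
        v := 42.0
        var s NetStats

        // The old, qualified form and the new, promoted form name the same field.
        s.TcpStats.SyncookiesSent = &v
        fmt.Println(s.SyncookiesSent == s.TcpStats.SyncookiesSent) // true
    }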
vendor/github.com/prometheus/procfs/proc_snmp.go (generated, vendored; 120 lines changed)

@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {

The same embedded-field promotion is applied here: procSnmp.<Section>.<Field> = &value becomes procSnmp.<Field> = &value for most keys. For example:

 		case "Ip":
 			switch key {
 			case "Forwarding":
-				procSnmp.Ip.Forwarding = &value
+				procSnmp.Forwarding = &value
 			case "DefaultTTL":
-				procSnmp.Ip.DefaultTTL = &value
+				procSnmp.DefaultTTL = &value

The rewrite covers, under "Ip": InReceives, InHdrErrors, InAddrErrors, ForwDatagrams, InUnknownProtos, InDiscards, InDelivers, OutRequests, OutDiscards, OutNoRoutes, ReasmTimeout, ReasmReqds, ReasmOKs, ReasmFails, FragOKs, FragFails, FragCreates; under "Icmp": InMsgs, InDestUnreachs, InTimeExcds, InParmProbs, InSrcQuenchs, InRedirects, InEchos, InEchoReps, InTimestamps, InTimestampReps, InAddrMasks, InAddrMaskReps, OutMsgs, OutErrors, OutDestUnreachs, OutTimeExcds, OutParmProbs, OutSrcQuenchs, OutRedirects, OutEchos, OutEchoReps, OutTimestamps, OutTimestampReps, OutAddrMasks, OutAddrMaskReps; under "IcmpMsg": InType3, OutType3; and under "Tcp": RtoAlgorithm, RtoMin, RtoMax, MaxConn, ActiveOpens, PassiveOpens, AttemptFails, EstabResets, CurrEstab, InSegs, OutSegs, RetransSegs, InErrs, OutRsts. The assignments for Icmp.InErrors, Icmp.InCsumErrors and Tcp.InCsumErrors keep their explicit qualifiers and are unchanged.
vendor/github.com/prometheus/procfs/proc_snmp6.go (generated, vendored; 150 lines changed)

@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {

The same embedded-field promotion again: procSnmp6.<Section>.<Field> = &value becomes procSnmp6.<Field> = &value for most keys. For example:

 		case "Ip6":
 			switch key {
 			case "InReceives":
-				procSnmp6.Ip6.InReceives = &value
+				procSnmp6.InReceives = &value
 			case "InHdrErrors":
-				procSnmp6.Ip6.InHdrErrors = &value
+				procSnmp6.InHdrErrors = &value

The rewrite covers, under "Ip6": InTooBigErrors, InNoRoutes, InAddrErrors, InUnknownProtos, InTruncatedPkts, InDiscards, InDelivers, OutForwDatagrams, OutRequests, OutDiscards, OutNoRoutes, ReasmTimeout, ReasmReqds, ReasmOKs, ReasmFails, FragOKs, FragFails, FragCreates, InMcastPkts, OutMcastPkts, InOctets, OutOctets, InMcastOctets, OutMcastOctets, InBcastOctets, OutBcastOctets, InNoECTPkts, InECT1Pkts, InECT0Pkts, InCEPkts; and under "Icmp6": InMsgs, OutMsgs, OutErrors, InDestUnreachs, InPktTooBigs, InTimeExcds, InParmProblems, InEchos, InEchoReplies, InGroupMembQueries, InGroupMembResponses, InGroupMembReductions, InRouterSolicits, InRouterAdvertisements, InNeighborSolicits, InNeighborAdvertisements, InRedirects, InMLDv2Reports, OutDestUnreachs, OutPktTooBigs, OutTimeExcds, OutParmProblems, OutEchos, OutEchoReplies, OutGroupMembQueries, OutGroupMembResponses, OutGroupMembReductions, OutRouterSolicits, OutRouterAdvertisements, OutNeighborSolicits, OutNeighborAdvertisements, OutRedirects, OutMLDv2Reports, InType1, InType134, InType135, InType136, InType143, OutType133, OutType135, OutType136, OutType143. The assignments for Icmp6.InErrors and Icmp6.InCsumErrors keep their explicit qualifiers and are unchanged.

@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
 			case "InCsumErrors":
 				procSnmp6.Udp6.InCsumErrors = &value
 			case "IgnoredMulti":
-				procSnmp6.Udp6.IgnoredMulti = &value
+				procSnmp6.IgnoredMulti = &value
 			}
 		case "UdpLite6":
 			switch key {
vendor/github.com/prometheus/procfs/proc_sys.go (generated, vendored; 2 lines changed)

@@ -21,7 +21,7 @@ import (
 )

 func sysctlToPath(sysctl string) string {
-	return strings.Replace(sysctl, ".", "/", -1)
+	return strings.ReplaceAll(sysctl, ".", "/")
 }

 func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
vendor/github.com/prometheus/procfs/softirqs.go (generated, vendored; 22 lines changed)

@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
 		if len(parts) < 2 {
 			continue
 		}
-		switch {
-		case parts[0] == "HI:":
+		switch parts[0] {
+		case "HI:":
 			perCPU := parts[1:]
 			softirqs.Hi = make([]uint64, len(perCPU))
 			for i, count := range perCPU {

The remaining hunks (@@ -77,7 +77,7 @@ through @@ -141,7 +141,7 @@) each make the matching one-line change in the next clause of the same switch, rewriting case parts[0] == "TIMER:":, "NET_TX:", "NET_RX:", "BLOCK:", "IRQ_POLL:", "TASKLET:", "SCHED:", "HRTIMER:" and "RCU:" as case "TIMER:":, case "NET_TX:":, and so on; the surrounding per-CPU parsing loops and error messages are unchanged.
vendor/golang.org/x/oauth2/internal/doc.go (generated, vendored; 2 lines changed)

@@ -2,5 +2,5 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.

-// Package internal contains support packages for oauth2 package.
+// Package internal contains support packages for [golang.org/x/oauth2].
 package internal
vendor/golang.org/x/oauth2/internal/oauth2.go (generated, vendored; 2 lines changed)

@@ -13,7 +13,7 @@ import (
 )

 // ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
+// to an [*rsa.PrivateKey]. It detects whether the private key is in a
 // PEM container or not. If so, it extracts the private key
 // from PEM container before conversion. It only supports PEM
 // containers with no passphrase.
|||
50
vendor/golang.org/x/oauth2/internal/token.go
generated
vendored
50
vendor/golang.org/x/oauth2/internal/token.go
generated
vendored
|
|
@ -10,7 +10,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
|
@ -26,9 +25,9 @@ import (
|
|||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// This type is a mirror of oauth2.Token and exists to break
|
||||
// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break
|
||||
// an otherwise-circular dependency. Other internal packages
|
||||
// should convert this Token into an oauth2.Token before use.
|
||||
// should convert this Token into an [golang.org/x/oauth2.Token] before use.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
|
|
@ -50,9 +49,16 @@ type Token struct {
|
|||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time
|
||||
|
||||
// ExpiresIn is the OAuth2 wire format "expires_in" field,
|
||||
// which specifies how many seconds later the token expires,
|
||||
// relative to an unknown time base approximately around "now".
|
||||
// It is the application's responsibility to populate
|
||||
// `Expiry` from `ExpiresIn` when required.
|
||||
ExpiresIn int64 `json:"expires_in,omitempty"`
|
||||
|
||||
// Raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
Raw interface{}
|
||||
Raw any
|
||||
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
|
||||
|
|
@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
//
|
||||
// Deprecated: this function no longer does anything. Caller code that
// wants to avoid potential extra HTTP requests made during
// auto-probing of the provider's auth style should set
// Endpoint.AuthStyle.
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}

// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
type AuthStyle int

@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
    return c
}

type authStyleCacheKey struct {
    url      string
    clientID string
}

// AuthStyleCache is the set of tokenURLs we've successfully used via
// RetrieveToken and which style auth we ended up using.
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
// small.
type AuthStyleCache struct {
    mu sync.Mutex
    m  map[string]AuthStyle // keyed by tokenURL
    m  map[authStyleCacheKey]AuthStyle
}

// lookupAuthStyle reports which auth style we last used with tokenURL
// when calling RetrieveToken and whether we have ever done so.
func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    style, ok = c.m[tokenURL]
    style, ok = c.m[authStyleCacheKey{tokenURL, clientID}]
    return
}

// setAuthStyle adds an entry to authStyleCache, documented above.
func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.m == nil {
        c.m = make(map[string]AuthStyle)
        c.m = make(map[authStyleCacheKey]AuthStyle)
    }
    c.m[tokenURL] = v
    c.m[authStyleCacheKey{tokenURL, clientID}] = v
}

// newTokenRequest returns a new *http.Request to retrieve a new token
@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values {
}

func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) {
    needsAuthStyleProbe := authStyle == 0
    needsAuthStyleProbe := authStyle == AuthStyleUnknown
    if needsAuthStyleProbe {
        if style, ok := styleCache.lookupAuthStyle(tokenURL); ok {
        if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok {
            authStyle = style
            needsAuthStyleProbe = false
        } else {
@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
        token, err = doTokenRoundTrip(ctx, req)
    }
    if needsAuthStyleProbe && err == nil {
        styleCache.setAuthStyle(tokenURL, authStyle)
        styleCache.setAuthStyle(tokenURL, clientID, authStyle)
    }
    // Don't overwrite `RefreshToken` with an empty value
    // if this was a token refreshing request.
@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
    if err != nil {
        return nil, err
    }
    body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
    body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20))
    r.Body.Close()
    if err != nil {
        return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
        TokenType:    tj.TokenType,
        RefreshToken: tj.RefreshToken,
        Expiry:       tj.expiry(),
        Raw:          make(map[string]interface{}),
        ExpiresIn:    int64(tj.ExpiresIn),
        Raw:          make(map[string]any),
    }
    json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
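Not part of the diff: a minimal sketch of what the new cache key buys, written as if it lived next to the code above. The endpoint URL and client IDs are invented, and AuthStyleInHeader is assumed to be one of the package's existing AuthStyle constants.

```go
func exampleProbeCache() {
	var c AuthStyleCache

	// After a successful probe for client-a, remember the style that worked.
	c.setAuthStyle("https://idp.example/token", "client-a", AuthStyleInHeader)

	// Same endpoint, different client ID: cache miss, so client-b is
	// probed on its own instead of inheriting client-a's style.
	if _, ok := c.lookupAuthStyle("https://idp.example/token", "client-b"); !ok {
		// probe, then record the result for ("…/token", "client-b")
	}
}
```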

4 vendor/golang.org/x/oauth2/internal/transport.go generated vendored
@ -9,8 +9,8 @@ import (
    "net/http"
)

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
// HTTPClient is the context key to use with [context.WithValue]
// to associate an [*http.Client] value with a context.
var HTTPClient ContextKey

// ContextKey is just an empty struct. It exists so HTTPClient can be

55 vendor/golang.org/x/oauth2/oauth2.go generated vendored
@ -22,9 +22,9 @@ import (
)

// NoContext is the default context you should supply if not using
// your own context.Context (see https://golang.org/x/net/context).
// your own [context.Context].
//
// Deprecated: Use context.Background() or context.TODO() instead.
// Deprecated: Use [context.Background] or [context.TODO] instead.
var NoContext = context.TODO()

// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {}

// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
// package (https://golang.org/x/oauth2/clientcredentials).
// For the client credentials 2-legged OAuth2 flow, see the
// [golang.org/x/oauth2/clientcredentials] package.
type Config struct {
    // ClientID is the application's ID.
    ClientID string
@ -46,7 +46,7 @@ type Config struct {
    // ClientSecret is the application's secret.
    ClientSecret string

    // Endpoint contains the resource server's token endpoint
    // Endpoint contains the authorization server's token endpoint
    // URLs. These are constants specific to each server and are
    // often available via site-specific packages, such as
    // google.Endpoint or github.Endpoint.
@ -135,7 +135,7 @@ type setParam struct{ k, v string }

func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }

// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters
// to a provider's authorization endpoint.
func SetAuthURLParam(key, value string) AuthCodeOption {
    return setParam{key, value}
@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// request and callback. The authorization server includes this value when
// redirecting the user agent back to the client.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well
// as [ApprovalForce].
//
// To protect against CSRF attacks, opts should include a PKCE challenge
// (S256ChallengeOption). Not all servers support PKCE. An alternative is to
@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
    v := url.Values{
        "grant_type": {"password"},
@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state") if you are
// The code will be in the [http.Request.FormValue]("code"). Before
// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are
// using it to protect against CSRF attacks.
//
// If using PKCE to protect against CSRF attacks, opts should include a
@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
    return NewClient(ctx, c.TokenSource(ctx, t))
}

// TokenSource returns a TokenSource that returns t until t expires,
// TokenSource returns a [TokenSource] that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
//
// Most users will use Config.Client instead.
// Most users will use [Config.Client] instead.
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
    tkr := &tokenRefresher{
        ctx: ctx,
@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
    }
}

// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
// tokenRefresher is a TokenSource that makes "grant_type=refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
    ctx context.Context // used to get HTTP requests
@ -305,8 +305,7 @@ type reuseTokenSource struct {
}

// Token returns the current token if it's still valid, else will
// refresh the current token (using r.Context for HTTP client
// information) and return the new one.
// refresh the current token and return the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
    return t, nil
}

// StaticTokenSource returns a TokenSource that always returns the same token.
// StaticTokenSource returns a [TokenSource] that always returns the same token.
// Because the provided token t is never refreshed, StaticTokenSource is only
// useful for tokens that never expire.
func StaticTokenSource(t *Token) TokenSource {
@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) {
    return s.t, nil
}

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
// HTTPClient is the context key to use with [context.WithValue]
// to associate a [*http.Client] value with a context.
var HTTPClient internal.ContextKey

// NewClient creates an *http.Client from a Context and TokenSource.
// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource].
// The returned client is not valid beyond the lifetime of the context.
//
// Note that if a custom *http.Client is provided via the Context it
// Note that if a custom [*http.Client] is provided via the [context.Context] it
// is used only for token acquisition and is not used to configure the
// *http.Client returned from NewClient.
// [*http.Client] returned from NewClient.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
@ -368,7 +367,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
    }
}

// ReuseTokenSource returns a TokenSource which repeatedly returns the
// ReuseTokenSource returns a [TokenSource] which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
@ -376,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
// The initial token t may be nil, in which case the TokenSource is
// The initial token t may be nil, in which case the [TokenSource] is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
// TokenSource without adverse effects.
// [TokenSource] without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
    // Don't wrap a reuseTokenSource in itself. That would work,
    // but cause an unnecessary number of mutex operations.
@ -397,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
    }
}

// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
// TokenSource returned by ReuseTokenSource, except the expiry buffer is
// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the
// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
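Not part of the diff: a hedged sketch of the two knobs documented above — substituting a custom *http.Client for token acquisition via the HTTPClient context key, and widening the refresh window with ReuseTokenSourceWithExpiry. The timeout and buffer values are arbitrary.

```go
package example

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// tokenSourceWithCustomClient returns a source that exchanges and refreshes
// tokens through hc, and refreshes 30 seconds before the reported expiry.
func tokenSourceWithCustomClient(conf *oauth2.Config, tok *oauth2.Token) oauth2.TokenSource {
	// The custom client is used only for token acquisition, as the
	// NewClient documentation above notes.
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	return oauth2.ReuseTokenSourceWithExpiry(tok, conf.TokenSource(ctx, tok), 30*time.Second)
}
```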

15 vendor/golang.org/x/oauth2/pkce.go generated vendored
@ -1,6 +1,7 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package oauth2

import (
@ -20,9 +21,9 @@ const (
// This follows recommendations in RFC 7636.
//
// A fresh verifier should be generated for each authorization.
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
// (or Config.DeviceAccessToken).
// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken]
// with [VerifierOption].
func GenerateVerifier() string {
    // "RECOMMENDED that the output of a suitable random number generator be
    // used to create a 32-octet sequence. The octet sequence is then
@ -36,22 +37,22 @@ func GenerateVerifier() string {
    return base64.RawURLEncoding.EncodeToString(data)
}

// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
// passed to Config.Exchange or Config.DeviceAccessToken only.
// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be
// passed to [Config.Exchange] or [Config.DeviceAccessToken].
func VerifierOption(verifier string) AuthCodeOption {
    return setParam{k: codeVerifierKey, v: verifier}
}

// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
//
// Prefer to use S256ChallengeOption where possible.
// Prefer to use [S256ChallengeOption] where possible.
func S256ChallengeFromVerifier(verifier string) string {
    sha := sha256.Sum256([]byte(verifier))
    return base64.RawURLEncoding.EncodeToString(sha[:])
}

// S256ChallengeOption derives a PKCE code challenge derived from verifier with
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
// only.
func S256ChallengeOption(verifier string) AuthCodeOption {
    return challengeOption{
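Not part of the diff: a hedged sketch of how these helpers are meant to be paired — one verifier, with the S256 challenge sent on the authorization leg and the verifier itself on the exchange leg. The config, state, and code values are assumed to come from the caller.

```go
package example

import (
	"context"

	"golang.org/x/oauth2"
)

// authorize builds the redirect URL carrying the S256 challenge and returns
// the verifier that must be kept for the later exchange.
func authorize(conf *oauth2.Config, state string) (url, verifier string) {
	verifier = oauth2.GenerateVerifier()
	url = conf.AuthCodeURL(state, oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(verifier))
	return url, verifier
}

// exchange redeems the authorization code together with the same verifier.
func exchange(ctx context.Context, conf *oauth2.Config, code, verifier string) (*oauth2.Token, error) {
	return conf.Exchange(ctx, code, oauth2.VerifierOption(verifier))
}
```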

15 vendor/golang.org/x/oauth2/token.go generated vendored
@ -44,7 +44,7 @@ type Token struct {

    // Expiry is the optional expiration time of the access token.
    //
    // If zero, TokenSource implementations will reuse the same
    // If zero, [TokenSource] implementations will reuse the same
    // token forever and RefreshToken or equivalent
    // mechanisms for that TokenSource will not be used.
    Expiry time.Time `json:"expiry,omitempty"`
@ -58,7 +58,7 @@ type Token struct {

    // raw optionally contains extra metadata from the server
    // when updating a token.
    raw interface{}
    raw any

    // expiryDelta is used to calculate when a token is considered
    // expired, by subtracting from Expiry. If zero, defaultExpiryDelta
@ -86,16 +86,16 @@ func (t *Token) Type() string {
// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
// This method is unnecessary when using Transport or an HTTP Client
// This method is unnecessary when using [Transport] or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
    r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}

// WithExtra returns a new Token that's a clone of t, but using the
// WithExtra returns a new [Token] that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
func (t *Token) WithExtra(extra interface{}) *Token {
func (t *Token) WithExtra(extra any) *Token {
    t2 := new(Token)
    *t2 = *t
    t2.raw = extra
@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token {
// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
func (t *Token) Extra(key string) interface{} {
    if raw, ok := t.raw.(map[string]interface{}); ok {
func (t *Token) Extra(key string) any {
    if raw, ok := t.raw.(map[string]any); ok {
        return raw[key]
    }

@ -163,6 +163,7 @@ func tokenFromInternal(t *internal.Token) *Token {
        TokenType:    t.TokenType,
        RefreshToken: t.RefreshToken,
        Expiry:       t.Expiry,
        ExpiresIn:    t.ExpiresIn,
        raw:          t.Raw,
    }
}
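Not part of the diff: a small usage sketch for the Extra accessor above; "id_token" is just an illustrative key that some providers return in the token response.

```go
package example

import (
	"context"

	"golang.org/x/oauth2"
)

// idTokenFromExchange pulls a provider-specific extra field out of the
// token's raw response map via Extra.
func idTokenFromExchange(ctx context.Context, conf *oauth2.Config, code string) (string, error) {
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		return "", err
	}
	idToken, _ := tok.Extra("id_token").(string) // empty if absent or not a string
	return idToken, nil
}
```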

24 vendor/golang.org/x/oauth2/transport.go generated vendored
@ -11,12 +11,12 @@ import (
    "sync"
)

// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
// wrapping a base RoundTripper and adding an Authorization header
// with a token from the supplied Sources.
// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests,
// wrapping a base [http.RoundTripper] and adding an Authorization header
// with a token from the supplied [TokenSource].
//
// Transport is a low-level mechanism. Most code will use the
// higher-level Config.Client method instead.
// higher-level [Config.Client] method instead.
type Transport struct {
    // Source supplies the token to add to outgoing requests'
    // Authorization headers.
@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
        return nil, err
    }

    req2 := cloneRequest(req) // per RoundTripper contract
    req2 := req.Clone(req.Context())
    token.SetAuthHeader(req2)

    // req.Body is assumed to be closed by the base RoundTripper.
@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper {
    }
    return http.DefaultTransport
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
    // shallow copy of the struct
    r2 := new(http.Request)
    *r2 = *r
    // deep copy of the Header
    r2.Header = make(http.Header, len(r.Header))
    for k, s := range r.Header {
        r2.Header[k] = append([]string(nil), s...)
    }
    return r2
}
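Not part of the diff: a hedged sketch of wiring Transport into an *http.Client by hand; as the comment above says, most code would use Config.Client instead.

```go
package example

import (
	"net/http"

	"golang.org/x/oauth2"
)

// clientFromSource builds an HTTP client whose requests carry tokens
// from src; Base falls back to http.DefaultTransport when nil.
func clientFromSource(src oauth2.TokenSource) *http.Client {
	return &http.Client{
		Transport: &oauth2.Transport{
			Source: src,
			Base:   http.DefaultTransport,
		},
	}
}
```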

107 vendor/golang.org/x/sync/errgroup/errgroup.go generated vendored
@ -12,6 +12,8 @@ package errgroup
import (
    "context"
    "fmt"
    "runtime"
    "runtime/debug"
    "sync"
)

@ -31,6 +33,10 @@ type Group struct {

    errOnce sync.Once
    err     error

    mu         sync.Mutex
    panicValue any  // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked.
    abnormal   bool // some Group.Go goroutine terminated abnormally (panic or goexit).
}

func (g *Group) done() {
@ -50,13 +56,22 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
    return &Group{cancel: cancel}, ctx
}

// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
// Wait blocks until all function calls from the Go method have returned
// normally, then returns the first non-nil error (if any) from them.
//
// If any of the calls panics, Wait panics with a [PanicValue];
// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit.
func (g *Group) Wait() error {
    g.wg.Wait()
    if g.cancel != nil {
        g.cancel(g.err)
    }
    if g.panicValue != nil {
        panic(g.panicValue)
    }
    if g.abnormal {
        runtime.Goexit()
    }
    return g.err
}

@ -65,18 +80,56 @@ func (g *Group) Wait() error {
// It blocks until the new goroutine can be added without the number of
// active goroutines in the group exceeding the configured limit.
//
// The first call to return a non-nil error cancels the group's context, if the
// group was created by calling WithContext. The error will be returned by Wait.
// It blocks until the new goroutine can be added without the number of
// goroutines in the group exceeding the configured limit.
//
// The first goroutine in the group that returns a non-nil error, panics, or
// invokes [runtime.Goexit] will cancel the associated Context, if any.
func (g *Group) Go(f func() error) {
    if g.sem != nil {
        g.sem <- token{}
    }

    g.add(f)
}

func (g *Group) add(f func() error) {
    g.wg.Add(1)
    go func() {
        defer g.done()
        normalReturn := false
        defer func() {
            if normalReturn {
                return
            }
            v := recover()
            g.mu.Lock()
            defer g.mu.Unlock()
            if !g.abnormal {
                if g.cancel != nil {
                    g.cancel(g.err)
                }
                g.abnormal = true
            }
            if v != nil && g.panicValue == nil {
                switch v := v.(type) {
                case error:
                    g.panicValue = PanicError{
                        Recovered: v,
                        Stack:     debug.Stack(),
                    }
                default:
                    g.panicValue = PanicValue{
                        Recovered: v,
                        Stack:     debug.Stack(),
                    }
                }
            }
        }()

        if err := f(); err != nil {
        err := f()
        normalReturn = true
        if err != nil {
            g.errOnce.Do(func() {
                g.err = err
                if g.cancel != nil {
@ -101,19 +154,7 @@ func (g *Group) TryGo(f func() error) bool {
        }
    }

    g.wg.Add(1)
    go func() {
        defer g.done()

        if err := f(); err != nil {
            g.errOnce.Do(func() {
                g.err = err
                if g.cancel != nil {
                    g.cancel(g.err)
                }
            })
        }
    }()
    g.add(f)
    return true
}

@ -135,3 +176,33 @@ func (g *Group) SetLimit(n int) {
    }
    g.sem = make(chan token, n)
}

// PanicError wraps an error recovered from an unhandled panic
// when calling a function passed to Go or TryGo.
type PanicError struct {
    Recovered error
    Stack     []byte // result of call to [debug.Stack]
}

func (p PanicError) Error() string {
    // A Go Error method conventionally does not include a stack dump, so omit it
    // here. (Callers who care can extract it from the Stack field.)
    return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
}

func (p PanicError) Unwrap() error { return p.Recovered }

// PanicValue wraps a value that does not implement the error interface,
// recovered from an unhandled panic when calling a function passed to Go or
// TryGo.
type PanicValue struct {
    Recovered any
    Stack     []byte // result of call to [debug.Stack]
}

func (p PanicValue) String() string {
    if len(p.Stack) > 0 {
        return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
    }
    return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
}
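Not part of the diff: a hedged sketch of what the new panic propagation means for callers of this vendored errgroup — a panic inside any Go goroutine resurfaces from Wait as a PanicError (error values) or PanicValue (anything else), and can be converted back into an ordinary error at the call site.

```go
package example

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// waitReportingPanics converts a panic propagated by Wait back into an error.
func waitReportingPanics(g *errgroup.Group) (err error) {
	defer func() {
		switch r := recover().(type) {
		case nil:
			// no panic; err already holds g.Wait()'s result
		case errgroup.PanicError:
			err = fmt.Errorf("errgroup goroutine panicked: %w", r.Recovered)
		case errgroup.PanicValue:
			err = fmt.Errorf("errgroup goroutine panicked: %v", r.Recovered)
		default:
			panic(r) // not from the group; re-raise
		}
	}()
	return g.Wait()
}
```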

11 vendor/golang.org/x/sys/cpu/cpu.go generated vendored
@ -232,6 +232,17 @@ var RISCV64 struct {
    HasZba   bool // Address generation instructions extension
    HasZbb   bool // Basic bit-manipulation extension
    HasZbs   bool // Single-bit instructions extension
    HasZvbb  bool // Vector Basic Bit-manipulation
    HasZvbc  bool // Vector Carryless Multiplication
    HasZvkb  bool // Vector Cryptography Bit-manipulation
    HasZvkt  bool // Vector Data-Independent Execution Latency
    HasZvkg  bool // Vector GCM/GMAC
    HasZvkn  bool // NIST Algorithm Suite (AES/SHA256/SHA512)
    HasZvknc bool // NIST Algorithm Suite with carryless multiply
    HasZvkng bool // NIST Algorithm Suite with GCM
    HasZvks  bool // ShangMi Algorithm Suite
    HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication
    HasZvksg bool // ShangMi Algorithm Suite with GCM
    _        CacheLinePad
}
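Not part of the diff: the new RISCV64 fields are read like any other feature flag in this package; a minimal, assumed usage sketch.

```go
package example

import "golang.org/x/sys/cpu"

// vectorCryptoPath picks an implementation based on the detected
// RISC-V vector-crypto shorthand extensions.
func vectorCryptoPath() string {
	switch {
	case cpu.RISCV64.HasZvkng:
		return "zvkng" // NIST suite with vector GCM/GMAC
	case cpu.RISCV64.HasZvkn:
		return "zvkn" // NIST suite without the GCM shorthand
	default:
		return "generic"
	}
}
```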
|
||||
|
||||
|
|
|
|||
23
vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
generated
vendored
23
vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
generated
vendored
|
|
@ -58,6 +58,15 @@ const (
|
|||
riscv_HWPROBE_EXT_ZBA = 0x8
|
||||
riscv_HWPROBE_EXT_ZBB = 0x10
|
||||
riscv_HWPROBE_EXT_ZBS = 0x20
|
||||
riscv_HWPROBE_EXT_ZVBB = 0x20000
|
||||
riscv_HWPROBE_EXT_ZVBC = 0x40000
|
||||
riscv_HWPROBE_EXT_ZVKB = 0x80000
|
||||
riscv_HWPROBE_EXT_ZVKG = 0x100000
|
||||
riscv_HWPROBE_EXT_ZVKNED = 0x200000
|
||||
riscv_HWPROBE_EXT_ZVKNHB = 0x800000
|
||||
riscv_HWPROBE_EXT_ZVKSED = 0x1000000
|
||||
riscv_HWPROBE_EXT_ZVKSH = 0x2000000
|
||||
riscv_HWPROBE_EXT_ZVKT = 0x4000000
|
||||
riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
|
||||
riscv_HWPROBE_MISALIGNED_FAST = 0x3
|
||||
riscv_HWPROBE_MISALIGNED_MASK = 0x7
|
||||
|
|
@ -99,6 +108,20 @@ func doinit() {
|
|||
RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
|
||||
RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
|
||||
RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
|
||||
RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB)
|
||||
RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC)
|
||||
RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB)
|
||||
RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG)
|
||||
RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT)
|
||||
// Cryptography shorthand extensions
|
||||
RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) &&
|
||||
isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt
|
||||
RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc
|
||||
RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg
|
||||
RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) &&
|
||||
isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt
|
||||
RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc
|
||||
RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg
|
||||
}
|
||||
if pairs[1].key != -1 {
|
||||
v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
|
||||
|
|
|
|||
12
vendor/golang.org/x/sys/cpu/cpu_riscv64.go
generated
vendored
12
vendor/golang.org/x/sys/cpu/cpu_riscv64.go
generated
vendored
|
|
@ -16,5 +16,17 @@ func initOptions() {
|
|||
{Name: "zba", Feature: &RISCV64.HasZba},
|
||||
{Name: "zbb", Feature: &RISCV64.HasZbb},
|
||||
{Name: "zbs", Feature: &RISCV64.HasZbs},
|
||||
// RISC-V Cryptography Extensions
|
||||
{Name: "zvbb", Feature: &RISCV64.HasZvbb},
|
||||
{Name: "zvbc", Feature: &RISCV64.HasZvbc},
|
||||
{Name: "zvkb", Feature: &RISCV64.HasZvkb},
|
||||
{Name: "zvkg", Feature: &RISCV64.HasZvkg},
|
||||
{Name: "zvkt", Feature: &RISCV64.HasZvkt},
|
||||
{Name: "zvkn", Feature: &RISCV64.HasZvkn},
|
||||
{Name: "zvknc", Feature: &RISCV64.HasZvknc},
|
||||
{Name: "zvkng", Feature: &RISCV64.HasZvkng},
|
||||
{Name: "zvks", Feature: &RISCV64.HasZvks},
|
||||
{Name: "zvksc", Feature: &RISCV64.HasZvksc},
|
||||
{Name: "zvksg", Feature: &RISCV64.HasZvksg},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
49
vendor/golang.org/x/sys/windows/security_windows.go
generated
vendored
49
vendor/golang.org/x/sys/windows/security_windows.go
generated
vendored
|
|
@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
|
|||
return nil, err
|
||||
}
|
||||
if absoluteSDSize > 0 {
|
||||
absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0]))
|
||||
absoluteSD = new(SECURITY_DESCRIPTOR)
|
||||
if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
|
||||
panic("sizeof(SECURITY_DESCRIPTOR) too small")
|
||||
}
|
||||
}
|
||||
var (
|
||||
dacl *ACL
|
||||
|
|
@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
|
|||
group *SID
|
||||
)
|
||||
if daclSize > 0 {
|
||||
dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0]))
|
||||
dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
|
||||
}
|
||||
if saclSize > 0 {
|
||||
sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0]))
|
||||
sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
|
||||
}
|
||||
if ownerSize > 0 {
|
||||
owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0]))
|
||||
owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
|
||||
}
|
||||
if groupSize > 0 {
|
||||
group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0]))
|
||||
group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
|
||||
}
|
||||
// We call into Windows via makeAbsoluteSD, which sets up
|
||||
// pointers within absoluteSD that point to other chunks of memory
|
||||
// we pass into makeAbsoluteSD, and that happens outside the view of the GC.
|
||||
// We therefore take some care here to then verify the pointers are as we expect
|
||||
// and set them explicitly in view of the GC. See https://go.dev/issue/73199.
|
||||
// TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
|
||||
err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
|
||||
dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
|
||||
if err != nil {
|
||||
// Don't return absoluteSD, which might be partially initialized.
|
||||
return nil, err
|
||||
}
|
||||
// Before using any fields, verify absoluteSD is in the format we expect according to Windows.
|
||||
// See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
|
||||
absControl, _, err := absoluteSD.Control()
|
||||
if err != nil {
|
||||
panic("absoluteSD: " + err.Error())
|
||||
}
|
||||
if absControl&SE_SELF_RELATIVE != 0 {
|
||||
panic("absoluteSD not in absolute format")
|
||||
}
|
||||
if absoluteSD.dacl != dacl {
|
||||
panic("dacl pointer mismatch")
|
||||
}
|
||||
if absoluteSD.sacl != sacl {
|
||||
panic("sacl pointer mismatch")
|
||||
}
|
||||
if absoluteSD.owner != owner {
|
||||
panic("owner pointer mismatch")
|
||||
}
|
||||
if absoluteSD.group != group {
|
||||
panic("group pointer mismatch")
|
||||
}
|
||||
absoluteSD.dacl = dacl
|
||||
absoluteSD.sacl = sacl
|
||||
absoluteSD.owner = owner
|
||||
absoluteSD.group = group
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
6
vendor/golang.org/x/sys/windows/syscall_windows.go
generated
vendored
6
vendor/golang.org/x/sys/windows/syscall_windows.go
generated
vendored
|
|
@ -870,6 +870,7 @@ const socket_error = uintptr(^uint32(0))
|
|||
//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
|
||||
//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
|
||||
//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
|
||||
//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
|
||||
//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
|
||||
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
|
||||
//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
|
||||
|
|
@ -1698,8 +1699,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
|
|||
|
||||
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
|
||||
func (s *NTUnicodeString) Slice() []uint16 {
|
||||
slice := unsafe.Slice(s.Buffer, s.MaximumLength)
|
||||
return slice[:s.Length]
|
||||
// Note: this rounds the length down, if it happens
|
||||
// to (incorrectly) be odd. Probably safer than rounding up.
|
||||
return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
|
||||
}
|
||||
|
||||
func (s *NTUnicodeString) String() string {
|
||||
|
|
|
|||
212
vendor/golang.org/x/sys/windows/types_windows.go
generated
vendored
212
vendor/golang.org/x/sys/windows/types_windows.go
generated
vendored
|
|
@ -2700,6 +2700,8 @@ type CommTimeouts struct {
|
|||
|
||||
// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
|
||||
type NTUnicodeString struct {
|
||||
// Note: Length and MaximumLength are in *bytes*, not uint16s.
|
||||
// They should always be even.
|
||||
Length uint16
|
||||
MaximumLength uint16
|
||||
Buffer *uint16
|
||||
|
|
@ -3628,3 +3630,213 @@ const (
|
|||
KLF_NOTELLSHELL = 0x00000080
|
||||
KLF_SETFORPROCESS = 0x00000100
|
||||
)
|
||||
|
||||
// Virtual Key codes
|
||||
// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
|
||||
const (
|
||||
VK_LBUTTON = 0x01
|
||||
VK_RBUTTON = 0x02
|
||||
VK_CANCEL = 0x03
|
||||
VK_MBUTTON = 0x04
|
||||
VK_XBUTTON1 = 0x05
|
||||
VK_XBUTTON2 = 0x06
|
||||
VK_BACK = 0x08
|
||||
VK_TAB = 0x09
|
||||
VK_CLEAR = 0x0C
|
||||
VK_RETURN = 0x0D
|
||||
VK_SHIFT = 0x10
|
||||
VK_CONTROL = 0x11
|
||||
VK_MENU = 0x12
|
||||
VK_PAUSE = 0x13
|
||||
VK_CAPITAL = 0x14
|
||||
VK_KANA = 0x15
|
||||
VK_HANGEUL = 0x15
|
||||
VK_HANGUL = 0x15
|
||||
VK_IME_ON = 0x16
|
||||
VK_JUNJA = 0x17
|
||||
VK_FINAL = 0x18
|
||||
VK_HANJA = 0x19
|
||||
VK_KANJI = 0x19
|
||||
VK_IME_OFF = 0x1A
|
||||
VK_ESCAPE = 0x1B
|
||||
VK_CONVERT = 0x1C
|
||||
VK_NONCONVERT = 0x1D
|
||||
VK_ACCEPT = 0x1E
|
||||
VK_MODECHANGE = 0x1F
|
||||
VK_SPACE = 0x20
|
||||
VK_PRIOR = 0x21
|
||||
VK_NEXT = 0x22
|
||||
VK_END = 0x23
|
||||
VK_HOME = 0x24
|
||||
VK_LEFT = 0x25
|
||||
VK_UP = 0x26
|
||||
VK_RIGHT = 0x27
|
||||
VK_DOWN = 0x28
|
||||
VK_SELECT = 0x29
|
||||
VK_PRINT = 0x2A
|
||||
VK_EXECUTE = 0x2B
|
||||
VK_SNAPSHOT = 0x2C
|
||||
VK_INSERT = 0x2D
|
||||
VK_DELETE = 0x2E
|
||||
VK_HELP = 0x2F
|
||||
VK_LWIN = 0x5B
|
||||
VK_RWIN = 0x5C
|
||||
VK_APPS = 0x5D
|
||||
VK_SLEEP = 0x5F
|
||||
VK_NUMPAD0 = 0x60
|
||||
VK_NUMPAD1 = 0x61
|
||||
VK_NUMPAD2 = 0x62
|
||||
VK_NUMPAD3 = 0x63
|
||||
VK_NUMPAD4 = 0x64
|
||||
VK_NUMPAD5 = 0x65
|
||||
VK_NUMPAD6 = 0x66
|
||||
VK_NUMPAD7 = 0x67
|
||||
VK_NUMPAD8 = 0x68
|
||||
VK_NUMPAD9 = 0x69
|
||||
VK_MULTIPLY = 0x6A
|
||||
VK_ADD = 0x6B
|
||||
VK_SEPARATOR = 0x6C
|
||||
VK_SUBTRACT = 0x6D
|
||||
VK_DECIMAL = 0x6E
|
||||
VK_DIVIDE = 0x6F
|
||||
VK_F1 = 0x70
|
||||
VK_F2 = 0x71
|
||||
VK_F3 = 0x72
|
||||
VK_F4 = 0x73
|
||||
VK_F5 = 0x74
|
||||
VK_F6 = 0x75
|
||||
VK_F7 = 0x76
|
||||
VK_F8 = 0x77
|
||||
VK_F9 = 0x78
|
||||
VK_F10 = 0x79
|
||||
VK_F11 = 0x7A
|
||||
VK_F12 = 0x7B
|
||||
VK_F13 = 0x7C
|
||||
VK_F14 = 0x7D
|
||||
VK_F15 = 0x7E
|
||||
VK_F16 = 0x7F
|
||||
VK_F17 = 0x80
|
||||
VK_F18 = 0x81
|
||||
VK_F19 = 0x82
|
||||
VK_F20 = 0x83
|
||||
VK_F21 = 0x84
|
||||
VK_F22 = 0x85
|
||||
VK_F23 = 0x86
|
||||
VK_F24 = 0x87
|
||||
VK_NUMLOCK = 0x90
|
||||
VK_SCROLL = 0x91
|
||||
VK_OEM_NEC_EQUAL = 0x92
|
||||
VK_OEM_FJ_JISHO = 0x92
|
||||
VK_OEM_FJ_MASSHOU = 0x93
|
||||
VK_OEM_FJ_TOUROKU = 0x94
|
||||
VK_OEM_FJ_LOYA = 0x95
|
||||
VK_OEM_FJ_ROYA = 0x96
|
||||
VK_LSHIFT = 0xA0
|
||||
VK_RSHIFT = 0xA1
|
||||
VK_LCONTROL = 0xA2
|
||||
VK_RCONTROL = 0xA3
|
||||
VK_LMENU = 0xA4
|
||||
VK_RMENU = 0xA5
|
||||
VK_BROWSER_BACK = 0xA6
|
||||
VK_BROWSER_FORWARD = 0xA7
|
||||
VK_BROWSER_REFRESH = 0xA8
|
||||
VK_BROWSER_STOP = 0xA9
|
||||
VK_BROWSER_SEARCH = 0xAA
|
||||
VK_BROWSER_FAVORITES = 0xAB
|
||||
VK_BROWSER_HOME = 0xAC
|
||||
VK_VOLUME_MUTE = 0xAD
|
||||
VK_VOLUME_DOWN = 0xAE
|
||||
VK_VOLUME_UP = 0xAF
|
||||
VK_MEDIA_NEXT_TRACK = 0xB0
|
||||
VK_MEDIA_PREV_TRACK = 0xB1
|
||||
VK_MEDIA_STOP = 0xB2
|
||||
VK_MEDIA_PLAY_PAUSE = 0xB3
|
||||
VK_LAUNCH_MAIL = 0xB4
|
||||
VK_LAUNCH_MEDIA_SELECT = 0xB5
|
||||
VK_LAUNCH_APP1 = 0xB6
|
||||
VK_LAUNCH_APP2 = 0xB7
|
||||
VK_OEM_1 = 0xBA
|
||||
VK_OEM_PLUS = 0xBB
|
||||
VK_OEM_COMMA = 0xBC
|
||||
VK_OEM_MINUS = 0xBD
|
||||
VK_OEM_PERIOD = 0xBE
|
||||
VK_OEM_2 = 0xBF
|
||||
VK_OEM_3 = 0xC0
|
||||
VK_OEM_4 = 0xDB
|
||||
VK_OEM_5 = 0xDC
|
||||
VK_OEM_6 = 0xDD
|
||||
VK_OEM_7 = 0xDE
|
||||
VK_OEM_8 = 0xDF
|
||||
VK_OEM_AX = 0xE1
|
||||
VK_OEM_102 = 0xE2
|
||||
VK_ICO_HELP = 0xE3
|
||||
VK_ICO_00 = 0xE4
|
||||
VK_PROCESSKEY = 0xE5
|
||||
VK_ICO_CLEAR = 0xE6
|
||||
VK_OEM_RESET = 0xE9
|
||||
VK_OEM_JUMP = 0xEA
|
||||
VK_OEM_PA1 = 0xEB
|
||||
VK_OEM_PA2 = 0xEC
|
||||
VK_OEM_PA3 = 0xED
|
||||
VK_OEM_WSCTRL = 0xEE
|
||||
VK_OEM_CUSEL = 0xEF
|
||||
VK_OEM_ATTN = 0xF0
|
||||
VK_OEM_FINISH = 0xF1
|
||||
VK_OEM_COPY = 0xF2
|
||||
VK_OEM_AUTO = 0xF3
|
||||
VK_OEM_ENLW = 0xF4
|
||||
VK_OEM_BACKTAB = 0xF5
|
||||
VK_ATTN = 0xF6
|
||||
VK_CRSEL = 0xF7
|
||||
VK_EXSEL = 0xF8
|
||||
VK_EREOF = 0xF9
|
||||
VK_PLAY = 0xFA
|
||||
VK_ZOOM = 0xFB
|
||||
VK_NONAME = 0xFC
|
||||
VK_PA1 = 0xFD
|
||||
VK_OEM_CLEAR = 0xFE
|
||||
)
|
||||
|
||||
// Mouse button constants.
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
|
||||
RIGHTMOST_BUTTON_PRESSED = 0x0002
|
||||
FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
|
||||
FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
|
||||
FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
|
||||
)
|
||||
|
||||
// Control key state constaints.
|
||||
// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
CAPSLOCK_ON = 0x0080
|
||||
ENHANCED_KEY = 0x0100
|
||||
LEFT_ALT_PRESSED = 0x0002
|
||||
LEFT_CTRL_PRESSED = 0x0008
|
||||
NUMLOCK_ON = 0x0020
|
||||
RIGHT_ALT_PRESSED = 0x0001
|
||||
RIGHT_CTRL_PRESSED = 0x0004
|
||||
SCROLLLOCK_ON = 0x0040
|
||||
SHIFT_PRESSED = 0x0010
|
||||
)
|
||||
|
||||
// Mouse event record event flags.
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
MOUSE_MOVED = 0x0001
|
||||
DOUBLE_CLICK = 0x0002
|
||||
MOUSE_WHEELED = 0x0004
|
||||
MOUSE_HWHEELED = 0x0008
|
||||
)
|
||||
|
||||
// Input Record Event Types
|
||||
// https://learn.microsoft.com/en-us/windows/console/input-record-str
|
||||
const (
|
||||
FOCUS_EVENT = 0x0010
|
||||
KEY_EVENT = 0x0001
|
||||
MENU_EVENT = 0x0008
|
||||
MOUSE_EVENT = 0x0002
|
||||
WINDOW_BUFFER_SIZE_EVENT = 0x0004
|
||||
)
|
||||
|
|
|
|||
9
vendor/golang.org/x/sys/windows/zsyscall_windows.go
generated
vendored
9
vendor/golang.org/x/sys/windows/zsyscall_windows.go
generated
vendored
|
|
@ -511,6 +511,7 @@ var (
|
|||
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
|
||||
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
|
||||
procWSACleanup = modws2_32.NewProc("WSACleanup")
|
||||
procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW")
|
||||
procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
|
||||
|
|
@ -4391,6 +4392,14 @@ func WSACleanup() (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
|
||||
if r1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
|
||||
n = int32(r0)
|
||||
|
|
|
|||
15
vendor/gorm.io/gorm/.golangci.yml
generated
vendored
15
vendor/gorm.io/gorm/.golangci.yml
generated
vendored
|
|
@ -1,7 +1,9 @@
|
|||
version: "2"
|
||||
|
||||
linters:
|
||||
default: standard
|
||||
enable:
|
||||
- cyclop
|
||||
- exportloopref
|
||||
- gocritic
|
||||
- gosec
|
||||
- ineffassign
|
||||
|
|
@ -9,12 +11,9 @@ linters:
|
|||
- prealloc
|
||||
- unconvert
|
||||
- unparam
|
||||
- goimports
|
||||
- whitespace
|
||||
|
||||
linters-settings:
|
||||
whitespace:
|
||||
multi-func: true
|
||||
goimports:
|
||||
local-prefixes: gorm.io/gorm
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gofumpt
|
||||
- goimports
|
||||
|
|
|
|||
128
vendor/gorm.io/gorm/CODE_OF_CONDUCT.md
generated
vendored
Normal file
128
vendor/gorm.io/gorm/CODE_OF_CONDUCT.md
generated
vendored
Normal file
|
|
@ -0,0 +1,128 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to participate in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community includes:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period. This
|
||||
includes avoiding interactions in community spaces and external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any interaction or public
|
||||
communication with the community for a specified period. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
2
vendor/gorm.io/gorm/LICENSE
generated
vendored
2
vendor/gorm.io/gorm/LICENSE
generated
vendored
|
|
@ -1,6 +1,6 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>
|
||||
Copyright (c) 2013-present Jinzhu <wosmvp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
10
vendor/gorm.io/gorm/callbacks/associations.go
generated
vendored
10
vendor/gorm.io/gorm/callbacks/associations.go
generated
vendored
|
|
@ -47,7 +47,7 @@ func SaveBeforeAssociations(create bool) func(db *gorm.DB) {
|
|||
)
|
||||
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
|
|
@ -126,7 +126,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
)
|
||||
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
|
|
@ -195,7 +195,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
fieldType := rel.Field.IndirectFieldType.Elem()
|
||||
isPtr := fieldType.Kind() == reflect.Ptr
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
identityMap := map[string]bool{}
|
||||
|
|
@ -268,11 +268,11 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
fieldType := rel.Field.IndirectFieldType.Elem()
|
||||
isPtr := fieldType.Kind() == reflect.Ptr
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
distinctElems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
joins := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(rel.JoinTable.ModelType)), 0, 10)
|
||||
joins := reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(rel.JoinTable.ModelType)), 0, 10)
|
||||
objs := []reflect.Value{}
|
||||
|
||||
appendToJoins := func(obj reflect.Value, elem reflect.Value) {
|
||||
|
|
|
|||
9
vendor/gorm.io/gorm/clause/returning.go
generated
vendored
9
vendor/gorm.io/gorm/clause/returning.go
generated
vendored
|
|
@ -26,9 +26,12 @@ func (returning Returning) Build(builder Builder) {
|
|||
|
||||
// MergeClause merge order by clauses
|
||||
func (returning Returning) MergeClause(clause *Clause) {
|
||||
if v, ok := clause.Expression.(Returning); ok {
|
||||
returning.Columns = append(v.Columns, returning.Columns...)
|
||||
if v, ok := clause.Expression.(Returning); ok && len(returning.Columns) > 0 {
|
||||
if v.Columns != nil {
|
||||
returning.Columns = append(v.Columns, returning.Columns...)
|
||||
} else {
|
||||
returning.Columns = nil
|
||||
}
|
||||
}
|
||||
|
||||
clause.Expression = returning
|
||||
}
|
||||
|
|
|
|||
6
vendor/gorm.io/gorm/finisher_api.go
generated
vendored
6
vendor/gorm.io/gorm/finisher_api.go
generated
vendored
|
|
@ -4,6 +4,7 @@ import (
|
|||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/maphash"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
|
|
@ -623,14 +624,15 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
|
|||
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil {
|
||||
// nested transaction
|
||||
if !db.DisableNestedTransaction {
|
||||
err = db.SavePoint(fmt.Sprintf("sp%p", fc)).Error
|
||||
spID := new(maphash.Hash).Sum64()
|
||||
err = db.SavePoint(fmt.Sprintf("sp%d", spID)).Error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
// Make sure to rollback when panic, Block error or Commit error
|
||||
if panicked || err != nil {
|
||||
db.RollbackTo(fmt.Sprintf("sp%p", fc))
|
||||
db.RollbackTo(fmt.Sprintf("sp%d", spID))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
|
|
|||
18
vendor/gorm.io/gorm/gorm.go
generated
vendored
18
vendor/gorm.io/gorm/gorm.go
generated
vendored
|
|
@ -34,6 +34,11 @@ type Config struct {
|
|||
DryRun bool
|
||||
// PrepareStmt executes the given query in cached statement
|
||||
PrepareStmt bool
|
||||
// PrepareStmt cache support LRU expired,
|
||||
// default maxsize=int64 Max value and ttl=1h
|
||||
PrepareStmtMaxSize int
|
||||
PrepareStmtTTL time.Duration
|
||||
|
||||
// DisableAutomaticPing
|
||||
DisableAutomaticPing bool
|
||||
// DisableForeignKeyConstraintWhenMigrating
|
||||
|
|
@ -105,6 +110,8 @@ type DB struct {
|
|||
type Session struct {
|
||||
DryRun bool
|
||||
PrepareStmt bool
|
||||
PrepareStmtMaxSize int
|
||||
PrepareStmtTTL time.Duration
|
||||
NewDB bool
|
||||
Initialized bool
|
||||
SkipHooks bool
|
||||
|
|
@ -183,16 +190,21 @@ func Open(dialector Dialector, opts ...Option) (db *DB, err error) {
|
|||
|
||||
if config.Dialector != nil {
|
||||
err = config.Dialector.Initialize(db)
|
||||
|
||||
if err != nil {
|
||||
if db, _ := db.DB(); db != nil {
|
||||
_ = db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if config.TranslateError {
|
||||
if _, ok := db.Dialector.(ErrorTranslator); !ok {
|
||||
config.Logger.Warn(context.Background(), "The TranslateError option is enabled, but the Dialector %s does not implement ErrorTranslator.", db.Dialector.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config.PrepareStmt {
|
||||
preparedStmt := NewPreparedStmtDB(db.ConnPool)
|
||||
preparedStmt := NewPreparedStmtDB(db.ConnPool, config.PrepareStmtMaxSize, config.PrepareStmtTTL)
|
||||
db.cacheStore.Store(preparedStmtDBKey, preparedStmt)
|
||||
db.ConnPool = preparedStmt
|
||||
}
|
||||
|
|
@ -263,7 +275,7 @@ func (db *DB) Session(config *Session) *DB {
|
|||
if v, ok := db.cacheStore.Load(preparedStmtDBKey); ok {
|
||||
preparedStmt = v.(*PreparedStmtDB)
|
||||
} else {
|
||||
preparedStmt = NewPreparedStmtDB(db.ConnPool)
|
||||
preparedStmt = NewPreparedStmtDB(db.ConnPool, config.PrepareStmtMaxSize, config.PrepareStmtTTL)
|
||||
db.cacheStore.Store(preparedStmtDBKey, preparedStmt)
|
||||
}
|
||||
|
||||
|
|
|
|||
493
vendor/gorm.io/gorm/internal/lru/lru.go
generated
vendored
Normal file
493
vendor/gorm.io/gorm/internal/lru/lru.go
generated
vendored
Normal file
|
|
@ -0,0 +1,493 @@
|
|||
package lru
|
||||
|
||||
// golang -lru
|
||||
// https://github.com/hashicorp/golang-lru
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
type EvictCallback[K comparable, V any] func(key K, value V)
|
||||
|
||||
// LRU implements a thread-safe LRU with expirable entries.
|
||||
type LRU[K comparable, V any] struct {
|
||||
size int
|
||||
evictList *LruList[K, V]
|
||||
items map[K]*Entry[K, V]
|
||||
onEvict EvictCallback[K, V]
|
||||
|
||||
// expirable options
|
||||
mu sync.Mutex
|
||||
ttl time.Duration
|
||||
done chan struct{}
|
||||
|
||||
// buckets for expiration
|
||||
buckets []bucket[K, V]
|
||||
// uint8 because it's number between 0 and numBuckets
|
||||
nextCleanupBucket uint8
|
||||
}
|
||||
|
||||
// bucket is a container for holding entries to be expired
|
||||
type bucket[K comparable, V any] struct {
|
||||
entries map[K]*Entry[K, V]
|
||||
newestEntry time.Time
|
||||
}
|
||||
|
||||
// noEvictionTTL - very long ttl to prevent eviction
|
||||
const noEvictionTTL = time.Hour * 24 * 365 * 10
|
||||
|
||||
// because of uint8 usage for nextCleanupBucket, should not exceed 256.
|
||||
// casting it as uint8 explicitly requires type conversions in multiple places
|
||||
const numBuckets = 100
|
||||
|
||||
// NewLRU returns a new thread-safe cache with expirable entries.
|
||||
//
|
||||
// Size parameter set to 0 makes cache of unlimited size, e.g. turns LRU mechanism off.
|
||||
//
|
||||
// Providing 0 TTL turns expiring off.
|
||||
//
|
||||
// Delete expired entries every 1/100th of ttl value. Goroutine which deletes expired entries runs indefinitely.
|
||||
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V], ttl time.Duration) *LRU[K, V] {
|
||||
if size < 0 {
|
||||
size = 0
|
||||
}
|
||||
if ttl <= 0 {
|
||||
ttl = noEvictionTTL
|
||||
}
|
||||
|
||||
res := LRU[K, V]{
|
||||
ttl: ttl,
|
||||
size: size,
|
||||
evictList: NewList[K, V](),
|
||||
items: make(map[K]*Entry[K, V]),
|
||||
onEvict: onEvict,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
// initialize the buckets
|
||||
res.buckets = make([]bucket[K, V], numBuckets)
|
||||
for i := 0; i < numBuckets; i++ {
|
||||
res.buckets[i] = bucket[K, V]{entries: make(map[K]*Entry[K, V])}
|
||||
}
|
||||
|
||||
// enable deleteExpired() running in separate goroutine for cache with non-zero TTL
|
||||
//
|
||||
// Important: done channel is never closed, so deleteExpired() goroutine will never exit,
|
||||
// it's decided to add functionality to close it in the version later than v2.
|
||||
if res.ttl != noEvictionTTL {
|
||||
go func(done <-chan struct{}) {
|
||||
ticker := time.NewTicker(res.ttl / numBuckets)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-ticker.C:
|
||||
res.deleteExpired()
|
||||
}
|
||||
}
|
||||
}(res.done)
|
||||
}
|
||||
return &res
|
||||
}
|
||||
|
||||
// Purge clears the cache completely.
|
||||
// onEvict is called for each evicted key.
|
||||
func (c *LRU[K, V]) Purge() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.Value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
for _, b := range c.buckets {
|
||||
for _, ent := range b.entries {
|
||||
delete(b.entries, ent.Key)
|
||||
}
|
||||
}
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
// Returns false if there was no eviction: the item was already in the cache,
|
||||
// or the size was not exceeded.
|
||||
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
now := time.Now()
|
||||
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
c.removeFromBucket(ent) // remove the entry from its current bucket as expiresAt is renewed
|
||||
ent.Value = value
|
||||
ent.ExpiresAt = now.Add(c.ttl)
|
||||
c.addToBucket(ent)
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := c.evictList.PushFrontExpirable(key, value, now.Add(c.ttl))
|
||||
c.items[key] = ent
|
||||
c.addToBucket(ent) // adds the entry to the appropriate bucket and sets entry.expireBucket
|
||||
|
||||
evict := c.size > 0 && c.evictList.Length() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
}
|
||||
return evict
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var ent *Entry[K, V]
|
||||
if ent, ok = c.items[key]; ok {
|
||||
// Expired item check
|
||||
if time.Now().After(ent.ExpiresAt) {
|
||||
return value, false
|
||||
}
|
||||
c.evictList.MoveToFront(ent)
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
||||
// or deleting it for being stale.
|
||||
func (c *LRU[K, V]) Contains(key K) (ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
_, ok = c.items[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var ent *Entry[K, V]
|
||||
if ent, ok = c.items[key]; ok {
|
||||
// Expired item check
|
||||
if time.Now().After(ent.ExpiresAt) {
|
||||
return value, false
|
||||
}
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache, returning if the
|
||||
// key was contained.
|
||||
func (c *LRU[K, V]) Remove(key K) bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.removeElement(ent)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
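// KeyValues returns a map of the keys and values in the cache.
// Expired entries are filtered out.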
func (c *LRU[K, V]) KeyValues() map[K]V {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
maps := make(map[K]V)
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
maps[ent.Key] = ent.Value
|
||||
// keys = append(keys, ent.Key)
|
||||
}
|
||||
return maps
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
// Expired entries are filtered out.
|
||||
func (c *LRU[K, V]) Keys() []K {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
keys := make([]K, 0, len(c.items))
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
keys = append(keys, ent.Key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Values returns a slice of the values in the cache, from oldest to newest.
|
||||
// Expired entries are filtered out.
|
||||
func (c *LRU[K, V]) Values() []V {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
values := make([]V, 0, len(c.items))
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
values = append(values, ent.Value)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU[K, V]) Len() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.evictList.Length()
|
||||
}
|
||||
|
||||
// Resize changes the cache size. Size of 0 means unlimited.
|
||||
func (c *LRU[K, V]) Resize(size int) (evicted int) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if size <= 0 {
|
||||
c.size = 0
|
||||
return 0
|
||||
}
|
||||
diff := c.evictList.Length() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// Close destroys cleanup goroutine. To clean up the cache, run Purge() before Close().
|
||||
// func (c *LRU[K, V]) Close() {
|
||||
// c.mu.Lock()
|
||||
// defer c.mu.Unlock()
|
||||
// select {
|
||||
// case <-c.done:
|
||||
// return
|
||||
// default:
|
||||
// }
|
||||
// close(c.done)
|
||||
// }
|
||||
|
||||
// removeOldest removes the oldest item from the cache. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeOldest() {
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeElement(e *Entry[K, V]) {
|
||||
c.evictList.Remove(e)
|
||||
delete(c.items, e.Key)
|
||||
c.removeFromBucket(e)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(e.Key, e.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteExpired deletes expired records from the oldest bucket, waiting for the newest entry
|
||||
// in it to expire first.
|
||||
func (c *LRU[K, V]) deleteExpired() {
|
||||
c.mu.Lock()
|
||||
bucketIdx := c.nextCleanupBucket
|
||||
timeToExpire := time.Until(c.buckets[bucketIdx].newestEntry)
|
||||
// wait for newest entry to expire before cleanup without holding lock
|
||||
if timeToExpire > 0 {
|
||||
c.mu.Unlock()
|
||||
time.Sleep(timeToExpire)
|
||||
c.mu.Lock()
|
||||
}
|
||||
for _, ent := range c.buckets[bucketIdx].entries {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
c.nextCleanupBucket = (c.nextCleanupBucket + 1) % numBuckets
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// addToBucket adds entry to expire bucket so that it will be cleaned up when the time comes. Has to be called with lock!
|
||||
func (c *LRU[K, V]) addToBucket(e *Entry[K, V]) {
|
||||
bucketID := (numBuckets + c.nextCleanupBucket - 1) % numBuckets
|
||||
e.ExpireBucket = bucketID
|
||||
c.buckets[bucketID].entries[e.Key] = e
|
||||
if c.buckets[bucketID].newestEntry.Before(e.ExpiresAt) {
|
||||
c.buckets[bucketID].newestEntry = e.ExpiresAt
|
||||
}
|
||||
}
|
||||
|
||||
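For intuition on the bucket rotation: the cleanup ticker fires every ttl/numBuckets and drains one bucket per tick, while addToBucket files new entries one slot behind the cleanup cursor, so they are drained roughly one full ttl later. A standalone sketch of that index arithmetic (illustrative only, not part of the package):

package main

import "fmt"

// numBuckets mirrors the constant above; nextCleanupBucket is the rotating cleanup cursor.
const numBuckets = 100

func main() {
	for _, next := range []uint8{0, 1, 99} {
		// New entries land one slot behind the cursor, i.e. in the bucket
		// that will be drained last (about one full ttl from now).
		target := (numBuckets + next - 1) % numBuckets
		fmt.Printf("nextCleanupBucket=%d -> new entries land in bucket %d\n", next, target)
	}
}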
// removeFromBucket removes the entry from its corresponding bucket. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeFromBucket(e *Entry[K, V]) {
|
||||
delete(c.buckets[e.ExpireBucket].entries, e.Key)
|
||||
}
|
||||
|
||||
// Cap returns the capacity of the cache
|
||||
func (c *LRU[K, V]) Cap() int {
|
||||
return c.size
|
||||
}
|
||||
|
||||
// Entry is an LRU Entry
|
||||
type Entry[K comparable, V any] struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *Entry[K, V]
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *LruList[K, V]
|
||||
|
||||
// The LRU Key of this element.
|
||||
Key K
|
||||
|
||||
// The Value stored with this element.
|
||||
Value V
|
||||
|
||||
// The time this element would be cleaned up, optional
|
||||
ExpiresAt time.Time
|
||||
|
||||
// The expiry bucket item was put in, optional
|
||||
ExpireBucket uint8
|
||||
}
|
||||
|
||||
// PrevEntry returns the previous list element or nil.
|
||||
func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LruList represents a doubly linked list.
|
||||
// The zero Value for LruList is an empty list ready to use.
|
||||
type LruList[K comparable, V any] struct {
|
||||
root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list Length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *LruList[K, V]) Init() *LruList[K, V] {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewList returns an initialized list.
|
||||
func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() }
|
||||
|
||||
// Length returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *LruList[K, V]) Length() int { return l.len }
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *LruList[K, V]) Back() *Entry[K, V] {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List Value.
|
||||
func (l *LruList[K, V]) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Entry{Key: k, Value: v, ExpiresAt: expiresAt}, at).
|
||||
func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
|
||||
return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
|
||||
}
|
||||
|
||||
// Remove removes e from its list, decrements l.len
|
||||
func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// move moves e next to at.
|
||||
func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
|
||||
if e == at {
|
||||
return
|
||||
}
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, time.Time{}, &l.root)
|
||||
}
|
||||
|
||||
// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, expiresAt, &l.root)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.move(e, &l.root)
|
||||
}
|
||||
182
vendor/gorm.io/gorm/internal/stmt_store/stmt_store.go
generated
vendored
Normal file
182
vendor/gorm.io/gorm/internal/stmt_store/stmt_store.go
generated
vendored
Normal file
|
|
@ -0,0 +1,182 @@
|
|||
package stmt_store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm/internal/lru"
|
||||
)
|
||||
|
||||
type Stmt struct {
|
||||
*sql.Stmt
|
||||
Transaction bool
|
||||
prepared chan struct{}
|
||||
prepareErr error
|
||||
}
|
||||
|
||||
func (stmt *Stmt) Error() error {
|
||||
return stmt.prepareErr
|
||||
}
|
||||
|
||||
func (stmt *Stmt) Close() error {
|
||||
<-stmt.prepared
|
||||
|
||||
if stmt.Stmt != nil {
|
||||
return stmt.Stmt.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Store defines an interface for managing the caching operations of SQL statements (Stmt).
|
||||
// This interface provides methods for creating new statements, retrieving all cache keys,
|
||||
// getting cached statements, setting cached statements, and deleting cached statements.
|
||||
type Store interface {
|
||||
// New creates a new Stmt object and caches it.
|
||||
// Parameters:
|
||||
// ctx: The context for the request, which can carry deadlines, cancellation signals, etc.
|
||||
// key: The key representing the SQL query, used for caching and preparing the statement.
|
||||
// isTransaction: Indicates whether this operation is part of a transaction, which may affect the caching strategy.
|
||||
// connPool: A connection pool that provides database connections.
|
||||
// locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
|
||||
// Returns:
|
||||
// *Stmt: A newly created statement object for executing SQL operations.
|
||||
// error: An error if the statement preparation fails.
|
||||
New(ctx context.Context, key string, isTransaction bool, connPool ConnPool, locker sync.Locker) (*Stmt, error)
|
||||
|
||||
// Keys returns a slice of all cache keys in the store.
|
||||
Keys() []string
|
||||
|
||||
// Get retrieves a Stmt object from the store based on the given key.
|
||||
// Parameters:
|
||||
// key: The key used to look up the Stmt object.
|
||||
// Returns:
|
||||
// *Stmt: The found Stmt object, or nil if not found.
|
||||
// bool: Indicates whether the corresponding Stmt object was successfully found.
|
||||
Get(key string) (*Stmt, bool)
|
||||
|
||||
// Set stores the given Stmt object in the store and associates it with the specified key.
|
||||
// Parameters:
|
||||
// key: The key used to associate the Stmt object.
|
||||
// value: The Stmt object to be stored.
|
||||
Set(key string, value *Stmt)
|
||||
|
||||
// Delete removes the Stmt object corresponding to the specified key from the store.
|
||||
// Parameters:
|
||||
// key: The key associated with the Stmt object to be deleted.
|
||||
Delete(key string)
|
||||
}
|
||||
|
||||
// defaultMaxSize defines the default maximum capacity of the cache.
|
||||
// Its value is the maximum value of the int64 type, which means that when the cache size is not specified,
|
||||
// the cache can theoretically store as many elements as possible.
|
||||
// (1 << 63) - 1 is the maximum value that an int64 type can represent.
|
||||
const (
|
||||
defaultMaxSize = (1 << 63) - 1
|
||||
// defaultTTL defines the default time-to-live (TTL) for each cache entry.
|
||||
// When the TTL for cache entries is not specified, each cache entry will expire after 24 hours.
|
||||
defaultTTL = time.Hour * 24
|
||||
)
|
||||
|
||||
// New creates and returns a new Store instance.
|
||||
//
|
||||
// Parameters:
|
||||
// - size: The maximum capacity of the cache. If the provided size is less than or equal to 0,
|
||||
// it defaults to defaultMaxSize.
|
||||
// - ttl: The time-to-live duration for each cache entry. If the provided ttl is less than or equal to 0,
|
||||
// it defaults to defaultTTL.
|
||||
//
|
||||
// This function defines an onEvicted callback that is invoked when a cache entry is evicted.
|
||||
// The callback ensures that if the evicted value (v) is not nil, its Close method is called asynchronously
|
||||
// to release associated resources.
|
||||
//
|
||||
// Returns:
|
||||
// - A Store instance implemented by lruStore, which internally uses an LRU cache with the specified size,
|
||||
// eviction callback, and TTL.
|
||||
func New(size int, ttl time.Duration) Store {
|
||||
if size <= 0 {
|
||||
size = defaultMaxSize
|
||||
}
|
||||
|
||||
if ttl <= 0 {
|
||||
ttl = defaultTTL
|
||||
}
|
||||
|
||||
onEvicted := func(k string, v *Stmt) {
|
||||
if v != nil {
|
||||
go v.Close()
|
||||
}
|
||||
}
|
||||
return &lruStore{lru: lru.NewLRU[string, *Stmt](size, onEvicted, ttl)}
|
||||
}
|
||||
|
||||
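A hedged sketch of how this statement store is meant to be driven. The package is internal to gorm, so this only compiles inside the gorm module (or against a copy of the package); the driver choice, DSN, size, and TTL are placeholders.

package main

import (
	"context"
	"database/sql"
	"log"
	"sync"
	"time"

	_ "github.com/go-sql-driver/mysql" // placeholder driver choice
	"gorm.io/gorm/internal/stmt_store" // internal package: only importable inside the gorm module
)

func main() {
	db, err := sql.Open("mysql", "user:pass@/dbname") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// At most 512 cached statements; each entry is dropped roughly
	// 10 minutes after it was stored (the underlying LRU's ttl).
	store := stmt_store.New(512, 10*time.Minute)

	var mu sync.Mutex
	mu.Lock() // New unlocks the locker once the placeholder entry is cached.

	stmt, err := store.New(context.Background(), "SELECT 1", false, db, &mu)
	if err != nil {
		log.Fatal(err)
	}
	defer stmt.Close()
}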
type lruStore struct {
|
||||
lru *lru.LRU[string, *Stmt]
|
||||
}
|
||||
|
||||
func (s *lruStore) Keys() []string {
|
||||
return s.lru.Keys()
|
||||
}
|
||||
|
||||
func (s *lruStore) Get(key string) (*Stmt, bool) {
|
||||
stmt, ok := s.lru.Get(key)
|
||||
if ok && stmt != nil {
|
||||
<-stmt.prepared
|
||||
}
|
||||
return stmt, ok
|
||||
}
|
||||
|
||||
func (s *lruStore) Set(key string, value *Stmt) {
|
||||
s.lru.Add(key, value)
|
||||
}
|
||||
|
||||
func (s *lruStore) Delete(key string) {
|
||||
s.lru.Remove(key)
|
||||
}
|
||||
|
||||
type ConnPool interface {
|
||||
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
||||
}
|
||||
|
||||
// New creates a new Stmt object for executing SQL queries.
|
||||
// It caches the Stmt object for future use and handles preparation and error states.
|
||||
// Parameters:
|
||||
//
|
||||
// ctx: Context for the request, used to carry deadlines, cancellation signals, etc.
|
||||
// key: The key representing the SQL query, used for caching and preparing the statement.
|
||||
// isTransaction: Indicates whether this operation is part of a transaction, affecting cache strategy.
|
||||
// conn: A connection pool that provides database connections.
|
||||
// locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
|
||||
//
|
||||
// Returns:
|
||||
//
|
||||
// *Stmt: A newly created statement object for executing SQL operations.
|
||||
// error: An error if the statement preparation fails.
|
||||
func (s *lruStore) New(ctx context.Context, key string, isTransaction bool, conn ConnPool, locker sync.Locker) (_ *Stmt, err error) {
|
||||
// Create a Stmt object and set its Transaction property.
|
||||
// The prepared channel is used to synchronize the statement preparation state.
|
||||
cacheStmt := &Stmt{
|
||||
Transaction: isTransaction,
|
||||
prepared: make(chan struct{}),
|
||||
}
|
||||
// Cache the Stmt object with the associated key.
|
||||
s.Set(key, cacheStmt)
|
||||
// Unlock after completing initialization to prevent deadlocks.
|
||||
locker.Unlock()
|
||||
|
||||
// Ensure the prepared channel is closed after the function execution completes.
|
||||
defer close(cacheStmt.prepared)
|
||||
|
||||
// Prepare the SQL statement using the provided connection.
|
||||
cacheStmt.Stmt, err = conn.PrepareContext(ctx, key)
|
||||
if err != nil {
|
||||
// If statement preparation fails, record the error and remove the invalid Stmt object from the cache.
|
||||
cacheStmt.prepareErr = err
|
||||
s.Delete(key)
|
||||
return &Stmt{}, err
|
||||
}
|
||||
|
||||
// Return the successfully prepared Stmt object.
|
||||
return cacheStmt, nil
|
||||
}
|
||||
12
vendor/gorm.io/gorm/logger/logger.go
generated
vendored
12
vendor/gorm.io/gorm/logger/logger.go
generated
vendored
|
|
@ -80,6 +80,11 @@ var (
|
|||
})
|
||||
// Recorder logger records running SQL into a recorder instance
|
||||
Recorder = traceRecorder{Interface: Default, BeginAt: time.Now()}
|
||||
|
||||
// RecorderParamsFilter defaults to a no-op and can be overridden by a different implementation
|
||||
RecorderParamsFilter = func(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
|
||||
return sql, params
|
||||
}
|
||||
)
|
||||
|
||||
// New initializes the logger
|
||||
|
|
@ -211,3 +216,10 @@ func (l *traceRecorder) Trace(ctx context.Context, begin time.Time, fc func() (s
|
|||
l.SQL, l.RowsAffected = fc()
|
||||
l.Err = err
|
||||
}
|
||||
|
||||
func (l *traceRecorder) ParamsFilter(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
|
||||
if RecorderParamsFilter == nil {
|
||||
return sql, params
|
||||
}
|
||||
return RecorderParamsFilter(ctx, sql, params...)
|
||||
}
|
||||
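A short, hedged sketch of overriding the newly exported RecorderParamsFilter hook shown in the hunks above, e.g. to redact recorded parameters. The redaction below is an illustration, not a recommended default.

package main

import (
	"context"

	"gorm.io/gorm/logger"
)

func main() {
	// Drop parameter values before the trace recorder stores them.
	logger.RecorderParamsFilter = func(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
		return sql, nil
	}
}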
|
|
|
|||
4
vendor/gorm.io/gorm/migrator/migrator.go
generated
vendored
4
vendor/gorm.io/gorm/migrator/migrator.go
generated
vendored
|
|
@ -524,8 +524,8 @@ func (m Migrator) MigrateColumn(value interface{}, field *schema.Field, columnTy
|
|||
|
||||
// check nullable
|
||||
if nullable, ok := columnType.Nullable(); ok && nullable == field.NotNull {
|
||||
// not primary key & database is nullable
|
||||
if !field.PrimaryKey && nullable {
|
||||
// not primary key & current database is non-nullable(to be nullable)
|
||||
if !field.PrimaryKey && !nullable {
|
||||
alterColumn = true
|
||||
}
|
||||
}
|
||||
|
|
|
|||
144
vendor/gorm.io/gorm/prepare_stmt.go
generated
vendored
144
vendor/gorm.io/gorm/prepare_stmt.go
generated
vendored
|
|
@ -7,29 +7,35 @@ import (
|
|||
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm/internal/stmt_store"
|
||||
)
|
||||
|
||||
type Stmt struct {
|
||||
*sql.Stmt
|
||||
Transaction bool
|
||||
prepared chan struct{}
|
||||
prepareErr error
|
||||
}
|
||||
|
||||
type PreparedStmtDB struct {
|
||||
Stmts map[string]*Stmt
|
||||
Stmts stmt_store.Store
|
||||
Mux *sync.RWMutex
|
||||
ConnPool
|
||||
}
|
||||
|
||||
func NewPreparedStmtDB(connPool ConnPool) *PreparedStmtDB {
|
||||
// NewPreparedStmtDB creates and initializes a new instance of PreparedStmtDB.
|
||||
//
|
||||
// Parameters:
|
||||
// - connPool: A connection pool that implements the ConnPool interface, used for managing database connections.
|
||||
// - maxSize: The maximum number of prepared statements that can be stored in the statement store.
|
||||
// - ttl: The time-to-live duration for each prepared statement in the store. Statements older than this duration will be automatically removed.
|
||||
//
|
||||
// Returns:
|
||||
// - A pointer to a PreparedStmtDB instance, which manages prepared statements using the provided connection pool and configuration.
|
||||
func NewPreparedStmtDB(connPool ConnPool, maxSize int, ttl time.Duration) *PreparedStmtDB {
|
||||
return &PreparedStmtDB{
|
||||
ConnPool: connPool,
|
||||
Stmts: make(map[string]*Stmt),
|
||||
Mux: &sync.RWMutex{},
|
||||
ConnPool: connPool, // Assigns the provided connection pool to manage database connections.
|
||||
Stmts: stmt_store.New(maxSize, ttl), // Initializes a new statement store with the specified maximum size and TTL.
|
||||
Mux: &sync.RWMutex{}, // Sets up a read-write mutex for synchronizing access to the statement store.
|
||||
}
|
||||
}
|
||||
|
||||
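A hedged sketch of the new constructor signature in isolation. gorm normally wires PreparedStmtDB up itself when prepared statements are enabled; the driver choice, DSN, and limits below are placeholders.

package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql" // placeholder driver choice
	"gorm.io/gorm"
)

func main() {
	sqlDB, err := sql.Open("mysql", "user:pass@/dbname") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}

	// Cache at most 1024 prepared statements, each kept for up to an hour.
	stmtDB := gorm.NewPreparedStmtDB(sqlDB, 1024, time.Hour)
	defer stmtDB.Close()
}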
// GetDBConn returns the underlying *sql.DB connection
|
||||
func (db *PreparedStmtDB) GetDBConn() (*sql.DB, error) {
|
||||
if sqldb, ok := db.ConnPool.(*sql.DB); ok {
|
||||
return sqldb, nil
|
||||
|
|
@ -42,98 +48,41 @@ func (db *PreparedStmtDB) GetDBConn() (*sql.DB, error) {
|
|||
return nil, ErrInvalidDB
|
||||
}
|
||||
|
||||
// Close closes all prepared statements in the store
|
||||
func (db *PreparedStmtDB) Close() {
|
||||
db.Mux.Lock()
|
||||
defer db.Mux.Unlock()
|
||||
|
||||
for _, stmt := range db.Stmts {
|
||||
go func(s *Stmt) {
|
||||
// make sure the stmt must finish preparation first
|
||||
<-s.prepared
|
||||
if s.Stmt != nil {
|
||||
_ = s.Close()
|
||||
}
|
||||
}(stmt)
|
||||
for _, key := range db.Stmts.Keys() {
|
||||
db.Stmts.Delete(key)
|
||||
}
|
||||
// set db.Stmts to nil to prevent further use
|
||||
db.Stmts = nil
|
||||
}
|
||||
|
||||
func (sdb *PreparedStmtDB) Reset() {
|
||||
sdb.Mux.Lock()
|
||||
defer sdb.Mux.Unlock()
|
||||
|
||||
for _, stmt := range sdb.Stmts {
|
||||
go func(s *Stmt) {
|
||||
// make sure the stmt must finish preparation first
|
||||
<-s.prepared
|
||||
if s.Stmt != nil {
|
||||
_ = s.Close()
|
||||
}
|
||||
}(stmt)
|
||||
}
|
||||
sdb.Stmts = make(map[string]*Stmt)
|
||||
// Reset is deprecated; use Close instead.
|
||||
func (db *PreparedStmtDB) Reset() {
|
||||
db.Close()
|
||||
}
|
||||
|
||||
func (db *PreparedStmtDB) prepare(ctx context.Context, conn ConnPool, isTransaction bool, query string) (Stmt, error) {
|
||||
func (db *PreparedStmtDB) prepare(ctx context.Context, conn ConnPool, isTransaction bool, query string) (_ *stmt_store.Stmt, err error) {
|
||||
db.Mux.RLock()
|
||||
if stmt, ok := db.Stmts[query]; ok && (!stmt.Transaction || isTransaction) {
|
||||
db.Mux.RUnlock()
|
||||
// wait for other goroutines prepared
|
||||
<-stmt.prepared
|
||||
if stmt.prepareErr != nil {
|
||||
return Stmt{}, stmt.prepareErr
|
||||
if db.Stmts != nil {
|
||||
if stmt, ok := db.Stmts.Get(query); ok && (!stmt.Transaction || isTransaction) {
|
||||
db.Mux.RUnlock()
|
||||
return stmt, stmt.Error()
|
||||
}
|
||||
|
||||
return *stmt, nil
|
||||
}
|
||||
db.Mux.RUnlock()
|
||||
|
||||
// retry
|
||||
db.Mux.Lock()
|
||||
// double check
|
||||
if stmt, ok := db.Stmts[query]; ok && (!stmt.Transaction || isTransaction) {
|
||||
db.Mux.Unlock()
|
||||
// wait for other goroutines prepared
|
||||
<-stmt.prepared
|
||||
if stmt.prepareErr != nil {
|
||||
return Stmt{}, stmt.prepareErr
|
||||
if db.Stmts != nil {
|
||||
if stmt, ok := db.Stmts.Get(query); ok && (!stmt.Transaction || isTransaction) {
|
||||
db.Mux.Unlock()
|
||||
return stmt, stmt.Error()
|
||||
}
|
||||
|
||||
return *stmt, nil
|
||||
}
|
||||
// check db.Stmts first to avoid Segmentation Fault(setting value to nil map)
|
||||
// which cause by calling Close and executing SQL concurrently
|
||||
if db.Stmts == nil {
|
||||
db.Mux.Unlock()
|
||||
return Stmt{}, ErrInvalidDB
|
||||
}
|
||||
// cache preparing stmt first
|
||||
cacheStmt := Stmt{Transaction: isTransaction, prepared: make(chan struct{})}
|
||||
db.Stmts[query] = &cacheStmt
|
||||
db.Mux.Unlock()
|
||||
|
||||
// prepare completed
|
||||
defer close(cacheStmt.prepared)
|
||||
|
||||
// Reason why cannot lock conn.PrepareContext
|
||||
// suppose the maxopen is 1, g1 is creating record and g2 is querying record.
|
||||
// 1. g1 begin tx, g1 is requeue because of waiting for the system call, now `db.ConnPool` db.numOpen == 1.
|
||||
// 2. g2 select lock `conn.PrepareContext(ctx, query)`, now db.numOpen == db.maxOpen , wait for release.
|
||||
// 3. g1 tx exec insert, wait for unlock `conn.PrepareContext(ctx, query)` to finish tx and release.
|
||||
stmt, err := conn.PrepareContext(ctx, query)
|
||||
if err != nil {
|
||||
cacheStmt.prepareErr = err
|
||||
db.Mux.Lock()
|
||||
delete(db.Stmts, query)
|
||||
db.Mux.Unlock()
|
||||
return Stmt{}, err
|
||||
}
|
||||
|
||||
db.Mux.Lock()
|
||||
cacheStmt.Stmt = stmt
|
||||
db.Mux.Unlock()
|
||||
|
||||
return cacheStmt, nil
|
||||
return db.Stmts.New(ctx, query, isTransaction, conn, db.Mux)
|
||||
}
|
||||
|
||||
func (db *PreparedStmtDB) BeginTx(ctx context.Context, opt *sql.TxOptions) (ConnPool, error) {
|
||||
|
|
@ -162,10 +111,7 @@ func (db *PreparedStmtDB) ExecContext(ctx context.Context, query string, args ..
|
|||
if err == nil {
|
||||
result, err = stmt.ExecContext(ctx, args...)
|
||||
if errors.Is(err, driver.ErrBadConn) {
|
||||
db.Mux.Lock()
|
||||
defer db.Mux.Unlock()
|
||||
go stmt.Close()
|
||||
delete(db.Stmts, query)
|
||||
db.Stmts.Delete(query)
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
|
|
@ -176,11 +122,7 @@ func (db *PreparedStmtDB) QueryContext(ctx context.Context, query string, args .
|
|||
if err == nil {
|
||||
rows, err = stmt.QueryContext(ctx, args...)
|
||||
if errors.Is(err, driver.ErrBadConn) {
|
||||
db.Mux.Lock()
|
||||
defer db.Mux.Unlock()
|
||||
|
||||
go stmt.Close()
|
||||
delete(db.Stmts, query)
|
||||
db.Stmts.Delete(query)
|
||||
}
|
||||
}
|
||||
return rows, err
|
||||
|
|
@ -230,11 +172,7 @@ func (tx *PreparedStmtTX) ExecContext(ctx context.Context, query string, args ..
|
|||
if err == nil {
|
||||
result, err = tx.Tx.StmtContext(ctx, stmt.Stmt).ExecContext(ctx, args...)
|
||||
if errors.Is(err, driver.ErrBadConn) {
|
||||
tx.PreparedStmtDB.Mux.Lock()
|
||||
defer tx.PreparedStmtDB.Mux.Unlock()
|
||||
|
||||
go stmt.Close()
|
||||
delete(tx.PreparedStmtDB.Stmts, query)
|
||||
tx.PreparedStmtDB.Stmts.Delete(query)
|
||||
}
|
||||
}
|
||||
return result, err
|
||||
|
|
@ -245,11 +183,7 @@ func (tx *PreparedStmtTX) QueryContext(ctx context.Context, query string, args .
|
|||
if err == nil {
|
||||
rows, err = tx.Tx.StmtContext(ctx, stmt.Stmt).QueryContext(ctx, args...)
|
||||
if errors.Is(err, driver.ErrBadConn) {
|
||||
tx.PreparedStmtDB.Mux.Lock()
|
||||
defer tx.PreparedStmtDB.Mux.Unlock()
|
||||
|
||||
go stmt.Close()
|
||||
delete(tx.PreparedStmtDB.Stmts, query)
|
||||
tx.PreparedStmtDB.Stmts.Delete(query)
|
||||
}
|
||||
}
|
||||
return rows, err
|
||||
|
|
|
|||
4
vendor/gorm.io/gorm/scan.go
generated
vendored
4
vendor/gorm.io/gorm/scan.go
generated
vendored
|
|
@ -15,7 +15,7 @@ func prepareValues(values []interface{}, db *DB, columnTypes []*sql.ColumnType,
|
|||
if db.Statement.Schema != nil {
|
||||
for idx, name := range columns {
|
||||
if field := db.Statement.Schema.LookUpField(name); field != nil {
|
||||
values[idx] = reflect.New(reflect.PtrTo(field.FieldType)).Interface()
|
||||
values[idx] = reflect.New(reflect.PointerTo(field.FieldType)).Interface()
|
||||
continue
|
||||
}
|
||||
values[idx] = new(interface{})
|
||||
|
|
@ -23,7 +23,7 @@ func prepareValues(values []interface{}, db *DB, columnTypes []*sql.ColumnType,
|
|||
} else if len(columnTypes) > 0 {
|
||||
for idx, columnType := range columnTypes {
|
||||
if columnType.ScanType() != nil {
|
||||
values[idx] = reflect.New(reflect.PtrTo(columnType.ScanType())).Interface()
|
||||
values[idx] = reflect.New(reflect.PointerTo(columnType.ScanType())).Interface()
|
||||
} else {
|
||||
values[idx] = new(interface{})
|
||||
}
|
||||
|
|
|
|||
2
vendor/gorm.io/gorm/schema/field.go
generated
vendored
2
vendor/gorm.io/gorm/schema/field.go
generated
vendored
|
|
@ -996,6 +996,6 @@ func (field *Field) setupNewValuePool() {
|
|||
}
|
||||
|
||||
if field.NewValuePool == nil {
|
||||
field.NewValuePool = poolInitializer(reflect.PtrTo(field.IndirectFieldType))
|
||||
field.NewValuePool = poolInitializer(reflect.PointerTo(field.IndirectFieldType))
|
||||
}
|
||||
}
|
||||
|
|
|
|||
31
vendor/gorm.io/gorm/schema/index.go
generated
vendored
31
vendor/gorm.io/gorm/schema/index.go
generated
vendored
|
|
@ -23,12 +23,13 @@ type IndexOption struct {
|
|||
Sort string // DESC, ASC
|
||||
Collate string
|
||||
Length int
|
||||
priority int
|
||||
Priority int
|
||||
}
|
||||
|
||||
// ParseIndexes parse schema indexes
|
||||
func (schema *Schema) ParseIndexes() map[string]Index {
|
||||
indexes := map[string]Index{}
|
||||
func (schema *Schema) ParseIndexes() []*Index {
|
||||
indexesByName := map[string]*Index{}
|
||||
indexes := []*Index{}
|
||||
|
||||
for _, field := range schema.Fields {
|
||||
if field.TagSettings["INDEX"] != "" || field.TagSettings["UNIQUEINDEX"] != "" {
|
||||
|
|
@ -38,7 +39,12 @@ func (schema *Schema) ParseIndexes() map[string]Index {
|
|||
break
|
||||
}
|
||||
for _, index := range fieldIndexes {
|
||||
idx := indexes[index.Name]
|
||||
idx := indexesByName[index.Name]
|
||||
if idx == nil {
|
||||
idx = &Index{Name: index.Name}
|
||||
indexesByName[index.Name] = idx
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
idx.Name = index.Name
|
||||
if idx.Class == "" {
|
||||
idx.Class = index.Class
|
||||
|
|
@ -58,10 +64,8 @@ func (schema *Schema) ParseIndexes() map[string]Index {
|
|||
|
||||
idx.Fields = append(idx.Fields, index.Fields...)
|
||||
sort.Slice(idx.Fields, func(i, j int) bool {
|
||||
return idx.Fields[i].priority < idx.Fields[j].priority
|
||||
return idx.Fields[i].Priority < idx.Fields[j].Priority
|
||||
})
|
||||
|
||||
indexes[index.Name] = idx
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -78,12 +82,12 @@ func (schema *Schema) LookIndex(name string) *Index {
|
|||
indexes := schema.ParseIndexes()
|
||||
for _, index := range indexes {
|
||||
if index.Name == name {
|
||||
return &index
|
||||
return index
|
||||
}
|
||||
|
||||
for _, field := range index.Fields {
|
||||
if field.Name == name {
|
||||
return &index
|
||||
return index
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -111,17 +115,14 @@ func parseFieldIndexes(field *Field) (indexes []Index, err error) {
|
|||
idx = len(tag)
|
||||
}
|
||||
|
||||
if idx != -1 {
|
||||
name = tag[0:idx]
|
||||
}
|
||||
|
||||
name = tag[0:idx]
|
||||
if name == "" {
|
||||
subName := field.Name
|
||||
const key = "COMPOSITE"
|
||||
if composite, found := settings[key]; found {
|
||||
if len(composite) == 0 || composite == key {
|
||||
err = fmt.Errorf(
|
||||
"The composite tag of %s.%s cannot be empty",
|
||||
"the composite tag of %s.%s cannot be empty",
|
||||
field.Schema.Name,
|
||||
field.Name)
|
||||
return
|
||||
|
|
@ -154,7 +155,7 @@ func parseFieldIndexes(field *Field) (indexes []Index, err error) {
|
|||
Sort: settings["SORT"],
|
||||
Collate: settings["COLLATE"],
|
||||
Length: length,
|
||||
priority: priority,
|
||||
Priority: priority,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
|
|
|||
6
vendor/gorm.io/gorm/schema/relationship.go
generated
vendored
6
vendor/gorm.io/gorm/schema/relationship.go
generated
vendored
|
|
@ -5,6 +5,7 @@ import (
|
|||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/jinzhu/inflection"
|
||||
"golang.org/x/text/cases"
|
||||
|
|
@ -32,6 +33,8 @@ type Relationships struct {
|
|||
Relations map[string]*Relationship
|
||||
|
||||
EmbeddedRelations map[string]*Relationships
|
||||
|
||||
Mux sync.RWMutex
|
||||
}
|
||||
|
||||
type Relationship struct {
|
||||
|
|
@ -98,9 +101,10 @@ func (schema *Schema) parseRelation(field *Field) *Relationship {
|
|||
}
|
||||
|
||||
if relation.Type == has {
|
||||
// don't add relations to embedded schema, which might be shared
|
||||
if relation.FieldSchema != relation.Schema && relation.Polymorphic == nil && field.OwnerSchema == nil {
|
||||
relation.FieldSchema.Relationships.Mux.Lock()
|
||||
relation.FieldSchema.Relationships.Relations["_"+relation.Schema.Name+"_"+relation.Name] = relation
|
||||
relation.FieldSchema.Relationships.Mux.Unlock()
|
||||
}
|
||||
|
||||
switch field.IndirectFieldType.Kind() {
|
||||
|
|
|
|||
2
vendor/gorm.io/gorm/schema/utils.go
generated
vendored
2
vendor/gorm.io/gorm/schema/utils.go
generated
vendored
|
|
@ -71,7 +71,7 @@ func appendSettingFromTag(tag reflect.StructTag, value string) reflect.StructTag
|
|||
// GetRelationsValues get relations's values from a reflect value
|
||||
func GetRelationsValues(ctx context.Context, reflectValue reflect.Value, rels []*Relationship) (reflectResults reflect.Value) {
|
||||
for _, rel := range rels {
|
||||
reflectResults = reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(rel.FieldSchema.ModelType)), 0, 1)
|
||||
reflectResults = reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(rel.FieldSchema.ModelType)), 0, 1)
|
||||
|
||||
appendToResults := func(value reflect.Value) {
|
||||
if _, isZero := rel.Field.ValueOf(ctx, value); !isZero {
|
||||
|
|
|
|||
20
vendor/modules.txt
vendored
20
vendor/modules.txt
vendored
|
|
@ -212,8 +212,8 @@ github.com/prometheus/client_model/go
|
|||
## explicit; go 1.21
|
||||
github.com/prometheus/common/expfmt
|
||||
github.com/prometheus/common/model
|
||||
# github.com/prometheus/procfs v0.16.0
|
||||
## explicit; go 1.21
|
||||
# github.com/prometheus/procfs v0.16.1
|
||||
## explicit; go 1.23.0
|
||||
github.com/prometheus/procfs
|
||||
github.com/prometheus/procfs/internal/fs
|
||||
github.com/prometheus/procfs/internal/util
|
||||
|
|
@ -277,7 +277,7 @@ go.opentelemetry.io/otel/trace
|
|||
go.opentelemetry.io/otel/trace/embedded
|
||||
go.opentelemetry.io/otel/trace/internal/telemetry
|
||||
go.opentelemetry.io/otel/trace/noop
|
||||
# golang.org/x/crypto v0.37.0
|
||||
# golang.org/x/crypto v0.38.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/crypto/bcrypt
|
||||
golang.org/x/crypto/blowfish
|
||||
|
|
@ -286,23 +286,23 @@ golang.org/x/crypto/chacha20poly1305
|
|||
golang.org/x/crypto/hkdf
|
||||
golang.org/x/crypto/internal/alias
|
||||
golang.org/x/crypto/internal/poly1305
|
||||
# golang.org/x/net v0.39.0
|
||||
# golang.org/x/net v0.40.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/net/internal/socks
|
||||
golang.org/x/net/proxy
|
||||
# golang.org/x/oauth2 v0.29.0
|
||||
# golang.org/x/oauth2 v0.30.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/oauth2
|
||||
golang.org/x/oauth2/internal
|
||||
# golang.org/x/sync v0.13.0
|
||||
# golang.org/x/sync v0.14.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/sync/errgroup
|
||||
# golang.org/x/sys v0.32.0
|
||||
# golang.org/x/sys v0.33.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/sys/cpu
|
||||
golang.org/x/sys/unix
|
||||
golang.org/x/sys/windows
|
||||
# golang.org/x/text v0.24.0
|
||||
# golang.org/x/text v0.25.0
|
||||
## explicit; go 1.23.0
|
||||
golang.org/x/text/cases
|
||||
golang.org/x/text/internal
|
||||
|
|
@ -362,11 +362,13 @@ gorm.io/driver/mysql
|
|||
# gorm.io/driver/sqlite v1.5.7
|
||||
## explicit; go 1.20
|
||||
gorm.io/driver/sqlite
|
||||
# gorm.io/gorm v1.25.12
|
||||
# gorm.io/gorm v1.26.0
|
||||
## explicit; go 1.18
|
||||
gorm.io/gorm
|
||||
gorm.io/gorm/callbacks
|
||||
gorm.io/gorm/clause
|
||||
gorm.io/gorm/internal/lru
|
||||
gorm.io/gorm/internal/stmt_store
|
||||
gorm.io/gorm/logger
|
||||
gorm.io/gorm/migrator
|
||||
gorm.io/gorm/schema
|
||||
|
|
|
|||
|
|
@ -72,7 +72,7 @@ func (w *Worker) HandleJobsCompleted(jobs []params.ScaleSetJobMessage) (err erro
|
|||
for _, job := range jobs {
|
||||
if err := w.recordOrUpdateJob(job); err != nil {
|
||||
// recording scale set jobs is purely informational for now.
|
||||
slog.ErrorContext(w.ctx, "recording job", "job", job, "error", err)
|
||||
slog.ErrorContext(w.ctx, "failed to save job data", "job", job, "error", err)
|
||||
}
|
||||
|
||||
if job.RunnerName == "" {
|
||||
|
|
@ -106,7 +106,7 @@ func (w *Worker) HandleJobsStarted(jobs []params.ScaleSetJobMessage) (err error)
|
|||
for _, job := range jobs {
|
||||
if err := w.recordOrUpdateJob(job); err != nil {
|
||||
// recording scale set jobs is purely informational for now.
|
||||
slog.ErrorContext(w.ctx, "recording job", "job", job, "error", err)
|
||||
slog.ErrorContext(w.ctx, "failed to save job data", "job", job, "error", err)
|
||||
}
|
||||
|
||||
if job.RunnerName == "" {
|
||||
|
|
@ -138,7 +138,7 @@ func (w *Worker) HandleJobsAvailable(jobs []params.ScaleSetJobMessage) error {
|
|||
for _, job := range jobs {
|
||||
if err := w.recordOrUpdateJob(job); err != nil {
|
||||
// recording scale set jobs is purely informational for now.
|
||||
slog.ErrorContext(w.ctx, "recording job", "job", job, "error", err)
|
||||
slog.ErrorContext(w.ctx, "failed to save job data", "job", job, "error", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -118,11 +118,13 @@ func (l *scaleSetListener) handleSessionMessage(msg params.RunnerScaleSetMessage
|
|||
var completedJobs []params.ScaleSetJobMessage
|
||||
var availableJobs []params.ScaleSetJobMessage
|
||||
var startedJobs []params.ScaleSetJobMessage
|
||||
var assignedJobs []params.ScaleSetJobMessage
|
||||
|
||||
for _, job := range body {
|
||||
switch job.MessageType {
|
||||
case params.MessageTypeJobAssigned:
|
||||
slog.InfoContext(l.ctx, "new job assigned", "job_id", job.RunnerRequestID, "job_name", job.JobDisplayName)
|
||||
assignedJobs = append(assignedJobs, job)
|
||||
case params.MessageTypeJobStarted:
|
||||
slog.InfoContext(l.ctx, "job started", "job_id", job.RunnerRequestID, "job_name", job.JobDisplayName, "runner_name", job.RunnerName)
|
||||
startedJobs = append(startedJobs, job)
|
||||
|
|
@ -137,6 +139,12 @@ func (l *scaleSetListener) handleSessionMessage(msg params.RunnerScaleSetMessage
|
|||
}
|
||||
}
|
||||
|
||||
if len(assignedJobs) > 0 {
|
||||
if err := l.scaleSetHelper.HandleJobsAvailable(assignedJobs); err != nil {
|
||||
slog.ErrorContext(l.ctx, "error handling available jobs", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(availableJobs) > 0 {
|
||||
jobIDs := make([]int64, len(availableJobs))
|
||||
for idx, job := range availableJobs {
|
||||
|
|
|
|||