Make the default github.com endpoint mutable

The k8s operator seems to want to define its own endpoint. This change allows the default github.com endpoint to be removed, provided no credentials are tied to it.

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>

commit 750446acec (parent 1e0cb72aa3)
92 changed files with 2568 additions and 1159 deletions
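In short: the default github.com endpoint is now only seeded at startup when the endpoints table is empty, and the update/delete paths no longer special-case it; they refuse the operation only while credentials (or repos, orgs, enterprises) still reference the endpoint. Below is a minimal sketch of that rule, not part of the diff; the helper name is hypothetical and the import paths are an assumption based on how the rest of the repo aliases runnerErrors and errors.Wrap.

Sketch (not part of the diff):

package sketch

import (
	runnerErrors "github.com/cloudbase/garm-provider-common/errors"
	"github.com/pkg/errors"
)

// canDeleteEndpoint mirrors the rule enforced inside the transaction: any
// endpoint, including the default github.com one, may only be removed when
// nothing references it any more.
func canDeleteEndpoint(creds, repos, orgs, enterprises int64) error {
	if creds > 0 || repos > 0 || orgs > 0 || enterprises > 0 {
		return errors.Wrap(runnerErrors.ErrBadRequest, "cannot delete endpoint with associated entities")
	}
	// The real code then deletes the row inside the transaction:
	// tx.Unscoped().Delete(&endpoint)
	return nil
}

With this in place, the k8s operator can drop the stock github.com endpoint and register its own, as long as it detaches any credentials first.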
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
@@ -50,7 +50,7 @@ func (_m *Store) BreakLockJobIsQueued(ctx context.Context, jobID int64) error {
 	return r0
 }
 
-// ControllerInfo provides a mock function with given fields:
+// ControllerInfo provides a mock function with no fields
 func (_m *Store) ControllerInfo() (params.ControllerInfo, error) {
 	ret := _m.Called()
 
@@ -1034,7 +1034,7 @@ func (_m *Store) HasAdminUser(ctx context.Context) bool {
 	return r0
 }
 
-// InitController provides a mock function with given fields:
+// InitController provides a mock function with no fields
 func (_m *Store) InitController() (params.ControllerInfo, error) {
 	ret := _m.Called()
 
@@ -28,10 +28,6 @@ import (
 	"github.com/cloudbase/garm/params"
 )
 
-const (
-	defaultGithubEndpoint string = "github.com"
-)
-
 func (s *sqlDatabase) sqlToCommonGithubCredentials(creds GithubCredentials) (params.GithubCredentials, error) {
 	if len(creds.Payload) == 0 {
 		return params.GithubCredentials{}, errors.New("empty credentials payload")
@@ -168,10 +164,6 @@ func (s *sqlDatabase) ListGithubEndpoints(_ context.Context) ([]params.GithubEnd
 }
 
 func (s *sqlDatabase) UpdateGithubEndpoint(_ context.Context, name string, param params.UpdateGithubEndpointParams) (ghEndpoint params.GithubEndpoint, err error) {
-	if name == defaultGithubEndpoint {
-		return params.GithubEndpoint{}, errors.Wrap(runnerErrors.ErrBadRequest, "cannot update default github endpoint")
-	}
-
 	defer func() {
 		if err == nil {
 			s.sendNotify(common.GithubEndpointEntityType, common.UpdateOperation, ghEndpoint)
@@ -185,6 +177,16 @@ func (s *sqlDatabase) UpdateGithubEndpoint(_ context.Context, name string, param
 			}
 			return errors.Wrap(err, "fetching github endpoint")
 		}
+
+		var credsCount int64
+		if err := tx.Model(&GithubCredentials{}).Where("endpoint_name = ?", endpoint.Name).Count(&credsCount).Error; err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return errors.Wrap(err, "fetching github credentials")
+			}
+		}
+		if credsCount > 0 && (param.APIBaseURL != nil || param.BaseURL != nil || param.UploadBaseURL != nil) {
+			return errors.Wrap(runnerErrors.ErrBadRequest, "cannot update endpoint URLs with existing credentials")
+		}
 		if param.APIBaseURL != nil {
 			endpoint.APIBaseURL = *param.APIBaseURL
 		}
@@ -236,10 +238,6 @@ func (s *sqlDatabase) GetGithubEndpoint(_ context.Context, name string) (params.
 }
 
 func (s *sqlDatabase) DeleteGithubEndpoint(_ context.Context, name string) (err error) {
-	if name == defaultGithubEndpoint {
-		return errors.Wrap(runnerErrors.ErrBadRequest, "cannot delete default github endpoint")
-	}
-
 	defer func() {
 		if err == nil {
 			s.sendNotify(common.GithubEndpointEntityType, common.DeleteOperation, params.GithubEndpoint{Name: name})
@@ -283,7 +281,7 @@ func (s *sqlDatabase) DeleteGithubEndpoint(_ context.Context, name string) (err
 		}
 
 		if credsCount > 0 || repoCnt > 0 || orgCnt > 0 || entCnt > 0 {
-			return errors.New("cannot delete endpoint with associated entities")
+			return errors.Wrap(runnerErrors.ErrBadRequest, "cannot delete endpoint with associated entities")
 		}
 
 		if err := tx.Unscoped().Delete(&endpoint).Error; err != nil {
@@ -40,6 +40,7 @@ const (
 	testEndpointDescription string = "test description"
 	testCredsName string = "test-creds"
 	testCredsDescription string = "test creds"
+	defaultGithubEndpoint string = "github.com"
 )
 
 type GithubTestSuite struct {
@@ -56,20 +57,13 @@ func (s *GithubTestSuite) SetupTest() {
 	s.db = db
 }
 
-func (s *GithubTestSuite) TestDefaultEndpointGetsCreatedAutomatically() {
+func (s *GithubTestSuite) TestDefaultEndpointGetsCreatedAutomaticallyIfNoOtherEndpointExists() {
 	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
 	endpoint, err := s.db.GetGithubEndpoint(ctx, defaultGithubEndpoint)
 	s.Require().NoError(err)
 	s.Require().NotNil(endpoint)
 }
 
-func (s *GithubTestSuite) TestDeletingDefaultEndpointFails() {
-	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
-	err := s.db.DeleteGithubEndpoint(ctx, defaultGithubEndpoint)
-	s.Require().Error(err)
-	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
-}
-
 func (s *GithubTestSuite) TestCreatingEndpoint() {
 	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
 
@@ -153,6 +147,39 @@ func (s *GithubTestSuite) TestDeletingEndpoint() {
 	s.Require().ErrorIs(err, runnerErrors.ErrNotFound)
 }
 
+func (s *GithubTestSuite) TestDeleteGithubEndpointFailsWhenCredentialsExist() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name: testEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL: testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL: testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name: testCredsName,
+		Description: testCredsDescription,
+		Endpoint: testEndpointName,
+		AuthType: params.GithubAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err = s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	err = s.db.DeleteGithubEndpoint(ctx, testEndpointName)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+}
+
 func (s *GithubTestSuite) TestUpdateEndpoint() {
 	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
 
@@ -168,7 +195,7 @@ func (s *GithubTestSuite) TestUpdateEndpoint() {
 	s.Require().NoError(err)
 	s.Require().NotNil(endpoint)
 
-	newDescription := "new description"
+	newDescription := "the new description"
 	newAPIBaseURL := "https://new-api.example.com"
 	newUploadBaseURL := "https://new-uploads.example.com"
 	newBaseURL := "https://new.example.com"
@@ -192,6 +219,72 @@ func (s *GithubTestSuite) TestUpdateEndpoint() {
 	s.Require().Equal(caCertBundle, updatedEndpoint.CACertBundle)
 }
 
+func (s *GithubTestSuite) TestUpdateEndpointUDLsFailsIfCredentialsAreAssociated() {
+	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
+
+	createEpParams := params.CreateGithubEndpointParams{
+		Name: testEndpointName,
+		Description: testEndpointDescription,
+		APIBaseURL: testAPIBaseURL,
+		UploadBaseURL: testUploadBaseURL,
+		BaseURL: testBaseURL,
+	}
+
+	endpoint, err := s.db.CreateGithubEndpoint(ctx, createEpParams)
+	s.Require().NoError(err)
+	s.Require().NotNil(endpoint)
+
+	credParams := params.CreateGithubCredentialsParams{
+		Name: testCredsName,
+		Description: testCredsDescription,
+		Endpoint: testEndpointName,
+		AuthType: params.GithubAuthTypePAT,
+		PAT: params.GithubPAT{
+			OAuth2Token: "test",
+		},
+	}
+
+	_, err = s.db.CreateGithubCredentials(ctx, credParams)
+	s.Require().NoError(err)
+
+	newDescription := "new description"
+	newBaseURL := "https://new.example.com"
+	newAPIBaseURL := "https://new-api.example.com"
+	newUploadBaseURL := "https://new-uploads.example.com"
+	updateEpParams := params.UpdateGithubEndpointParams{
+		BaseURL: &newBaseURL,
+	}
+
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		UploadBaseURL: &newUploadBaseURL,
+	}
+
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		APIBaseURL: &newAPIBaseURL,
+	}
+	_, err = s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, runnerErrors.ErrBadRequest)
+	s.Require().EqualError(err, "updating github endpoint: cannot update endpoint URLs with existing credentials: invalid request")
+
+	updateEpParams = params.UpdateGithubEndpointParams{
+		Description: &newDescription,
+	}
+	ret, err := s.db.UpdateGithubEndpoint(ctx, testEndpointName, updateEpParams)
+	s.Require().NoError(err)
+	s.Require().Equal(newDescription, ret.Description)
+}
+
 func (s *GithubTestSuite) TestUpdatingNonExistingEndpointReturnsNotFoundError() {
 	ctx := garmTesting.ImpersonateAdminContext(context.Background(), s.db, s.T())
 
@@ -212,9 +212,18 @@ func (s *sqlDatabase) ensureGithubEndpoint() error {
 		UploadBaseURL: appdefaults.GithubDefaultUploadBaseURL,
 	}
 
-	if _, err := s.CreateGithubEndpoint(context.Background(), createEndpointParams); err != nil {
-		if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
-			return errors.Wrap(err, "creating default github endpoint")
+	var epCount int64
+	if err := s.conn.Model(&GithubEndpoint{}).Count(&epCount).Error; err != nil {
+		if !errors.Is(err, gorm.ErrRecordNotFound) {
+			return errors.Wrap(err, "counting github endpoints")
+		}
+	}
+
+	if epCount == 0 {
+		if _, err := s.CreateGithubEndpoint(context.Background(), createEndpointParams); err != nil {
+			if !errors.Is(err, runnerErrors.ErrDuplicateEntity) {
+				return errors.Wrap(err, "creating default github endpoint")
+			}
 		}
 	}
 
go.mod (24 changed lines)

@@ -28,15 +28,15 @@ require (
 	github.com/prometheus/client_golang v1.22.0
 	github.com/spf13/cobra v1.9.1
 	github.com/stretchr/testify v1.10.0
-	golang.org/x/crypto v0.37.0
-	golang.org/x/oauth2 v0.29.0
-	golang.org/x/sync v0.13.0
+	golang.org/x/crypto v0.38.0
+	golang.org/x/oauth2 v0.30.0
+	golang.org/x/sync v0.14.0
 	gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gorm.io/datatypes v1.2.5
 	gorm.io/driver/mysql v1.5.7
 	gorm.io/driver/sqlite v1.5.7
-	gorm.io/gorm v1.25.12
+	gorm.io/gorm v1.26.1
 )
 
 require (
@@ -76,20 +76,20 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.63.0 // indirect
-	github.com/prometheus/procfs v0.16.0 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
 	go.mongodb.org/mongo-driver v1.17.3 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/otel v1.35.0 // indirect
-	go.opentelemetry.io/otel/metric v1.35.0 // indirect
-	go.opentelemetry.io/otel/trace v1.35.0 // indirect
-	golang.org/x/net v0.39.0 // indirect
-	golang.org/x/sys v0.32.0 // indirect
-	golang.org/x/text v0.24.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
+	go.opentelemetry.io/otel/metric v1.36.0 // indirect
+	go.opentelemetry.io/otel/trace v1.36.0 // indirect
+	golang.org/x/net v0.40.0 // indirect
+	golang.org/x/sys v0.33.0 // indirect
+	golang.org/x/text v0.25.0 // indirect
 	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
go.sum (48 changed lines)

@@ -155,10 +155,10 @@ github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/
 github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
 github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
 github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
-github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
-github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
-github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
-github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
+github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
+github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -182,29 +182,29 @@ go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeH
 go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
 go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
 go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
-go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
-go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
-go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
 go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
 go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
-go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
-go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
-golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
-golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
-golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
-golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
-golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
-golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
-golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
-golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
+golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
-golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
-golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
+golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
 google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
@@ -231,5 +231,5 @@ gorm.io/driver/sqlite v1.5.7/go.mod h1:U+J8craQU6Fzkcvu8oLeAQmi50TkwPEhHDEjQZXDa
 gorm.io/driver/sqlserver v1.5.4 h1:xA+Y1KDNspv79q43bPyjDMUgHoYHLhXYmdFcYPobg8g=
 gorm.io/driver/sqlserver v1.5.4/go.mod h1:+frZ/qYmuna11zHPlh5oc2O6ZA/lS88Keb0XSH1Zh/g=
 gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
-gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
-gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
+gorm.io/gorm v1.26.1 h1:ghB2gUI9FkS46luZtn6DLZ0f6ooBJ5IbVej2ENFDjRw=
+gorm.io/gorm v1.26.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
@@ -60,7 +60,7 @@ func (_m *PoolManager) GetWebhookInfo(ctx context.Context) (params.HookInfo, err
 	return r0, r1
 }
 
-// GithubRunnerRegistrationToken provides a mock function with given fields:
+// GithubRunnerRegistrationToken provides a mock function with no fields
 func (_m *PoolManager) GithubRunnerRegistrationToken() (string, error) {
 	ret := _m.Called()
 
@@ -106,7 +106,7 @@ func (_m *PoolManager) HandleWorkflowJob(job params.WorkflowJob) error {
 	return r0
 }
 
-// ID provides a mock function with given fields:
+// ID provides a mock function with no fields
 func (_m *PoolManager) ID() string {
 	ret := _m.Called()
 
@@ -152,7 +152,7 @@ func (_m *PoolManager) InstallWebhook(ctx context.Context, param params.InstallW
 	return r0, r1
 }
 
-// RootCABundle provides a mock function with given fields:
+// RootCABundle provides a mock function with no fields
 func (_m *PoolManager) RootCABundle() (params.CertificateBundle, error) {
 	ret := _m.Called()
 
@@ -180,7 +180,7 @@ func (_m *PoolManager) RootCABundle() (params.CertificateBundle, error) {
 	return r0, r1
 }
 
-// Start provides a mock function with given fields:
+// Start provides a mock function with no fields
 func (_m *PoolManager) Start() error {
 	ret := _m.Called()
 
@@ -198,7 +198,7 @@ func (_m *PoolManager) Start() error {
 	return r0
 }
 
-// Status provides a mock function with given fields:
+// Status provides a mock function with no fields
 func (_m *PoolManager) Status() params.PoolManagerStatus {
 	ret := _m.Called()
 
@@ -216,7 +216,7 @@ func (_m *PoolManager) Status() params.PoolManagerStatus {
 	return r0
 }
 
-// Stop provides a mock function with given fields:
+// Stop provides a mock function with no fields
 func (_m *PoolManager) Stop() error {
 	ret := _m.Called()
 
@@ -252,7 +252,7 @@ func (_m *PoolManager) UninstallWebhook(ctx context.Context) error {
 	return r0
 }
 
-// Wait provides a mock function with given fields:
+// Wait provides a mock function with no fields
 func (_m *PoolManager) Wait() error {
 	ret := _m.Called()
 
@@ -270,7 +270,7 @@ func (_m *PoolManager) Wait() error {
 	return r0
 }
 
-// WebhookSecret provides a mock function with given fields:
+// WebhookSecret provides a mock function with no fields
 func (_m *PoolManager) WebhookSecret() string {
 	ret := _m.Called()
 
@@ -1,15 +1,17 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
 import (
 	context "context"
 
+	common "github.com/cloudbase/garm/runner/common"
+
 	garm_provider_commonparams "github.com/cloudbase/garm-provider-common/params"
 
 	mock "github.com/stretchr/testify/mock"
 
 	params "github.com/cloudbase/garm/params"
-	"github.com/cloudbase/garm/runner/common"
 )
 
 // Provider is an autogenerated mock type for the Provider type
@@ -17,7 +19,7 @@ type Provider struct {
 	mock.Mock
 }
 
-// AsParams provides a mock function with given fields:
+// AsParams provides a mock function with no fields
 func (_m *Provider) AsParams() params.Provider {
 	ret := _m.Called()
 
@@ -35,9 +37,9 @@ func (_m *Provider) AsParams() params.Provider {
 	return r0
 }
 
-// CreateInstance provides a mock function with given fields: ctx, bootstrapParams
+// CreateInstance provides a mock function with given fields: ctx, bootstrapParams, createInstanceParams
 func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_provider_commonparams.BootstrapInstance, createInstanceParams common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
-	ret := _m.Called(ctx, bootstrapParams)
+	ret := _m.Called(ctx, bootstrapParams, createInstanceParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for CreateInstance")
@@ -45,17 +47,17 @@ func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_pro
 
 	var r0 garm_provider_commonparams.ProviderInstance
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance) (garm_provider_commonparams.ProviderInstance, error)); ok {
-		return rf(ctx, bootstrapParams)
+	if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+		return rf(ctx, bootstrapParams, createInstanceParams)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance) garm_provider_commonparams.ProviderInstance); ok {
-		r0 = rf(ctx, bootstrapParams)
+	if rf, ok := ret.Get(0).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+		r0 = rf(ctx, bootstrapParams, createInstanceParams)
 	} else {
 		r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, garm_provider_commonparams.BootstrapInstance) error); ok {
-		r1 = rf(ctx, bootstrapParams)
+	if rf, ok := ret.Get(1).(func(context.Context, garm_provider_commonparams.BootstrapInstance, common.CreateInstanceParams) error); ok {
+		r1 = rf(ctx, bootstrapParams, createInstanceParams)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -63,17 +65,17 @@ func (_m *Provider) CreateInstance(ctx context.Context, bootstrapParams garm_pro
 	return r0, r1
 }
 
-// DeleteInstance provides a mock function with given fields: ctx, instance
+// DeleteInstance provides a mock function with given fields: ctx, instance, deleteInstanceParams
 func (_m *Provider) DeleteInstance(ctx context.Context, instance string, deleteInstanceParams common.DeleteInstanceParams) error {
-	ret := _m.Called(ctx, instance)
+	ret := _m.Called(ctx, instance, deleteInstanceParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for DeleteInstance")
 	}
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
-		r0 = rf(ctx, instance)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.DeleteInstanceParams) error); ok {
+		r0 = rf(ctx, instance, deleteInstanceParams)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -81,7 +83,7 @@ func (_m *Provider) DeleteInstance(ctx context.Context, instance string, deleteI
 	return r0
 }
 
-// DisableJITConfig provides a mock function with given fields:
+// DisableJITConfig provides a mock function with no fields
 func (_m *Provider) DisableJITConfig() bool {
 	ret := _m.Called()
 
@@ -99,9 +101,9 @@ func (_m *Provider) DisableJITConfig() bool {
 	return r0
 }
 
-// GetInstance provides a mock function with given fields: ctx, instance
+// GetInstance provides a mock function with given fields: ctx, instance, getInstanceParams
 func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanceParams common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error) {
-	ret := _m.Called(ctx, instance)
+	ret := _m.Called(ctx, instance, getInstanceParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for GetInstance")
@@ -109,17 +111,17 @@ func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanc
 
 	var r0 garm_provider_commonparams.ProviderInstance
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, string) (garm_provider_commonparams.ProviderInstance, error)); ok {
-		return rf(ctx, instance)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) (garm_provider_commonparams.ProviderInstance, error)); ok {
+		return rf(ctx, instance, getInstanceParams)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, string) garm_provider_commonparams.ProviderInstance); ok {
-		r0 = rf(ctx, instance)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.GetInstanceParams) garm_provider_commonparams.ProviderInstance); ok {
+		r0 = rf(ctx, instance, getInstanceParams)
 	} else {
 		r0 = ret.Get(0).(garm_provider_commonparams.ProviderInstance)
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
-		r1 = rf(ctx, instance)
+	if rf, ok := ret.Get(1).(func(context.Context, string, common.GetInstanceParams) error); ok {
+		r1 = rf(ctx, instance, getInstanceParams)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -127,9 +129,9 @@ func (_m *Provider) GetInstance(ctx context.Context, instance string, getInstanc
 	return r0, r1
 }
 
-// ListInstances provides a mock function with given fields: ctx, poolID
+// ListInstances provides a mock function with given fields: ctx, poolID, listInstancesParams
 func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstancesParams common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error) {
-	ret := _m.Called(ctx, poolID)
+	ret := _m.Called(ctx, poolID, listInstancesParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for ListInstances")
@@ -137,19 +139,19 @@ func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstan
 
 	var r0 []garm_provider_commonparams.ProviderInstance
 	var r1 error
-	if rf, ok := ret.Get(0).(func(context.Context, string) ([]garm_provider_commonparams.ProviderInstance, error)); ok {
-		return rf(ctx, poolID)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) ([]garm_provider_commonparams.ProviderInstance, error)); ok {
+		return rf(ctx, poolID, listInstancesParams)
 	}
-	if rf, ok := ret.Get(0).(func(context.Context, string) []garm_provider_commonparams.ProviderInstance); ok {
-		r0 = rf(ctx, poolID)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.ListInstancesParams) []garm_provider_commonparams.ProviderInstance); ok {
+		r0 = rf(ctx, poolID, listInstancesParams)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).([]garm_provider_commonparams.ProviderInstance)
 		}
 	}
 
-	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
-		r1 = rf(ctx, poolID)
+	if rf, ok := ret.Get(1).(func(context.Context, string, common.ListInstancesParams) error); ok {
+		r1 = rf(ctx, poolID, listInstancesParams)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -157,17 +159,17 @@ func (_m *Provider) ListInstances(ctx context.Context, poolID string, listInstan
 	return r0, r1
 }
 
-// RemoveAllInstances provides a mock function with given fields: ctx
-func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstances common.RemoveAllInstancesParams) error {
-	ret := _m.Called(ctx)
+// RemoveAllInstances provides a mock function with given fields: ctx, removeAllInstancesParams
+func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstancesParams common.RemoveAllInstancesParams) error {
+	ret := _m.Called(ctx, removeAllInstancesParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for RemoveAllInstances")
 	}
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
-		r0 = rf(ctx)
+	if rf, ok := ret.Get(0).(func(context.Context, common.RemoveAllInstancesParams) error); ok {
+		r0 = rf(ctx, removeAllInstancesParams)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -175,17 +177,17 @@ func (_m *Provider) RemoveAllInstances(ctx context.Context, removeAllInstances c
 	return r0
 }
 
-// Start provides a mock function with given fields: ctx, instance
+// Start provides a mock function with given fields: ctx, instance, startParams
 func (_m *Provider) Start(ctx context.Context, instance string, startParams common.StartParams) error {
-	ret := _m.Called(ctx, instance)
+	ret := _m.Called(ctx, instance, startParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for Start")
 	}
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
-		r0 = rf(ctx, instance)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.StartParams) error); ok {
+		r0 = rf(ctx, instance, startParams)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -193,17 +195,17 @@ func (_m *Provider) Start(ctx context.Context, instance string, startParams comm
 	return r0
 }
 
-// Stop provides a mock function with given fields: ctx, instance
+// Stop provides a mock function with given fields: ctx, instance, stopParams
 func (_m *Provider) Stop(ctx context.Context, instance string, stopParams common.StopParams) error {
-	ret := _m.Called(ctx, instance)
+	ret := _m.Called(ctx, instance, stopParams)
 
 	if len(ret) == 0 {
 		panic("no return value specified for Stop")
 	}
 
 	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
-		r0 = rf(ctx, instance)
+	if rf, ok := ret.Get(0).(func(context.Context, string, common.StopParams) error); ok {
+		r0 = rf(ctx, instance, stopParams)
 	} else {
 		r0 = ret.Error(0)
 	}
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.42.0. DO NOT EDIT.
+// Code generated by mockery v2.53.3. DO NOT EDIT.
 
 package mocks
 
@@ -193,7 +193,7 @@ func (_m *PoolManagerController) GetEnterprisePoolManager(enterprise params.Ente
 	return r0, r1
 }
 
-// GetEnterprisePoolManagers provides a mock function with given fields:
+// GetEnterprisePoolManagers provides a mock function with no fields
 func (_m *PoolManagerController) GetEnterprisePoolManagers() (map[string]common.PoolManager, error) {
 	ret := _m.Called()
 
@@ -253,7 +253,7 @@ func (_m *PoolManagerController) GetOrgPoolManager(org params.Organization) (com
 	return r0, r1
 }
 
-// GetOrgPoolManagers provides a mock function with given fields:
+// GetOrgPoolManagers provides a mock function with no fields
 func (_m *PoolManagerController) GetOrgPoolManagers() (map[string]common.PoolManager, error) {
 	ret := _m.Called()
 
@@ -313,7 +313,7 @@ func (_m *PoolManagerController) GetRepoPoolManager(repo params.Repository) (com
 	return r0, r1
 }
 
-// GetRepoPoolManagers provides a mock function with given fields:
+// GetRepoPoolManagers provides a mock function with no fields
 func (_m *PoolManagerController) GetRepoPoolManagers() (map[string]common.PoolManager, error) {
 	ret := _m.Called()
 
vendor/github.com/prometheus/common/expfmt/text_parse.go (generated, vendored; 4 changed lines)
@ -345,8 +345,8 @@ func (p *TextParser) startLabelName() stateFn {
|
|||
}
|
||||
// Special summary/histogram treatment. Don't add 'quantile' and 'le'
|
||||
// labels to 'real' labels.
|
||||
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
|
||||
!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
|
||||
if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
|
||||
(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
|
||||
p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
|
||||
}
|
||||
// Check for duplicate label names.
vendor/github.com/prometheus/common/model/labels.go (generated, vendored; 3 changed lines)
@ -122,7 +122,8 @@ func (ln LabelName) IsValidLegacy() bool {
|
|||
return false
|
||||
}
|
||||
for i, b := range ln {
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
|
||||
// TODO: Apply De Morgan's law. Make sure there are tests for this.
|
||||
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
|
||||
return false
|
||||
}
|
||||
}
vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored; 72 changed lines)
@ -1,31 +1,45 @@
|
|||
---
|
||||
version: "2"
|
||||
linters:
|
||||
enable:
|
||||
- errcheck
|
||||
- forbidigo
|
||||
- godot
|
||||
- gofmt
|
||||
- goimports
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- unused
|
||||
|
||||
linters-settings:
|
||||
forbidigo:
|
||||
forbid:
|
||||
- p: ^fmt\.Print.*$
|
||||
msg: Do not commit print statements.
|
||||
godot:
|
||||
capital: true
|
||||
exclude:
|
||||
# Ignore "See: URL"
|
||||
- 'See:'
|
||||
goimports:
|
||||
local-prefixes: github.com/prometheus/procfs
|
||||
misspell:
|
||||
locale: US
|
||||
- forbidigo
|
||||
- godot
|
||||
- misspell
|
||||
- revive
|
||||
- testifylint
|
||||
settings:
|
||||
forbidigo:
|
||||
forbid:
|
||||
- pattern: ^fmt\.Print.*$
|
||||
msg: Do not commit print statements.
|
||||
godot:
|
||||
exclude:
|
||||
# Ignore "See: URL".
|
||||
- 'See:'
|
||||
capital: true
|
||||
misspell:
|
||||
locale: US
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gofmt
|
||||
- goimports
|
||||
settings:
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- github.com/prometheus/procfs
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored; 4 changed lines)
@ -33,7 +33,7 @@ GOHOSTOS ?= $(shell $(GO) env GOHOSTOS)
|
|||
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
|
||||
|
||||
GO_VERSION ?= $(shell $(GO) version)
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
|
||||
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
|
||||
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
|
|
@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
|||
SKIP_GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT :=
|
||||
GOLANGCI_LINT_OPTS ?=
|
||||
GOLANGCI_LINT_VERSION ?= v1.60.2
|
||||
GOLANGCI_LINT_VERSION ?= v2.0.2
|
||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
|
||||
# windows isn't included here because of the path separator being different.
|
||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (generated, vendored; 20 changed lines)
@ -20,6 +20,8 @@ package util
|
|||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
|
|
@ -48,3 +50,21 @@ func SysReadFile(file string) (string, error) {
|
|||
|
||||
return string(bytes.TrimSpace(b[:n])), nil
|
||||
}
|
||||
|
||||
// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
|
||||
func SysReadUintFromFile(path string) (uint64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// SysReadIntFromFile reads a file using SysReadFile and attempts to parse a int64 from it.
|
||||
func SysReadIntFromFile(path string) (int64, error) {
|
||||
data, err := SysReadFile(path)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
vendor/github.com/prometheus/procfs/mountstats.go (generated, vendored; 23 changed lines)
@ -601,11 +601,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
switch statVersion {
|
||||
case statVersion10:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport10TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport10UDPLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if len(ss) != expectedLength {
|
||||
|
|
@ -613,13 +614,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
}
|
||||
case statVersion11:
|
||||
var expectedLength int
|
||||
if protocol == "tcp" {
|
||||
switch protocol {
|
||||
case "tcp":
|
||||
expectedLength = fieldTransport11TCPLen
|
||||
} else if protocol == "udp" {
|
||||
case "udp":
|
||||
expectedLength = fieldTransport11UDPLen
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
expectedLength = fieldTransport11RDMAMinLen
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
|
||||
}
|
||||
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
|
||||
|
|
@ -655,11 +657,12 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
|
|||
// For the udp RPC transport there is no connection count, connect idle time,
|
||||
// or idle time (fields #3, #4, and #5); all other fields are the same. So
|
||||
// we set them to 0 here.
|
||||
if protocol == "udp" {
|
||||
switch protocol {
|
||||
case "udp":
|
||||
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
|
||||
} else if protocol == "tcp" {
|
||||
case "tcp":
|
||||
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
|
||||
} else if protocol == "rdma" {
|
||||
case "rdma":
|
||||
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
|
||||
}
vendor/github.com/prometheus/procfs/net_protocols.go (generated, vendored; 21 changed lines)
@ -115,22 +115,24 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[4] == enabled {
|
||||
switch fields[4] {
|
||||
case enabled:
|
||||
line.Pressure = 1
|
||||
} else if fields[4] == disabled {
|
||||
case disabled:
|
||||
line.Pressure = 0
|
||||
} else {
|
||||
default:
|
||||
line.Pressure = -1
|
||||
}
|
||||
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fields[6] == enabled {
|
||||
switch fields[6] {
|
||||
case enabled:
|
||||
line.Slab = true
|
||||
} else if fields[6] == disabled {
|
||||
case disabled:
|
||||
line.Slab = false
|
||||
} else {
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
|
||||
}
|
||||
line.ModuleName = fields[7]
|
||||
|
|
@ -168,11 +170,12 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
|
|||
}
|
||||
|
||||
for i := 0; i < len(capabilities); i++ {
|
||||
if capabilities[i] == "y" {
|
||||
switch capabilities[i] {
|
||||
case "y":
|
||||
*capabilityFields[i] = true
|
||||
} else if capabilities[i] == "n" {
|
||||
case "n":
|
||||
*capabilityFields[i] = false
|
||||
} else {
|
||||
default:
|
||||
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
|
||||
}
|
||||
}
vendor/github.com/prometheus/procfs/proc.go (generated, vendored; 8 changed lines)
@ -37,9 +37,9 @@ type Proc struct {
|
|||
type Procs []Proc
|
||||
|
||||
var (
|
||||
ErrFileParse = errors.New("Error Parsing File")
|
||||
ErrFileRead = errors.New("Error Reading File")
|
||||
ErrMountPoint = errors.New("Error Accessing Mount point")
|
||||
ErrFileParse = errors.New("error parsing file")
|
||||
ErrFileRead = errors.New("error reading file")
|
||||
ErrMountPoint = errors.New("error accessing mount point")
|
||||
)
|
||||
|
||||
func (p Procs) Len() int { return len(p) }
|
||||
|
|
@ -79,7 +79,7 @@ func (fs FS) Self() (Proc, error) {
|
|||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
|
||||
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
|
||||
pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
|
||||
if err != nil {
|
||||
return Proc{}, err
|
||||
}
vendor/github.com/prometheus/procfs/proc_netstat.go (generated, vendored; 224 changed lines)
@ -209,232 +209,232 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
|
|||
case "TcpExt":
|
||||
switch key {
|
||||
case "SyncookiesSent":
|
||||
procNetstat.TcpExt.SyncookiesSent = &value
|
||||
procNetstat.SyncookiesSent = &value
|
||||
case "SyncookiesRecv":
|
||||
procNetstat.TcpExt.SyncookiesRecv = &value
|
||||
procNetstat.SyncookiesRecv = &value
|
||||
case "SyncookiesFailed":
|
||||
procNetstat.TcpExt.SyncookiesFailed = &value
|
||||
procNetstat.SyncookiesFailed = &value
|
||||
case "EmbryonicRsts":
|
||||
procNetstat.TcpExt.EmbryonicRsts = &value
|
||||
procNetstat.EmbryonicRsts = &value
|
||||
case "PruneCalled":
|
||||
procNetstat.TcpExt.PruneCalled = &value
|
||||
procNetstat.PruneCalled = &value
|
||||
case "RcvPruned":
|
||||
procNetstat.TcpExt.RcvPruned = &value
|
||||
procNetstat.RcvPruned = &value
|
||||
case "OfoPruned":
|
||||
procNetstat.TcpExt.OfoPruned = &value
|
||||
procNetstat.OfoPruned = &value
|
||||
case "OutOfWindowIcmps":
|
||||
procNetstat.TcpExt.OutOfWindowIcmps = &value
|
||||
procNetstat.OutOfWindowIcmps = &value
|
||||
case "LockDroppedIcmps":
|
||||
procNetstat.TcpExt.LockDroppedIcmps = &value
|
||||
procNetstat.LockDroppedIcmps = &value
|
||||
case "ArpFilter":
|
||||
procNetstat.TcpExt.ArpFilter = &value
|
||||
procNetstat.ArpFilter = &value
|
||||
case "TW":
|
||||
procNetstat.TcpExt.TW = &value
|
||||
procNetstat.TW = &value
|
||||
case "TWRecycled":
|
||||
procNetstat.TcpExt.TWRecycled = &value
|
||||
procNetstat.TWRecycled = &value
|
||||
case "TWKilled":
|
||||
procNetstat.TcpExt.TWKilled = &value
|
||||
procNetstat.TWKilled = &value
|
||||
case "PAWSActive":
|
||||
procNetstat.TcpExt.PAWSActive = &value
|
||||
procNetstat.PAWSActive = &value
|
||||
case "PAWSEstab":
|
||||
procNetstat.TcpExt.PAWSEstab = &value
|
||||
procNetstat.PAWSEstab = &value
|
||||
case "DelayedACKs":
|
||||
procNetstat.TcpExt.DelayedACKs = &value
|
||||
procNetstat.DelayedACKs = &value
|
||||
case "DelayedACKLocked":
|
||||
procNetstat.TcpExt.DelayedACKLocked = &value
|
||||
procNetstat.DelayedACKLocked = &value
|
||||
case "DelayedACKLost":
|
||||
procNetstat.TcpExt.DelayedACKLost = &value
|
||||
procNetstat.DelayedACKLost = &value
|
||||
case "ListenOverflows":
|
||||
procNetstat.TcpExt.ListenOverflows = &value
|
||||
procNetstat.ListenOverflows = &value
|
||||
case "ListenDrops":
|
||||
procNetstat.TcpExt.ListenDrops = &value
|
||||
procNetstat.ListenDrops = &value
|
||||
case "TCPHPHits":
|
||||
procNetstat.TcpExt.TCPHPHits = &value
|
||||
procNetstat.TCPHPHits = &value
|
||||
case "TCPPureAcks":
|
||||
procNetstat.TcpExt.TCPPureAcks = &value
|
||||
procNetstat.TCPPureAcks = &value
|
||||
case "TCPHPAcks":
|
||||
procNetstat.TcpExt.TCPHPAcks = &value
|
||||
procNetstat.TCPHPAcks = &value
|
||||
case "TCPRenoRecovery":
|
||||
procNetstat.TcpExt.TCPRenoRecovery = &value
|
||||
procNetstat.TCPRenoRecovery = &value
|
||||
case "TCPSackRecovery":
|
||||
procNetstat.TcpExt.TCPSackRecovery = &value
|
||||
procNetstat.TCPSackRecovery = &value
|
||||
case "TCPSACKReneging":
|
||||
procNetstat.TcpExt.TCPSACKReneging = &value
|
||||
procNetstat.TCPSACKReneging = &value
|
||||
case "TCPSACKReorder":
|
||||
procNetstat.TcpExt.TCPSACKReorder = &value
|
||||
procNetstat.TCPSACKReorder = &value
|
||||
case "TCPRenoReorder":
|
||||
procNetstat.TcpExt.TCPRenoReorder = &value
|
||||
procNetstat.TCPRenoReorder = &value
|
||||
case "TCPTSReorder":
|
||||
procNetstat.TcpExt.TCPTSReorder = &value
|
||||
procNetstat.TCPTSReorder = &value
|
||||
case "TCPFullUndo":
|
||||
procNetstat.TcpExt.TCPFullUndo = &value
|
||||
procNetstat.TCPFullUndo = &value
|
||||
case "TCPPartialUndo":
|
||||
procNetstat.TcpExt.TCPPartialUndo = &value
|
||||
procNetstat.TCPPartialUndo = &value
|
||||
case "TCPDSACKUndo":
|
||||
procNetstat.TcpExt.TCPDSACKUndo = &value
|
||||
procNetstat.TCPDSACKUndo = &value
|
||||
case "TCPLossUndo":
|
||||
procNetstat.TcpExt.TCPLossUndo = &value
|
||||
procNetstat.TCPLossUndo = &value
|
||||
case "TCPLostRetransmit":
|
||||
procNetstat.TcpExt.TCPLostRetransmit = &value
|
||||
procNetstat.TCPLostRetransmit = &value
|
||||
case "TCPRenoFailures":
|
||||
procNetstat.TcpExt.TCPRenoFailures = &value
|
||||
procNetstat.TCPRenoFailures = &value
|
||||
case "TCPSackFailures":
|
||||
procNetstat.TcpExt.TCPSackFailures = &value
|
||||
procNetstat.TCPSackFailures = &value
|
||||
case "TCPLossFailures":
|
||||
procNetstat.TcpExt.TCPLossFailures = &value
|
||||
procNetstat.TCPLossFailures = &value
|
||||
case "TCPFastRetrans":
|
||||
procNetstat.TcpExt.TCPFastRetrans = &value
|
||||
procNetstat.TCPFastRetrans = &value
|
||||
case "TCPSlowStartRetrans":
|
||||
procNetstat.TcpExt.TCPSlowStartRetrans = &value
|
||||
procNetstat.TCPSlowStartRetrans = &value
|
||||
case "TCPTimeouts":
|
||||
procNetstat.TcpExt.TCPTimeouts = &value
|
||||
procNetstat.TCPTimeouts = &value
|
||||
case "TCPLossProbes":
|
||||
procNetstat.TcpExt.TCPLossProbes = &value
|
||||
procNetstat.TCPLossProbes = &value
|
||||
case "TCPLossProbeRecovery":
|
||||
procNetstat.TcpExt.TCPLossProbeRecovery = &value
|
||||
procNetstat.TCPLossProbeRecovery = &value
|
||||
case "TCPRenoRecoveryFail":
|
||||
procNetstat.TcpExt.TCPRenoRecoveryFail = &value
|
||||
procNetstat.TCPRenoRecoveryFail = &value
|
||||
case "TCPSackRecoveryFail":
|
||||
procNetstat.TcpExt.TCPSackRecoveryFail = &value
|
||||
procNetstat.TCPSackRecoveryFail = &value
|
||||
case "TCPRcvCollapsed":
|
||||
procNetstat.TcpExt.TCPRcvCollapsed = &value
|
||||
procNetstat.TCPRcvCollapsed = &value
|
||||
case "TCPDSACKOldSent":
|
||||
procNetstat.TcpExt.TCPDSACKOldSent = &value
|
||||
procNetstat.TCPDSACKOldSent = &value
|
||||
case "TCPDSACKOfoSent":
|
||||
procNetstat.TcpExt.TCPDSACKOfoSent = &value
|
||||
procNetstat.TCPDSACKOfoSent = &value
|
||||
case "TCPDSACKRecv":
|
||||
procNetstat.TcpExt.TCPDSACKRecv = &value
|
||||
procNetstat.TCPDSACKRecv = &value
|
||||
case "TCPDSACKOfoRecv":
|
||||
procNetstat.TcpExt.TCPDSACKOfoRecv = &value
|
||||
procNetstat.TCPDSACKOfoRecv = &value
|
||||
case "TCPAbortOnData":
|
||||
procNetstat.TcpExt.TCPAbortOnData = &value
|
||||
procNetstat.TCPAbortOnData = &value
|
||||
case "TCPAbortOnClose":
|
||||
procNetstat.TcpExt.TCPAbortOnClose = &value
|
||||
procNetstat.TCPAbortOnClose = &value
|
||||
case "TCPDeferAcceptDrop":
|
||||
procNetstat.TcpExt.TCPDeferAcceptDrop = &value
|
||||
procNetstat.TCPDeferAcceptDrop = &value
|
||||
case "IPReversePathFilter":
|
||||
procNetstat.TcpExt.IPReversePathFilter = &value
|
||||
procNetstat.IPReversePathFilter = &value
|
||||
case "TCPTimeWaitOverflow":
|
||||
procNetstat.TcpExt.TCPTimeWaitOverflow = &value
|
||||
procNetstat.TCPTimeWaitOverflow = &value
|
||||
case "TCPReqQFullDoCookies":
|
||||
procNetstat.TcpExt.TCPReqQFullDoCookies = &value
|
||||
procNetstat.TCPReqQFullDoCookies = &value
|
||||
case "TCPReqQFullDrop":
|
||||
procNetstat.TcpExt.TCPReqQFullDrop = &value
|
||||
procNetstat.TCPReqQFullDrop = &value
|
||||
case "TCPRetransFail":
|
||||
procNetstat.TcpExt.TCPRetransFail = &value
|
||||
procNetstat.TCPRetransFail = &value
|
||||
case "TCPRcvCoalesce":
|
||||
procNetstat.TcpExt.TCPRcvCoalesce = &value
|
||||
procNetstat.TCPRcvCoalesce = &value
|
||||
case "TCPRcvQDrop":
|
||||
procNetstat.TcpExt.TCPRcvQDrop = &value
|
||||
procNetstat.TCPRcvQDrop = &value
|
||||
case "TCPOFOQueue":
|
||||
procNetstat.TcpExt.TCPOFOQueue = &value
|
||||
procNetstat.TCPOFOQueue = &value
|
||||
case "TCPOFODrop":
|
||||
procNetstat.TcpExt.TCPOFODrop = &value
|
||||
procNetstat.TCPOFODrop = &value
|
||||
case "TCPOFOMerge":
|
||||
procNetstat.TcpExt.TCPOFOMerge = &value
|
||||
procNetstat.TCPOFOMerge = &value
|
||||
case "TCPChallengeACK":
|
||||
procNetstat.TcpExt.TCPChallengeACK = &value
|
||||
procNetstat.TCPChallengeACK = &value
|
||||
case "TCPSYNChallenge":
|
||||
procNetstat.TcpExt.TCPSYNChallenge = &value
|
||||
procNetstat.TCPSYNChallenge = &value
|
||||
case "TCPFastOpenActive":
|
||||
procNetstat.TcpExt.TCPFastOpenActive = &value
|
||||
procNetstat.TCPFastOpenActive = &value
|
||||
case "TCPFastOpenActiveFail":
|
||||
procNetstat.TcpExt.TCPFastOpenActiveFail = &value
|
||||
procNetstat.TCPFastOpenActiveFail = &value
|
||||
case "TCPFastOpenPassive":
|
||||
procNetstat.TcpExt.TCPFastOpenPassive = &value
|
||||
procNetstat.TCPFastOpenPassive = &value
|
||||
case "TCPFastOpenPassiveFail":
|
||||
procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
|
||||
procNetstat.TCPFastOpenPassiveFail = &value
|
||||
case "TCPFastOpenListenOverflow":
|
||||
procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
|
||||
procNetstat.TCPFastOpenListenOverflow = &value
|
||||
case "TCPFastOpenCookieReqd":
|
||||
procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
|
||||
procNetstat.TCPFastOpenCookieReqd = &value
|
||||
case "TCPFastOpenBlackhole":
|
||||
procNetstat.TcpExt.TCPFastOpenBlackhole = &value
|
||||
procNetstat.TCPFastOpenBlackhole = &value
|
||||
case "TCPSpuriousRtxHostQueues":
|
||||
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
|
||||
procNetstat.TCPSpuriousRtxHostQueues = &value
|
||||
case "BusyPollRxPackets":
|
||||
procNetstat.TcpExt.BusyPollRxPackets = &value
|
||||
procNetstat.BusyPollRxPackets = &value
|
||||
case "TCPAutoCorking":
|
||||
procNetstat.TcpExt.TCPAutoCorking = &value
|
||||
procNetstat.TCPAutoCorking = &value
|
||||
case "TCPFromZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
|
||||
procNetstat.TCPFromZeroWindowAdv = &value
|
||||
case "TCPToZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPToZeroWindowAdv = &value
|
||||
procNetstat.TCPToZeroWindowAdv = &value
|
||||
case "TCPWantZeroWindowAdv":
|
||||
procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
|
||||
procNetstat.TCPWantZeroWindowAdv = &value
|
||||
case "TCPSynRetrans":
|
||||
procNetstat.TcpExt.TCPSynRetrans = &value
|
||||
procNetstat.TCPSynRetrans = &value
|
||||
case "TCPOrigDataSent":
|
||||
procNetstat.TcpExt.TCPOrigDataSent = &value
|
||||
procNetstat.TCPOrigDataSent = &value
|
||||
case "TCPHystartTrainDetect":
|
||||
procNetstat.TcpExt.TCPHystartTrainDetect = &value
|
||||
procNetstat.TCPHystartTrainDetect = &value
|
||||
case "TCPHystartTrainCwnd":
|
||||
procNetstat.TcpExt.TCPHystartTrainCwnd = &value
|
||||
procNetstat.TCPHystartTrainCwnd = &value
|
||||
case "TCPHystartDelayDetect":
|
||||
procNetstat.TcpExt.TCPHystartDelayDetect = &value
|
||||
procNetstat.TCPHystartDelayDetect = &value
|
||||
case "TCPHystartDelayCwnd":
|
||||
procNetstat.TcpExt.TCPHystartDelayCwnd = &value
|
||||
procNetstat.TCPHystartDelayCwnd = &value
|
||||
case "TCPACKSkippedSynRecv":
|
||||
procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
|
||||
procNetstat.TCPACKSkippedSynRecv = &value
|
||||
case "TCPACKSkippedPAWS":
|
||||
procNetstat.TcpExt.TCPACKSkippedPAWS = &value
|
||||
procNetstat.TCPACKSkippedPAWS = &value
|
||||
case "TCPACKSkippedSeq":
|
||||
procNetstat.TcpExt.TCPACKSkippedSeq = &value
|
||||
procNetstat.TCPACKSkippedSeq = &value
|
||||
case "TCPACKSkippedFinWait2":
|
||||
procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
|
||||
procNetstat.TCPACKSkippedFinWait2 = &value
|
||||
case "TCPACKSkippedTimeWait":
|
||||
procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
|
||||
procNetstat.TCPACKSkippedTimeWait = &value
|
||||
case "TCPACKSkippedChallenge":
|
||||
procNetstat.TcpExt.TCPACKSkippedChallenge = &value
|
||||
procNetstat.TCPACKSkippedChallenge = &value
|
||||
case "TCPWinProbe":
|
||||
procNetstat.TcpExt.TCPWinProbe = &value
|
||||
procNetstat.TCPWinProbe = &value
|
||||
case "TCPKeepAlive":
|
||||
procNetstat.TcpExt.TCPKeepAlive = &value
|
||||
procNetstat.TCPKeepAlive = &value
|
||||
case "TCPMTUPFail":
|
||||
procNetstat.TcpExt.TCPMTUPFail = &value
|
||||
procNetstat.TCPMTUPFail = &value
|
||||
case "TCPMTUPSuccess":
|
||||
procNetstat.TcpExt.TCPMTUPSuccess = &value
|
||||
procNetstat.TCPMTUPSuccess = &value
|
||||
case "TCPWqueueTooBig":
|
||||
procNetstat.TcpExt.TCPWqueueTooBig = &value
|
||||
procNetstat.TCPWqueueTooBig = &value
|
||||
}
|
||||
case "IpExt":
|
||||
switch key {
|
||||
case "InNoRoutes":
|
||||
procNetstat.IpExt.InNoRoutes = &value
|
||||
procNetstat.InNoRoutes = &value
|
||||
case "InTruncatedPkts":
|
||||
procNetstat.IpExt.InTruncatedPkts = &value
|
||||
procNetstat.InTruncatedPkts = &value
|
||||
case "InMcastPkts":
|
||||
procNetstat.IpExt.InMcastPkts = &value
|
||||
procNetstat.InMcastPkts = &value
|
||||
case "OutMcastPkts":
|
||||
procNetstat.IpExt.OutMcastPkts = &value
|
||||
procNetstat.OutMcastPkts = &value
|
||||
case "InBcastPkts":
|
||||
procNetstat.IpExt.InBcastPkts = &value
|
||||
procNetstat.InBcastPkts = &value
|
||||
case "OutBcastPkts":
|
||||
procNetstat.IpExt.OutBcastPkts = &value
|
||||
procNetstat.OutBcastPkts = &value
|
||||
case "InOctets":
|
||||
procNetstat.IpExt.InOctets = &value
|
||||
procNetstat.InOctets = &value
|
||||
case "OutOctets":
|
||||
procNetstat.IpExt.OutOctets = &value
|
||||
procNetstat.OutOctets = &value
|
||||
case "InMcastOctets":
|
||||
procNetstat.IpExt.InMcastOctets = &value
|
||||
procNetstat.InMcastOctets = &value
|
||||
case "OutMcastOctets":
|
||||
procNetstat.IpExt.OutMcastOctets = &value
|
||||
procNetstat.OutMcastOctets = &value
|
||||
case "InBcastOctets":
|
||||
procNetstat.IpExt.InBcastOctets = &value
|
||||
procNetstat.InBcastOctets = &value
|
||||
case "OutBcastOctets":
|
||||
procNetstat.IpExt.OutBcastOctets = &value
|
||||
procNetstat.OutBcastOctets = &value
|
||||
case "InCsumErrors":
|
||||
procNetstat.IpExt.InCsumErrors = &value
|
||||
procNetstat.InCsumErrors = &value
|
||||
case "InNoECTPkts":
|
||||
procNetstat.IpExt.InNoECTPkts = &value
|
||||
procNetstat.InNoECTPkts = &value
|
||||
case "InECT1Pkts":
|
||||
procNetstat.IpExt.InECT1Pkts = &value
|
||||
procNetstat.InECT1Pkts = &value
|
||||
case "InECT0Pkts":
|
||||
procNetstat.IpExt.InECT0Pkts = &value
|
||||
procNetstat.InECT0Pkts = &value
|
||||
case "InCEPkts":
|
||||
procNetstat.IpExt.InCEPkts = &value
|
||||
procNetstat.InCEPkts = &value
|
||||
case "ReasmOverlaps":
|
||||
procNetstat.IpExt.ReasmOverlaps = &value
|
||||
procNetstat.ReasmOverlaps = &value
|
||||
			}
		}
	}
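The repeated old/new pairs in the hunks above (for example `procNetstat.TcpExt.TCPOFOQueue` versus `procNetstat.TCPOFOQueue`) are equivalent selectors: `TcpExt` and `IpExt` are embedded in `ProcNetstat`, so their fields are promoted to the outer struct. A minimal standalone sketch with simplified stand-in types (not the vendored code itself):

```go
package main

import "fmt"

// Simplified stand-ins for the procfs types; only one counter is modelled.
type TcpExt struct {
	TCPOFOQueue *float64
}

type ProcNetstat struct {
	PID    int
	TcpExt // embedded, so its fields are promoted to ProcNetstat
}

func main() {
	v := 42.0
	p := ProcNetstat{PID: 1}
	p.TcpExt.TCPOFOQueue = &v           // explicit selector, the old style in the diff
	fmt.Println(*p.TCPOFOQueue == 42.0) // promoted selector reads the same field: true
}
```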
vendor/github.com/prometheus/procfs/proc_snmp.go (generated, vendored, 120 changed lines)

@@ -173,138 +173,138 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
|
||||
switch key {
|
||||
case "Forwarding":
|
||||
procSnmp.Ip.Forwarding = &value
|
||||
procSnmp.Forwarding = &value
|
||||
case "DefaultTTL":
|
||||
procSnmp.Ip.DefaultTTL = &value
|
||||
procSnmp.DefaultTTL = &value
|
||||
case "InReceives":
|
||||
procSnmp.Ip.InReceives = &value
|
||||
procSnmp.InReceives = &value
|
||||
case "InHdrErrors":
|
||||
procSnmp.Ip.InHdrErrors = &value
|
||||
procSnmp.InHdrErrors = &value
|
||||
case "InAddrErrors":
|
||||
procSnmp.Ip.InAddrErrors = &value
|
||||
procSnmp.InAddrErrors = &value
|
||||
case "ForwDatagrams":
|
||||
procSnmp.Ip.ForwDatagrams = &value
|
||||
procSnmp.ForwDatagrams = &value
|
||||
case "InUnknownProtos":
|
||||
procSnmp.Ip.InUnknownProtos = &value
|
||||
procSnmp.InUnknownProtos = &value
|
||||
case "InDiscards":
|
||||
procSnmp.Ip.InDiscards = &value
|
||||
procSnmp.InDiscards = &value
|
||||
case "InDelivers":
|
||||
procSnmp.Ip.InDelivers = &value
|
||||
procSnmp.InDelivers = &value
|
||||
case "OutRequests":
|
||||
procSnmp.Ip.OutRequests = &value
|
||||
procSnmp.OutRequests = &value
|
||||
case "OutDiscards":
|
||||
procSnmp.Ip.OutDiscards = &value
|
||||
procSnmp.OutDiscards = &value
|
||||
case "OutNoRoutes":
|
||||
procSnmp.Ip.OutNoRoutes = &value
|
||||
procSnmp.OutNoRoutes = &value
|
||||
case "ReasmTimeout":
|
||||
procSnmp.Ip.ReasmTimeout = &value
|
||||
procSnmp.ReasmTimeout = &value
|
||||
case "ReasmReqds":
|
||||
procSnmp.Ip.ReasmReqds = &value
|
||||
procSnmp.ReasmReqds = &value
|
||||
case "ReasmOKs":
|
||||
procSnmp.Ip.ReasmOKs = &value
|
||||
procSnmp.ReasmOKs = &value
|
||||
case "ReasmFails":
|
||||
procSnmp.Ip.ReasmFails = &value
|
||||
procSnmp.ReasmFails = &value
|
||||
case "FragOKs":
|
||||
procSnmp.Ip.FragOKs = &value
|
||||
procSnmp.FragOKs = &value
|
||||
case "FragFails":
|
||||
procSnmp.Ip.FragFails = &value
|
||||
procSnmp.FragFails = &value
|
||||
case "FragCreates":
|
||||
procSnmp.Ip.FragCreates = &value
|
||||
procSnmp.FragCreates = &value
|
||||
}
|
||||
case "Icmp":
|
||||
switch key {
|
||||
case "InMsgs":
|
||||
procSnmp.Icmp.InMsgs = &value
|
||||
procSnmp.InMsgs = &value
|
||||
case "InErrors":
|
||||
procSnmp.Icmp.InErrors = &value
|
||||
case "InCsumErrors":
|
||||
procSnmp.Icmp.InCsumErrors = &value
|
||||
case "InDestUnreachs":
|
||||
procSnmp.Icmp.InDestUnreachs = &value
|
||||
procSnmp.InDestUnreachs = &value
|
||||
case "InTimeExcds":
|
||||
procSnmp.Icmp.InTimeExcds = &value
|
||||
procSnmp.InTimeExcds = &value
|
||||
case "InParmProbs":
|
||||
procSnmp.Icmp.InParmProbs = &value
|
||||
procSnmp.InParmProbs = &value
|
||||
case "InSrcQuenchs":
|
||||
procSnmp.Icmp.InSrcQuenchs = &value
|
||||
procSnmp.InSrcQuenchs = &value
|
||||
case "InRedirects":
|
||||
procSnmp.Icmp.InRedirects = &value
|
||||
procSnmp.InRedirects = &value
|
||||
case "InEchos":
|
||||
procSnmp.Icmp.InEchos = &value
|
||||
procSnmp.InEchos = &value
|
||||
case "InEchoReps":
|
||||
procSnmp.Icmp.InEchoReps = &value
|
||||
procSnmp.InEchoReps = &value
|
||||
case "InTimestamps":
|
||||
procSnmp.Icmp.InTimestamps = &value
|
||||
procSnmp.InTimestamps = &value
|
||||
case "InTimestampReps":
|
||||
procSnmp.Icmp.InTimestampReps = &value
|
||||
procSnmp.InTimestampReps = &value
|
||||
case "InAddrMasks":
|
||||
procSnmp.Icmp.InAddrMasks = &value
|
||||
procSnmp.InAddrMasks = &value
|
||||
case "InAddrMaskReps":
|
||||
procSnmp.Icmp.InAddrMaskReps = &value
|
||||
procSnmp.InAddrMaskReps = &value
|
||||
case "OutMsgs":
|
||||
procSnmp.Icmp.OutMsgs = &value
|
||||
procSnmp.OutMsgs = &value
|
||||
case "OutErrors":
|
||||
procSnmp.Icmp.OutErrors = &value
|
||||
procSnmp.OutErrors = &value
|
||||
case "OutDestUnreachs":
|
||||
procSnmp.Icmp.OutDestUnreachs = &value
|
||||
procSnmp.OutDestUnreachs = &value
|
||||
case "OutTimeExcds":
|
||||
procSnmp.Icmp.OutTimeExcds = &value
|
||||
procSnmp.OutTimeExcds = &value
|
||||
case "OutParmProbs":
|
||||
procSnmp.Icmp.OutParmProbs = &value
|
||||
procSnmp.OutParmProbs = &value
|
||||
case "OutSrcQuenchs":
|
||||
procSnmp.Icmp.OutSrcQuenchs = &value
|
||||
procSnmp.OutSrcQuenchs = &value
|
||||
case "OutRedirects":
|
||||
procSnmp.Icmp.OutRedirects = &value
|
||||
procSnmp.OutRedirects = &value
|
||||
case "OutEchos":
|
||||
procSnmp.Icmp.OutEchos = &value
|
||||
procSnmp.OutEchos = &value
|
||||
case "OutEchoReps":
|
||||
procSnmp.Icmp.OutEchoReps = &value
|
||||
procSnmp.OutEchoReps = &value
|
||||
case "OutTimestamps":
|
||||
procSnmp.Icmp.OutTimestamps = &value
|
||||
procSnmp.OutTimestamps = &value
|
||||
case "OutTimestampReps":
|
||||
procSnmp.Icmp.OutTimestampReps = &value
|
||||
procSnmp.OutTimestampReps = &value
|
||||
case "OutAddrMasks":
|
||||
procSnmp.Icmp.OutAddrMasks = &value
|
||||
procSnmp.OutAddrMasks = &value
|
||||
case "OutAddrMaskReps":
|
||||
procSnmp.Icmp.OutAddrMaskReps = &value
|
||||
procSnmp.OutAddrMaskReps = &value
|
||||
}
|
||||
case "IcmpMsg":
|
||||
switch key {
|
||||
case "InType3":
|
||||
procSnmp.IcmpMsg.InType3 = &value
|
||||
procSnmp.InType3 = &value
|
||||
case "OutType3":
|
||||
procSnmp.IcmpMsg.OutType3 = &value
|
||||
procSnmp.OutType3 = &value
|
||||
}
|
||||
case "Tcp":
|
||||
switch key {
|
||||
case "RtoAlgorithm":
|
||||
procSnmp.Tcp.RtoAlgorithm = &value
|
||||
procSnmp.RtoAlgorithm = &value
|
||||
case "RtoMin":
|
||||
procSnmp.Tcp.RtoMin = &value
|
||||
procSnmp.RtoMin = &value
|
||||
case "RtoMax":
|
||||
procSnmp.Tcp.RtoMax = &value
|
||||
procSnmp.RtoMax = &value
|
||||
case "MaxConn":
|
||||
procSnmp.Tcp.MaxConn = &value
|
||||
procSnmp.MaxConn = &value
|
||||
case "ActiveOpens":
|
||||
procSnmp.Tcp.ActiveOpens = &value
|
||||
procSnmp.ActiveOpens = &value
|
||||
case "PassiveOpens":
|
||||
procSnmp.Tcp.PassiveOpens = &value
|
||||
procSnmp.PassiveOpens = &value
|
||||
case "AttemptFails":
|
||||
procSnmp.Tcp.AttemptFails = &value
|
||||
procSnmp.AttemptFails = &value
|
||||
case "EstabResets":
|
||||
procSnmp.Tcp.EstabResets = &value
|
||||
procSnmp.EstabResets = &value
|
||||
case "CurrEstab":
|
||||
procSnmp.Tcp.CurrEstab = &value
|
||||
procSnmp.CurrEstab = &value
|
||||
case "InSegs":
|
||||
procSnmp.Tcp.InSegs = &value
|
||||
procSnmp.InSegs = &value
|
||||
case "OutSegs":
|
||||
procSnmp.Tcp.OutSegs = &value
|
||||
procSnmp.OutSegs = &value
|
||||
case "RetransSegs":
|
||||
procSnmp.Tcp.RetransSegs = &value
|
||||
procSnmp.RetransSegs = &value
|
||||
case "InErrs":
|
||||
procSnmp.Tcp.InErrs = &value
|
||||
procSnmp.InErrs = &value
|
||||
case "OutRsts":
|
||||
procSnmp.Tcp.OutRsts = &value
|
||||
procSnmp.OutRsts = &value
|
||||
case "InCsumErrors":
|
||||
procSnmp.Tcp.InCsumErrors = &value
|
||||
}
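For callers of the vendored package the selector change is invisible; the exported accessors keep working. A hedged usage sketch, assuming the procfs `Proc.Snmp` API and a Linux `/proc` mount (field names as in the diff above):

```go
package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}
	snmp, err := p.Snmp() // parses /proc/<pid>/net/snmp
	if err != nil {
		panic(err)
	}
	// Counters are *float64; nil means the kernel did not report the field.
	if snmp.Tcp.CurrEstab != nil {
		fmt.Println("established TCP connections:", *snmp.Tcp.CurrEstab)
	}
}
```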
vendor/github.com/prometheus/procfs/proc_snmp6.go (generated, vendored, 150 changed lines)

@@ -182,161 +182,161 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
|
||||
switch key {
|
||||
case "InReceives":
|
||||
procSnmp6.Ip6.InReceives = &value
|
||||
procSnmp6.InReceives = &value
|
||||
case "InHdrErrors":
|
||||
procSnmp6.Ip6.InHdrErrors = &value
|
||||
procSnmp6.InHdrErrors = &value
|
||||
case "InTooBigErrors":
|
||||
procSnmp6.Ip6.InTooBigErrors = &value
|
||||
procSnmp6.InTooBigErrors = &value
|
||||
case "InNoRoutes":
|
||||
procSnmp6.Ip6.InNoRoutes = &value
|
||||
procSnmp6.InNoRoutes = &value
|
||||
case "InAddrErrors":
|
||||
procSnmp6.Ip6.InAddrErrors = &value
|
||||
procSnmp6.InAddrErrors = &value
|
||||
case "InUnknownProtos":
|
||||
procSnmp6.Ip6.InUnknownProtos = &value
|
||||
procSnmp6.InUnknownProtos = &value
|
||||
case "InTruncatedPkts":
|
||||
procSnmp6.Ip6.InTruncatedPkts = &value
|
||||
procSnmp6.InTruncatedPkts = &value
|
||||
case "InDiscards":
|
||||
procSnmp6.Ip6.InDiscards = &value
|
||||
procSnmp6.InDiscards = &value
|
||||
case "InDelivers":
|
||||
procSnmp6.Ip6.InDelivers = &value
|
||||
procSnmp6.InDelivers = &value
|
||||
case "OutForwDatagrams":
|
||||
procSnmp6.Ip6.OutForwDatagrams = &value
|
||||
procSnmp6.OutForwDatagrams = &value
|
||||
case "OutRequests":
|
||||
procSnmp6.Ip6.OutRequests = &value
|
||||
procSnmp6.OutRequests = &value
|
||||
case "OutDiscards":
|
||||
procSnmp6.Ip6.OutDiscards = &value
|
||||
procSnmp6.OutDiscards = &value
|
||||
case "OutNoRoutes":
|
||||
procSnmp6.Ip6.OutNoRoutes = &value
|
||||
procSnmp6.OutNoRoutes = &value
|
||||
case "ReasmTimeout":
|
||||
procSnmp6.Ip6.ReasmTimeout = &value
|
||||
procSnmp6.ReasmTimeout = &value
|
||||
case "ReasmReqds":
|
||||
procSnmp6.Ip6.ReasmReqds = &value
|
||||
procSnmp6.ReasmReqds = &value
|
||||
case "ReasmOKs":
|
||||
procSnmp6.Ip6.ReasmOKs = &value
|
||||
procSnmp6.ReasmOKs = &value
|
||||
case "ReasmFails":
|
||||
procSnmp6.Ip6.ReasmFails = &value
|
||||
procSnmp6.ReasmFails = &value
|
||||
case "FragOKs":
|
||||
procSnmp6.Ip6.FragOKs = &value
|
||||
procSnmp6.FragOKs = &value
|
||||
case "FragFails":
|
||||
procSnmp6.Ip6.FragFails = &value
|
||||
procSnmp6.FragFails = &value
|
||||
case "FragCreates":
|
||||
procSnmp6.Ip6.FragCreates = &value
|
||||
procSnmp6.FragCreates = &value
|
||||
case "InMcastPkts":
|
||||
procSnmp6.Ip6.InMcastPkts = &value
|
||||
procSnmp6.InMcastPkts = &value
|
||||
case "OutMcastPkts":
|
||||
procSnmp6.Ip6.OutMcastPkts = &value
|
||||
procSnmp6.OutMcastPkts = &value
|
||||
case "InOctets":
|
||||
procSnmp6.Ip6.InOctets = &value
|
||||
procSnmp6.InOctets = &value
|
||||
case "OutOctets":
|
||||
procSnmp6.Ip6.OutOctets = &value
|
||||
procSnmp6.OutOctets = &value
|
||||
case "InMcastOctets":
|
||||
procSnmp6.Ip6.InMcastOctets = &value
|
||||
procSnmp6.InMcastOctets = &value
|
||||
case "OutMcastOctets":
|
||||
procSnmp6.Ip6.OutMcastOctets = &value
|
||||
procSnmp6.OutMcastOctets = &value
|
||||
case "InBcastOctets":
|
||||
procSnmp6.Ip6.InBcastOctets = &value
|
||||
procSnmp6.InBcastOctets = &value
|
||||
case "OutBcastOctets":
|
||||
procSnmp6.Ip6.OutBcastOctets = &value
|
||||
procSnmp6.OutBcastOctets = &value
|
||||
case "InNoECTPkts":
|
||||
procSnmp6.Ip6.InNoECTPkts = &value
|
||||
procSnmp6.InNoECTPkts = &value
|
||||
case "InECT1Pkts":
|
||||
procSnmp6.Ip6.InECT1Pkts = &value
|
||||
procSnmp6.InECT1Pkts = &value
|
||||
case "InECT0Pkts":
|
||||
procSnmp6.Ip6.InECT0Pkts = &value
|
||||
procSnmp6.InECT0Pkts = &value
|
||||
case "InCEPkts":
|
||||
procSnmp6.Ip6.InCEPkts = &value
|
||||
procSnmp6.InCEPkts = &value
|
||||
|
||||
}
|
||||
case "Icmp6":
|
||||
switch key {
|
||||
case "InMsgs":
|
||||
procSnmp6.Icmp6.InMsgs = &value
|
||||
procSnmp6.InMsgs = &value
|
||||
case "InErrors":
|
||||
procSnmp6.Icmp6.InErrors = &value
|
||||
case "OutMsgs":
|
||||
procSnmp6.Icmp6.OutMsgs = &value
|
||||
procSnmp6.OutMsgs = &value
|
||||
case "OutErrors":
|
||||
procSnmp6.Icmp6.OutErrors = &value
|
||||
procSnmp6.OutErrors = &value
|
||||
case "InCsumErrors":
|
||||
procSnmp6.Icmp6.InCsumErrors = &value
|
||||
case "InDestUnreachs":
|
||||
procSnmp6.Icmp6.InDestUnreachs = &value
|
||||
procSnmp6.InDestUnreachs = &value
|
||||
case "InPktTooBigs":
|
||||
procSnmp6.Icmp6.InPktTooBigs = &value
|
||||
procSnmp6.InPktTooBigs = &value
|
||||
case "InTimeExcds":
|
||||
procSnmp6.Icmp6.InTimeExcds = &value
|
||||
procSnmp6.InTimeExcds = &value
|
||||
case "InParmProblems":
|
||||
procSnmp6.Icmp6.InParmProblems = &value
|
||||
procSnmp6.InParmProblems = &value
|
||||
case "InEchos":
|
||||
procSnmp6.Icmp6.InEchos = &value
|
||||
procSnmp6.InEchos = &value
|
||||
case "InEchoReplies":
|
||||
procSnmp6.Icmp6.InEchoReplies = &value
|
||||
procSnmp6.InEchoReplies = &value
|
||||
case "InGroupMembQueries":
|
||||
procSnmp6.Icmp6.InGroupMembQueries = &value
|
||||
procSnmp6.InGroupMembQueries = &value
|
||||
case "InGroupMembResponses":
|
||||
procSnmp6.Icmp6.InGroupMembResponses = &value
|
||||
procSnmp6.InGroupMembResponses = &value
|
||||
case "InGroupMembReductions":
|
||||
procSnmp6.Icmp6.InGroupMembReductions = &value
|
||||
procSnmp6.InGroupMembReductions = &value
|
||||
case "InRouterSolicits":
|
||||
procSnmp6.Icmp6.InRouterSolicits = &value
|
||||
procSnmp6.InRouterSolicits = &value
|
||||
case "InRouterAdvertisements":
|
||||
procSnmp6.Icmp6.InRouterAdvertisements = &value
|
||||
procSnmp6.InRouterAdvertisements = &value
|
||||
case "InNeighborSolicits":
|
||||
procSnmp6.Icmp6.InNeighborSolicits = &value
|
||||
procSnmp6.InNeighborSolicits = &value
|
||||
case "InNeighborAdvertisements":
|
||||
procSnmp6.Icmp6.InNeighborAdvertisements = &value
|
||||
procSnmp6.InNeighborAdvertisements = &value
|
||||
case "InRedirects":
|
||||
procSnmp6.Icmp6.InRedirects = &value
|
||||
procSnmp6.InRedirects = &value
|
||||
case "InMLDv2Reports":
|
||||
procSnmp6.Icmp6.InMLDv2Reports = &value
|
||||
procSnmp6.InMLDv2Reports = &value
|
||||
case "OutDestUnreachs":
|
||||
procSnmp6.Icmp6.OutDestUnreachs = &value
|
||||
procSnmp6.OutDestUnreachs = &value
|
||||
case "OutPktTooBigs":
|
||||
procSnmp6.Icmp6.OutPktTooBigs = &value
|
||||
procSnmp6.OutPktTooBigs = &value
|
||||
case "OutTimeExcds":
|
||||
procSnmp6.Icmp6.OutTimeExcds = &value
|
||||
procSnmp6.OutTimeExcds = &value
|
||||
case "OutParmProblems":
|
||||
procSnmp6.Icmp6.OutParmProblems = &value
|
||||
procSnmp6.OutParmProblems = &value
|
||||
case "OutEchos":
|
||||
procSnmp6.Icmp6.OutEchos = &value
|
||||
procSnmp6.OutEchos = &value
|
||||
case "OutEchoReplies":
|
||||
procSnmp6.Icmp6.OutEchoReplies = &value
|
||||
procSnmp6.OutEchoReplies = &value
|
||||
case "OutGroupMembQueries":
|
||||
procSnmp6.Icmp6.OutGroupMembQueries = &value
|
||||
procSnmp6.OutGroupMembQueries = &value
|
||||
case "OutGroupMembResponses":
|
||||
procSnmp6.Icmp6.OutGroupMembResponses = &value
|
||||
procSnmp6.OutGroupMembResponses = &value
|
||||
case "OutGroupMembReductions":
|
||||
procSnmp6.Icmp6.OutGroupMembReductions = &value
|
||||
procSnmp6.OutGroupMembReductions = &value
|
||||
case "OutRouterSolicits":
|
||||
procSnmp6.Icmp6.OutRouterSolicits = &value
|
||||
procSnmp6.OutRouterSolicits = &value
|
||||
case "OutRouterAdvertisements":
|
||||
procSnmp6.Icmp6.OutRouterAdvertisements = &value
|
||||
procSnmp6.OutRouterAdvertisements = &value
|
||||
case "OutNeighborSolicits":
|
||||
procSnmp6.Icmp6.OutNeighborSolicits = &value
|
||||
procSnmp6.OutNeighborSolicits = &value
|
||||
case "OutNeighborAdvertisements":
|
||||
procSnmp6.Icmp6.OutNeighborAdvertisements = &value
|
||||
procSnmp6.OutNeighborAdvertisements = &value
|
||||
case "OutRedirects":
|
||||
procSnmp6.Icmp6.OutRedirects = &value
|
||||
procSnmp6.OutRedirects = &value
|
||||
case "OutMLDv2Reports":
|
||||
procSnmp6.Icmp6.OutMLDv2Reports = &value
|
||||
procSnmp6.OutMLDv2Reports = &value
|
||||
case "InType1":
|
||||
procSnmp6.Icmp6.InType1 = &value
|
||||
procSnmp6.InType1 = &value
|
||||
case "InType134":
|
||||
procSnmp6.Icmp6.InType134 = &value
|
||||
procSnmp6.InType134 = &value
|
||||
case "InType135":
|
||||
procSnmp6.Icmp6.InType135 = &value
|
||||
procSnmp6.InType135 = &value
|
||||
case "InType136":
|
||||
procSnmp6.Icmp6.InType136 = &value
|
||||
procSnmp6.InType136 = &value
|
||||
case "InType143":
|
||||
procSnmp6.Icmp6.InType143 = &value
|
||||
procSnmp6.InType143 = &value
|
||||
case "OutType133":
|
||||
procSnmp6.Icmp6.OutType133 = &value
|
||||
procSnmp6.OutType133 = &value
|
||||
case "OutType135":
|
||||
procSnmp6.Icmp6.OutType135 = &value
|
||||
procSnmp6.OutType135 = &value
|
||||
case "OutType136":
|
||||
procSnmp6.Icmp6.OutType136 = &value
|
||||
procSnmp6.OutType136 = &value
|
||||
case "OutType143":
|
||||
procSnmp6.Icmp6.OutType143 = &value
|
||||
procSnmp6.OutType143 = &value
|
||||
}
|
||||
case "Udp6":
|
||||
switch key {
|
||||
@@ -355,7 +355,7 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
			case "InCsumErrors":
				procSnmp6.Udp6.InCsumErrors = &value
			case "IgnoredMulti":
				procSnmp6.Udp6.IgnoredMulti = &value
				procSnmp6.IgnoredMulti = &value
			}
		case "UdpLite6":
			switch key {

vendor/github.com/prometheus/procfs/proc_sys.go (generated, vendored, 2 changed lines)
@@ -21,7 +21,7 @@ import (
)

func sysctlToPath(sysctl string) string {
	return strings.Replace(sysctl, ".", "/", -1)
	return strings.ReplaceAll(sysctl, ".", "/")
}

func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
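The `proc_sys.go` change above is purely mechanical: `strings.ReplaceAll(s, old, new)` is defined as `strings.Replace(s, old, new, -1)`, so behaviour is unchanged. A small sketch of the equivalence:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	sysctl := "net.ipv4.tcp_keepalive_time"
	a := strings.Replace(sysctl, ".", "/", -1) // old form: -1 means replace every occurrence
	b := strings.ReplaceAll(sysctl, ".", "/")  // new form, same semantics
	fmt.Println(a, a == b)                     // net/ipv4/tcp_keepalive_time true
}
```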
vendor/github.com/prometheus/procfs/softirqs.go (generated, vendored, 22 changed lines)
@@ -68,8 +68,8 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
		if len(parts) < 2 {
			continue
		}
		switch {
		case parts[0] == "HI:":
		switch parts[0] {
		case "HI:":
			perCPU := parts[1:]
			softirqs.Hi = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -77,7 +77,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "TIMER:":
		case "TIMER:":
			perCPU := parts[1:]
			softirqs.Timer = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -85,7 +85,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "NET_TX:":
		case "NET_TX:":
			perCPU := parts[1:]
			softirqs.NetTx = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -93,7 +93,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "NET_RX:":
		case "NET_RX:":
			perCPU := parts[1:]
			softirqs.NetRx = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -101,7 +101,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "BLOCK:":
		case "BLOCK:":
			perCPU := parts[1:]
			softirqs.Block = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -109,7 +109,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "IRQ_POLL:":
		case "IRQ_POLL:":
			perCPU := parts[1:]
			softirqs.IRQPoll = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -117,7 +117,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "TASKLET:":
		case "TASKLET:":
			perCPU := parts[1:]
			softirqs.Tasklet = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -125,7 +125,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "SCHED:":
		case "SCHED:":
			perCPU := parts[1:]
			softirqs.Sched = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -133,7 +133,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "HRTIMER:":
		case "HRTIMER:":
			perCPU := parts[1:]
			softirqs.HRTimer = make([]uint64, len(perCPU))
			for i, count := range perCPU {

@@ -141,7 +141,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
				}
			}
		case parts[0] == "RCU:":
		case "RCU:":
			perCPU := parts[1:]
			softirqs.RCU = make([]uint64, len(perCPU))
			for i, count := range perCPU {
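The `softirqs.go` hunks all apply the same refactor: a tagless `switch { case parts[0] == "..." }` becomes a `switch parts[0]` with plain string cases, dropping the repeated comparisons without changing behaviour. A reduced sketch of the pattern:

```go
package main

import "fmt"

func kind(tag string) string {
	// Before (tagless switch, repeated equality checks):
	//   switch {
	//   case tag == "HI:":
	//       ...
	//   }
	switch tag { // after: switch directly on the value
	case "HI:":
		return "high-priority"
	case "TIMER:":
		return "timer"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(kind("HI:"), kind("RCU:")) // high-priority other
}
```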
vendor/go.opentelemetry.io/otel/.golangci.yml (generated, vendored, 452 changed lines)

@@ -1,13 +1,9 @@
# See https://github.com/golangci/golangci-lint#config-file
|
||||
version: "2"
|
||||
run:
|
||||
issues-exit-code: 1 #Default
|
||||
tests: true #Default
|
||||
|
||||
issues-exit-code: 1
|
||||
tests: true
|
||||
linters:
|
||||
# Disable everything by default so upgrades to not include new "default
|
||||
# enabled" linters.
|
||||
disable-all: true
|
||||
# Specifically enable linters we want to use.
|
||||
default: none
|
||||
enable:
|
||||
- asasalint
|
||||
- bodyclose
|
||||
@@ -15,10 +11,7 @@ linters:
- errcheck
|
||||
- errorlint
|
||||
- godot
|
||||
- gofumpt
|
||||
- goimports
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- misspell
|
||||
@@ -26,227 +19,230 @@ linters:
- revive
|
||||
- staticcheck
|
||||
- testifylint
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- unparam
|
||||
- unused
|
||||
- usestdlibvars
|
||||
- usetesting
|
||||
|
||||
settings:
|
||||
depguard:
|
||||
rules:
|
||||
auto/sdk:
|
||||
files:
|
||||
- '!internal/global/trace.go'
|
||||
- ~internal/global/trace_test.go
|
||||
deny:
|
||||
- pkg: go.opentelemetry.io/auto/sdk
|
||||
desc: Do not use SDK from automatic instrumentation.
|
||||
non-tests:
|
||||
files:
|
||||
- '!$test'
|
||||
- '!**/*test/*.go'
|
||||
- '!**/internal/matchers/*.go'
|
||||
deny:
|
||||
- pkg: testing
|
||||
- pkg: github.com/stretchr/testify
|
||||
- pkg: crypto/md5
|
||||
- pkg: crypto/sha1
|
||||
- pkg: crypto/**/pkix
|
||||
otel-internal:
|
||||
files:
|
||||
- '**/sdk/*.go'
|
||||
- '**/sdk/**/*.go'
|
||||
- '**/exporters/*.go'
|
||||
- '**/exporters/**/*.go'
|
||||
- '**/schema/*.go'
|
||||
- '**/schema/**/*.go'
|
||||
- '**/metric/*.go'
|
||||
- '**/metric/**/*.go'
|
||||
- '**/bridge/*.go'
|
||||
- '**/bridge/**/*.go'
|
||||
- '**/trace/*.go'
|
||||
- '**/trace/**/*.go'
|
||||
- '**/log/*.go'
|
||||
- '**/log/**/*.go'
|
||||
deny:
|
||||
- pkg: go.opentelemetry.io/otel/internal$
|
||||
desc: Do not use cross-module internal packages.
|
||||
- pkg: go.opentelemetry.io/otel/internal/internaltest
|
||||
desc: Do not use cross-module internal packages.
|
||||
- pkg: go.opentelemetry.io/otel/internal/matchers
|
||||
desc: Do not use cross-module internal packages.
|
||||
otlp-internal:
|
||||
files:
|
||||
- '!**/exporters/otlp/internal/**/*.go'
|
||||
deny:
|
||||
- pkg: go.opentelemetry.io/otel/exporters/otlp/internal
|
||||
desc: Do not use cross-module internal packages.
|
||||
otlpmetric-internal:
|
||||
files:
|
||||
- '!**/exporters/otlp/otlpmetric/internal/*.go'
|
||||
- '!**/exporters/otlp/otlpmetric/internal/**/*.go'
|
||||
deny:
|
||||
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal
|
||||
desc: Do not use cross-module internal packages.
|
||||
otlptrace-internal:
|
||||
files:
|
||||
- '!**/exporters/otlp/otlptrace/*.go'
|
||||
- '!**/exporters/otlp/otlptrace/internal/**.go'
|
||||
deny:
|
||||
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
|
||||
desc: Do not use cross-module internal packages.
|
||||
godot:
|
||||
exclude:
|
||||
# Exclude links.
|
||||
- '^ *\[[^]]+\]:'
|
||||
# Exclude sentence fragments for lists.
|
||||
- ^[ ]*[-•]
|
||||
# Exclude sentences prefixing a list.
|
||||
- :$
|
||||
misspell:
|
||||
locale: US
|
||||
ignore-rules:
|
||||
- cancelled
|
||||
perfsprint:
|
||||
int-conversion: true
|
||||
err-error: true
|
||||
errorf: true
|
||||
sprintf1: true
|
||||
strconcat: true
|
||||
revive:
|
||||
confidence: 0.01
|
||||
rules:
|
||||
- name: blank-imports
|
||||
- name: bool-literal-in-expr
|
||||
- name: constant-logical-expr
|
||||
- name: context-as-argument
|
||||
arguments:
|
||||
- allowTypesBefore: '*testing.T'
|
||||
disabled: true
|
||||
- name: context-keys-type
|
||||
- name: deep-exit
|
||||
- name: defer
|
||||
arguments:
|
||||
- - call-chain
|
||||
- loop
|
||||
- name: dot-imports
|
||||
- name: duplicated-imports
|
||||
- name: early-return
|
||||
arguments:
|
||||
- preserveScope
|
||||
- name: empty-block
|
||||
- name: empty-lines
|
||||
- name: error-naming
|
||||
- name: error-return
|
||||
- name: error-strings
|
||||
- name: errorf
|
||||
- name: exported
|
||||
arguments:
|
||||
- sayRepetitiveInsteadOfStutters
|
||||
- name: flag-parameter
|
||||
- name: identical-branches
|
||||
- name: if-return
|
||||
- name: import-shadowing
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
arguments:
|
||||
- preserveScope
|
||||
- name: package-comments
|
||||
- name: range
|
||||
- name: range-val-in-closure
|
||||
- name: range-val-address
|
||||
- name: redefines-builtin-id
|
||||
- name: string-format
|
||||
arguments:
|
||||
- - panic
|
||||
- /^[^\n]*$/
|
||||
- must not contain line breaks
|
||||
- name: struct-tag
|
||||
- name: superfluous-else
|
||||
arguments:
|
||||
- preserveScope
|
||||
- name: time-equal
|
||||
- name: unconditional-recursion
|
||||
- name: unexported-return
|
||||
- name: unhandled-error
|
||||
arguments:
|
||||
- fmt.Fprint
|
||||
- fmt.Fprintf
|
||||
- fmt.Fprintln
|
||||
- fmt.Print
|
||||
- fmt.Printf
|
||||
- fmt.Println
|
||||
- name: unnecessary-stmt
|
||||
- name: useless-break
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
arguments:
|
||||
- ["ID"] # AllowList
|
||||
- ["Otel", "Aws", "Gcp"] # DenyList
|
||||
- name: waitgroup-by-value
|
||||
testifylint:
|
||||
enable-all: true
|
||||
disable:
|
||||
- float-compare
|
||||
- go-require
|
||||
- require-error
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
# TODO: Having appropriate comments for exported objects helps development,
|
||||
# even for objects in internal packages. Appropriate comments for all
|
||||
# exported objects should be added and this exclusion removed.
|
||||
- linters:
|
||||
- revive
|
||||
path: .*internal/.*
|
||||
text: exported (method|function|type|const) (.+) should have comment or be unexported
|
||||
# Yes, they are, but it's okay in a test.
|
||||
- linters:
|
||||
- revive
|
||||
path: _test\.go
|
||||
text: exported func.*returns unexported type.*which can be annoying to use
|
||||
# Example test functions should be treated like main.
|
||||
- linters:
|
||||
- revive
|
||||
path: example.*_test\.go
|
||||
text: calls to (.+) only in main[(][)] or init[(][)] functions
|
||||
# It's okay to not run gosec and perfsprint in a test.
|
||||
- linters:
|
||||
- gosec
|
||||
- perfsprint
|
||||
path: _test\.go
|
||||
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
# as we commonly use it in tests and examples.
|
||||
- linters:
|
||||
- gosec
|
||||
text: 'G404:'
|
||||
# Ignoring gosec G402: TLS MinVersion too low
|
||||
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
|
||||
- linters:
|
||||
- gosec
|
||||
text: 'G402: TLS MinVersion too low.'
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
issues:
|
||||
# Maximum issues count per one linter.
|
||||
# Set to 0 to disable.
|
||||
# Default: 50
|
||||
# Setting to unlimited so the linter only is run once to debug all issues.
|
||||
max-issues-per-linter: 0
|
||||
# Maximum count of issues with the same text.
|
||||
# Set to 0 to disable.
|
||||
# Default: 3
|
||||
# Setting to unlimited so the linter only is run once to debug all issues.
|
||||
max-same-issues: 0
|
||||
# Excluding configuration per-path, per-linter, per-text and per-source.
|
||||
exclude-rules:
|
||||
# TODO: Having appropriate comments for exported objects helps development,
|
||||
# even for objects in internal packages. Appropriate comments for all
|
||||
# exported objects should be added and this exclusion removed.
|
||||
- path: '.*internal/.*'
|
||||
text: "exported (method|function|type|const) (.+) should have comment or be unexported"
|
||||
linters:
|
||||
- revive
|
||||
# Yes, they are, but it's okay in a test.
|
||||
- path: _test\.go
|
||||
text: "exported func.*returns unexported type.*which can be annoying to use"
|
||||
linters:
|
||||
- revive
|
||||
# Example test functions should be treated like main.
|
||||
- path: example.*_test\.go
|
||||
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
|
||||
linters:
|
||||
- revive
|
||||
# It's okay to not run gosec and perfsprint in a test.
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gosec
|
||||
- perfsprint
|
||||
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
# as we commonly use it in tests and examples.
|
||||
- text: "G404:"
|
||||
linters:
|
||||
- gosec
|
||||
# Ignoring gosec G402: TLS MinVersion too low
|
||||
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
|
||||
- text: "G402: TLS MinVersion too low."
|
||||
linters:
|
||||
- gosec
|
||||
include:
|
||||
# revive exported should have comment or be unexported.
|
||||
- EXC0012
|
||||
# revive package comment should be of the form ...
|
||||
- EXC0013
|
||||
|
||||
linters-settings:
|
||||
depguard:
|
||||
rules:
|
||||
non-tests:
|
||||
files:
|
||||
- "!$test"
|
||||
- "!**/*test/*.go"
|
||||
- "!**/internal/matchers/*.go"
|
||||
deny:
|
||||
- pkg: "testing"
|
||||
- pkg: "github.com/stretchr/testify"
|
||||
- pkg: "crypto/md5"
|
||||
- pkg: "crypto/sha1"
|
||||
- pkg: "crypto/**/pkix"
|
||||
auto/sdk:
|
||||
files:
|
||||
- "!internal/global/trace.go"
|
||||
- "~internal/global/trace_test.go"
|
||||
deny:
|
||||
- pkg: "go.opentelemetry.io/auto/sdk"
|
||||
desc: Do not use SDK from automatic instrumentation.
|
||||
otlp-internal:
|
||||
files:
|
||||
- "!**/exporters/otlp/internal/**/*.go"
|
||||
deny:
|
||||
- pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
|
||||
desc: Do not use cross-module internal packages.
|
||||
otlptrace-internal:
|
||||
files:
|
||||
- "!**/exporters/otlp/otlptrace/*.go"
|
||||
- "!**/exporters/otlp/otlptrace/internal/**.go"
|
||||
deny:
|
||||
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
|
||||
desc: Do not use cross-module internal packages.
|
||||
otlpmetric-internal:
|
||||
files:
|
||||
- "!**/exporters/otlp/otlpmetric/internal/*.go"
|
||||
- "!**/exporters/otlp/otlpmetric/internal/**/*.go"
|
||||
deny:
|
||||
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
|
||||
desc: Do not use cross-module internal packages.
|
||||
otel-internal:
|
||||
files:
|
||||
- "**/sdk/*.go"
|
||||
- "**/sdk/**/*.go"
|
||||
- "**/exporters/*.go"
|
||||
- "**/exporters/**/*.go"
|
||||
- "**/schema/*.go"
|
||||
- "**/schema/**/*.go"
|
||||
- "**/metric/*.go"
|
||||
- "**/metric/**/*.go"
|
||||
- "**/bridge/*.go"
|
||||
- "**/bridge/**/*.go"
|
||||
- "**/trace/*.go"
|
||||
- "**/trace/**/*.go"
|
||||
- "**/log/*.go"
|
||||
- "**/log/**/*.go"
|
||||
deny:
|
||||
- pkg: "go.opentelemetry.io/otel/internal$"
|
||||
desc: Do not use cross-module internal packages.
|
||||
- pkg: "go.opentelemetry.io/otel/internal/attribute"
|
||||
desc: Do not use cross-module internal packages.
|
||||
- pkg: "go.opentelemetry.io/otel/internal/internaltest"
|
||||
desc: Do not use cross-module internal packages.
|
||||
- pkg: "go.opentelemetry.io/otel/internal/matchers"
|
||||
desc: Do not use cross-module internal packages.
|
||||
godot:
|
||||
exclude:
|
||||
# Exclude links.
|
||||
- '^ *\[[^]]+\]:'
|
||||
# Exclude sentence fragments for lists.
|
||||
- '^[ ]*[-•]'
|
||||
# Exclude sentences prefixing a list.
|
||||
- ':$'
|
||||
goimports:
|
||||
local-prefixes: go.opentelemetry.io
|
||||
misspell:
|
||||
locale: US
|
||||
ignore-words:
|
||||
- cancelled
|
||||
perfsprint:
|
||||
err-error: true
|
||||
errorf: true
|
||||
int-conversion: true
|
||||
sprintf1: true
|
||||
strconcat: true
|
||||
revive:
|
||||
# Sets the default failure confidence.
|
||||
# This means that linting errors with less than 0.8 confidence will be ignored.
|
||||
# Default: 0.8
|
||||
confidence: 0.01
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
|
||||
rules:
|
||||
- name: blank-imports
|
||||
- name: bool-literal-in-expr
|
||||
- name: constant-logical-expr
|
||||
- name: context-as-argument
|
||||
disabled: true
|
||||
arguments:
|
||||
- allowTypesBefore: "*testing.T"
|
||||
- name: context-keys-type
|
||||
- name: deep-exit
|
||||
- name: defer
|
||||
arguments:
|
||||
- ["call-chain", "loop"]
|
||||
- name: dot-imports
|
||||
- name: duplicated-imports
|
||||
- name: early-return
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: empty-block
|
||||
- name: empty-lines
|
||||
- name: error-naming
|
||||
- name: error-return
|
||||
- name: error-strings
|
||||
- name: errorf
|
||||
- name: exported
|
||||
arguments:
|
||||
- "sayRepetitiveInsteadOfStutters"
|
||||
- name: flag-parameter
|
||||
- name: identical-branches
|
||||
- name: if-return
|
||||
- name: import-shadowing
|
||||
- name: increment-decrement
|
||||
- name: indent-error-flow
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: package-comments
|
||||
- name: range
|
||||
- name: range-val-in-closure
|
||||
- name: range-val-address
|
||||
- name: redefines-builtin-id
|
||||
- name: string-format
|
||||
arguments:
|
||||
- - panic
|
||||
- '/^[^\n]*$/'
|
||||
- must not contain line breaks
|
||||
- name: struct-tag
|
||||
- name: superfluous-else
|
||||
arguments:
|
||||
- "preserveScope"
|
||||
- name: time-equal
|
||||
- name: unconditional-recursion
|
||||
- name: unexported-return
|
||||
- name: unhandled-error
|
||||
arguments:
|
||||
- "fmt.Fprint"
|
||||
- "fmt.Fprintf"
|
||||
- "fmt.Fprintln"
|
||||
- "fmt.Print"
|
||||
- "fmt.Printf"
|
||||
- "fmt.Println"
|
||||
- name: unnecessary-stmt
|
||||
- name: useless-break
|
||||
- name: var-declaration
|
||||
- name: var-naming
|
||||
arguments:
|
||||
- ["ID"] # AllowList
|
||||
- ["Otel", "Aws", "Gcp"] # DenyList
|
||||
- name: waitgroup-by-value
|
||||
testifylint:
|
||||
enable-all: true
|
||||
disable:
|
||||
- float-compare
|
||||
- go-require
|
||||
- require-error
|
||||
formatters:
|
||||
enable:
|
||||
- gofumpt
|
||||
- goimports
|
||||
- golines
|
||||
settings:
|
||||
goimports:
|
||||
local-prefixes:
|
||||
- go.opentelemetry.io
|
||||
golines:
|
||||
max-len: 120
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
|
|
|||
vendor/go.opentelemetry.io/otel/CHANGELOG.md (generated, vendored, 54 changed lines)

@@ -11,6 +11,57 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section -->
|
||||
<!-- Don't change this section unless doing release -->
|
||||
|
||||
## [1.36.0/0.58.0/0.12.0] 2025-05-20
|
||||
|
||||
### Added
|
||||
|
||||
- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421)
|
||||
- The `go.opentelemetry.io/otel/semconv/v1.31.0` package.
|
||||
The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions.
|
||||
See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479)
|
||||
- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507)
|
||||
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751)
|
||||
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752)
|
||||
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688)
|
||||
- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973)
|
||||
- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973)
|
||||
- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973)
|
||||
- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662)
|
||||
- The `go.opentelemetry.io/otel/semconv/v1.32.0` package.
|
||||
The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions.
|
||||
See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`(#6782)
|
||||
- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794)
|
||||
- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796)
|
||||
|
||||
### Removed
|
||||
|
||||
- Drop support for [Go 1.22]. (#6381, #6418)
|
||||
- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494)
|
||||
- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492)
|
||||
- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507)
|
||||
- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662)
|
||||
|
||||
### Changed
|
||||
|
||||
- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`.
|
||||
This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
|
||||
This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433)
|
||||
- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455)
|
||||
- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465)
|
||||
- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466)
|
||||
- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507)
|
||||
- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641)
|
||||
|
||||
### Deprecated
|
||||
|
||||
- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449)
|
||||
|
||||
### Fixes
|
||||
|
||||
- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392)
|
||||
- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456)
|
||||
- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472)
|
||||
|
||||
## [1.35.0/0.57.0/0.11.0] 2025-03-05
|
||||
|
||||
This release is the last to support [Go 1.22].
|
||||
|
|
@@ -3237,7 +3288,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
|
||||
- CODEOWNERS file to track owners of this project.
|
||||
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD
|
||||
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.36.0...HEAD
|
||||
[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
|
||||
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
|
||||
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
|
||||
[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
|
||||
|
|
|
|||
vendor/go.opentelemetry.io/otel/CONTRIBUTING.md (generated, vendored, 1 changed line)

@@ -643,6 +643,7 @@ should be canceled.

### Triagers

- [Alex Kats](https://github.com/akats7), Capital One
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent

### Approvers
|
|
|
|||
vendor/go.opentelemetry.io/otel/Makefile (generated, vendored, 19 changed lines)

@@ -43,8 +43,11 @@ $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit
|
||||
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
|
||||
|
||||
VERIFYREADMES = $(TOOLS)/verifyreadmes
|
||||
$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes
|
||||
|
||||
GOLANGCI_LINT = $(TOOLS)/golangci-lint
|
||||
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint
|
||||
|
||||
MISSPELL = $(TOOLS)/misspell
|
||||
$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
|
||||
|
|
@@ -68,7 +71,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
|
|||
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
|
||||
|
||||
.PHONY: tools
|
||||
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
|
||||
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
|
||||
|
||||
# Virtualized python tools via docker
|
||||
|
||||
|
|
@@ -213,11 +216,8 @@ go-mod-tidy/%: crosslink
|
|||
&& cd $(DIR) \
|
||||
&& $(GO) mod tidy -compat=1.21
|
||||
|
||||
.PHONY: lint-modules
|
||||
lint-modules: go-mod-tidy
|
||||
|
||||
.PHONY: lint
|
||||
lint: misspell lint-modules golangci-lint govulncheck
|
||||
lint: misspell go-mod-tidy golangci-lint govulncheck
|
||||
|
||||
.PHONY: vanity-import-check
|
||||
vanity-import-check: $(PORTO)
|
||||
|
|
@@ -319,10 +319,11 @@ add-tags: verify-mods
|
|||
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
|
||||
$(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
|
||||
|
||||
MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
|
||||
.PHONY: lint-markdown
|
||||
lint-markdown:
|
||||
docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
|
||||
docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
|
||||
|
||||
.PHONY: verify-readmes
|
||||
verify-readmes:
|
||||
./verify_readmes.sh
|
||||
verify-readmes: $(VERIFYREADMES)
|
||||
$(VERIFYREADMES)
|
||||
|
|
|
|||
vendor/go.opentelemetry.io/otel/README.md (generated, vendored, 8 changed lines)

@@ -6,6 +6,7 @@
[](https://goreportcard.com/report/go.opentelemetry.io/otel)
|
||||
[](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
|
||||
[](https://www.bestpractices.dev/projects/9996)
|
||||
[](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
|
||||
[](https://cloud-native.slack.com/archives/C01NPAXACKT)
|
||||
|
||||
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
|
||||
|
|
@@ -53,25 +54,18 @@ Currently, this project supports the following environments.
|----------|------------|--------------|
| Ubuntu   | 1.24       | amd64        |
| Ubuntu   | 1.23       | amd64        |
| Ubuntu   | 1.22       | amd64        |
| Ubuntu   | 1.24       | 386          |
| Ubuntu   | 1.23       | 386          |
| Ubuntu   | 1.22       | 386          |
| Ubuntu   | 1.24       | arm64        |
| Ubuntu   | 1.23       | arm64        |
| Ubuntu   | 1.22       | arm64        |
| macOS 13 | 1.24       | amd64        |
| macOS 13 | 1.23       | amd64        |
| macOS 13 | 1.22       | amd64        |
| macOS    | 1.24       | arm64        |
| macOS    | 1.23       | arm64        |
| macOS    | 1.22       | arm64        |
| Windows  | 1.24       | amd64        |
| Windows  | 1.23       | amd64        |
| Windows  | 1.22       | amd64        |
| Windows  | 1.24       | 386          |
| Windows  | 1.23       | 386          |
| Windows  | 1.22       | 386          |

While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
|
|
|
|||
vendor/go.opentelemetry.io/otel/RELEASING.md (generated, vendored, 18 changed lines)

@@ -1,5 +1,9 @@
# Release Process
|
||||
|
||||
## Create a `Version Release` issue
|
||||
|
||||
Create a `Version Release` issue to track the release process.
|
||||
|
||||
## Semantic Convention Generation
|
||||
|
||||
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
|
||||
|
|
@@ -123,6 +127,16 @@ Importantly, bump any package versions referenced to be the latest one you just
|
|||
[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
|
||||
[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
|
||||
|
||||
### Close the milestone
|
||||
|
||||
Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone.
|
||||
This helps track what changes were included in each release.
|
||||
|
||||
- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr)
|
||||
- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged).
|
||||
|
||||
Once all related issues and PRs have been added to the milestone, close the milestone.
|
||||
|
||||
### Demo Repository
|
||||
|
||||
Bump the dependencies in the following Go services:
|
||||
|
|
@@ -130,3 +144,7 @@ Bump the dependencies in the following Go services:
|
|||
- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
|
||||
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
|
||||
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
|
||||
|
||||
### Close the `Version Release` issue
|
||||
|
||||
Once the todo list in the `Version Release` issue is complete, close the issue.
|
||||
|
|
|
|||
vendor/go.opentelemetry.io/otel/attribute/filter.go (generated, vendored, 4 changed lines)
@ -19,7 +19,7 @@ func NewAllowKeysFilter(keys ...Key) Filter {
|
|||
		return func(kv KeyValue) bool { return false }
	}

	allowed := make(map[Key]struct{})
	allowed := make(map[Key]struct{}, len(keys))
	for _, k := range keys {
		allowed[k] = struct{}{}
	}
|
|
@@ -38,7 +38,7 @@ func NewDenyKeysFilter(keys ...Key) Filter {
		return func(kv KeyValue) bool { return true }
	}

	forbid := make(map[Key]struct{})
	forbid := make(map[Key]struct{}, len(keys))
	for _, k := range keys {
		forbid[k] = struct{}{}
	}
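The `filter.go` change only adds a capacity hint: `make(map[Key]struct{}, len(keys))` sizes the map up front so it does not grow while the keys are inserted. A sketch of the idea with plain string keys (a hypothetical helper, not the vendored code):

```go
package main

import "fmt"

// allowSet mirrors the pre-sized allocation now used by NewAllowKeysFilter.
func allowSet(keys []string) map[string]struct{} {
	allowed := make(map[string]struct{}, len(keys)) // capacity hint avoids incremental growth
	for _, k := range keys {
		allowed[k] = struct{}{}
	}
	return allowed
}

func main() {
	s := allowSet([]string{"service.name", "host.name"})
	_, ok := s["service.name"]
	fmt.Println(len(s), ok) // 2 true
}
```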
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
Package attribute provide several helper functions for some commonly used
|
||||
logic of processing attributes.
|
||||
*/
|
||||
package attribute // import "go.opentelemetry.io/otel/internal/attribute"
|
||||
package attribute // import "go.opentelemetry.io/otel/attribute/internal"
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
37
vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
generated
vendored
Normal file
37
vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
generated
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package attribute // import "go.opentelemetry.io/otel/attribute"
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func rawToBool(r uint64) bool {
|
||||
return r != 0
|
||||
}
|
||||
|
||||
func int64ToRaw(i int64) uint64 {
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return uint64(i) // nolint: gosec
|
||||
}
|
||||
|
||||
func rawToInt64(r uint64) int64 {
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return int64(r) // nolint: gosec
|
||||
}
|
||||
|
||||
func float64ToRaw(f float64) uint64 {
|
||||
return math.Float64bits(f)
|
||||
}
|
||||
|
||||
func rawToFloat64(r uint64) float64 {
|
||||
return math.Float64frombits(r)
|
||||
}
|
vendor/go.opentelemetry.io/otel/attribute/value.go (generated, vendored, 15 changed lines)

@@ -9,8 +9,7 @@ import (
|
||||
"strconv"
|
||||
|
||||
"go.opentelemetry.io/otel/internal"
|
||||
"go.opentelemetry.io/otel/internal/attribute"
|
||||
attribute "go.opentelemetry.io/otel/attribute/internal"
|
||||
)
|
||||
|
||||
//go:generate stringer -type=Type
|
||||
|
|
@ -51,7 +50,7 @@ const (
|
|||
func BoolValue(v bool) Value {
|
||||
return Value{
|
||||
vtype: BOOL,
|
||||
numeric: internal.BoolToRaw(v),
|
||||
numeric: boolToRaw(v),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -82,7 +81,7 @@ func IntSliceValue(v []int) Value {
|
|||
func Int64Value(v int64) Value {
|
||||
return Value{
|
||||
vtype: INT64,
|
||||
numeric: internal.Int64ToRaw(v),
|
||||
numeric: int64ToRaw(v),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -95,7 +94,7 @@ func Int64SliceValue(v []int64) Value {
|
|||
func Float64Value(v float64) Value {
|
||||
return Value{
|
||||
vtype: FLOAT64,
|
||||
numeric: internal.Float64ToRaw(v),
|
||||
numeric: float64ToRaw(v),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -125,7 +124,7 @@ func (v Value) Type() Type {
|
|||
// AsBool returns the bool value. Make sure that the Value's type is
|
||||
// BOOL.
|
||||
func (v Value) AsBool() bool {
|
||||
return internal.RawToBool(v.numeric)
|
||||
return rawToBool(v.numeric)
|
||||
}
|
||||
|
||||
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
|
||||
|
|
@ -144,7 +143,7 @@ func (v Value) asBoolSlice() []bool {
|
|||
// AsInt64 returns the int64 value. Make sure that the Value's type is
|
||||
// INT64.
|
||||
func (v Value) AsInt64() int64 {
|
||||
return internal.RawToInt64(v.numeric)
|
||||
return rawToInt64(v.numeric)
|
||||
}
|
||||
|
||||
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
|
||||
|
|
@ -163,7 +162,7 @@ func (v Value) asInt64Slice() []int64 {
|
|||
// AsFloat64 returns the float64 value. Make sure that the Value's
|
||||
// type is FLOAT64.
|
||||
func (v Value) AsFloat64() float64 {
|
||||
return internal.RawToFloat64(v.numeric)
|
||||
return rawToFloat64(v.numeric)
|
||||
}
|
||||
|
||||
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is

vendor/go.opentelemetry.io/otel/dependencies.Dockerfile (generated, vendored, 5 changed lines)

@@ -1,3 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python
FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver
FROM python:3.13.3-slim-bullseye@sha256:9e3f9243e06fd68eb9519074b49878eda20ad39a855fac51aaffb741de20726e AS python
FROM otel/weaver:v0.15.0@sha256:1cf1c72eaed57dad813c2e359133b8a15bd4facf305aae5b13bdca6d3eccff56 AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
30 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh generated vendored
@ -1,30 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
top_dir='.'
|
||||
if [[ $# -gt 0 ]]; then
|
||||
top_dir="${1}"
|
||||
fi
|
||||
|
||||
p=$(pwd)
|
||||
mod_dirs=()
|
||||
|
||||
# Note `mapfile` does not exist in older bash versions:
|
||||
# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
|
||||
|
||||
while IFS= read -r line; do
|
||||
mod_dirs+=("$line")
|
||||
done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||
|
||||
for mod_dir in "${mod_dirs[@]}"; do
|
||||
cd "${mod_dir}"
|
||||
|
||||
while IFS= read -r line; do
|
||||
echo ".${line#${p}}"
|
||||
done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
|
||||
cd "${p}"
|
||||
done
18 vendor/go.opentelemetry.io/otel/internal/gen.go generated vendored
@ -1,18 +0,0 @@
|
|||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/internal"
|
||||
|
||||
//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
|
||||
//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
|
||||
//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
|
||||
|
||||
//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
|
||||
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
1 vendor/go.opentelemetry.io/otel/internal/global/handler.go generated vendored
@ -1,6 +1,7 @@
|
|||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package global provides the OpenTelemetry global API.
|
||||
package global // import "go.opentelemetry.io/otel/internal/global"
|
||||
|
||||
import (
45 vendor/go.opentelemetry.io/otel/internal/global/meter.go generated vendored
@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
|
||||
func (m *meter) Int64UpDownCounter(
|
||||
name string,
|
||||
options ...metric.Int64UpDownCounterOption,
|
||||
) (metric.Int64UpDownCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
|
||||
func (m *meter) Int64ObservableCounter(
|
||||
name string,
|
||||
options ...metric.Int64ObservableCounterOption,
|
||||
) (metric.Int64ObservableCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
|
||||
func (m *meter) Int64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...metric.Int64ObservableUpDownCounterOption,
|
||||
) (metric.Int64ObservableUpDownCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
|
||||
func (m *meter) Int64ObservableGauge(
|
||||
name string,
|
||||
options ...metric.Int64ObservableGaugeOption,
|
||||
) (metric.Int64ObservableGauge, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
|
||||
func (m *meter) Float64UpDownCounter(
|
||||
name string,
|
||||
options ...metric.Float64UpDownCounterOption,
|
||||
) (metric.Float64UpDownCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
|
||||
func (m *meter) Float64Histogram(
|
||||
name string,
|
||||
options ...metric.Float64HistogramOption,
|
||||
) (metric.Float64Histogram, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
|
||||
func (m *meter) Float64ObservableCounter(
|
||||
name string,
|
||||
options ...metric.Float64ObservableCounterOption,
|
||||
) (metric.Float64ObservableCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
|
||||
func (m *meter) Float64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...metric.Float64ObservableUpDownCounterOption,
|
||||
) (metric.Float64ObservableUpDownCounter, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
|
|
@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
|
||||
func (m *meter) Float64ObservableGauge(
|
||||
name string,
|
||||
options ...metric.Float64ObservableGaugeOption,
|
||||
) (metric.Float64ObservableGauge, error) {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
13 vendor/go.opentelemetry.io/otel/internal/global/trace.go generated vendored
@ -158,7 +158,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
|
|||
// a nonRecordingSpan by default.
|
||||
var autoInstEnabled = new(bool)
|
||||
|
||||
func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) {
|
||||
// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF
|
||||
// uprobe to this code.
|
||||
//
|
||||
// "noinline" pragma prevents the method from ever being inlined.
|
||||
//
|
||||
//go:noinline
|
||||
func (t *tracer) newSpan(
|
||||
ctx context.Context,
|
||||
autoSpan *bool,
|
||||
name string,
|
||||
opts []trace.SpanStartOption,
|
||||
) (context.Context, trace.Span) {
|
||||
// autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
|
||||
// so the auto-instrumentation can define a uprobe for (*t).newSpan and be
|
||||
// provided with the address of the bool autoInstEnabled points to. It
48 vendor/go.opentelemetry.io/otel/internal/rawhelpers.go generated vendored
@ -1,48 +0,0 @@
|
|||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/internal"
|
||||
|
||||
import (
|
||||
"math"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
|
||||
if b {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func RawToBool(r uint64) bool {
|
||||
return r != 0
|
||||
}
|
||||
|
||||
func Int64ToRaw(i int64) uint64 {
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return uint64(i) // nolint: gosec
|
||||
}
|
||||
|
||||
func RawToInt64(r uint64) int64 {
|
||||
// Assumes original was a valid int64 (overflow not checked).
|
||||
return int64(r) // nolint: gosec
|
||||
}
|
||||
|
||||
func Float64ToRaw(f float64) uint64 {
|
||||
return math.Float64bits(f)
|
||||
}
|
||||
|
||||
func RawToFloat64(r uint64) float64 {
|
||||
return math.Float64frombits(r)
|
||||
}
|
||||
|
||||
func RawPtrToFloat64Ptr(r *uint64) *float64 {
|
||||
// Assumes original was a valid *float64 (overflow not checked).
|
||||
return (*float64)(unsafe.Pointer(r)) // nolint: gosec
|
||||
}
|
||||
|
||||
func RawPtrToInt64Ptr(r *uint64) *int64 {
|
||||
// Assumes original was a valid *int64 (overflow not checked).
|
||||
return (*int64)(unsafe.Pointer(r)) // nolint: gosec
|
||||
}
12 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go generated vendored
@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct {
|
|||
|
||||
// NewFloat64ObservableUpDownCounterConfig returns a new
|
||||
// [Float64ObservableUpDownCounterConfig] with all opts applied.
|
||||
func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
|
||||
func NewFloat64ObservableUpDownCounterConfig(
|
||||
opts ...Float64ObservableUpDownCounterOption,
|
||||
) Float64ObservableUpDownCounterConfig {
|
||||
var config Float64ObservableUpDownCounterConfig
|
||||
for _, o := range opts {
|
||||
config = o.applyFloat64ObservableUpDownCounter(config)
|
||||
|
|
@ -239,12 +241,16 @@ type float64CallbackOpt struct {
|
|||
cback Float64Callback
|
||||
}
|
||||
|
||||
func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
|
||||
func (o float64CallbackOpt) applyFloat64ObservableCounter(
|
||||
cfg Float64ObservableCounterConfig,
|
||||
) Float64ObservableCounterConfig {
|
||||
cfg.callbacks = append(cfg.callbacks, o.cback)
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
|
||||
func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(
|
||||
cfg Float64ObservableUpDownCounterConfig,
|
||||
) Float64ObservableUpDownCounterConfig {
|
||||
cfg.callbacks = append(cfg.callbacks, o.cback)
|
||||
return cfg
|
||||
}
8 vendor/go.opentelemetry.io/otel/metric/asyncint64.go generated vendored
@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct {
|
|||
|
||||
// NewInt64ObservableUpDownCounterConfig returns a new
|
||||
// [Int64ObservableUpDownCounterConfig] with all opts applied.
|
||||
func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
|
||||
func NewInt64ObservableUpDownCounterConfig(
|
||||
opts ...Int64ObservableUpDownCounterOption,
|
||||
) Int64ObservableUpDownCounterConfig {
|
||||
var config Int64ObservableUpDownCounterConfig
|
||||
for _, o := range opts {
|
||||
config = o.applyInt64ObservableUpDownCounter(config)
|
||||
|
|
@ -242,7 +244,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter
|
|||
return cfg
|
||||
}
|
||||
|
||||
func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
|
||||
func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(
|
||||
cfg Int64ObservableUpDownCounterConfig,
|
||||
) Int64ObservableUpDownCounterConfig {
|
||||
cfg.callbacks = append(cfg.callbacks, o.cback)
|
||||
return cfg
|
||||
}
16 vendor/go.opentelemetry.io/otel/metric/instrument.go generated vendored
@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig)
|
|||
return c
|
||||
}
|
||||
|
||||
func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
|
||||
func (o descOpt) applyFloat64ObservableUpDownCounter(
|
||||
c Float64ObservableUpDownCounterConfig,
|
||||
) Float64ObservableUpDownCounterConfig {
|
||||
c.description = string(o)
|
||||
return c
|
||||
}
|
||||
|
|
@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int
|
|||
return c
|
||||
}
|
||||
|
||||
func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
|
||||
func (o descOpt) applyInt64ObservableUpDownCounter(
|
||||
c Int64ObservableUpDownCounterConfig,
|
||||
) Int64ObservableUpDownCounterConfig {
|
||||
c.description = string(o)
|
||||
return c
|
||||
}
|
||||
|
|
@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig)
|
|||
return c
|
||||
}
|
||||
|
||||
func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
|
||||
func (o unitOpt) applyFloat64ObservableUpDownCounter(
|
||||
c Float64ObservableUpDownCounterConfig,
|
||||
) Float64ObservableUpDownCounterConfig {
|
||||
c.unit = string(o)
|
||||
return c
|
||||
}
|
||||
|
|
@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int
|
|||
return c
|
||||
}
|
||||
|
||||
func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
|
||||
func (o unitOpt) applyInt64ObservableUpDownCounter(
|
||||
c Int64ObservableUpDownCounterConfig,
|
||||
) Int64ObservableUpDownCounterConfig {
|
||||
c.unit = string(o)
|
||||
return c
|
||||
}
10 vendor/go.opentelemetry.io/otel/metric/meter.go generated vendored
@ -110,7 +110,10 @@ type Meter interface {
|
|||
// The name needs to conform to the OpenTelemetry instrument name syntax.
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
|
||||
Int64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...Int64ObservableUpDownCounterOption,
|
||||
) (Int64ObservableUpDownCounter, error)
|
||||
|
||||
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
|
||||
// identified by name and configured with options. The instrument is used
|
||||
|
|
@ -194,7 +197,10 @@ type Meter interface {
|
|||
// The name needs to conform to the OpenTelemetry instrument name syntax.
|
||||
// See the Instrument Name section of the package documentation for more
|
||||
// information.
|
||||
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
|
||||
Float64ObservableUpDownCounter(
|
||||
name string,
|
||||
options ...Float64ObservableUpDownCounterOption,
|
||||
) (Float64ObservableUpDownCounter, error)
|
||||
|
||||
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
|
||||
// identified by name and configured with options. The instrument is used
36 vendor/go.opentelemetry.io/otel/propagation/baggage.go generated vendored
@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
|
|||
}
|
||||
|
||||
// Extract returns a copy of parent with the baggage from the carrier added.
|
||||
// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
|
||||
// for multiple values extraction. Otherwise, Get is called.
|
||||
func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
|
||||
if multiCarrier, ok := carrier.(ValuesGetter); ok {
|
||||
return extractMultiBaggage(parent, multiCarrier)
|
||||
}
|
||||
return extractSingleBaggage(parent, carrier)
|
||||
}
|
||||
|
||||
// Fields returns the keys who's values are set with Inject.
|
||||
func (b Baggage) Fields() []string {
|
||||
return []string{baggageHeader}
|
||||
}
|
||||
|
||||
func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context {
|
||||
bStr := carrier.Get(baggageHeader)
|
||||
if bStr == "" {
|
||||
return parent
|
||||
|
|
@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
|
|||
return baggage.ContextWithBaggage(parent, bag)
|
||||
}
|
||||
|
||||
// Fields returns the keys who's values are set with Inject.
|
||||
func (b Baggage) Fields() []string {
|
||||
return []string{baggageHeader}
|
||||
func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context {
|
||||
bVals := carrier.Values(baggageHeader)
|
||||
if len(bVals) == 0 {
|
||||
return parent
|
||||
}
|
||||
var members []baggage.Member
|
||||
for _, bStr := range bVals {
|
||||
currBag, err := baggage.Parse(bStr)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
members = append(members, currBag.Members()...)
|
||||
}
|
||||
|
||||
b, err := baggage.New(members...)
|
||||
if err != nil || b.Len() == 0 {
|
||||
return parent
|
||||
}
|
||||
return baggage.ContextWithBaggage(parent, b)
|
||||
}
30 vendor/go.opentelemetry.io/otel/propagation/propagation.go generated vendored
@ -9,6 +9,7 @@ import (
|
|||
)
|
||||
|
||||
// TextMapCarrier is the storage medium used by a TextMapPropagator.
|
||||
// See ValuesGetter for how a TextMapCarrier can get multiple values for a key.
|
||||
type TextMapCarrier interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
|
@ -29,6 +30,18 @@ type TextMapCarrier interface {
|
|||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// ValuesGetter can return multiple values for a single key,
|
||||
// with contrast to TextMapCarrier.Get which returns a single value.
|
||||
type ValuesGetter interface {
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
|
||||
// Values returns all values associated with the passed key.
|
||||
Values(key string) []string
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
|
||||
}
|
||||
|
||||
// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
|
||||
// medium for propagated key-value pairs.
|
||||
type MapCarrier map[string]string
|
||||
|
|
@ -55,14 +68,25 @@ func (c MapCarrier) Keys() []string {
|
|||
return keys
|
||||
}
|
||||
|
||||
// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
|
||||
// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces.
|
||||
type HeaderCarrier http.Header
|
||||
|
||||
// Get returns the value associated with the passed key.
|
||||
// Compile time check that HeaderCarrier implements ValuesGetter.
|
||||
var _ TextMapCarrier = HeaderCarrier{}
|
||||
|
||||
// Compile time check that HeaderCarrier implements TextMapCarrier.
|
||||
var _ ValuesGetter = HeaderCarrier{}
|
||||
|
||||
// Get returns the first value associated with the passed key.
|
||||
func (hc HeaderCarrier) Get(key string) string {
|
||||
return http.Header(hc).Get(key)
|
||||
}
|
||||
|
||||
// Values returns all values associated with the passed key.
|
||||
func (hc HeaderCarrier) Values(key string) []string {
|
||||
return http.Header(hc).Values(key)
|
||||
}
|
||||
|
||||
// Set stores the key-value pair.
|
||||
func (hc HeaderCarrier) Set(key string, value string) {
|
||||
http.Header(hc).Set(key, value)
|
||||
|
|
@ -89,6 +113,8 @@ type TextMapPropagator interface {
|
|||
// must never be done outside of a new major release.
|
||||
|
||||
// Extract reads cross-cutting concerns from the carrier into a Context.
|
||||
// Implementations may check if the carrier implements ValuesGetter,
|
||||
// to support extraction of multiple values per key.
|
||||
Extract(ctx context.Context, carrier TextMapCarrier) context.Context
|
||||
// DO NOT CHANGE: any modification will not be backwards compatible and
|
||||
// must never be done outside of a new major release.
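The propagation changes above add the ValuesGetter interface and teach HeaderCarrier and Baggage.Extract to use it when a carrier exposes multiple values per key. A minimal sketch of that behaviour against the updated API; the header values and baggage member names are made up for illustration:

package main

import (
	"context"
	"fmt"
	"net/http"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	// Two baggage headers on the same request.
	h := http.Header{}
	h.Add("Baggage", "userId=alice")
	h.Add("Baggage", "tenant=acme")

	// HeaderCarrier now satisfies ValuesGetter as well as TextMapCarrier.
	carrier := propagation.HeaderCarrier(h)
	fmt.Println(carrier.Values("Baggage")) // both values, not just the first

	// Baggage.Extract detects the ValuesGetter implementation and merges
	// the members from every header value into a single baggage instance.
	ctx := propagation.Baggage{}.Extract(context.Background(), carrier)
	fmt.Println(baggage.FromContext(ctx).Len()) // 2
}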
7 vendor/go.opentelemetry.io/otel/renovate.json generated vendored
@ -1,7 +1,8 @@
|
|||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"config:best-practices"
|
||||
"config:best-practices",
|
||||
"helpers:pinGitHubActionDigestsToSemver"
|
||||
],
|
||||
"ignorePaths": [],
|
||||
"labels": ["Skip Changelog", "dependencies"],
|
||||
|
|
@ -25,6 +26,10 @@
|
|||
{
|
||||
"matchPackageNames": ["golang.org/x/**"],
|
||||
"groupName": "golang.org/x"
|
||||
},
|
||||
{
|
||||
"matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"],
|
||||
"enabled": false
|
||||
}
|
||||
]
|
||||
}
1 vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go generated vendored
@ -1,6 +1,7 @@
|
|||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Package internal provides common semconv functionality.
|
||||
package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
|
||||
|
||||
import (
5 vendor/go.opentelemetry.io/otel/trace/auto.go generated vendored
@ -57,14 +57,15 @@ type autoTracer struct {
|
|||
var _ Tracer = autoTracer{}
|
||||
|
||||
func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
|
||||
var psc SpanContext
|
||||
var psc, sc SpanContext
|
||||
sampled := true
|
||||
span := new(autoSpan)
|
||||
|
||||
// Ask eBPF for sampling decision and span context info.
|
||||
t.start(ctx, span, &psc, &sampled, &span.spanContext)
|
||||
t.start(ctx, span, &psc, &sampled, &sc)
|
||||
|
||||
span.sampled.Store(sampled)
|
||||
span.spanContext = sc
|
||||
|
||||
ctx = ContextWithSpan(ctx, span)
56 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go generated vendored
@ -251,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
|
|||
type SpanFlags int32
|
||||
|
||||
const (
|
||||
// SpanFlagsTraceFlagsMask is a mask for trace-flags.
|
||||
//
|
||||
// Bits 0-7 are used for trace flags.
|
||||
SpanFlagsTraceFlagsMask SpanFlags = 255
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is remote.
|
||||
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
|
||||
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
|
||||
// SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
|
||||
//
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is
|
||||
// remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
|
||||
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
|
||||
// SpanFlagsContextHasIsRemoteMask indicates the Span is remote.
|
||||
// SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
|
||||
//
|
||||
// Bits 8 and 9 are used to indicate that the parent span or link span is
|
||||
// remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
|
||||
// remote.
|
||||
SpanFlagsContextIsRemoteMask SpanFlags = 512
|
||||
)
|
||||
|
||||
|
|
@ -266,27 +273,31 @@ const (
|
|||
type SpanKind int32
|
||||
|
||||
const (
|
||||
// Indicates that the span represents an internal operation within an application,
|
||||
// as opposed to an operation happening at the boundaries. Default value.
|
||||
// SpanKindInternal indicates that the span represents an internal
|
||||
// operation within an application, as opposed to an operation happening at
|
||||
// the boundaries.
|
||||
SpanKindInternal SpanKind = 1
|
||||
// Indicates that the span covers server-side handling of an RPC or other
|
||||
// remote network request.
|
||||
// SpanKindServer indicates that the span covers server-side handling of an
|
||||
// RPC or other remote network request.
|
||||
SpanKindServer SpanKind = 2
|
||||
// Indicates that the span describes a request to some remote service.
|
||||
// SpanKindClient indicates that the span describes a request to some
|
||||
// remote service.
|
||||
SpanKindClient SpanKind = 3
|
||||
// Indicates that the span describes a producer sending a message to a broker.
|
||||
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
|
||||
// between producer and consumer spans. A PRODUCER span ends when the message was accepted
|
||||
// by the broker while the logical processing of the message might span a much longer time.
|
||||
// SpanKindProducer indicates that the span describes a producer sending a
|
||||
// message to a broker. Unlike SpanKindClient and SpanKindServer, there is
|
||||
// often no direct critical path latency relationship between producer and
|
||||
// consumer spans. A SpanKindProducer span ends when the message was
|
||||
// accepted by the broker while the logical processing of the message might
|
||||
// span a much longer time.
|
||||
SpanKindProducer SpanKind = 4
|
||||
// Indicates that the span describes consumer receiving a message from a broker.
|
||||
// Like the PRODUCER kind, there is often no direct critical path latency relationship
|
||||
// between producer and consumer spans.
|
||||
// SpanKindConsumer indicates that the span describes a consumer receiving
|
||||
// a message from a broker. Like SpanKindProducer, there is often no direct
|
||||
// critical path latency relationship between producer and consumer spans.
|
||||
SpanKindConsumer SpanKind = 5
|
||||
)
|
||||
|
||||
// Event is a time-stamped annotation of the span, consisting of user-supplied
|
||||
// text description and key-value pairs.
|
||||
// SpanEvent is a time-stamped annotation of the span, consisting of
|
||||
// user-supplied text description and key-value pairs.
|
||||
type SpanEvent struct {
|
||||
// time_unix_nano is the time the event occurred.
|
||||
Time time.Time `json:"timeUnixNano,omitempty"`
|
||||
|
|
@ -369,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// A pointer from the current span to another span in the same trace or in a
|
||||
// different trace. For example, this can be used in batching operations,
|
||||
// where a single batch handler processes multiple requests from different
|
||||
// traces or when the handler receives a request from a different project.
|
||||
// SpanLink is a reference from the current span to another span in the same
|
||||
// trace or in a different trace. For example, this can be used in batching
|
||||
// operations, where a single batch handler processes multiple requests from
|
||||
// different traces or when the handler receives a request from a different
|
||||
// project.
|
||||
type SpanLink struct {
|
||||
// A unique identifier of a trace that this linked span is part of. The ID is a
|
||||
// 16-byte array.
12 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go generated vendored
@ -3,17 +3,19 @@
|
|||
|
||||
package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
|
||||
|
||||
// StatusCode is the status of a Span.
|
||||
//
|
||||
// For the semantics of status codes see
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
|
||||
type StatusCode int32
|
||||
|
||||
const (
|
||||
// The default status.
|
||||
// StatusCodeUnset is the default status.
|
||||
StatusCodeUnset StatusCode = 0
|
||||
// The Span has been validated by an Application developer or Operator to
|
||||
// have completed successfully.
|
||||
// StatusCodeOK is used when the Span has been validated by an Application
|
||||
// developer or Operator to have completed successfully.
|
||||
StatusCodeOK StatusCode = 1
|
||||
// The Span contains an error.
|
||||
// StatusCodeError is used when the Span contains an error.
|
||||
StatusCodeError StatusCode = 2
|
||||
)
|
||||
|
||||
|
|
@ -30,7 +32,7 @@ func (s StatusCode) String() string {
|
|||
return "<unknown telemetry.StatusCode>"
|
||||
}
|
||||
|
||||
// The Status type defines a logical error model that is suitable for different
|
||||
// Status defines a logical error model that is suitable for different
|
||||
// programming environments, including REST APIs and RPC APIs.
|
||||
type Status struct {
|
||||
// A developer-facing human readable error message.
4 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go generated vendored
@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// A collection of ScopeSpans from a Resource.
|
||||
// ResourceSpans is a collection of ScopeSpans from a Resource.
|
||||
type ResourceSpans struct {
|
||||
// The resource for the spans in this message.
|
||||
// If this field is not set then no resource info is known.
|
||||
|
|
@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// A collection of Spans produced by an InstrumentationScope.
|
||||
// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
|
||||
type ScopeSpans struct {
|
||||
// The instrumentation scope information for the spans in this message.
|
||||
// Semantically when InstrumentationScope isn't set, it is equivalent with
2 vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go generated vendored
@ -316,7 +316,7 @@ func (v Value) String() string {
|
|||
case ValueKindBool:
|
||||
return strconv.FormatBool(v.asBool())
|
||||
case ValueKindBytes:
|
||||
return fmt.Sprint(v.asBytes())
|
||||
return string(v.asBytes())
|
||||
case ValueKindMap:
|
||||
return fmt.Sprint(v.asMap())
|
||||
case ValueKindSlice:
2 vendor/go.opentelemetry.io/otel/trace/noop.go generated vendored
@ -95,6 +95,8 @@ var autoInstEnabled = new(bool)
|
|||
// tracerProvider return a noopTracerProvider if autoEnabled is false,
|
||||
// otherwise it will return a TracerProvider from the sdk package used in
|
||||
// auto-instrumentation.
|
||||
//
|
||||
//go:noinline
|
||||
func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
|
||||
if *autoEnabled {
|
||||
return newAutoTracerProvider()
21 vendor/go.opentelemetry.io/otel/verify_readmes.sh generated vendored
@ -1,21 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Copyright The OpenTelemetry Authors
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
dirs=$(find . -type d -not -path "*/internal*" -not -path "*/test*" -not -path "*/example*" -not -path "*/.*" | sort)
|
||||
|
||||
missingReadme=false
|
||||
for dir in $dirs; do
|
||||
if [ ! -f "$dir/README.md" ]; then
|
||||
echo "couldn't find README.md for $dir"
|
||||
missingReadme=true
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$missingReadme" = true ] ; then
|
||||
echo "Error: some READMEs couldn't be found."
|
||||
exit 1
|
||||
fi
2 vendor/go.opentelemetry.io/otel/version.go generated vendored
@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
|
|||
|
||||
// Version is the current release version of OpenTelemetry in use.
|
||||
func Version() string {
|
||||
return "1.35.0"
|
||||
return "1.36.0"
|
||||
}
8 vendor/go.opentelemetry.io/otel/versions.yaml generated vendored
@ -3,7 +3,7 @@
|
|||
|
||||
module-sets:
|
||||
stable-v1:
|
||||
version: v1.35.0
|
||||
version: v1.36.0
|
||||
modules:
|
||||
- go.opentelemetry.io/otel
|
||||
- go.opentelemetry.io/otel/bridge/opencensus
|
||||
|
|
@ -23,11 +23,11 @@ module-sets:
|
|||
- go.opentelemetry.io/otel/sdk/metric
|
||||
- go.opentelemetry.io/otel/trace
|
||||
experimental-metrics:
|
||||
version: v0.57.0
|
||||
version: v0.58.0
|
||||
modules:
|
||||
- go.opentelemetry.io/otel/exporters/prometheus
|
||||
experimental-logs:
|
||||
version: v0.11.0
|
||||
version: v0.12.0
|
||||
modules:
|
||||
- go.opentelemetry.io/otel/log
|
||||
- go.opentelemetry.io/otel/sdk/log
|
||||
|
|
@ -40,4 +40,6 @@ module-sets:
|
|||
- go.opentelemetry.io/otel/schema
|
||||
excluded-modules:
|
||||
- go.opentelemetry.io/otel/internal/tools
|
||||
- go.opentelemetry.io/otel/log/logtest
|
||||
- go.opentelemetry.io/otel/sdk/log/logtest
|
||||
- go.opentelemetry.io/otel/trace/internal/telemetry/test
2 vendor/golang.org/x/oauth2/internal/doc.go generated vendored
@ -2,5 +2,5 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains support packages for oauth2 package.
|
||||
// Package internal contains support packages for [golang.org/x/oauth2].
|
||||
package internal
2 vendor/golang.org/x/oauth2/internal/oauth2.go generated vendored
@ -13,7 +13,7 @@ import (
|
|||
)
|
||||
|
||||
// ParseKey converts the binary contents of a private key file
|
||||
// to an *rsa.PrivateKey. It detects whether the private key is in a
|
||||
// to an [*rsa.PrivateKey]. It detects whether the private key is in a
|
||||
// PEM container or not. If so, it extracts the private key
|
||||
// from PEM container before conversion. It only supports PEM
|
||||
// containers with no passphrase.
50 vendor/golang.org/x/oauth2/internal/token.go generated vendored
@ -10,7 +10,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
|
@ -26,9 +25,9 @@ import (
|
|||
// the requests to access protected resources on the OAuth 2.0
|
||||
// provider's backend.
|
||||
//
|
||||
// This type is a mirror of oauth2.Token and exists to break
|
||||
// This type is a mirror of [golang.org/x/oauth2.Token] and exists to break
|
||||
// an otherwise-circular dependency. Other internal packages
|
||||
// should convert this Token into an oauth2.Token before use.
|
||||
// should convert this Token into an [golang.org/x/oauth2.Token] before use.
|
||||
type Token struct {
|
||||
// AccessToken is the token that authorizes and authenticates
|
||||
// the requests.
|
||||
|
|
@ -50,9 +49,16 @@ type Token struct {
|
|||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time
|
||||
|
||||
// ExpiresIn is the OAuth2 wire format "expires_in" field,
|
||||
// which specifies how many seconds later the token expires,
|
||||
// relative to an unknown time base approximately around "now".
|
||||
// It is the application's responsibility to populate
|
||||
// `Expiry` from `ExpiresIn` when required.
|
||||
ExpiresIn int64 `json:"expires_in,omitempty"`
|
||||
|
||||
// Raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
Raw interface{}
|
||||
Raw any
|
||||
}
|
||||
|
||||
// tokenJSON is the struct representing the HTTP response from OAuth2
|
||||
|
|
@ -99,14 +105,6 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
//
|
||||
// Deprecated: this function no longer does anything. Caller code that
|
||||
// wants to avoid potential extra HTTP requests made during
|
||||
// auto-probing of the provider's auth style should set
|
||||
// Endpoint.AuthStyle.
|
||||
func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
|
||||
|
||||
// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
|
||||
type AuthStyle int
|
||||
|
||||
|
|
@ -143,6 +141,11 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
|
|||
return c
|
||||
}
|
||||
|
||||
type authStyleCacheKey struct {
|
||||
url string
|
||||
clientID string
|
||||
}
|
||||
|
||||
// AuthStyleCache is the set of tokenURLs we've successfully used via
|
||||
// RetrieveToken and which style auth we ended up using.
|
||||
// It's called a cache, but it doesn't (yet?) shrink. It's expected that
|
||||
|
|
@ -150,26 +153,26 @@ func (lc *LazyAuthStyleCache) Get() *AuthStyleCache {
|
|||
// small.
|
||||
type AuthStyleCache struct {
|
||||
mu sync.Mutex
|
||||
m map[string]AuthStyle // keyed by tokenURL
|
||||
m map[authStyleCacheKey]AuthStyle
|
||||
}
|
||||
|
||||
// lookupAuthStyle reports which auth style we last used with tokenURL
|
||||
// when calling RetrieveToken and whether we have ever done so.
|
||||
func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
|
||||
func (c *AuthStyleCache) lookupAuthStyle(tokenURL, clientID string) (style AuthStyle, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
style, ok = c.m[tokenURL]
|
||||
style, ok = c.m[authStyleCacheKey{tokenURL, clientID}]
|
||||
return
|
||||
}
|
||||
|
||||
// setAuthStyle adds an entry to authStyleCache, documented above.
|
||||
func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) {
|
||||
func (c *AuthStyleCache) setAuthStyle(tokenURL, clientID string, v AuthStyle) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if c.m == nil {
|
||||
c.m = make(map[string]AuthStyle)
|
||||
c.m = make(map[authStyleCacheKey]AuthStyle)
|
||||
}
|
||||
c.m[tokenURL] = v
|
||||
c.m[authStyleCacheKey{tokenURL, clientID}] = v
|
||||
}
|
||||
|
||||
// newTokenRequest returns a new *http.Request to retrieve a new token
|
||||
|
|
@ -210,9 +213,9 @@ func cloneURLValues(v url.Values) url.Values {
|
|||
}
|
||||
|
||||
func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) {
|
||||
needsAuthStyleProbe := authStyle == 0
|
||||
needsAuthStyleProbe := authStyle == AuthStyleUnknown
|
||||
if needsAuthStyleProbe {
|
||||
if style, ok := styleCache.lookupAuthStyle(tokenURL); ok {
|
||||
if style, ok := styleCache.lookupAuthStyle(tokenURL, clientID); ok {
|
||||
authStyle = style
|
||||
needsAuthStyleProbe = false
|
||||
} else {
|
||||
|
|
@ -242,7 +245,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
|
|||
token, err = doTokenRoundTrip(ctx, req)
|
||||
}
|
||||
if needsAuthStyleProbe && err == nil {
|
||||
styleCache.setAuthStyle(tokenURL, authStyle)
|
||||
styleCache.setAuthStyle(tokenURL, clientID, authStyle)
|
||||
}
|
||||
// Don't overwrite `RefreshToken` with an empty value
|
||||
// if this was a token refreshing request.
|
||||
|
|
@ -257,7 +260,7 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
|
||||
body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20))
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
|
||||
|
|
@ -312,7 +315,8 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
|
|||
TokenType: tj.TokenType,
|
||||
RefreshToken: tj.RefreshToken,
|
||||
Expiry: tj.expiry(),
|
||||
Raw: make(map[string]interface{}),
|
||||
ExpiresIn: int64(tj.ExpiresIn),
|
||||
Raw: make(map[string]any),
|
||||
}
|
||||
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
|
||||
}
4 vendor/golang.org/x/oauth2/internal/transport.go generated vendored
@ -9,8 +9,8 @@ import (
|
|||
"net/http"
|
||||
)
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
// HTTPClient is the context key to use with [context.WithValue]
|
||||
// to associate an [*http.Client] value with a context.
|
||||
var HTTPClient ContextKey
|
||||
|
||||
// ContextKey is just an empty struct. It exists so HTTPClient can be
55 vendor/golang.org/x/oauth2/oauth2.go generated vendored
@ -22,9 +22,9 @@ import (
|
|||
)
|
||||
|
||||
// NoContext is the default context you should supply if not using
|
||||
// your own context.Context (see https://golang.org/x/net/context).
|
||||
// your own [context.Context].
|
||||
//
|
||||
// Deprecated: Use context.Background() or context.TODO() instead.
|
||||
// Deprecated: Use [context.Background] or [context.TODO] instead.
|
||||
var NoContext = context.TODO()
|
||||
|
||||
// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
|
||||
|
|
@ -37,8 +37,8 @@ func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
|
|||
|
||||
// Config describes a typical 3-legged OAuth2 flow, with both the
|
||||
// client application information and the server's endpoint URLs.
|
||||
// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
|
||||
// package (https://golang.org/x/oauth2/clientcredentials).
|
||||
// For the client credentials 2-legged OAuth2 flow, see the
|
||||
// [golang.org/x/oauth2/clientcredentials] package.
|
||||
type Config struct {
|
||||
// ClientID is the application's ID.
|
||||
ClientID string
|
||||
|
|
@ -46,7 +46,7 @@ type Config struct {
|
|||
// ClientSecret is the application's secret.
|
||||
ClientSecret string
|
||||
|
||||
// Endpoint contains the resource server's token endpoint
|
||||
// Endpoint contains the authorization server's token endpoint
|
||||
// URLs. These are constants specific to each server and are
|
||||
// often available via site-specific packages, such as
|
||||
// google.Endpoint or github.Endpoint.
|
||||
|
|
@ -135,7 +135,7 @@ type setParam struct{ k, v string }
|
|||
|
||||
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
|
||||
|
||||
// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
|
||||
// SetAuthURLParam builds an [AuthCodeOption] which passes key/value parameters
|
||||
// to a provider's authorization endpoint.
|
||||
func SetAuthURLParam(key, value string) AuthCodeOption {
|
||||
return setParam{key, value}
|
||||
|
|
@ -148,8 +148,8 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
|
|||
// request and callback. The authorization server includes this value when
|
||||
// redirecting the user agent back to the client.
|
||||
//
|
||||
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
|
||||
// as ApprovalForce.
|
||||
// Opts may include [AccessTypeOnline] or [AccessTypeOffline], as well
|
||||
// as [ApprovalForce].
|
||||
//
|
||||
// To protect against CSRF attacks, opts should include a PKCE challenge
|
||||
// (S256ChallengeOption). Not all servers support PKCE. An alternative is to
|
||||
|
|
@ -194,7 +194,7 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
|
|||
// and when other authorization grant types are not available."
|
||||
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
|
||||
//
|
||||
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
|
||||
// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
|
||||
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
|
||||
v := url.Values{
|
||||
"grant_type": {"password"},
|
||||
|
|
@ -212,10 +212,10 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
|
|||
// It is used after a resource provider redirects the user back
|
||||
// to the Redirect URI (the URL obtained from AuthCodeURL).
|
||||
//
|
||||
// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
|
||||
// The provided context optionally controls which HTTP client is used. See the [HTTPClient] variable.
|
||||
//
|
||||
// The code will be in the *http.Request.FormValue("code"). Before
|
||||
// calling Exchange, be sure to validate FormValue("state") if you are
|
||||
// The code will be in the [http.Request.FormValue]("code"). Before
|
||||
// calling Exchange, be sure to validate [http.Request.FormValue]("state") if you are
|
||||
// using it to protect against CSRF attacks.
|
||||
//
|
||||
// If using PKCE to protect against CSRF attacks, opts should include a
|
||||
|
|
@ -242,10 +242,10 @@ func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
|
|||
return NewClient(ctx, c.TokenSource(ctx, t))
|
||||
}
|
||||
|
||||
// TokenSource returns a TokenSource that returns t until t expires,
|
||||
// TokenSource returns a [TokenSource] that returns t until t expires,
|
||||
// automatically refreshing it as necessary using the provided context.
|
||||
//
|
||||
// Most users will use Config.Client instead.
|
||||
// Most users will use [Config.Client] instead.
|
||||
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
|
||||
tkr := &tokenRefresher{
|
||||
ctx: ctx,
|
||||
|
|
@ -260,7 +260,7 @@ func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
|
|||
}
|
||||
}
|
||||
|
||||
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
|
||||
// tokenRefresher is a TokenSource that makes "grant_type=refresh_token"
|
||||
// HTTP requests to renew a token using a RefreshToken.
|
||||
type tokenRefresher struct {
|
||||
ctx context.Context // used to get HTTP requests
|
||||
|
|
@ -305,8 +305,7 @@ type reuseTokenSource struct {
|
|||
}
|
||||
|
||||
// Token returns the current token if it's still valid, else will
|
||||
// refresh the current token (using r.Context for HTTP client
|
||||
// information) and return the new one.
|
||||
// refresh the current token and return the new one.
|
||||
func (s *reuseTokenSource) Token() (*Token, error) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
|
@ -322,7 +321,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
|
|||
return t, nil
|
||||
}
|
||||
|
||||
// StaticTokenSource returns a TokenSource that always returns the same token.
|
||||
// StaticTokenSource returns a [TokenSource] that always returns the same token.
|
||||
// Because the provided token t is never refreshed, StaticTokenSource is only
|
||||
// useful for tokens that never expire.
|
||||
func StaticTokenSource(t *Token) TokenSource {
|
||||
|
|
@ -338,16 +337,16 @@ func (s staticTokenSource) Token() (*Token, error) {
|
|||
return s.t, nil
|
||||
}
|
||||
|
||||
// HTTPClient is the context key to use with golang.org/x/net/context's
|
||||
// WithValue function to associate an *http.Client value with a context.
|
||||
// HTTPClient is the context key to use with [context.WithValue]
|
||||
// to associate a [*http.Client] value with a context.
|
||||
var HTTPClient internal.ContextKey
|
||||
|
||||
// NewClient creates an *http.Client from a Context and TokenSource.
|
||||
// NewClient creates an [*http.Client] from a [context.Context] and [TokenSource].
|
||||
// The returned client is not valid beyond the lifetime of the context.
|
||||
//
|
||||
// Note that if a custom *http.Client is provided via the Context it
|
||||
// Note that if a custom [*http.Client] is provided via the [context.Context] it
|
||||
// is used only for token acquisition and is not used to configure the
|
||||
// *http.Client returned from NewClient.
|
||||
// [*http.Client] returned from NewClient.
|
||||
//
|
||||
// As a special case, if src is nil, a non-OAuth2 client is returned
|
||||
// using the provided context. This exists to support related OAuth2
|
||||
|
|
@ -368,7 +367,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
|
|||
}
|
||||
}
|
||||
|
||||
// ReuseTokenSource returns a TokenSource which repeatedly returns the
|
||||
// ReuseTokenSource returns a [TokenSource] which repeatedly returns the
|
||||
// same token as long as it's valid, starting with t.
|
||||
// When its cached token is invalid, a new token is obtained from src.
|
||||
//
|
||||
|
|
@ -376,10 +375,10 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client {
|
|||
// (such as a file on disk) between runs of a program, rather than
|
||||
// obtaining new tokens unnecessarily.
|
||||
//
|
||||
// The initial token t may be nil, in which case the TokenSource is
|
||||
// The initial token t may be nil, in which case the [TokenSource] is
|
||||
// wrapped in a caching version if it isn't one already. This also
|
||||
// means it's always safe to wrap ReuseTokenSource around any other
|
||||
// TokenSource without adverse effects.
|
||||
// [TokenSource] without adverse effects.
|
||||
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
||||
// Don't wrap a reuseTokenSource in itself. That would work,
|
||||
// but cause an unnecessary number of mutex operations.
|
||||
|
|
@ -397,8 +396,8 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
|
|||
}
|
||||
}
|
||||
|
||||
// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
|
||||
// TokenSource returned by ReuseTokenSource, except the expiry buffer is
|
||||
// ReuseTokenSourceWithExpiry returns a [TokenSource] that acts in the same manner as the
|
||||
// [TokenSource] returned by [ReuseTokenSource], except the expiry buffer is
|
||||
// configurable. The expiration time of a token is calculated as
|
||||
// t.Expiry.Add(-earlyExpiry).
|
||||
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
15 vendor/golang.org/x/oauth2/pkce.go generated vendored
@ -1,6 +1,7 @@
|
|||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
|
|
@ -20,9 +21,9 @@ const (
|
|||
// This follows recommendations in RFC 7636.
|
||||
//
|
||||
// A fresh verifier should be generated for each authorization.
|
||||
// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
|
||||
// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
|
||||
// (or Config.DeviceAccessToken).
|
||||
// The resulting verifier should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
|
||||
// with [S256ChallengeOption], and to [Config.Exchange] or [Config.DeviceAccessToken]
|
||||
// with [VerifierOption].
|
||||
func GenerateVerifier() string {
|
||||
// "RECOMMENDED that the output of a suitable random number generator be
|
||||
// used to create a 32-octet sequence. The octet sequence is then
|
||||
|
|
@ -36,22 +37,22 @@ func GenerateVerifier() string {
|
|||
return base64.RawURLEncoding.EncodeToString(data)
|
||||
}
|
||||
|
||||
// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
|
||||
// passed to Config.Exchange or Config.DeviceAccessToken only.
|
||||
// VerifierOption returns a PKCE code verifier [AuthCodeOption]. It should only be
|
||||
// passed to [Config.Exchange] or [Config.DeviceAccessToken].
|
||||
func VerifierOption(verifier string) AuthCodeOption {
|
||||
return setParam{k: codeVerifierKey, v: verifier}
|
||||
}
|
||||
|
||||
// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
|
||||
//
|
||||
// Prefer to use S256ChallengeOption where possible.
|
||||
// Prefer to use [S256ChallengeOption] where possible.
|
||||
func S256ChallengeFromVerifier(verifier string) string {
|
||||
sha := sha256.Sum256([]byte(verifier))
|
||||
return base64.RawURLEncoding.EncodeToString(sha[:])
|
||||
}
|
||||
|
||||
// S256ChallengeOption derives a PKCE code challenge derived from verifier with
|
||||
// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
|
||||
// method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
|
||||
// only.
|
||||
func S256ChallengeOption(verifier string) AuthCodeOption {
|
||||
return challengeOption{
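The rewritten pkce.go comments above spell out how the PKCE helpers are meant to be paired: a fresh verifier per authorization, the challenge passed to the authorization endpoint, and the verifier passed to the code exchange. A small sketch of that flow; the client ID, redirect URL, and endpoint URLs are placeholders:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "my-client-id",           // placeholder
		RedirectURL: "https://example.com/cb", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // placeholder
			TokenURL: "https://auth.example.com/token",     // placeholder
		},
	}

	// A fresh verifier should be generated for each authorization (RFC 7636).
	verifier := oauth2.GenerateVerifier()

	// The derived challenge goes to the authorization endpoint...
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// ...and the verifier accompanies the code exchange.
	tok, err := conf.Exchange(context.Background(), "auth-code-from-callback",
		oauth2.VerifierOption(verifier))
	_ = tok
	_ = err
}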
15 vendor/golang.org/x/oauth2/token.go generated vendored
@ -44,7 +44,7 @@ type Token struct {
|
|||
|
||||
// Expiry is the optional expiration time of the access token.
|
||||
//
|
||||
// If zero, TokenSource implementations will reuse the same
|
||||
// If zero, [TokenSource] implementations will reuse the same
|
||||
// token forever and RefreshToken or equivalent
|
||||
// mechanisms for that TokenSource will not be used.
|
||||
Expiry time.Time `json:"expiry,omitempty"`
|
||||
|
|
@ -58,7 +58,7 @@ type Token struct {
|
|||
|
||||
// raw optionally contains extra metadata from the server
|
||||
// when updating a token.
|
||||
raw interface{}
|
||||
raw any
|
||||
|
||||
// expiryDelta is used to calculate when a token is considered
|
||||
// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
|
||||
|
|
@ -86,16 +86,16 @@ func (t *Token) Type() string {
|
|||
// SetAuthHeader sets the Authorization header to r using the access
|
||||
// token in t.
|
||||
//
|
||||
// This method is unnecessary when using Transport or an HTTP Client
|
||||
// This method is unnecessary when using [Transport] or an HTTP Client
|
||||
// returned by this package.
|
||||
func (t *Token) SetAuthHeader(r *http.Request) {
|
||||
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
|
||||
}
|
||||
|
||||
// WithExtra returns a new Token that's a clone of t, but using the
|
||||
// WithExtra returns a new [Token] that's a clone of t, but using the
|
||||
// provided raw extra map. This is only intended for use by packages
|
||||
// implementing derivative OAuth2 flows.
|
||||
func (t *Token) WithExtra(extra interface{}) *Token {
|
||||
func (t *Token) WithExtra(extra any) *Token {
|
||||
t2 := new(Token)
|
||||
*t2 = *t
|
||||
t2.raw = extra
|
||||
|
|
@ -105,8 +105,8 @@ func (t *Token) WithExtra(extra interface{}) *Token {
|
|||
// Extra returns an extra field.
|
||||
// Extra fields are key-value pairs returned by the server as a
|
||||
// part of the token retrieval response.
|
||||
func (t *Token) Extra(key string) interface{} {
|
||||
if raw, ok := t.raw.(map[string]interface{}); ok {
|
||||
func (t *Token) Extra(key string) any {
|
||||
if raw, ok := t.raw.(map[string]any); ok {
|
||||
return raw[key]
|
||||
}
|
||||
|
||||
|
|
@ -163,6 +163,7 @@ func tokenFromInternal(t *internal.Token) *Token {
|
|||
TokenType: t.TokenType,
|
||||
RefreshToken: t.RefreshToken,
|
||||
Expiry: t.Expiry,
|
||||
ExpiresIn: t.ExpiresIn,
|
||||
raw: t.Raw,
|
||||
}
|
||||
}
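For context, a short sketch of the Token helpers touched above; the API host is a placeholder:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "example-token", TokenType: "Bearer"}

	// SetAuthHeader is only needed when not using oauth2.Transport or a
	// client returned by Config.Client.
	req, _ := http.NewRequest("GET", "https://api.example.com/resource", nil)
	tok.SetAuthHeader(req)
	fmt.Println(req.Header.Get("Authorization"))

	// Extra metadata from the token response is reachable through Extra.
	tok = tok.WithExtra(map[string]interface{}{"scope": "read write"})
	fmt.Println(tok.Extra("scope"))
}
```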
|
||||
|
|
|
|||
24 vendor/golang.org/x/oauth2/transport.go generated vendored
|
|
@ -11,12 +11,12 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
|
||||
// wrapping a base RoundTripper and adding an Authorization header
|
||||
// with a token from the supplied Sources.
|
||||
// Transport is an [http.RoundTripper] that makes OAuth 2.0 HTTP requests,
|
||||
// wrapping a base [http.RoundTripper] and adding an Authorization header
|
||||
// with a token from the supplied [TokenSource].
|
||||
//
|
||||
// Transport is a low-level mechanism. Most code will use the
|
||||
// higher-level Config.Client method instead.
|
||||
// higher-level [Config.Client] method instead.
|
||||
type Transport struct {
|
||||
// Source supplies the token to add to outgoing requests'
|
||||
// Authorization headers.
|
||||
|
|
@ -47,7 +47,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
req2 := cloneRequest(req) // per RoundTripper contract
|
||||
req2 := req.Clone(req.Context())
|
||||
token.SetAuthHeader(req2)
|
||||
|
||||
// req.Body is assumed to be closed by the base RoundTripper.
|
||||
|
|
@ -73,17 +73,3 @@ func (t *Transport) base() http.RoundTripper {
|
|||
}
|
||||
return http.DefaultTransport
|
||||
}
|
||||
|
||||
// cloneRequest returns a clone of the provided *http.Request.
|
||||
// The clone is a shallow copy of the struct and its Header map.
|
||||
func cloneRequest(r *http.Request) *http.Request {
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *r
|
||||
// deep copy of the Header
|
||||
r2.Header = make(http.Header, len(r.Header))
|
||||
for k, s := range r.Header {
|
||||
r2.Header[k] = append([]string(nil), s...)
|
||||
}
|
||||
return r2
|
||||
}
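A small usage sketch for Transport after the switch to req.Clone; the token value and URL are placeholders:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "example-token"})

	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: ts,
			// Base is nil, so http.DefaultTransport is used underneath.
		},
	}

	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```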
|
||||
|
|
|
|||
107 vendor/golang.org/x/sync/errgroup/errgroup.go generated vendored
|
|
@ -12,6 +12,8 @@ package errgroup
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
)
|
||||
|
||||
|
|
@ -31,6 +33,10 @@ type Group struct {
|
|||
|
||||
errOnce sync.Once
|
||||
err error
|
||||
|
||||
mu sync.Mutex
|
||||
panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked.
|
||||
abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit).
|
||||
}
|
||||
|
||||
func (g *Group) done() {
|
||||
|
|
@ -50,13 +56,22 @@ func WithContext(ctx context.Context) (*Group, context.Context) {
|
|||
return &Group{cancel: cancel}, ctx
|
||||
}
|
||||
|
||||
// Wait blocks until all function calls from the Go method have returned, then
|
||||
// returns the first non-nil error (if any) from them.
|
||||
// Wait blocks until all function calls from the Go method have returned
|
||||
// normally, then returns the first non-nil error (if any) from them.
|
||||
//
|
||||
// If any of the calls panics, Wait panics with a [PanicValue];
|
||||
// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit.
|
||||
func (g *Group) Wait() error {
|
||||
g.wg.Wait()
|
||||
if g.cancel != nil {
|
||||
g.cancel(g.err)
|
||||
}
|
||||
if g.panicValue != nil {
|
||||
panic(g.panicValue)
|
||||
}
|
||||
if g.abnormal {
|
||||
runtime.Goexit()
|
||||
}
|
||||
return g.err
|
||||
}
|
||||
|
||||
|
|
@ -65,18 +80,56 @@ func (g *Group) Wait() error {
|
|||
// It blocks until the new goroutine can be added without the number of
|
||||
// active goroutines in the group exceeding the configured limit.
|
||||
//
|
||||
// The first call to return a non-nil error cancels the group's context, if the
|
||||
// group was created by calling WithContext. The error will be returned by Wait.
|
||||
// It blocks until the new goroutine can be added without the number of
|
||||
// goroutines in the group exceeding the configured limit.
|
||||
//
|
||||
// The first goroutine in the group that returns a non-nil error, panics, or
|
||||
// invokes [runtime.Goexit] will cancel the associated Context, if any.
|
||||
func (g *Group) Go(f func() error) {
|
||||
if g.sem != nil {
|
||||
g.sem <- token{}
|
||||
}
|
||||
|
||||
g.add(f)
|
||||
}
|
||||
|
||||
func (g *Group) add(f func() error) {
|
||||
g.wg.Add(1)
|
||||
go func() {
|
||||
defer g.done()
|
||||
normalReturn := false
|
||||
defer func() {
|
||||
if normalReturn {
|
||||
return
|
||||
}
|
||||
v := recover()
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
if !g.abnormal {
|
||||
if g.cancel != nil {
|
||||
g.cancel(g.err)
|
||||
}
|
||||
g.abnormal = true
|
||||
}
|
||||
if v != nil && g.panicValue == nil {
|
||||
switch v := v.(type) {
|
||||
case error:
|
||||
g.panicValue = PanicError{
|
||||
Recovered: v,
|
||||
Stack: debug.Stack(),
|
||||
}
|
||||
default:
|
||||
g.panicValue = PanicValue{
|
||||
Recovered: v,
|
||||
Stack: debug.Stack(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if err := f(); err != nil {
|
||||
err := f()
|
||||
normalReturn = true
|
||||
if err != nil {
|
||||
g.errOnce.Do(func() {
|
||||
g.err = err
|
||||
if g.cancel != nil {
|
||||
|
|
@ -101,19 +154,7 @@ func (g *Group) TryGo(f func() error) bool {
|
|||
}
|
||||
}
|
||||
|
||||
g.wg.Add(1)
|
||||
go func() {
|
||||
defer g.done()
|
||||
|
||||
if err := f(); err != nil {
|
||||
g.errOnce.Do(func() {
|
||||
g.err = err
|
||||
if g.cancel != nil {
|
||||
g.cancel(g.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}()
|
||||
g.add(f)
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -135,3 +176,33 @@ func (g *Group) SetLimit(n int) {
|
|||
}
|
||||
g.sem = make(chan token, n)
|
||||
}
|
||||
|
||||
// PanicError wraps an error recovered from an unhandled panic
|
||||
// when calling a function passed to Go or TryGo.
|
||||
type PanicError struct {
|
||||
Recovered error
|
||||
Stack []byte // result of call to [debug.Stack]
|
||||
}
|
||||
|
||||
func (p PanicError) Error() string {
|
||||
// A Go Error method conventionally does not include a stack dump, so omit it
|
||||
// here. (Callers who care can extract it from the Stack field.)
|
||||
return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
|
||||
}
|
||||
|
||||
func (p PanicError) Unwrap() error { return p.Recovered }
|
||||
|
||||
// PanicValue wraps a value that does not implement the error interface,
|
||||
// recovered from an unhandled panic when calling a function passed to Go or
|
||||
// TryGo.
|
||||
type PanicValue struct {
|
||||
Recovered any
|
||||
Stack []byte // result of call to [debug.Stack]
|
||||
}
|
||||
|
||||
func (p PanicValue) String() string {
|
||||
if len(p.Stack) > 0 {
|
||||
return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack)
|
||||
}
|
||||
return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered)
|
||||
}
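A hedged sketch of the Wait semantics described above: errors are collected exactly as before, while a panic in one of the goroutines now resurfaces from Wait as a PanicError or PanicValue:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group

	g.Go(func() error { return errors.New("boom") })
	g.Go(func() error { return nil })

	// Wait returns the first non-nil error. With this version, if one of
	// the goroutines had panicked instead, Wait would re-panic with an
	// errgroup.PanicError (error values) or errgroup.PanicValue (other values).
	if err := g.Wait(); err != nil {
		fmt.Println("first error:", err)
	}
}
```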
|
||||
|
|
|
|||
11 vendor/golang.org/x/sys/cpu/cpu.go generated vendored
|
|
@ -232,6 +232,17 @@ var RISCV64 struct {
|
|||
HasZba bool // Address generation instructions extension
|
||||
HasZbb bool // Basic bit-manipulation extension
|
||||
HasZbs bool // Single-bit instructions extension
|
||||
HasZvbb bool // Vector Basic Bit-manipulation
|
||||
HasZvbc bool // Vector Carryless Multiplication
|
||||
HasZvkb bool // Vector Cryptography Bit-manipulation
|
||||
HasZvkt bool // Vector Data-Independent Execution Latency
|
||||
HasZvkg bool // Vector GCM/GMAC
|
||||
HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512)
|
||||
HasZvknc bool // NIST Algorithm Suite with carryless multiply
|
||||
HasZvkng bool // NIST Algorithm Suite with GCM
|
||||
HasZvks bool // ShangMi Algorithm Suite
|
||||
HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication
|
||||
HasZvksg bool // ShangMi Algorithm Suite with GCM
|
||||
_ CacheLinePad
|
||||
}
|
||||
|
||||
|
|
|
|||
23 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go generated vendored
|
|
@ -58,6 +58,15 @@ const (
|
|||
riscv_HWPROBE_EXT_ZBA = 0x8
|
||||
riscv_HWPROBE_EXT_ZBB = 0x10
|
||||
riscv_HWPROBE_EXT_ZBS = 0x20
|
||||
riscv_HWPROBE_EXT_ZVBB = 0x20000
|
||||
riscv_HWPROBE_EXT_ZVBC = 0x40000
|
||||
riscv_HWPROBE_EXT_ZVKB = 0x80000
|
||||
riscv_HWPROBE_EXT_ZVKG = 0x100000
|
||||
riscv_HWPROBE_EXT_ZVKNED = 0x200000
|
||||
riscv_HWPROBE_EXT_ZVKNHB = 0x800000
|
||||
riscv_HWPROBE_EXT_ZVKSED = 0x1000000
|
||||
riscv_HWPROBE_EXT_ZVKSH = 0x2000000
|
||||
riscv_HWPROBE_EXT_ZVKT = 0x4000000
|
||||
riscv_HWPROBE_KEY_CPUPERF_0 = 0x5
|
||||
riscv_HWPROBE_MISALIGNED_FAST = 0x3
|
||||
riscv_HWPROBE_MISALIGNED_MASK = 0x7
|
||||
|
|
@ -99,6 +108,20 @@ func doinit() {
|
|||
RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA)
|
||||
RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB)
|
||||
RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS)
|
||||
RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB)
|
||||
RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC)
|
||||
RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB)
|
||||
RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG)
|
||||
RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT)
|
||||
// Cryptography shorthand extensions
|
||||
RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) &&
|
||||
isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt
|
||||
RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc
|
||||
RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg
|
||||
RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) &&
|
||||
isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt
|
||||
RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc
|
||||
RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg
|
||||
}
|
||||
if pairs[1].key != -1 {
|
||||
v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK
|
||||
|
|
|
|||
12 vendor/golang.org/x/sys/cpu/cpu_riscv64.go generated vendored
|
|
@ -16,5 +16,17 @@ func initOptions() {
|
|||
{Name: "zba", Feature: &RISCV64.HasZba},
|
||||
{Name: "zbb", Feature: &RISCV64.HasZbb},
|
||||
{Name: "zbs", Feature: &RISCV64.HasZbs},
|
||||
// RISC-V Cryptography Extensions
|
||||
{Name: "zvbb", Feature: &RISCV64.HasZvbb},
|
||||
{Name: "zvbc", Feature: &RISCV64.HasZvbc},
|
||||
{Name: "zvkb", Feature: &RISCV64.HasZvkb},
|
||||
{Name: "zvkg", Feature: &RISCV64.HasZvkg},
|
||||
{Name: "zvkt", Feature: &RISCV64.HasZvkt},
|
||||
{Name: "zvkn", Feature: &RISCV64.HasZvkn},
|
||||
{Name: "zvknc", Feature: &RISCV64.HasZvknc},
|
||||
{Name: "zvkng", Feature: &RISCV64.HasZvkng},
|
||||
{Name: "zvks", Feature: &RISCV64.HasZvks},
|
||||
{Name: "zvksc", Feature: &RISCV64.HasZvksc},
|
||||
{Name: "zvksg", Feature: &RISCV64.HasZvksg},
|
||||
}
|
||||
}
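A trivial check of the new feature flags; on non-riscv64 targets (or older kernels without hwprobe) the fields simply stay false:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	fmt.Println("Zba:", cpu.RISCV64.HasZba)
	fmt.Println("Zvbb:", cpu.RISCV64.HasZvbb)
	fmt.Println("Zvkn (NIST suite):", cpu.RISCV64.HasZvkn)
	fmt.Println("Zvks (ShangMi suite):", cpu.RISCV64.HasZvks)
}
```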
|
||||
|
|
|
|||
49 vendor/golang.org/x/sys/windows/security_windows.go generated vendored
|
|
@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
|
|||
return nil, err
|
||||
}
|
||||
if absoluteSDSize > 0 {
|
||||
absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0]))
|
||||
absoluteSD = new(SECURITY_DESCRIPTOR)
|
||||
if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
|
||||
panic("sizeof(SECURITY_DESCRIPTOR) too small")
|
||||
}
|
||||
}
|
||||
var (
|
||||
dacl *ACL
|
||||
|
|
@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE
|
|||
group *SID
|
||||
)
|
||||
if daclSize > 0 {
|
||||
dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0]))
|
||||
dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
|
||||
}
|
||||
if saclSize > 0 {
|
||||
sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0]))
|
||||
sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
|
||||
}
|
||||
if ownerSize > 0 {
|
||||
owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0]))
|
||||
owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
|
||||
}
|
||||
if groupSize > 0 {
|
||||
group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0]))
|
||||
group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
|
||||
}
|
||||
// We call into Windows via makeAbsoluteSD, which sets up
|
||||
// pointers within absoluteSD that point to other chunks of memory
|
||||
// we pass into makeAbsoluteSD, and that happens outside the view of the GC.
|
||||
// We therefore take some care here to then verify the pointers are as we expect
|
||||
// and set them explicitly in view of the GC. See https://go.dev/issue/73199.
|
||||
// TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
|
||||
err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
|
||||
dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
|
||||
if err != nil {
|
||||
// Don't return absoluteSD, which might be partially initialized.
|
||||
return nil, err
|
||||
}
|
||||
// Before using any fields, verify absoluteSD is in the format we expect according to Windows.
|
||||
// See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
|
||||
absControl, _, err := absoluteSD.Control()
|
||||
if err != nil {
|
||||
panic("absoluteSD: " + err.Error())
|
||||
}
|
||||
if absControl&SE_SELF_RELATIVE != 0 {
|
||||
panic("absoluteSD not in absolute format")
|
||||
}
|
||||
if absoluteSD.dacl != dacl {
|
||||
panic("dacl pointer mismatch")
|
||||
}
|
||||
if absoluteSD.sacl != sacl {
|
||||
panic("sacl pointer mismatch")
|
||||
}
|
||||
if absoluteSD.owner != owner {
|
||||
panic("owner pointer mismatch")
|
||||
}
|
||||
if absoluteSD.group != group {
|
||||
panic("group pointer mismatch")
|
||||
}
|
||||
absoluteSD.dacl = dacl
|
||||
absoluteSD.sacl = sacl
|
||||
absoluteSD.owner = owner
|
||||
absoluteSD.group = group
|
||||
|
||||
return
|
||||
}
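A minimal sketch of the ToAbsolute path exercised by this change; the SDDL string is illustrative and the snippet only builds on Windows:

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Owner/group BUILTIN\Administrators, one allow ACE; purely illustrative.
	sd, err := windows.SecurityDescriptorFromString("O:BAG:BAD:(A;;GA;;;SY)")
	if err != nil {
		panic(err)
	}

	abs, err := sd.ToAbsolute()
	if err != nil {
		panic(err)
	}

	// The result must report itself as absolute (SE_SELF_RELATIVE clear).
	control, _, err := abs.Control()
	if err != nil {
		panic(err)
	}
	fmt.Printf("control flags: %#x\n", control)
}
```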
|
||||
|
||||
|
|
|
|||
6 vendor/golang.org/x/sys/windows/syscall_windows.go generated vendored
|
|
@ -870,6 +870,7 @@ const socket_error = uintptr(^uint32(0))
|
|||
//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
|
||||
//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
|
||||
//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
|
||||
//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
|
||||
//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
|
||||
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
|
||||
//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
|
||||
|
|
@ -1698,8 +1699,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
|
|||
|
||||
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
|
||||
func (s *NTUnicodeString) Slice() []uint16 {
|
||||
slice := unsafe.Slice(s.Buffer, s.MaximumLength)
|
||||
return slice[:s.Length]
|
||||
// Note: this rounds the length down, if it happens
|
||||
// to (incorrectly) be odd. Probably safer than rounding up.
|
||||
return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
|
||||
}
|
||||
|
||||
func (s *NTUnicodeString) String() string {
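A small sketch of the corrected Slice accounting (Length and MaximumLength are byte counts, so the new code halves them to get uint16 code units); Windows-only, the path string is arbitrary:

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	u, err := windows.NewNTUnicodeString(`C:\Temp`)
	if err != nil {
		panic(err)
	}
	// Slice now returns Length/2 uint16 code units rather than Length bytes.
	fmt.Println(len(u.Slice()), u.String())
}
```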
|
||||
|
|
|
|||
212 vendor/golang.org/x/sys/windows/types_windows.go generated vendored
|
|
@ -2700,6 +2700,8 @@ type CommTimeouts struct {
|
|||
|
||||
// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
|
||||
type NTUnicodeString struct {
|
||||
// Note: Length and MaximumLength are in *bytes*, not uint16s.
|
||||
// They should always be even.
|
||||
Length uint16
|
||||
MaximumLength uint16
|
||||
Buffer *uint16
|
||||
|
|
@ -3628,3 +3630,213 @@ const (
|
|||
KLF_NOTELLSHELL = 0x00000080
|
||||
KLF_SETFORPROCESS = 0x00000100
|
||||
)
|
||||
|
||||
// Virtual Key codes
|
||||
// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
|
||||
const (
|
||||
VK_LBUTTON = 0x01
|
||||
VK_RBUTTON = 0x02
|
||||
VK_CANCEL = 0x03
|
||||
VK_MBUTTON = 0x04
|
||||
VK_XBUTTON1 = 0x05
|
||||
VK_XBUTTON2 = 0x06
|
||||
VK_BACK = 0x08
|
||||
VK_TAB = 0x09
|
||||
VK_CLEAR = 0x0C
|
||||
VK_RETURN = 0x0D
|
||||
VK_SHIFT = 0x10
|
||||
VK_CONTROL = 0x11
|
||||
VK_MENU = 0x12
|
||||
VK_PAUSE = 0x13
|
||||
VK_CAPITAL = 0x14
|
||||
VK_KANA = 0x15
|
||||
VK_HANGEUL = 0x15
|
||||
VK_HANGUL = 0x15
|
||||
VK_IME_ON = 0x16
|
||||
VK_JUNJA = 0x17
|
||||
VK_FINAL = 0x18
|
||||
VK_HANJA = 0x19
|
||||
VK_KANJI = 0x19
|
||||
VK_IME_OFF = 0x1A
|
||||
VK_ESCAPE = 0x1B
|
||||
VK_CONVERT = 0x1C
|
||||
VK_NONCONVERT = 0x1D
|
||||
VK_ACCEPT = 0x1E
|
||||
VK_MODECHANGE = 0x1F
|
||||
VK_SPACE = 0x20
|
||||
VK_PRIOR = 0x21
|
||||
VK_NEXT = 0x22
|
||||
VK_END = 0x23
|
||||
VK_HOME = 0x24
|
||||
VK_LEFT = 0x25
|
||||
VK_UP = 0x26
|
||||
VK_RIGHT = 0x27
|
||||
VK_DOWN = 0x28
|
||||
VK_SELECT = 0x29
|
||||
VK_PRINT = 0x2A
|
||||
VK_EXECUTE = 0x2B
|
||||
VK_SNAPSHOT = 0x2C
|
||||
VK_INSERT = 0x2D
|
||||
VK_DELETE = 0x2E
|
||||
VK_HELP = 0x2F
|
||||
VK_LWIN = 0x5B
|
||||
VK_RWIN = 0x5C
|
||||
VK_APPS = 0x5D
|
||||
VK_SLEEP = 0x5F
|
||||
VK_NUMPAD0 = 0x60
|
||||
VK_NUMPAD1 = 0x61
|
||||
VK_NUMPAD2 = 0x62
|
||||
VK_NUMPAD3 = 0x63
|
||||
VK_NUMPAD4 = 0x64
|
||||
VK_NUMPAD5 = 0x65
|
||||
VK_NUMPAD6 = 0x66
|
||||
VK_NUMPAD7 = 0x67
|
||||
VK_NUMPAD8 = 0x68
|
||||
VK_NUMPAD9 = 0x69
|
||||
VK_MULTIPLY = 0x6A
|
||||
VK_ADD = 0x6B
|
||||
VK_SEPARATOR = 0x6C
|
||||
VK_SUBTRACT = 0x6D
|
||||
VK_DECIMAL = 0x6E
|
||||
VK_DIVIDE = 0x6F
|
||||
VK_F1 = 0x70
|
||||
VK_F2 = 0x71
|
||||
VK_F3 = 0x72
|
||||
VK_F4 = 0x73
|
||||
VK_F5 = 0x74
|
||||
VK_F6 = 0x75
|
||||
VK_F7 = 0x76
|
||||
VK_F8 = 0x77
|
||||
VK_F9 = 0x78
|
||||
VK_F10 = 0x79
|
||||
VK_F11 = 0x7A
|
||||
VK_F12 = 0x7B
|
||||
VK_F13 = 0x7C
|
||||
VK_F14 = 0x7D
|
||||
VK_F15 = 0x7E
|
||||
VK_F16 = 0x7F
|
||||
VK_F17 = 0x80
|
||||
VK_F18 = 0x81
|
||||
VK_F19 = 0x82
|
||||
VK_F20 = 0x83
|
||||
VK_F21 = 0x84
|
||||
VK_F22 = 0x85
|
||||
VK_F23 = 0x86
|
||||
VK_F24 = 0x87
|
||||
VK_NUMLOCK = 0x90
|
||||
VK_SCROLL = 0x91
|
||||
VK_OEM_NEC_EQUAL = 0x92
|
||||
VK_OEM_FJ_JISHO = 0x92
|
||||
VK_OEM_FJ_MASSHOU = 0x93
|
||||
VK_OEM_FJ_TOUROKU = 0x94
|
||||
VK_OEM_FJ_LOYA = 0x95
|
||||
VK_OEM_FJ_ROYA = 0x96
|
||||
VK_LSHIFT = 0xA0
|
||||
VK_RSHIFT = 0xA1
|
||||
VK_LCONTROL = 0xA2
|
||||
VK_RCONTROL = 0xA3
|
||||
VK_LMENU = 0xA4
|
||||
VK_RMENU = 0xA5
|
||||
VK_BROWSER_BACK = 0xA6
|
||||
VK_BROWSER_FORWARD = 0xA7
|
||||
VK_BROWSER_REFRESH = 0xA8
|
||||
VK_BROWSER_STOP = 0xA9
|
||||
VK_BROWSER_SEARCH = 0xAA
|
||||
VK_BROWSER_FAVORITES = 0xAB
|
||||
VK_BROWSER_HOME = 0xAC
|
||||
VK_VOLUME_MUTE = 0xAD
|
||||
VK_VOLUME_DOWN = 0xAE
|
||||
VK_VOLUME_UP = 0xAF
|
||||
VK_MEDIA_NEXT_TRACK = 0xB0
|
||||
VK_MEDIA_PREV_TRACK = 0xB1
|
||||
VK_MEDIA_STOP = 0xB2
|
||||
VK_MEDIA_PLAY_PAUSE = 0xB3
|
||||
VK_LAUNCH_MAIL = 0xB4
|
||||
VK_LAUNCH_MEDIA_SELECT = 0xB5
|
||||
VK_LAUNCH_APP1 = 0xB6
|
||||
VK_LAUNCH_APP2 = 0xB7
|
||||
VK_OEM_1 = 0xBA
|
||||
VK_OEM_PLUS = 0xBB
|
||||
VK_OEM_COMMA = 0xBC
|
||||
VK_OEM_MINUS = 0xBD
|
||||
VK_OEM_PERIOD = 0xBE
|
||||
VK_OEM_2 = 0xBF
|
||||
VK_OEM_3 = 0xC0
|
||||
VK_OEM_4 = 0xDB
|
||||
VK_OEM_5 = 0xDC
|
||||
VK_OEM_6 = 0xDD
|
||||
VK_OEM_7 = 0xDE
|
||||
VK_OEM_8 = 0xDF
|
||||
VK_OEM_AX = 0xE1
|
||||
VK_OEM_102 = 0xE2
|
||||
VK_ICO_HELP = 0xE3
|
||||
VK_ICO_00 = 0xE4
|
||||
VK_PROCESSKEY = 0xE5
|
||||
VK_ICO_CLEAR = 0xE6
|
||||
VK_OEM_RESET = 0xE9
|
||||
VK_OEM_JUMP = 0xEA
|
||||
VK_OEM_PA1 = 0xEB
|
||||
VK_OEM_PA2 = 0xEC
|
||||
VK_OEM_PA3 = 0xED
|
||||
VK_OEM_WSCTRL = 0xEE
|
||||
VK_OEM_CUSEL = 0xEF
|
||||
VK_OEM_ATTN = 0xF0
|
||||
VK_OEM_FINISH = 0xF1
|
||||
VK_OEM_COPY = 0xF2
|
||||
VK_OEM_AUTO = 0xF3
|
||||
VK_OEM_ENLW = 0xF4
|
||||
VK_OEM_BACKTAB = 0xF5
|
||||
VK_ATTN = 0xF6
|
||||
VK_CRSEL = 0xF7
|
||||
VK_EXSEL = 0xF8
|
||||
VK_EREOF = 0xF9
|
||||
VK_PLAY = 0xFA
|
||||
VK_ZOOM = 0xFB
|
||||
VK_NONAME = 0xFC
|
||||
VK_PA1 = 0xFD
|
||||
VK_OEM_CLEAR = 0xFE
|
||||
)
|
||||
|
||||
// Mouse button constants.
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
|
||||
RIGHTMOST_BUTTON_PRESSED = 0x0002
|
||||
FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
|
||||
FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
|
||||
FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
|
||||
)
|
||||
|
||||
// Control key state constaints.
|
||||
// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
CAPSLOCK_ON = 0x0080
|
||||
ENHANCED_KEY = 0x0100
|
||||
LEFT_ALT_PRESSED = 0x0002
|
||||
LEFT_CTRL_PRESSED = 0x0008
|
||||
NUMLOCK_ON = 0x0020
|
||||
RIGHT_ALT_PRESSED = 0x0001
|
||||
RIGHT_CTRL_PRESSED = 0x0004
|
||||
SCROLLLOCK_ON = 0x0040
|
||||
SHIFT_PRESSED = 0x0010
|
||||
)
|
||||
|
||||
// Mouse event record event flags.
|
||||
// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
|
||||
const (
|
||||
MOUSE_MOVED = 0x0001
|
||||
DOUBLE_CLICK = 0x0002
|
||||
MOUSE_WHEELED = 0x0004
|
||||
MOUSE_HWHEELED = 0x0008
|
||||
)
|
||||
|
||||
// Input Record Event Types
|
||||
// https://learn.microsoft.com/en-us/windows/console/input-record-str
|
||||
const (
|
||||
FOCUS_EVENT = 0x0010
|
||||
KEY_EVENT = 0x0001
|
||||
MENU_EVENT = 0x0008
|
||||
MOUSE_EVENT = 0x0002
|
||||
WINDOW_BUFFER_SIZE_EVENT = 0x0004
|
||||
)
|
||||
|
|
|
|||
9 vendor/golang.org/x/sys/windows/zsyscall_windows.go generated vendored
|
|
@ -511,6 +511,7 @@ var (
|
|||
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
|
||||
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
|
||||
procWSACleanup = modws2_32.NewProc("WSACleanup")
|
||||
procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW")
|
||||
procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
|
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
|
||||
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
|
||||
|
|
@ -4391,6 +4392,14 @@ func WSACleanup() (err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
|
||||
r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
|
||||
if r1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
|
||||
r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
|
||||
n = int32(r0)
|
||||
|
|
|
|||
15 vendor/gorm.io/gorm/.golangci.yml generated vendored
|
|
@ -1,7 +1,9 @@
|
|||
version: "2"
|
||||
|
||||
linters:
|
||||
default: standard
|
||||
enable:
|
||||
- cyclop
|
||||
- exportloopref
|
||||
- gocritic
|
||||
- gosec
|
||||
- ineffassign
|
||||
|
|
@ -9,12 +11,9 @@ linters:
|
|||
- prealloc
|
||||
- unconvert
|
||||
- unparam
|
||||
- goimports
|
||||
- whitespace
|
||||
|
||||
linters-settings:
|
||||
whitespace:
|
||||
multi-func: true
|
||||
goimports:
|
||||
local-prefixes: gorm.io/gorm
|
||||
|
||||
formatters:
|
||||
enable:
|
||||
- gofumpt
|
||||
- goimports
|
||||
|
|
|
|||
128 vendor/gorm.io/gorm/CODE_OF_CONDUCT.md generated vendored Normal file
|
|
@ -0,0 +1,128 @@
|
|||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
We as members, contributors, and leaders pledge to participate in our
|
||||
community a harassment-free experience for everyone, regardless of age, body
|
||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||
identity and expression, level of experience, education, socio-economic status,
|
||||
nationality, personal appearance, race, religion, or sexual identity
|
||||
and orientation.
|
||||
|
||||
We pledge to act and interact in ways that contribute to an open, welcoming,
|
||||
diverse, inclusive, and healthy community.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community includes:
|
||||
|
||||
* Demonstrating empathy and kindness toward other people
|
||||
* Being respectful of differing opinions, viewpoints, and experiences
|
||||
* Giving and gracefully accepting constructive feedback
|
||||
* Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
* Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
* The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
* Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
Community leaders are responsible for clarifying and enforcing our standards of
|
||||
acceptable behavior and will take appropriate and fair corrective action in
|
||||
response to any behavior that they deem inappropriate, threatening, offensive,
|
||||
or harmful.
|
||||
|
||||
Community leaders have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions that are
|
||||
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
||||
decisions when appropriate.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies within all community spaces and also applies when
|
||||
an individual is officially representing the community in public spaces.
|
||||
Examples of representing our community include using an official e-mail address,
|
||||
posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported to the community leaders responsible for enforcement at
|
||||
.
|
||||
All complaints will be reviewed and investigated promptly and fairly.
|
||||
|
||||
All community leaders are obligated to respect the privacy and security of the
|
||||
reporter of any incident.
|
||||
|
||||
## Enforcement Guidelines
|
||||
|
||||
Community leaders will follow these Community Impact Guidelines in determining
|
||||
the consequences for any action they deem in violation of this Code of Conduct:
|
||||
|
||||
### 1. Correction
|
||||
|
||||
**Community Impact**: Use of inappropriate language or other behavior deemed
|
||||
unprofessional or unwelcome in the community.
|
||||
|
||||
**Consequence**: A private, written warning from community leaders, providing
|
||||
clarity around the nature of the violation and an explanation of why the
|
||||
behavior was inappropriate. A public apology may be requested.
|
||||
|
||||
### 2. Warning
|
||||
|
||||
**Community Impact**: A violation through a single incident or series
|
||||
of actions.
|
||||
|
||||
**Consequence**: A warning with consequences for continued behavior. No
|
||||
interaction with the people involved, including unsolicited interaction with
|
||||
those enforcing the Code of Conduct, for a specified period. This
|
||||
includes avoiding interactions in community spaces and external channels
|
||||
like social media. Violating these terms may lead to a temporary or
|
||||
permanent ban.
|
||||
|
||||
### 3. Temporary Ban
|
||||
|
||||
**Community Impact**: A serious violation of community standards, including
|
||||
sustained inappropriate behavior.
|
||||
|
||||
**Consequence**: A temporary ban from any interaction or public
|
||||
communication with the community for a specified period. No public or
|
||||
private interaction with the people involved, including unsolicited interaction
|
||||
with those enforcing the Code of Conduct, is allowed during this period.
|
||||
Violating these terms may lead to a permanent ban.
|
||||
|
||||
### 4. Permanent Ban
|
||||
|
||||
**Community Impact**: Demonstrating a pattern of violation of community
|
||||
standards, including sustained inappropriate behavior, harassment of an
|
||||
individual, or aggression toward or disparagement of classes of individuals.
|
||||
|
||||
**Consequence**: A permanent ban from any sort of public interaction within
|
||||
the community.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 2.0, available at
|
||||
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
||||
|
||||
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
||||
enforcement ladder](https://github.com/mozilla/diversity).
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
|
||||
For answers to common questions about this code of conduct, see the FAQ at
|
||||
https://www.contributor-covenant.org/faq. Translations are available at
|
||||
https://www.contributor-covenant.org/translations.
|
||||
2 vendor/gorm.io/gorm/LICENSE generated vendored
|
|
@ -1,6 +1,6 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013-NOW Jinzhu <wosmvp@gmail.com>
|
||||
Copyright (c) 2013-present Jinzhu <wosmvp@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
|
|
|||
10 vendor/gorm.io/gorm/callbacks/associations.go generated vendored
|
|
@ -47,7 +47,7 @@ func SaveBeforeAssociations(create bool) func(db *gorm.DB) {
|
|||
)
|
||||
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
|
|
@ -126,7 +126,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
)
|
||||
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
|
|
@ -195,7 +195,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
fieldType := rel.Field.IndirectFieldType.Elem()
|
||||
isPtr := fieldType.Kind() == reflect.Ptr
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
identityMap := map[string]bool{}
|
||||
|
|
@ -268,11 +268,11 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
|
|||
fieldType := rel.Field.IndirectFieldType.Elem()
|
||||
isPtr := fieldType.Kind() == reflect.Ptr
|
||||
if !isPtr {
|
||||
fieldType = reflect.PtrTo(fieldType)
|
||||
fieldType = reflect.PointerTo(fieldType)
|
||||
}
|
||||
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
distinctElems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
|
||||
joins := reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(rel.JoinTable.ModelType)), 0, 10)
|
||||
joins := reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(rel.JoinTable.ModelType)), 0, 10)
|
||||
objs := []reflect.Value{}
|
||||
|
||||
appendToJoins := func(obj reflect.Value, elem reflect.Value) {
|
||||
|
|
|
|||
9 vendor/gorm.io/gorm/clause/returning.go generated vendored
|
|
@ -26,9 +26,12 @@ func (returning Returning) Build(builder Builder) {
|
|||
|
||||
// MergeClause merge order by clauses
|
||||
func (returning Returning) MergeClause(clause *Clause) {
|
||||
if v, ok := clause.Expression.(Returning); ok {
|
||||
returning.Columns = append(v.Columns, returning.Columns...)
|
||||
if v, ok := clause.Expression.(Returning); ok && len(returning.Columns) > 0 {
|
||||
if v.Columns != nil {
|
||||
returning.Columns = append(v.Columns, returning.Columns...)
|
||||
} else {
|
||||
returning.Columns = nil
|
||||
}
|
||||
}
|
||||
|
||||
clause.Expression = returning
|
||||
}
|
||||
|
|
|
|||
6 vendor/gorm.io/gorm/finisher_api.go generated vendored
|
|
@ -4,6 +4,7 @@ import (
|
|||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/maphash"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
|
|
@ -623,14 +624,15 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
|
|||
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil {
|
||||
// nested transaction
|
||||
if !db.DisableNestedTransaction {
|
||||
err = db.SavePoint(fmt.Sprintf("sp%p", fc)).Error
|
||||
spID := new(maphash.Hash).Sum64()
|
||||
err = db.SavePoint(fmt.Sprintf("sp%d", spID)).Error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
// Make sure to rollback when panic, Block error or Commit error
|
||||
if panicked || err != nil {
|
||||
db.RollbackTo(fmt.Sprintf("sp%p", fc))
|
||||
db.RollbackTo(fmt.Sprintf("sp%d", spID))
|
||||
}
|
||||
}()
|
||||
}
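For reference, nested Transaction calls are what exercise these savepoints; a sketch assuming the sqlite driver and a hypothetical User model, neither of which is part of this diff:

```go
package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type User struct {
	ID   uint
	Name string
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	_ = db.AutoMigrate(&User{})

	// The inner Transaction runs on the outer tx, so it is implemented with
	// a savepoint; its name is now derived from a maphash value instead of
	// the closure's address.
	err = db.Transaction(func(tx *gorm.DB) error {
		if err := tx.Create(&User{Name: "outer"}).Error; err != nil {
			return err
		}
		return tx.Transaction(func(inner *gorm.DB) error {
			return inner.Create(&User{Name: "inner"}).Error
		})
	})
	if err != nil {
		panic(err)
	}
}
```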
|
||||
|
|
|
|||
16 vendor/gorm.io/gorm/gorm.go generated vendored
|
|
@ -34,6 +34,11 @@ type Config struct {
|
|||
DryRun bool
|
||||
// PrepareStmt executes the given query in cached statement
|
||||
PrepareStmt bool
|
||||
// PrepareStmt cache support LRU expired,
|
||||
// default maxsize=int64 Max value and ttl=1h
|
||||
PrepareStmtMaxSize int
|
||||
PrepareStmtTTL time.Duration
|
||||
|
||||
// DisableAutomaticPing
|
||||
DisableAutomaticPing bool
|
||||
// DisableForeignKeyConstraintWhenMigrating
|
||||
|
|
@ -183,16 +188,21 @@ func Open(dialector Dialector, opts ...Option) (db *DB, err error) {
|
|||
|
||||
if config.Dialector != nil {
|
||||
err = config.Dialector.Initialize(db)
|
||||
|
||||
if err != nil {
|
||||
if db, _ := db.DB(); db != nil {
|
||||
_ = db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if config.TranslateError {
|
||||
if _, ok := db.Dialector.(ErrorTranslator); !ok {
|
||||
config.Logger.Warn(context.Background(), "The TranslateError option is enabled, but the Dialector %s does not implement ErrorTranslator.", db.Dialector.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config.PrepareStmt {
|
||||
preparedStmt := NewPreparedStmtDB(db.ConnPool)
|
||||
preparedStmt := NewPreparedStmtDB(db.ConnPool, config.PrepareStmtMaxSize, config.PrepareStmtTTL)
|
||||
db.cacheStore.Store(preparedStmtDBKey, preparedStmt)
|
||||
db.ConnPool = preparedStmt
|
||||
}
|
||||
|
|
@ -263,7 +273,7 @@ func (db *DB) Session(config *Session) *DB {
|
|||
if v, ok := db.cacheStore.Load(preparedStmtDBKey); ok {
|
||||
preparedStmt = v.(*PreparedStmtDB)
|
||||
} else {
|
||||
preparedStmt = NewPreparedStmtDB(db.ConnPool)
|
||||
preparedStmt = NewPreparedStmtDB(db.ConnPool, db.PrepareStmtMaxSize, db.PrepareStmtTTL)
|
||||
db.cacheStore.Store(preparedStmtDBKey, preparedStmt)
|
||||
}
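The new Config fields from the hunk above, in use; the sqlite driver and the concrete numbers are assumptions for illustration:

```go
package main

import (
	"time"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

func main() {
	// Bound the prepared-statement cache instead of letting it grow without
	// limit: at most 512 cached statements, each kept for up to an hour.
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{
		PrepareStmt:        true,
		PrepareStmtMaxSize: 512,
		PrepareStmtTTL:     time.Hour,
	})
	if err != nil {
		panic(err)
	}
	_ = db
}
```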
|
||||
|
||||
|
|
|
|||
493 vendor/gorm.io/gorm/internal/lru/lru.go generated vendored Normal file
|
|
@ -0,0 +1,493 @@
|
|||
package lru
|
||||
|
||||
// golang -lru
|
||||
// https://github.com/hashicorp/golang-lru
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
type EvictCallback[K comparable, V any] func(key K, value V)
|
||||
|
||||
// LRU implements a thread-safe LRU with expirable entries.
|
||||
type LRU[K comparable, V any] struct {
|
||||
size int
|
||||
evictList *LruList[K, V]
|
||||
items map[K]*Entry[K, V]
|
||||
onEvict EvictCallback[K, V]
|
||||
|
||||
// expirable options
|
||||
mu sync.Mutex
|
||||
ttl time.Duration
|
||||
done chan struct{}
|
||||
|
||||
// buckets for expiration
|
||||
buckets []bucket[K, V]
|
||||
// uint8 because it's number between 0 and numBuckets
|
||||
nextCleanupBucket uint8
|
||||
}
|
||||
|
||||
// bucket is a container for holding entries to be expired
|
||||
type bucket[K comparable, V any] struct {
|
||||
entries map[K]*Entry[K, V]
|
||||
newestEntry time.Time
|
||||
}
|
||||
|
||||
// noEvictionTTL - very long ttl to prevent eviction
|
||||
const noEvictionTTL = time.Hour * 24 * 365 * 10
|
||||
|
||||
// because of uint8 usage for nextCleanupBucket, should not exceed 256.
|
||||
// casting it as uint8 explicitly requires type conversions in multiple places
|
||||
const numBuckets = 100
|
||||
|
||||
// NewLRU returns a new thread-safe cache with expirable entries.
|
||||
//
|
||||
// Size parameter set to 0 makes cache of unlimited size, e.g. turns LRU mechanism off.
|
||||
//
|
||||
// Providing 0 TTL turns expiring off.
|
||||
//
|
||||
// Delete expired entries every 1/100th of ttl value. Goroutine which deletes expired entries runs indefinitely.
|
||||
func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V], ttl time.Duration) *LRU[K, V] {
|
||||
if size < 0 {
|
||||
size = 0
|
||||
}
|
||||
if ttl <= 0 {
|
||||
ttl = noEvictionTTL
|
||||
}
|
||||
|
||||
res := LRU[K, V]{
|
||||
ttl: ttl,
|
||||
size: size,
|
||||
evictList: NewList[K, V](),
|
||||
items: make(map[K]*Entry[K, V]),
|
||||
onEvict: onEvict,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
// initialize the buckets
|
||||
res.buckets = make([]bucket[K, V], numBuckets)
|
||||
for i := 0; i < numBuckets; i++ {
|
||||
res.buckets[i] = bucket[K, V]{entries: make(map[K]*Entry[K, V])}
|
||||
}
|
||||
|
||||
// enable deleteExpired() running in separate goroutine for cache with non-zero TTL
|
||||
//
|
||||
// Important: done channel is never closed, so deleteExpired() goroutine will never exit,
|
||||
// it's decided to add functionality to close it in the version later than v2.
|
||||
if res.ttl != noEvictionTTL {
|
||||
go func(done <-chan struct{}) {
|
||||
ticker := time.NewTicker(res.ttl / numBuckets)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-ticker.C:
|
||||
res.deleteExpired()
|
||||
}
|
||||
}
|
||||
}(res.done)
|
||||
}
|
||||
return &res
|
||||
}
|
||||
|
||||
// Purge clears the cache completely.
|
||||
// onEvict is called for each evicted key.
|
||||
func (c *LRU[K, V]) Purge() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.Value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
for _, b := range c.buckets {
|
||||
for _, ent := range b.entries {
|
||||
delete(b.entries, ent.Key)
|
||||
}
|
||||
}
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
// Returns false if there was no eviction: the item was already in the cache,
|
||||
// or the size was not exceeded.
|
||||
func (c *LRU[K, V]) Add(key K, value V) (evicted bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
now := time.Now()
|
||||
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
c.removeFromBucket(ent) // remove the entry from its current bucket as expiresAt is renewed
|
||||
ent.Value = value
|
||||
ent.ExpiresAt = now.Add(c.ttl)
|
||||
c.addToBucket(ent)
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := c.evictList.PushFrontExpirable(key, value, now.Add(c.ttl))
|
||||
c.items[key] = ent
|
||||
c.addToBucket(ent) // adds the entry to the appropriate bucket and sets entry.expireBucket
|
||||
|
||||
evict := c.size > 0 && c.evictList.Length() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
}
|
||||
return evict
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU[K, V]) Get(key K) (value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var ent *Entry[K, V]
|
||||
if ent, ok = c.items[key]; ok {
|
||||
// Expired item check
|
||||
if time.Now().After(ent.ExpiresAt) {
|
||||
return value, false
|
||||
}
|
||||
c.evictList.MoveToFront(ent)
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
||||
// or deleting it for being stale.
|
||||
func (c *LRU[K, V]) Contains(key K) (ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
_, ok = c.items[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU[K, V]) Peek(key K) (value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var ent *Entry[K, V]
|
||||
if ent, ok = c.items[key]; ok {
|
||||
// Expired item check
|
||||
if time.Now().After(ent.ExpiresAt) {
|
||||
return value, false
|
||||
}
|
||||
return ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache, returning if the
|
||||
// key was contained.
|
||||
func (c *LRU[K, V]) Remove(key K) bool {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.removeElement(ent)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
return ent.Key, ent.Value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *LRU[K, V]) KeyValues() map[K]V {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
maps := make(map[K]V)
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
maps[ent.Key] = ent.Value
|
||||
// keys = append(keys, ent.Key)
|
||||
}
|
||||
return maps
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
// Expired entries are filtered out.
|
||||
func (c *LRU[K, V]) Keys() []K {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
keys := make([]K, 0, len(c.items))
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
keys = append(keys, ent.Key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Values returns a slice of the values in the cache, from oldest to newest.
|
||||
// Expired entries are filtered out.
|
||||
func (c *LRU[K, V]) Values() []V {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
values := make([]V, 0, len(c.items))
|
||||
now := time.Now()
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() {
|
||||
if now.After(ent.ExpiresAt) {
|
||||
continue
|
||||
}
|
||||
values = append(values, ent.Value)
|
||||
}
|
||||
return values
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU[K, V]) Len() int {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.evictList.Length()
|
||||
}
|
||||
|
||||
// Resize changes the cache size. Size of 0 means unlimited.
|
||||
func (c *LRU[K, V]) Resize(size int) (evicted int) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
if size <= 0 {
|
||||
c.size = 0
|
||||
return 0
|
||||
}
|
||||
diff := c.evictList.Length() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// Close destroys cleanup goroutine. To clean up the cache, run Purge() before Close().
|
||||
// func (c *LRU[K, V]) Close() {
|
||||
// c.mu.Lock()
|
||||
// defer c.mu.Unlock()
|
||||
// select {
|
||||
// case <-c.done:
|
||||
// return
|
||||
// default:
|
||||
// }
|
||||
// close(c.done)
|
||||
// }
|
||||
|
||||
// removeOldest removes the oldest item from the cache. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeOldest() {
|
||||
if ent := c.evictList.Back(); ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeElement(e *Entry[K, V]) {
|
||||
c.evictList.Remove(e)
|
||||
delete(c.items, e.Key)
|
||||
c.removeFromBucket(e)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(e.Key, e.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteExpired deletes expired records from the oldest bucket, waiting for the newest entry
|
||||
// in it to expire first.
|
||||
func (c *LRU[K, V]) deleteExpired() {
|
||||
c.mu.Lock()
|
||||
bucketIdx := c.nextCleanupBucket
|
||||
timeToExpire := time.Until(c.buckets[bucketIdx].newestEntry)
|
||||
// wait for newest entry to expire before cleanup without holding lock
|
||||
if timeToExpire > 0 {
|
||||
c.mu.Unlock()
|
||||
time.Sleep(timeToExpire)
|
||||
c.mu.Lock()
|
||||
}
|
||||
for _, ent := range c.buckets[bucketIdx].entries {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
c.nextCleanupBucket = (c.nextCleanupBucket + 1) % numBuckets
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// addToBucket adds entry to expire bucket so that it will be cleaned up when the time comes. Has to be called with lock!
|
||||
func (c *LRU[K, V]) addToBucket(e *Entry[K, V]) {
|
||||
bucketID := (numBuckets + c.nextCleanupBucket - 1) % numBuckets
|
||||
e.ExpireBucket = bucketID
|
||||
c.buckets[bucketID].entries[e.Key] = e
|
||||
if c.buckets[bucketID].newestEntry.Before(e.ExpiresAt) {
|
||||
c.buckets[bucketID].newestEntry = e.ExpiresAt
|
||||
}
|
||||
}
|
||||
|
||||
// removeFromBucket removes the entry from its corresponding bucket. Has to be called with lock!
|
||||
func (c *LRU[K, V]) removeFromBucket(e *Entry[K, V]) {
|
||||
delete(c.buckets[e.ExpireBucket].entries, e.Key)
|
||||
}
|
||||
|
||||
// Cap returns the capacity of the cache
|
||||
func (c *LRU[K, V]) Cap() int {
|
||||
return c.size
|
||||
}
|
||||
|
||||
// Entry is an LRU Entry
|
||||
type Entry[K comparable, V any] struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *Entry[K, V]
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *LruList[K, V]
|
||||
|
||||
// The LRU Key of this element.
|
||||
Key K
|
||||
|
||||
// The Value stored with this element.
|
||||
Value V
|
||||
|
||||
// The time this element would be cleaned up, optional
|
||||
ExpiresAt time.Time
|
||||
|
||||
// The expiry bucket item was put in, optional
|
||||
ExpireBucket uint8
|
||||
}
|
||||
|
||||
// PrevEntry returns the previous list element or nil.
|
||||
func (e *Entry[K, V]) PrevEntry() *Entry[K, V] {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LruList represents a doubly linked list.
|
||||
// The zero Value for LruList is an empty list ready to use.
|
||||
type LruList[K comparable, V any] struct {
|
||||
root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list Length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *LruList[K, V]) Init() *LruList[K, V] {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewList returns an initialized list.
|
||||
func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() }
|
||||
|
||||
// Length returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *LruList[K, V]) Length() int { return l.len }
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *LruList[K, V]) Back() *Entry[K, V] {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List Value.
|
||||
func (l *LruList[K, V]) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at).
|
||||
func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] {
|
||||
return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at)
|
||||
}
|
||||
|
||||
// Remove removes e from its list, decrements l.len
|
||||
func (l *LruList[K, V]) Remove(e *Entry[K, V]) V {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// move moves e to next to at.
|
||||
func (l *LruList[K, V]) move(e, at *Entry[K, V]) {
|
||||
if e == at {
|
||||
return
|
||||
}
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
|
||||
e.prev = at
|
||||
e.next = at.next
|
||||
e.prev.next = e
|
||||
e.next.prev = e
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, time.Time{}, &l.root)
|
||||
}
|
||||
|
||||
// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e.
|
||||
func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] {
|
||||
l.lazyInit()
|
||||
return l.insertValue(k, v, expiresAt, &l.root)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.move(e, &l.root)
|
||||
}
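A sketch of the cache's behaviour; note the package sits under gorm's internal/ tree, so this only compiles from inside the gorm module itself:

```go
package gormexample // hypothetical package inside the gorm module

import (
	"fmt"
	"time"

	"gorm.io/gorm/internal/lru"
)

func demo() {
	onEvict := func(key, value string) {
		fmt.Println("evicted:", key)
	}

	// Capacity of two entries, each expiring a minute after its last update.
	cache := lru.NewLRU[string, string](2, onEvict, time.Minute)

	cache.Add("a", "1")
	cache.Add("b", "2")
	cache.Add("c", "3") // evicts "a", the least recently used entry

	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v)
	}
}
```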
|
||||
183 vendor/gorm.io/gorm/internal/stmt_store/stmt_store.go generated vendored Normal file
|
|
@ -0,0 +1,183 @@
|
|||
package stmt_store

import (
    "context"
    "database/sql"
    "math"
    "sync"
    "time"

    "gorm.io/gorm/internal/lru"
)

type Stmt struct {
    *sql.Stmt
    Transaction bool
    prepared    chan struct{}
    prepareErr  error
}

func (stmt *Stmt) Error() error {
    return stmt.prepareErr
}

func (stmt *Stmt) Close() error {
    <-stmt.prepared

    if stmt.Stmt != nil {
        return stmt.Stmt.Close()
    }
    return nil
}

// Store defines an interface for managing the caching operations of SQL statements (Stmt).
// This interface provides methods for creating new statements, retrieving all cache keys,
// getting cached statements, setting cached statements, and deleting cached statements.
type Store interface {
    // New creates a new Stmt object and caches it.
    // Parameters:
    //   ctx: The context for the request, which can carry deadlines, cancellation signals, etc.
    //   key: The key representing the SQL query, used for caching and preparing the statement.
    //   isTransaction: Indicates whether this operation is part of a transaction, which may affect the caching strategy.
    //   connPool: A connection pool that provides database connections.
    //   locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
    // Returns:
    //   *Stmt: A newly created statement object for executing SQL operations.
    //   error: An error if the statement preparation fails.
    New(ctx context.Context, key string, isTransaction bool, connPool ConnPool, locker sync.Locker) (*Stmt, error)

    // Keys returns a slice of all cache keys in the store.
    Keys() []string

    // Get retrieves a Stmt object from the store based on the given key.
    // Parameters:
    //   key: The key used to look up the Stmt object.
    // Returns:
    //   *Stmt: The found Stmt object, or nil if not found.
    //   bool: Indicates whether the corresponding Stmt object was successfully found.
    Get(key string) (*Stmt, bool)

    // Set stores the given Stmt object in the store and associates it with the specified key.
    // Parameters:
    //   key: The key used to associate the Stmt object.
    //   value: The Stmt object to be stored.
    Set(key string, value *Stmt)

    // Delete removes the Stmt object corresponding to the specified key from the store.
    // Parameters:
    //   key: The key associated with the Stmt object to be deleted.
    Delete(key string)
}

// defaultMaxSize defines the default maximum capacity of the cache.
// Its value is the maximum value of the int64 type, which means that when the cache size is not specified,
// the cache can theoretically store as many elements as possible.
// (1 << 63) - 1 is the maximum value that an int64 type can represent.
const (
    defaultMaxSize = math.MaxInt
    // defaultTTL defines the default time-to-live (TTL) for each cache entry.
    // When the TTL for cache entries is not specified, each cache entry will expire after 24 hours.
    defaultTTL = time.Hour * 24
)

// New creates and returns a new Store instance.
//
// Parameters:
//   - size: The maximum capacity of the cache. If the provided size is less than or equal to 0,
//     it defaults to defaultMaxSize.
//   - ttl: The time-to-live duration for each cache entry. If the provided ttl is less than or equal to 0,
//     it defaults to defaultTTL.
//
// This function defines an onEvicted callback that is invoked when a cache entry is evicted.
// The callback ensures that if the evicted value (v) is not nil, its Close method is called asynchronously
// to release associated resources.
//
// Returns:
//   - A Store instance implemented by lruStore, which internally uses an LRU cache with the specified size,
//     eviction callback, and TTL.
func New(size int, ttl time.Duration) Store {
    if size <= 0 {
        size = defaultMaxSize
    }

    if ttl <= 0 {
        ttl = defaultTTL
    }

    onEvicted := func(k string, v *Stmt) {
        if v != nil {
            go v.Close()
        }
    }
    return &lruStore{lru: lru.NewLRU[string, *Stmt](size, onEvicted, ttl)}
}

type lruStore struct {
    lru *lru.LRU[string, *Stmt]
}

func (s *lruStore) Keys() []string {
    return s.lru.Keys()
}

func (s *lruStore) Get(key string) (*Stmt, bool) {
    stmt, ok := s.lru.Get(key)
    if ok && stmt != nil {
        <-stmt.prepared
    }
    return stmt, ok
}

func (s *lruStore) Set(key string, value *Stmt) {
    s.lru.Add(key, value)
}

func (s *lruStore) Delete(key string) {
    s.lru.Remove(key)
}

type ConnPool interface {
    PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
}

// New creates a new Stmt object for executing SQL queries.
// It caches the Stmt object for future use and handles preparation and error states.
// Parameters:
//
//   ctx: Context for the request, used to carry deadlines, cancellation signals, etc.
//   key: The key representing the SQL query, used for caching and preparing the statement.
//   isTransaction: Indicates whether this operation is part of a transaction, affecting cache strategy.
//   conn: A connection pool that provides database connections.
//   locker: A synchronization lock that is unlocked after initialization to avoid deadlocks.
//
// Returns:
//
//   *Stmt: A newly created statement object for executing SQL operations.
//   error: An error if the statement preparation fails.
func (s *lruStore) New(ctx context.Context, key string, isTransaction bool, conn ConnPool, locker sync.Locker) (_ *Stmt, err error) {
    // Create a Stmt object and set its Transaction property.
    // The prepared channel is used to synchronize the statement preparation state.
    cacheStmt := &Stmt{
        Transaction: isTransaction,
        prepared:    make(chan struct{}),
    }
    // Cache the Stmt object with the associated key.
    s.Set(key, cacheStmt)
    // Unlock after completing initialization to prevent deadlocks.
    locker.Unlock()

    // Ensure the prepared channel is closed after the function execution completes.
    defer close(cacheStmt.prepared)

    // Prepare the SQL statement using the provided connection.
    cacheStmt.Stmt, err = conn.PrepareContext(ctx, key)
    if err != nil {
        // If statement preparation fails, record the error and remove the invalid Stmt object from the cache.
        cacheStmt.prepareErr = err
        s.Delete(key)
        return &Stmt{}, err
    }

    // Return the successfully prepared Stmt object.
    return cacheStmt, nil
}
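The store pairs an expirable LRU with a "publish first, prepare later" handshake: the Stmt is cached before PrepareContext runs, and readers block on the prepared channel until the close broadcast tells them preparation succeeded or failed. A rough standalone sketch of that handshake, using illustrative local types rather than the vendored stmt_store API:

package main

import (
    "fmt"
    "sync"
)

// entry mirrors the prepared/prepareErr idea behind stmt_store.Stmt.
type entry struct {
    value      string
    prepared   chan struct{}
    prepareErr error
}

func main() {
    var mu sync.Mutex
    cache := map[string]*entry{}

    // Publish the placeholder first, before the slow "prepare" work runs.
    e := &entry{prepared: make(chan struct{})}
    mu.Lock()
    cache["SELECT 1"] = e
    mu.Unlock()

    go func() {
        defer close(e.prepared) // wakes every waiter, on success or failure
        e.value = "prepared statement handle"
    }()

    // A reader finds the placeholder, then blocks until preparation finishes.
    mu.Lock()
    got := cache["SELECT 1"]
    mu.Unlock()
    <-got.prepared
    if got.prepareErr != nil {
        fmt.Println("prepare failed:", got.prepareErr)
        return
    }
    fmt.Println(got.value)
}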
12 vendor/gorm.io/gorm/logger/logger.go generated vendored
@ -80,6 +80,11 @@ var (
    })
    // Recorder logger records running SQL into a recorder instance
    Recorder = traceRecorder{Interface: Default, BeginAt: time.Now()}

    // RecorderParamsFilter defaults to no-op, allows to be run-over by a different implementation
    RecorderParamsFilter = func(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
        return sql, params
    }
)

// New initialize logger
@ -211,3 +216,10 @@ func (l *traceRecorder) Trace(ctx context.Context, begin time.Time, fc func() (s
    l.SQL, l.RowsAffected = fc()
    l.Err = err
}

func (l *traceRecorder) ParamsFilter(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
    if RecorderParamsFilter == nil {
        return sql, params
    }
    return RecorderParamsFilter(ctx, sql, params...)
}
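The new RecorderParamsFilter hook lets an application rewrite the SQL and parameters that the trace recorder captures. A hedged example of installing a redacting filter against the exported variable shown above (the redaction itself is illustrative, not part of gorm):

package main

import (
    "context"

    "gorm.io/gorm/logger"
)

func main() {
    // Install a filter that hides parameter values before the recorder sees them.
    logger.RecorderParamsFilter = func(ctx context.Context, sql string, params ...interface{}) (string, []interface{}) {
        redacted := make([]interface{}, len(params))
        for i := range redacted {
            redacted[i] = "?"
        }
        return sql, redacted
    }
}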
4 vendor/gorm.io/gorm/migrator/migrator.go generated vendored
@ -524,8 +524,8 @@ func (m Migrator) MigrateColumn(value interface{}, field *schema.Field, columnTy

    // check nullable
    if nullable, ok := columnType.Nullable(); ok && nullable == field.NotNull {
        // not primary key & database is nullable
        if !field.PrimaryKey && nullable {
        // not primary key & current database is non-nullable(to be nullable)
        if !field.PrimaryKey && !nullable {
            alterColumn = true
        }
    }
144 vendor/gorm.io/gorm/prepare_stmt.go generated vendored
@ -7,29 +7,35 @@ import (
"errors"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm/internal/stmt_store"
|
||||
)
|
||||
|
||||
type Stmt struct {
|
||||
*sql.Stmt
|
||||
Transaction bool
|
||||
prepared chan struct{}
|
||||
prepareErr error
|
||||
}
|
||||
|
||||
type PreparedStmtDB struct {
|
||||
Stmts map[string]*Stmt
|
||||
Stmts stmt_store.Store
|
||||
Mux *sync.RWMutex
|
||||
ConnPool
|
||||
}
|
||||
|
||||
func NewPreparedStmtDB(connPool ConnPool) *PreparedStmtDB {
|
||||
// NewPreparedStmtDB creates and initializes a new instance of PreparedStmtDB.
|
||||
//
|
||||
// Parameters:
|
||||
// - connPool: A connection pool that implements the ConnPool interface, used for managing database connections.
|
||||
// - maxSize: The maximum number of prepared statements that can be stored in the statement store.
|
||||
// - ttl: The time-to-live duration for each prepared statement in the store. Statements older than this duration will be automatically removed.
|
||||
//
|
||||
// Returns:
|
||||
// - A pointer to a PreparedStmtDB instance, which manages prepared statements using the provided connection pool and configuration.
|
||||
func NewPreparedStmtDB(connPool ConnPool, maxSize int, ttl time.Duration) *PreparedStmtDB {
|
||||
return &PreparedStmtDB{
|
||||
ConnPool: connPool,
|
||||
Stmts: make(map[string]*Stmt),
|
||||
Mux: &sync.RWMutex{},
|
||||
ConnPool: connPool, // Assigns the provided connection pool to manage database connections.
|
||||
Stmts: stmt_store.New(maxSize, ttl), // Initializes a new statement store with the specified maximum size and TTL.
|
||||
Mux: &sync.RWMutex{}, // Sets up a read-write mutex for synchronizing access to the statement store.
|
||||
}
|
||||
}
|
||||
|
||||
// GetDBConn returns the underlying *sql.DB connection
|
||||
func (db *PreparedStmtDB) GetDBConn() (*sql.DB, error) {
|
||||
if sqldb, ok := db.ConnPool.(*sql.DB); ok {
|
||||
return sqldb, nil
|
||||
|
|
@ -42,98 +48,41 @@ func (db *PreparedStmtDB) GetDBConn() (*sql.DB, error) {
|
|||
    return nil, ErrInvalidDB
}

// Close closes all prepared statements in the store
func (db *PreparedStmtDB) Close() {
    db.Mux.Lock()
    defer db.Mux.Unlock()

    for _, stmt := range db.Stmts {
        go func(s *Stmt) {
            // make sure the stmt must finish preparation first
            <-s.prepared
            if s.Stmt != nil {
                _ = s.Close()
            }
        }(stmt)
    for _, key := range db.Stmts.Keys() {
        db.Stmts.Delete(key)
    }
    // setting db.Stmts to nil to avoid further using
    db.Stmts = nil
}

func (sdb *PreparedStmtDB) Reset() {
    sdb.Mux.Lock()
    defer sdb.Mux.Unlock()

    for _, stmt := range sdb.Stmts {
        go func(s *Stmt) {
            // make sure the stmt must finish preparation first
            <-s.prepared
            if s.Stmt != nil {
                _ = s.Close()
            }
        }(stmt)
    }
    sdb.Stmts = make(map[string]*Stmt)
// Reset Deprecated use Close instead
func (db *PreparedStmtDB) Reset() {
    db.Close()
}

func (db *PreparedStmtDB) prepare(ctx context.Context, conn ConnPool, isTransaction bool, query string) (Stmt, error) {
func (db *PreparedStmtDB) prepare(ctx context.Context, conn ConnPool, isTransaction bool, query string) (_ *stmt_store.Stmt, err error) {
    db.Mux.RLock()
    if stmt, ok := db.Stmts[query]; ok && (!stmt.Transaction || isTransaction) {
        db.Mux.RUnlock()
        // wait for other goroutines prepared
        <-stmt.prepared
        if stmt.prepareErr != nil {
            return Stmt{}, stmt.prepareErr
    if db.Stmts != nil {
        if stmt, ok := db.Stmts.Get(query); ok && (!stmt.Transaction || isTransaction) {
            db.Mux.RUnlock()
            return stmt, stmt.Error()
        }

        return *stmt, nil
    }
    db.Mux.RUnlock()

    // retry
    db.Mux.Lock()
    // double check
    if stmt, ok := db.Stmts[query]; ok && (!stmt.Transaction || isTransaction) {
        db.Mux.Unlock()
        // wait for other goroutines prepared
        <-stmt.prepared
        if stmt.prepareErr != nil {
            return Stmt{}, stmt.prepareErr
    if db.Stmts != nil {
        if stmt, ok := db.Stmts.Get(query); ok && (!stmt.Transaction || isTransaction) {
            db.Mux.Unlock()
            return stmt, stmt.Error()
        }

        return *stmt, nil
    }
    // check db.Stmts first to avoid Segmentation Fault(setting value to nil map)
    // which cause by calling Close and executing SQL concurrently
    if db.Stmts == nil {
        db.Mux.Unlock()
        return Stmt{}, ErrInvalidDB
    }
    // cache preparing stmt first
    cacheStmt := Stmt{Transaction: isTransaction, prepared: make(chan struct{})}
    db.Stmts[query] = &cacheStmt
    db.Mux.Unlock()

    // prepare completed
    defer close(cacheStmt.prepared)

    // Reason why cannot lock conn.PrepareContext
    // suppose the maxopen is 1, g1 is creating record and g2 is querying record.
    // 1. g1 begin tx, g1 is requeue because of waiting for the system call, now `db.ConnPool` db.numOpen == 1.
    // 2. g2 select lock `conn.PrepareContext(ctx, query)`, now db.numOpen == db.maxOpen , wait for release.
    // 3. g1 tx exec insert, wait for unlock `conn.PrepareContext(ctx, query)` to finish tx and release.
    stmt, err := conn.PrepareContext(ctx, query)
    if err != nil {
        cacheStmt.prepareErr = err
        db.Mux.Lock()
        delete(db.Stmts, query)
        db.Mux.Unlock()
        return Stmt{}, err
    }

    db.Mux.Lock()
    cacheStmt.Stmt = stmt
    db.Mux.Unlock()

    return cacheStmt, nil
    return db.Stmts.New(ctx, query, isTransaction, conn, db.Mux)
}
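The rewritten prepare keeps the usual double-checked locking shape: an optimistic lookup under a read lock, then a write lock with a second lookup before the store is asked to create the statement (gorm's version hands the still-held mutex to Stmts.New, which unlocks it once the placeholder is published). A condensed sketch of that shape with the statement cache abstracted to a plain map (names here are illustrative):

package main

import (
    "fmt"
    "sync"
)

type cache struct {
    mu sync.RWMutex
    m  map[string]string
}

// get returns a cached value, creating it at most once per key.
func (c *cache) get(key string, build func() string) string {
    // Fast path: shared lock, most calls end here.
    c.mu.RLock()
    if v, ok := c.m[key]; ok {
        c.mu.RUnlock()
        return v
    }
    c.mu.RUnlock()

    // Slow path: exclusive lock, re-check because another goroutine
    // may have created the value between the two lock acquisitions.
    c.mu.Lock()
    if v, ok := c.m[key]; ok {
        c.mu.Unlock()
        return v
    }
    v := build()
    c.m[key] = v
    c.mu.Unlock()
    return v
}

func main() {
    c := &cache{m: map[string]string{}}
    fmt.Println(c.get("SELECT 1", func() string { return "stmt#1" }))
    fmt.Println(c.get("SELECT 1", func() string { return "rebuilt?" })) // cached, still stmt#1
}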

func (db *PreparedStmtDB) BeginTx(ctx context.Context, opt *sql.TxOptions) (ConnPool, error) {
@ -162,10 +111,7 @@ func (db *PreparedStmtDB) ExecContext(ctx context.Context, query string, args ..
    if err == nil {
        result, err = stmt.ExecContext(ctx, args...)
        if errors.Is(err, driver.ErrBadConn) {
            db.Mux.Lock()
            defer db.Mux.Unlock()
            go stmt.Close()
            delete(db.Stmts, query)
            db.Stmts.Delete(query)
        }
    }
    return result, err
@ -176,11 +122,7 @@ func (db *PreparedStmtDB) QueryContext(ctx context.Context, query string, args .
    if err == nil {
        rows, err = stmt.QueryContext(ctx, args...)
        if errors.Is(err, driver.ErrBadConn) {
            db.Mux.Lock()
            defer db.Mux.Unlock()

            go stmt.Close()
            delete(db.Stmts, query)
            db.Stmts.Delete(query)
        }
    }
    return rows, err
@ -230,11 +172,7 @@ func (tx *PreparedStmtTX) ExecContext(ctx context.Context, query string, args ..
    if err == nil {
        result, err = tx.Tx.StmtContext(ctx, stmt.Stmt).ExecContext(ctx, args...)
        if errors.Is(err, driver.ErrBadConn) {
            tx.PreparedStmtDB.Mux.Lock()
            defer tx.PreparedStmtDB.Mux.Unlock()

            go stmt.Close()
            delete(tx.PreparedStmtDB.Stmts, query)
            tx.PreparedStmtDB.Stmts.Delete(query)
        }
    }
    return result, err
@ -245,11 +183,7 @@ func (tx *PreparedStmtTX) QueryContext(ctx context.Context, query string, args .
    if err == nil {
        rows, err = tx.Tx.StmtContext(ctx, stmt.Stmt).QueryContext(ctx, args...)
        if errors.Is(err, driver.ErrBadConn) {
            tx.PreparedStmtDB.Mux.Lock()
            defer tx.PreparedStmtDB.Mux.Unlock()

            go stmt.Close()
            delete(tx.PreparedStmtDB.Stmts, query)
            tx.PreparedStmtDB.Stmts.Delete(query)
        }
    }
    return rows, err
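Construction changes accordingly: callers now pass the cache bounds straight to NewPreparedStmtDB, and any statement that trips driver.ErrBadConn is simply dropped from the store, as the hunks above show. A hedged construction sketch against the (connPool, maxSize, ttl) signature from this diff; the driver, size, and TTL values are arbitrary:

package main

import (
    "database/sql"
    "log"
    "time"

    _ "github.com/mattn/go-sqlite3" // any registered database/sql driver will do
    "gorm.io/gorm"
)

func main() {
    sqlDB, err := sql.Open("sqlite3", "file:demo.db?mode=memory")
    if err != nil {
        log.Fatal(err)
    }
    defer sqlDB.Close()

    // Cache at most 256 prepared statements, each for up to one hour,
    // matching the (connPool, maxSize, ttl) signature introduced above.
    stmtDB := gorm.NewPreparedStmtDB(sqlDB, 256, time.Hour)
    defer stmtDB.Close()
}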
4 vendor/gorm.io/gorm/scan.go generated vendored
@ -15,7 +15,7 @@ func prepareValues(values []interface{}, db *DB, columnTypes []*sql.ColumnType,
    if db.Statement.Schema != nil {
        for idx, name := range columns {
            if field := db.Statement.Schema.LookUpField(name); field != nil {
                values[idx] = reflect.New(reflect.PtrTo(field.FieldType)).Interface()
                values[idx] = reflect.New(reflect.PointerTo(field.FieldType)).Interface()
                continue
            }
            values[idx] = new(interface{})
@ -23,7 +23,7 @@ func prepareValues(values []interface{}, db *DB, columnTypes []*sql.ColumnType,
    } else if len(columnTypes) > 0 {
        for idx, columnType := range columnTypes {
            if columnType.ScanType() != nil {
                values[idx] = reflect.New(reflect.PtrTo(columnType.ScanType())).Interface()
                values[idx] = reflect.New(reflect.PointerTo(columnType.ScanType())).Interface()
            } else {
                values[idx] = new(interface{})
            }
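reflect.PtrTo is the older spelling of reflect.PointerTo (added in Go 1.18 and now the preferred name), so these hunks change naming only, not behavior. A two-line check:

package main

import (
    "fmt"
    "reflect"
)

func main() {
    t := reflect.TypeOf(int64(0))
    // Both expressions yield the identical *int64 type descriptor.
    fmt.Println(reflect.PtrTo(t) == reflect.PointerTo(t)) // true
}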
2 vendor/gorm.io/gorm/schema/field.go generated vendored
@ -996,6 +996,6 @@ func (field *Field) setupNewValuePool() {
    }

    if field.NewValuePool == nil {
        field.NewValuePool = poolInitializer(reflect.PtrTo(field.IndirectFieldType))
        field.NewValuePool = poolInitializer(reflect.PointerTo(field.IndirectFieldType))
    }
}
31 vendor/gorm.io/gorm/schema/index.go generated vendored
@ -23,12 +23,13 @@ type IndexOption struct {
    Sort     string // DESC, ASC
    Collate  string
    Length   int
    priority int
    Priority int
}

// ParseIndexes parse schema indexes
func (schema *Schema) ParseIndexes() map[string]Index {
    indexes := map[string]Index{}
func (schema *Schema) ParseIndexes() []*Index {
    indexesByName := map[string]*Index{}
    indexes := []*Index{}

    for _, field := range schema.Fields {
        if field.TagSettings["INDEX"] != "" || field.TagSettings["UNIQUEINDEX"] != "" {
@ -38,7 +39,12 @@ func (schema *Schema) ParseIndexes() map[string]Index {
                break
            }
            for _, index := range fieldIndexes {
                idx := indexes[index.Name]
                idx := indexesByName[index.Name]
                if idx == nil {
                    idx = &Index{Name: index.Name}
                    indexesByName[index.Name] = idx
                    indexes = append(indexes, idx)
                }
                idx.Name = index.Name
                if idx.Class == "" {
                    idx.Class = index.Class
@ -58,10 +64,8 @@ func (schema *Schema) ParseIndexes() map[string]Index {

                idx.Fields = append(idx.Fields, index.Fields...)
                sort.Slice(idx.Fields, func(i, j int) bool {
                    return idx.Fields[i].priority < idx.Fields[j].priority
                    return idx.Fields[i].Priority < idx.Fields[j].Priority
                })

                indexes[index.Name] = idx
            }
        }
    }
@ -78,12 +82,12 @@ func (schema *Schema) LookIndex(name string) *Index {
    indexes := schema.ParseIndexes()
    for _, index := range indexes {
        if index.Name == name {
            return &index
            return index
        }

        for _, field := range index.Fields {
            if field.Name == name {
                return &index
                return index
            }
        }
    }
@ -111,17 +115,14 @@ func parseFieldIndexes(field *Field) (indexes []Index, err error) {
            idx = len(tag)
        }

        if idx != -1 {
            name = tag[0:idx]
        }

        name = tag[0:idx]
        if name == "" {
            subName := field.Name
            const key = "COMPOSITE"
            if composite, found := settings[key]; found {
                if len(composite) == 0 || composite == key {
                    err = fmt.Errorf(
                        "The composite tag of %s.%s cannot be empty",
                        "the composite tag of %s.%s cannot be empty",
                        field.Schema.Name,
                        field.Name)
                    return
@ -154,7 +155,7 @@ func parseFieldIndexes(field *Field) (indexes []Index, err error) {
                    Sort:     settings["SORT"],
                    Collate:  settings["COLLATE"],
                    Length:   length,
                    priority: priority,
                    Priority: priority,
                }},
            })
        }
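ParseIndexes now returns a slice built alongside a by-name map, so indexes come back in the order their fields declare them rather than in Go's randomized map-iteration order. A small standalone sketch of that collect-into-slice-plus-map pattern (the types are stand-ins, not gorm's):

package main

import "fmt"

type index struct {
    Name   string
    Fields []string
}

func main() {
    byName := map[string]*index{}
    ordered := []*index{}

    add := func(name, field string) {
        idx := byName[name]
        if idx == nil {
            idx = &index{Name: name}
            byName[name] = idx
            ordered = append(ordered, idx) // remembers first-seen order
        }
        idx.Fields = append(idx.Fields, field)
    }

    add("idx_users_name", "first_name")
    add("idx_users_email", "email")
    add("idx_users_name", "last_name")

    for _, idx := range ordered {
        fmt.Println(idx.Name, idx.Fields)
    }
    // idx_users_name [first_name last_name]
    // idx_users_email [email]
}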
6 vendor/gorm.io/gorm/schema/relationship.go generated vendored
@ -5,6 +5,7 @@ import (
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/jinzhu/inflection"
|
||||
"golang.org/x/text/cases"
|
||||
|
|
@ -32,6 +33,8 @@ type Relationships struct {
|
|||
Relations map[string]*Relationship
|
||||
|
||||
EmbeddedRelations map[string]*Relationships
|
||||
|
||||
Mux sync.RWMutex
|
||||
}
|
||||
|
||||
type Relationship struct {
|
||||
|
|
@ -98,9 +101,10 @@ func (schema *Schema) parseRelation(field *Field) *Relationship {
|
|||
}
|
||||
|
||||
if relation.Type == has {
|
||||
// don't add relations to embedded schema, which might be shared
|
||||
if relation.FieldSchema != relation.Schema && relation.Polymorphic == nil && field.OwnerSchema == nil {
|
||||
relation.FieldSchema.Relationships.Mux.Lock()
|
||||
relation.FieldSchema.Relationships.Relations["_"+relation.Schema.Name+"_"+relation.Name] = relation
|
||||
relation.FieldSchema.Relationships.Mux.Unlock()
|
||||
}
|
||||
|
||||
switch field.IndirectFieldType.Kind() {
2 vendor/gorm.io/gorm/schema/utils.go generated vendored
@ -71,7 +71,7 @@ func appendSettingFromTag(tag reflect.StructTag, value string) reflect.StructTag
// GetRelationsValues get relations's values from a reflect value
func GetRelationsValues(ctx context.Context, reflectValue reflect.Value, rels []*Relationship) (reflectResults reflect.Value) {
    for _, rel := range rels {
        reflectResults = reflect.MakeSlice(reflect.SliceOf(reflect.PtrTo(rel.FieldSchema.ModelType)), 0, 1)
        reflectResults = reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(rel.FieldSchema.ModelType)), 0, 1)

        appendToResults := func(value reflect.Value) {
            if _, isZero := rel.Field.ValueOf(ctx, value); !isZero {
39 vendor/modules.txt vendored
@ -211,12 +211,12 @@ github.com/prometheus/client_golang/prometheus/promhttp/internal
# github.com/prometheus/client_model v0.6.2
## explicit; go 1.22.0
github.com/prometheus/client_model/go
# github.com/prometheus/common v0.63.0
## explicit; go 1.21
# github.com/prometheus/common v0.64.0
## explicit; go 1.23.0
github.com/prometheus/common/expfmt
github.com/prometheus/common/model
# github.com/prometheus/procfs v0.16.0
## explicit; go 1.21
# github.com/prometheus/procfs v0.16.1
## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@ -255,14 +255,13 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore
## explicit; go 1.22.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
# go.opentelemetry.io/otel v1.35.0
## explicit; go 1.22.0
# go.opentelemetry.io/otel v1.36.0
## explicit; go 1.23.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/attribute/internal
go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal
go.opentelemetry.io/otel/internal/attribute
go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
@ -270,17 +269,17 @@ go.opentelemetry.io/otel/semconv/internal/v2
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
go.opentelemetry.io/otel/semconv/v1.26.0
# go.opentelemetry.io/otel/metric v1.35.0
## explicit; go 1.22.0
# go.opentelemetry.io/otel/metric v1.36.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
# go.opentelemetry.io/otel/trace v1.35.0
## explicit; go 1.22.0
# go.opentelemetry.io/otel/trace v1.36.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
go.opentelemetry.io/otel/trace/noop
# golang.org/x/crypto v0.37.0
# golang.org/x/crypto v0.38.0
## explicit; go 1.23.0
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
@ -289,23 +288,23 @@ golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
# golang.org/x/net v0.39.0
# golang.org/x/net v0.40.0
## explicit; go 1.23.0
golang.org/x/net/internal/socks
golang.org/x/net/proxy
# golang.org/x/oauth2 v0.29.0
# golang.org/x/oauth2 v0.30.0
## explicit; go 1.23.0
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.13.0
# golang.org/x/sync v0.14.0
## explicit; go 1.23.0
golang.org/x/sync/errgroup
# golang.org/x/sys v0.32.0
# golang.org/x/sys v0.33.0
## explicit; go 1.23.0
golang.org/x/sys/cpu
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/text v0.24.0
# golang.org/x/text v0.25.0
## explicit; go 1.23.0
golang.org/x/text/cases
golang.org/x/text/internal
@ -365,11 +364,13 @@ gorm.io/driver/mysql
# gorm.io/driver/sqlite v1.5.7
## explicit; go 1.20
gorm.io/driver/sqlite
# gorm.io/gorm v1.25.12
# gorm.io/gorm v1.26.1
## explicit; go 1.18
gorm.io/gorm
gorm.io/gorm/callbacks
gorm.io/gorm/clause
gorm.io/gorm/internal/lru
gorm.io/gorm/internal/stmt_store
gorm.io/gorm/logger
gorm.io/gorm/migrator
gorm.io/gorm/schema