Update dependencies

This change updates all dependencies.

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
This commit is contained in:
Gabriel Adrian Samfira 2025-02-24 07:59:10 +00:00
parent f2b43bac77
commit 5415121a70
289 changed files with 7700 additions and 3245 deletions

34
go.mod
View file

@ -1,12 +1,12 @@
module github.com/cloudbase/garm
go 1.22.0
go 1.23.0
toolchain go1.23.0
toolchain go1.23.6
require (
github.com/BurntSushi/toml v1.4.0
github.com/bradleyfalzon/ghinstallation/v2 v2.13.0
github.com/bradleyfalzon/ghinstallation/v2 v2.14.0
github.com/cloudbase/garm-provider-common v0.1.4
github.com/felixge/httpsnoop v1.0.4
github.com/go-openapi/errors v0.22.0
@ -25,10 +25,10 @@ require (
github.com/manifoldco/promptui v0.9.0
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.20.5
github.com/spf13/cobra v1.8.1
github.com/prometheus/client_golang v1.21.0
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.33.0
golang.org/x/crypto v0.34.0
golang.org/x/oauth2 v0.26.0
golang.org/x/sync v0.11.0
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
@ -54,9 +54,9 @@ require (
github.com/go-openapi/loads v0.22.0 // indirect
github.com/go-openapi/spec v0.21.0 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-sql-driver/mysql v1.8.1 // indirect
github.com/go-sql-driver/mysql v1.9.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.1 // indirect
github.com/google/go-github/v68 v68.0.0 // indirect
github.com/google/go-github/v69 v69.2.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
@ -65,7 +65,7 @@ require (
github.com/juju/errors v1.0.0 // indirect
github.com/juju/loggo v1.0.0 // indirect
github.com/juju/testing v1.0.2 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
@ -77,20 +77,20 @@ require (
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.61.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
go.mongodb.org/mongo-driver v1.17.1 // indirect
go.mongodb.org/mongo-driver v1.17.2 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
golang.org/x/net v0.33.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
google.golang.org/protobuf v1.36.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

62
go.sum
View file

@ -6,8 +6,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradleyfalzon/ghinstallation/v2 v2.13.0 h1:5FhjW93/YLQJDmPdeyMPw7IjAPzqsr+0jHPfrPz0sZI=
github.com/bradleyfalzon/ghinstallation/v2 v2.13.0/go.mod h1:EJ6fgedVEHa2kUyBTTvslJCXJafS/mhJNNKEOCspZXQ=
github.com/bradleyfalzon/ghinstallation/v2 v2.14.0 h1:0D4vKCHOvYrDU8u61TnE2JfNT4VRrBLphmxtqazTO+M=
github.com/bradleyfalzon/ghinstallation/v2 v2.14.0/go.mod h1:LOVmdZYVZ8jqdr4n9wWm1ocDiMz9IfMGfRkaYC1a52A=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -21,7 +21,7 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cloudbase/garm-provider-common v0.1.4 h1:spRjl0PV4r8vKaCTNp6xBQbRKfls/cmbBEl/i/eGWSo=
github.com/cloudbase/garm-provider-common v0.1.4/go.mod h1:sK26i2NpjjAjhanNKiWw8iPkqt+XeohTKpFnEP7JdZ4=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -53,8 +53,8 @@ github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-sql-driver/mysql v1.9.0 h1:Y0zIbQXhQKmQgTp44Y1dp3wTXcn804QoTptLZT1vtvo=
github.com/go-sql-driver/mysql v1.9.0/go.mod h1:pDetrLJeA3oMujJuvXc8RJoasr589B6A9fwzD3QMrqw=
github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
@ -68,8 +68,8 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github/v57 v57.0.0 h1:L+Y3UPTY8ALM8x+TV0lg+IEBI+upibemtBD8Q9u7zHs=
github.com/google/go-github/v57 v57.0.0/go.mod h1:s0omdnye0hvK/ecLvpsGfJMiRt85PimQh4oygmLIxHw=
github.com/google/go-github/v68 v68.0.0 h1:ZW57zeNZiXTdQ16qrDiZ0k6XucrxZ2CGmoTvcCyQG6s=
github.com/google/go-github/v68 v68.0.0/go.mod h1:K9HAUBovM2sLwM408A18h+wd9vqdLOEqTUCbnRIcx68=
github.com/google/go-github/v69 v69.2.0 h1:wR+Wi/fN2zdUx9YxSmYE0ktiX9IAR/BeePzeaUUbEHE=
github.com/google/go-github/v69 v69.2.0/go.mod h1:xne4jymxLR6Uj9b7J7PyTpkMYstEMMwGZa0Aehh1azM=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@ -111,8 +111,8 @@ github.com/juju/testing v1.0.2 h1:OR90RqCd9CJONxXamZAjLknpZdtqDyxqW8IwCbgw3i4=
github.com/juju/testing v1.0.2/go.mod h1:h3Vd2rzB57KrdsBEy6R7bmSKPzP76BnNavt7i8PerwQ=
github.com/juju/utils/v3 v3.0.0 h1:Gg3n63mGPbBuoXCo+EPJuMi44hGZfloI8nlCIebHu2Q=
github.com/juju/utils/v3 v3.0.0/go.mod h1:8csUcj1VRkfjNIRzBFWzLFCMLwLqsRWvkmhfVAUwbC4=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@ -151,12 +151,12 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.21.0 h1:DIsaGmiaBkSangBgMtWdNfxbMNdku5IK6iNhrEqWvdA=
github.com/prometheus/client_golang v1.21.0/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -165,10 +165,10 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
@ -178,22 +178,22 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
go.mongodb.org/mongo-driver v1.17.2 h1:gvZyk8352qSfzyZ2UMWcpDpMSGEr1eqE4T793SqyhzM=
go.mongodb.org/mongo-driver v1.17.2/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
golang.org/x/crypto v0.34.0 h1:+/C6tk6rf/+t5DhUketUbD1aNGqiSX3j15Z6xuIDlBA=
golang.org/x/crypto v0.34.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE=
golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
@ -206,8 +206,8 @@ golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY=
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View file

@ -0,0 +1,21 @@
run:
timeout: 10m
linters:
disable-all: true
enable:
- errcheck
- gocritic
- gocyclo
- gofmt
- goimports
- gosec
- gosimple
- govet
- ineffassign
- misspell
- promlinter
- revive
- staticcheck
- unconvert
- unused

View file

@ -13,7 +13,7 @@ import (
"sync"
"time"
"github.com/google/go-github/v68/github"
"github.com/google/go-github/v69/github"
)
const (

View file

@ -20,7 +20,10 @@ Andrew Reid <andrew.reid at tixtrack.com>
Animesh Ray <mail.rayanimesh at gmail.com>
Arne Hormann <arnehormann at gmail.com>
Ariel Mashraki <ariel at mashraki.co.il>
Artur Melanchyk <artur.melanchyk@gmail.com>
Asta Xie <xiemengjun at gmail.com>
B Lamarche <blam413 at gmail.com>
Bes Dollma <bdollma@thousandeyes.com>
Brian Hendriks <brian at dolthub.com>
Bulat Gaifullin <gaifullinbf at gmail.com>
Caine Jette <jette at alum.mit.edu>
@ -33,6 +36,7 @@ Daniel Montoya <dsmontoyam at gmail.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
Dave Protasowski <dprotaso at gmail.com>
Dirkjan Bussink <d.bussink at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
Erwan Martin <hello at erwan.io>
@ -50,6 +54,7 @@ ICHINOSE Shogo <shogo82148 at gmail.com>
Ilia Cimpoes <ichimpoesh at gmail.com>
INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
Jakub Adamus <kratky at zobak.cz>
James Harr <james.harr at gmail.com>
Janek Vedock <janekvedock at comcast.net>
Jason Ng <oblitorum at gmail.com>
@ -60,6 +65,7 @@ Jennifer Purevsuren <jennifer at dolthub.com>
Jerome Meyer <jxmeyer at gmail.com>
Jiajia Zhong <zhong2plus at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joe Mann <contact at joemann.co.uk>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
@ -80,6 +86,7 @@ Lunny Xiao <xiaolunwen at gmail.com>
Luke Scott <luke at webconnex.com>
Maciej Zimnoch <maciej.zimnoch at codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nao Yokotsuka <yokotukanao at gmail.com>
Nathanial Murphy <nathanial.murphy at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Oliver Bone <owbone at github.com>
@ -89,6 +96,7 @@ Paul Bonser <misterpib at gmail.com>
Paulius Lozys <pauliuslozys at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
Phil Porada <philporada at gmail.com>
Minh Quang <minhquang4334 at gmail.com>
Rebecca Chin <rchin at pivotal.io>
Reed Allman <rdallman10 at gmail.com>
Richard Wilkes <wilkes at me.com>
@ -139,4 +147,5 @@ PingCAP Inc.
Pivotal Inc.
Shattered Silicon Ltd.
Stripe Inc.
ThousandEyes
Zendesk Inc.

View file

@ -1,3 +1,28 @@
# Changelog
## v1.9.0 (2025-02-18)
### Major Changes
- Implement zlib compression. (#1487)
- Supported Go version is updated to Go 1.21+. (#1639)
- Add support for VECTOR type introduced in MySQL 9.0. (#1609)
- Config object can have custom dial function. (#1527)
### Bugfixes
- Fix auth errors when username/password are too long. (#1625)
- Check if MySQL supports CLIENT_CONNECT_ATTRS before sending client attributes. (#1640)
- Fix auth switch request handling. (#1666)
### Other changes
- Add "filename:line" prefix to log in go-mysql. Custom loggers now show it. (#1589)
- Improve error handling. It reduces the "busy buffer" errors. (#1595, #1601, #1641)
- Use `strconv.Atoi` to parse max_allowed_packet. (#1661)
- `rejectReadOnly` option now handles ER_READ_ONLY_MODE (1290) error too. (#1660)
## Version 1.8.1 (2024-03-26)
Bugfixes:

View file

@ -38,11 +38,12 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
* Supports zlib compression.
## Requirements
* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
* MySQL (5.7+) and MariaDB (10.3+) are supported.
* Go 1.21 or higher. We aim to support the 3 latest versions of Go.
* MySQL (5.7+) and MariaDB (10.5+) are supported.
* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
* Do not ask questions about TiDB in our issue tracker or forum.
* [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
@ -267,6 +268,16 @@ SELECT u.id FROM users as u
will return `u.id` instead of just `id` if `columnsWithAlias=true`.
##### `compress`
```
Type: bool
Valid Values: true, false
Default: false
```
Toggles zlib compression. false by default.
##### `interpolateParams`
```
@ -519,6 +530,9 @@ This driver supports the [`ColumnType` interface](https://golang.org/pkg/databas
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
> [!IMPORTANT]
> The `QueryContext`, `ExecContext`, etc. variants provided by `database/sql` will cause the connection to be closed if the provided context is cancelled or timed out before the result is received by the driver.
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):

View file

@ -1,19 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
//
// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
//go:build go1.19
// +build go1.19
package mysql
import "sync/atomic"
/******************************************************************************
* Sync utils *
******************************************************************************/
type atomicBool = atomic.Bool

View file

@ -1,47 +0,0 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
//
// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
//go:build !go1.19
// +build !go1.19
package mysql
import "sync/atomic"
/******************************************************************************
* Sync utils *
******************************************************************************/
// atomicBool is an implementation of atomic.Bool for older version of Go.
// it is a wrapper around uint32 for usage as a boolean value with
// atomic access.
type atomicBool struct {
_ noCopy
value uint32
}
// Load returns whether the current boolean value is true
func (ab *atomicBool) Load() bool {
return atomic.LoadUint32(&ab.value) > 0
}
// Store sets the value of the bool regardless of the previous value
func (ab *atomicBool) Store(value bool) {
if value {
atomic.StoreUint32(&ab.value, 1)
} else {
atomic.StoreUint32(&ab.value, 0)
}
}
// Swap sets the value of the bool and returns the old value.
func (ab *atomicBool) Swap(value bool) bool {
if value {
return atomic.SwapUint32(&ab.value, 1) > 0
}
return atomic.SwapUint32(&ab.value, 0) > 0
}

View file

@ -10,54 +10,42 @@ package mysql
import (
"io"
"net"
"time"
)
const defaultBufSize = 4096
const maxCachedBufSize = 256 * 1024
// readerFunc is a function that is compatible with io.Reader.
// We use this function type instead of io.Reader because we want to
// just pass mc.readWithTimeout.
type readerFunc func([]byte) (int, error)
// A buffer which is used for both reading and writing.
// This is possible since communication on each connection is synchronous.
// In other words, we can't write and read simultaneously on the same connection.
// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
// Also highly optimized for this particular use case.
// This buffer is backed by two byte slices in a double-buffering scheme
type buffer struct {
buf []byte // buf is a byte buffer whose length and capacity are equal.
nc net.Conn
idx int
length int
timeout time.Duration
dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
flipcnt uint // flipcnt is the current buffer counter for double-buffering
buf []byte // read buffer.
cachedBuf []byte // buffer that will be reused. len(cachedBuf) <= maxCachedBufSize.
}
// newBuffer allocates and returns a new buffer.
func newBuffer(nc net.Conn) buffer {
fg := make([]byte, defaultBufSize)
func newBuffer() buffer {
return buffer{
buf: fg,
nc: nc,
dbuf: [2][]byte{fg, nil},
cachedBuf: make([]byte, defaultBufSize),
}
}
// flip replaces the active buffer with the background buffer
// this is a delayed flip that simply increases the buffer counter;
// the actual flip will be performed the next time we call `buffer.fill`
func (b *buffer) flip() {
b.flipcnt += 1
// busy returns true if the read buffer is not empty.
func (b *buffer) busy() bool {
return len(b.buf) > 0
}
// fill reads into the buffer until at least _need_ bytes are in it
func (b *buffer) fill(need int) error {
n := b.length
// fill data into its double-buffering target: if we've called
// flip on this buffer, we'll be copying to the background buffer,
// and then filling it with network data; otherwise we'll just move
// the contents of the current buffer to the front before filling it
dest := b.dbuf[b.flipcnt&1]
// fill reads into the read buffer until at least _need_ bytes are in it.
func (b *buffer) fill(need int, r readerFunc) error {
// we'll move the contents of the current buffer to dest before filling it.
dest := b.cachedBuf
// grow buffer if necessary to fit the whole packet.
if need > len(dest) {
@ -67,64 +55,48 @@ func (b *buffer) fill(need int) error {
// if the allocated buffer is not too large, move it to backing storage
// to prevent extra allocations on applications that perform large reads
if len(dest) <= maxCachedBufSize {
b.dbuf[b.flipcnt&1] = dest
b.cachedBuf = dest
}
}
// if we're filling the fg buffer, move the existing data to the start of it.
// if we're filling the bg buffer, copy over the data
if n > 0 {
copy(dest[:n], b.buf[b.idx:])
}
b.buf = dest
b.idx = 0
// move the existing data to the start of the buffer.
n := len(b.buf)
copy(dest[:n], b.buf)
for {
if b.timeout > 0 {
if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
return err
}
}
nn, err := b.nc.Read(b.buf[n:])
nn, err := r(dest[n:])
n += nn
switch err {
case nil:
if n < need {
continue
}
b.length = n
return nil
case io.EOF:
if n >= need {
b.length = n
return nil
}
return io.ErrUnexpectedEOF
default:
return err
if err == nil && n < need {
continue
}
b.buf = dest[:n]
if err == io.EOF {
if n < need {
err = io.ErrUnexpectedEOF
} else {
err = nil
}
}
return err
}
}
// returns next N bytes from buffer.
// The returned slice is only guaranteed to be valid until the next read
func (b *buffer) readNext(need int) ([]byte, error) {
if b.length < need {
func (b *buffer) readNext(need int, r readerFunc) ([]byte, error) {
if len(b.buf) < need {
// refill
if err := b.fill(need); err != nil {
if err := b.fill(need, r); err != nil {
return nil, err
}
}
offset := b.idx
b.idx += need
b.length -= need
return b.buf[offset:b.idx], nil
data := b.buf[:need]
b.buf = b.buf[need:]
return data, nil
}
// takeBuffer returns a buffer with the requested size.
@ -132,18 +104,18 @@ func (b *buffer) readNext(need int) ([]byte, error) {
// Otherwise a bigger buffer is made.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
// test (cheap) general case first
if length <= cap(b.buf) {
return b.buf[:length], nil
if length <= len(b.cachedBuf) {
return b.cachedBuf[:length], nil
}
if length < maxPacketSize {
b.buf = make([]byte, length)
return b.buf, nil
if length < maxCachedBufSize {
b.cachedBuf = make([]byte, length)
return b.cachedBuf, nil
}
// buffer is larger than we want to store.
@ -154,10 +126,10 @@ func (b *buffer) takeBuffer(length int) ([]byte, error) {
// known to be smaller than defaultBufSize.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf[:length], nil
return b.cachedBuf[:length], nil
}
// takeCompleteBuffer returns the complete existing buffer.
@ -165,18 +137,15 @@ func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
// cap and len of the returned buffer will be equal.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() ([]byte, error) {
if b.length > 0 {
if b.busy() {
return nil, ErrBusyBuffer
}
return b.buf, nil
return b.cachedBuf, nil
}
// store stores buf, an updated buffer, if its suitable to do so.
func (b *buffer) store(buf []byte) error {
if b.length > 0 {
return ErrBusyBuffer
} else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
b.buf = buf[:cap(buf)]
func (b *buffer) store(buf []byte) {
if cap(buf) <= maxCachedBufSize && cap(buf) > cap(b.cachedBuf) {
b.cachedBuf = buf[:cap(buf)]
}
return nil
}

View file

@ -8,7 +8,7 @@
package mysql
const defaultCollation = "utf8mb4_general_ci"
const defaultCollationID = 45 // utf8mb4_general_ci
const binaryCollationID = 63
// A list of available collations mapped to the internal ID.

214
vendor/github.com/go-sql-driver/mysql/compress.go generated vendored Normal file
View file

@ -0,0 +1,214 @@
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
//
// Copyright 2024 The Go-MySQL-Driver Authors. All rights reserved.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
package mysql
import (
"bytes"
"compress/zlib"
"fmt"
"io"
"sync"
)
var (
zrPool *sync.Pool // Do not use directly. Use zDecompress() instead.
zwPool *sync.Pool // Do not use directly. Use zCompress() instead.
)
func init() {
zrPool = &sync.Pool{
New: func() any { return nil },
}
zwPool = &sync.Pool{
New: func() any {
zw, err := zlib.NewWriterLevel(new(bytes.Buffer), 2)
if err != nil {
panic(err) // compress/zlib return non-nil error only if level is invalid
}
return zw
},
}
}
func zDecompress(src []byte, dst *bytes.Buffer) (int, error) {
br := bytes.NewReader(src)
var zr io.ReadCloser
var err error
if a := zrPool.Get(); a == nil {
if zr, err = zlib.NewReader(br); err != nil {
return 0, err
}
} else {
zr = a.(io.ReadCloser)
if err := zr.(zlib.Resetter).Reset(br, nil); err != nil {
return 0, err
}
}
n, _ := dst.ReadFrom(zr) // ignore err because zr.Close() will return it again.
err = zr.Close() // zr.Close() may return checksum error.
zrPool.Put(zr)
return int(n), err
}
func zCompress(src []byte, dst io.Writer) error {
zw := zwPool.Get().(*zlib.Writer)
zw.Reset(dst)
if _, err := zw.Write(src); err != nil {
return err
}
err := zw.Close()
zwPool.Put(zw)
return err
}
type compIO struct {
mc *mysqlConn
buff bytes.Buffer
}
func newCompIO(mc *mysqlConn) *compIO {
return &compIO{
mc: mc,
}
}
func (c *compIO) reset() {
c.buff.Reset()
}
func (c *compIO) readNext(need int, r readerFunc) ([]byte, error) {
for c.buff.Len() < need {
if err := c.readCompressedPacket(r); err != nil {
return nil, err
}
}
data := c.buff.Next(need)
return data[:need:need], nil // prevent caller writes into c.buff
}
// readCompressedPacket reads one packet of the compressed protocol from r,
// decompresses the payload when necessary, and appends it to c.buff.
// It also advances the connection's sequence counters.
func (c *compIO) readCompressedPacket(r readerFunc) error {
	header, err := c.mc.buf.readNext(7, r) // size of compressed header
	if err != nil {
		return err
	}
	_ = header[6] // bounds check hint to compiler; guaranteed by readNext

	// compressed header structure:
	//   3 bytes  compressed payload length
	//   1 byte   compression sequence number
	//   3 bytes  uncompressed payload length (0 means payload is uncompressed)
	comprLength := getUint24(header[0:3])
	compressionSequence := uint8(header[3])
	uncompressedLength := getUint24(header[4:7])
	if debug {
		fmt.Printf("uncompress cmplen=%v uncomplen=%v pkt_cmp_seq=%v expected_cmp_seq=%v\n",
			comprLength, uncompressedLength, compressionSequence, c.mc.sequence)
	}
	// Do not return ErrPktSync here.
	// Server may return error packet (e.g. 1153 Got a packet bigger than 'max_allowed_packet' bytes)
	// before receiving all packets from client. In this case, seqnr is younger than expected.
	// NOTE: Both of mariadbclient and mysqlclient do not check seqnr. Only server checks it.
	if debug && compressionSequence != c.mc.sequence {
		// Fixed typo ("cmpress") and added missing trailing newline.
		fmt.Printf("WARN: unexpected compress seq nr: expected %v, got %v\n",
			c.mc.sequence, compressionSequence)
	}
	c.mc.sequence = compressionSequence + 1
	c.mc.compressSequence = c.mc.sequence

	comprData, err := c.mc.buf.readNext(comprLength, r)
	if err != nil {
		return err
	}

	// if payload is uncompressed, its length will be specified as zero, and its
	// true length is contained in comprLength
	if uncompressedLength == 0 {
		c.buff.Write(comprData)
		return nil
	}

	// use existing capacity in bytesBuf if possible
	c.buff.Grow(uncompressedLength)
	nread, err := zDecompress(comprData, &c.buff)
	if err != nil {
		return err
	}
	if nread != uncompressedLength {
		return fmt.Errorf("invalid compressed packet: uncompressed length in header is %d, actual %d",
			uncompressedLength, nread)
	}
	return nil
}
// minCompressLength is the smallest payload worth compressing; shorter
// payloads are sent uncompressed to avoid zlib overhead (see writePackets).
const minCompressLength = 150

// maxPayloadLen is the largest payload carried per packet; maxPacketSize
// minus the 4-byte packet header.
const maxPayloadLen = maxPacketSize - 4
// writePackets sends one or some packets with compression.
// Use this instead of mc.netConn.Write() when mc.compress is true.
// It splits packets into maxPayloadLen-sized chunks and, per chunk,
// either compresses the payload or sends it as-is when compression
// would not help. Returns the number of uncompressed bytes consumed.
func (c *compIO) writePackets(packets []byte) (int, error) {
	totalBytes := len(packets)
	blankHeader := make([]byte, 7)
	buf := &c.buff

	for len(packets) > 0 {
		payloadLen := min(maxPayloadLen, len(packets))
		payload := packets[:payloadLen]
		uncompressedLen := payloadLen

		buf.Reset()
		buf.Write(blankHeader) // Buffer.Write() never returns error

		// If payload is less than minCompressLength, don't compress.
		// uncompressedLen == 0 in the header marks the payload as uncompressed.
		if uncompressedLen < minCompressLength {
			buf.Write(payload)
			uncompressedLen = 0
		} else {
			err := zCompress(payload, buf)
			if debug && err != nil {
				fmt.Printf("zCompress error: %v", err)
			}
			// do not compress if compressed data is larger than uncompressed data
			// I intentionally miss 7 byte header in the buf; zCompress must compress more than 7 bytes.
			if err != nil || buf.Len() >= uncompressedLen {
				buf.Reset()
				buf.Write(blankHeader)
				buf.Write(payload)
				uncompressedLen = 0
			}
		}

		if n, err := c.writeCompressedPacket(buf.Bytes(), uncompressedLen); err != nil {
			// To allow returning ErrBadConn when sending really 0 bytes, we sum
			// up compressed bytes that is returned by underlying Write().
			return totalBytes - len(packets) + n, err
		}
		packets = packets[payloadLen:]
	}
	return totalBytes, nil
}
// writeCompressedPacket writes a compressed packet with header.
// data should start with 7 size space for header followed by payload.
// uncompressedLen is 0 when the payload is sent uncompressed.
func (c *compIO) writeCompressedPacket(data []byte, uncompressedLen int) (int, error) {
	mc := c.mc
	comprLength := len(data) - 7
	if debug {
		fmt.Printf(
			"writeCompressedPacket: comprLength=%v, uncompressedLen=%v, seq=%v",
			comprLength, uncompressedLen, mc.compressSequence)
	}

	// compression header: 3-byte compressed length, 1-byte sequence,
	// 3-byte uncompressed length — filled into the reserved 7-byte prefix.
	putUint24(data[0:3], comprLength)
	data[3] = mc.compressSequence
	putUint24(data[4:7], uncompressedLen)

	mc.compressSequence++
	return mc.writeWithTimeout(data)
}

View file

@ -13,10 +13,13 @@ import (
"database/sql"
"database/sql/driver"
"encoding/json"
"fmt"
"io"
"net"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
)
@ -25,15 +28,17 @@ type mysqlConn struct {
netConn net.Conn
rawConn net.Conn // underlying connection when netConn is TLS connection.
result mysqlResult // managed by clearResult() and handleOkPacket().
compIO *compIO
cfg *Config
connector *connector
maxAllowedPacket int
maxWriteSize int
writeTimeout time.Duration
flags clientFlag
status statusFlag
sequence uint8
compressSequence uint8
parseTime bool
compress bool
// for context support (Go 1.8+)
watching bool
@ -41,71 +46,92 @@ type mysqlConn struct {
closech chan struct{}
finished chan<- struct{}
canceled atomicError // set non-nil if conn is canceled
closed atomicBool // set when conn is closed, before closech is closed
closed atomic.Bool // set when conn is closed, before closech is closed
}
// Helper function to call per-connection logger.
func (mc *mysqlConn) log(v ...any) {
_, filename, lineno, ok := runtime.Caller(1)
if ok {
pos := strings.LastIndexByte(filename, '/')
if pos != -1 {
filename = filename[pos+1:]
}
prefix := fmt.Sprintf("%s:%d ", filename, lineno)
v = append([]any{prefix}, v...)
}
mc.cfg.Logger.Print(v...)
}
func (mc *mysqlConn) readWithTimeout(b []byte) (int, error) {
to := mc.cfg.ReadTimeout
if to > 0 {
if err := mc.netConn.SetReadDeadline(time.Now().Add(to)); err != nil {
return 0, err
}
}
return mc.netConn.Read(b)
}
func (mc *mysqlConn) writeWithTimeout(b []byte) (int, error) {
to := mc.cfg.WriteTimeout
if to > 0 {
if err := mc.netConn.SetWriteDeadline(time.Now().Add(to)); err != nil {
return 0, err
}
}
return mc.netConn.Write(b)
}
func (mc *mysqlConn) resetSequence() {
mc.sequence = 0
mc.compressSequence = 0
}
// syncSequence must be called when finished writing some packet and before start reading.
func (mc *mysqlConn) syncSequence() {
// Syncs compressionSequence to sequence.
// This is not documented but done in `net_flush()` in MySQL and MariaDB.
// https://github.com/mariadb-corporation/mariadb-connector-c/blob/8228164f850b12353da24df1b93a1e53cc5e85e9/libmariadb/ma_net.c#L170-L171
// https://github.com/mysql/mysql-server/blob/824e2b4064053f7daf17d7f3f84b7a3ed92e5fb4/sql-common/net_serv.cc#L293
if mc.compress {
mc.sequence = mc.compressSequence
mc.compIO.reset()
}
}
// Handles parameters set in DSN after the connection is established
func (mc *mysqlConn) handleParams() (err error) {
var cmdSet strings.Builder
for param, val := range mc.cfg.Params {
switch param {
// Charset: character_set_connection, character_set_client, character_set_results
case "charset":
charsets := strings.Split(val, ",")
for _, cs := range charsets {
// ignore errors here - a charset may not exist
if mc.cfg.Collation != "" {
err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
} else {
err = mc.exec("SET NAMES " + cs)
}
if err == nil {
break
}
}
if err != nil {
return
}
// Other system vars accumulated in a single SET command
default:
if cmdSet.Len() == 0 {
// Heuristic: 29 chars for each other key=value to reduce reallocations
cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
cmdSet.WriteString("SET ")
} else {
cmdSet.WriteString(", ")
}
cmdSet.WriteString(param)
cmdSet.WriteString(" = ")
cmdSet.WriteString(val)
if cmdSet.Len() == 0 {
// Heuristic: 29 chars for each other key=value to reduce reallocations
cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
cmdSet.WriteString("SET ")
} else {
cmdSet.WriteString(", ")
}
cmdSet.WriteString(param)
cmdSet.WriteString(" = ")
cmdSet.WriteString(val)
}
if cmdSet.Len() > 0 {
err = mc.exec(cmdSet.String())
if err != nil {
return
}
}
return
}
// markBadConn replaces errBadConnNoWrite with driver.ErrBadConn.
// This function is used to return driver.ErrBadConn only when safe to retry.
func (mc *mysqlConn) markBadConn(err error) error {
if mc == nil {
return err
if err == errBadConnNoWrite {
return driver.ErrBadConn
}
if err != errBadConnNoWrite {
return err
}
return driver.ErrBadConn
return err
}
func (mc *mysqlConn) Begin() (driver.Tx, error) {
@ -114,7 +140,6 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {
func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
if mc.closed.Load() {
mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
var q string
@ -135,10 +160,14 @@ func (mc *mysqlConn) Close() (err error) {
if !mc.closed.Load() {
err = mc.writeCommandPacket(comQuit)
}
mc.close()
return
}
// close closes the network connection and clear results without sending COM_QUIT.
func (mc *mysqlConn) close() {
mc.cleanup()
mc.clearResult()
return
}
// Closes the network connection and unsets internal variables. Do not call this
@ -157,7 +186,7 @@ func (mc *mysqlConn) cleanup() {
return
}
if err := conn.Close(); err != nil {
mc.log(err)
mc.log("closing connection:", err)
}
// This function can be called from multiple goroutines.
// So we can not mc.clearResult() here.
@ -176,7 +205,6 @@ func (mc *mysqlConn) error() error {
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
if mc.closed.Load() {
mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@ -217,8 +245,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
buf, err := mc.buf.takeCompleteBuffer()
if err != nil {
// can not take the buffer. Something must be wrong with the connection
mc.log(err)
return "", ErrInvalidConn
mc.cleanup()
// interpolateParams would be called before sending any query.
// So its safe to retry.
return "", driver.ErrBadConn
}
buf = buf[:0]
argPos := 0
@ -309,7 +339,6 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
if mc.closed.Load() {
mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@ -369,7 +398,6 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
handleOk := mc.clearResult()
if mc.closed.Load() {
mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
if len(args) != 0 {
@ -385,31 +413,34 @@ func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error)
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
if err == nil {
// Read Result
var resLen int
resLen, err = handleOk.readResultSetHeaderPacket()
if err == nil {
rows := new(textRows)
rows.mc = mc
if err != nil {
return nil, mc.markBadConn(err)
}
if resLen == 0 {
rows.rs.done = true
// Read Result
var resLen int
resLen, err = handleOk.readResultSetHeaderPacket()
if err != nil {
return nil, err
}
switch err := rows.NextResultSet(); err {
case nil, io.EOF:
return rows, nil
default:
return nil, err
}
}
rows := new(textRows)
rows.mc = mc
// Columns
rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
if resLen == 0 {
rows.rs.done = true
switch err := rows.NextResultSet(); err {
case nil, io.EOF:
return rows, nil
default:
return nil, err
}
}
return nil, mc.markBadConn(err)
// Columns
rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
// Gets the value of the given MySQL System Variable
@ -443,7 +474,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
return nil, err
}
// finish is called when the query has canceled.
// cancel is called when the query has canceled.
func (mc *mysqlConn) cancel(err error) {
mc.canceled.Set(err)
mc.cleanup()
@ -464,7 +495,6 @@ func (mc *mysqlConn) finish() {
// Ping implements driver.Pinger interface
func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
if mc.closed.Load() {
mc.log(ErrInvalidConn)
return driver.ErrBadConn
}
@ -650,7 +680,7 @@ func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
// ResetSession implements driver.SessionResetter.
// (From Go 1.10)
func (mc *mysqlConn) ResetSession(ctx context.Context) error {
if mc.closed.Load() {
if mc.closed.Load() || mc.buf.busy() {
return driver.ErrBadConn
}
@ -684,5 +714,8 @@ func (mc *mysqlConn) ResetSession(ctx context.Context) error {
// IsValid implements driver.Validator interface
// (From Go 1.15)
func (mc *mysqlConn) IsValid() bool {
return !mc.closed.Load()
return !mc.closed.Load() && !mc.buf.busy()
}
var _ driver.SessionResetter = &mysqlConn{}
var _ driver.Validator = &mysqlConn{}

View file

@ -11,6 +11,7 @@ package mysql
import (
"context"
"database/sql/driver"
"fmt"
"net"
"os"
"strconv"
@ -87,20 +88,25 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
mc.parseTime = mc.cfg.ParseTime
// Connect to Server
dialsLock.RLock()
dial, ok := dials[mc.cfg.Net]
dialsLock.RUnlock()
if ok {
dctx := ctx
if mc.cfg.Timeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
defer cancel()
}
mc.netConn, err = dial(dctx, mc.cfg.Addr)
dctx := ctx
if mc.cfg.Timeout > 0 {
var cancel context.CancelFunc
dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
defer cancel()
}
if c.cfg.DialFunc != nil {
mc.netConn, err = c.cfg.DialFunc(dctx, mc.cfg.Net, mc.cfg.Addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
dialsLock.RLock()
dial, ok := dials[mc.cfg.Net]
dialsLock.RUnlock()
if ok {
mc.netConn, err = dial(dctx, mc.cfg.Addr)
} else {
nd := net.Dialer{}
mc.netConn, err = nd.DialContext(dctx, mc.cfg.Net, mc.cfg.Addr)
}
}
if err != nil {
return nil, err
@ -122,11 +128,7 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
}
defer mc.finish()
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
mc.buf.timeout = mc.cfg.ReadTimeout
mc.writeTimeout = mc.cfg.WriteTimeout
mc.buf = newBuffer()
// Reading Handshake Initialization Packet
authData, plugin, err := mc.readHandshakePacket()
@ -165,6 +167,10 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
return nil, err
}
if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
mc.compress = true
mc.compIO = newCompIO(mc)
}
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
@ -174,12 +180,36 @@ func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
mc.Close()
return nil, err
}
mc.maxAllowedPacket = stringToInt(maxap) - 1
n, err := strconv.Atoi(string(maxap))
if err != nil {
mc.Close()
return nil, fmt.Errorf("invalid max_allowed_packet value (%q): %w", maxap, err)
}
mc.maxAllowedPacket = n - 1
}
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
// Charset: character_set_connection, character_set_client, character_set_results
if len(mc.cfg.charsets) > 0 {
for _, cs := range mc.cfg.charsets {
// ignore errors here - a charset may not exist
if mc.cfg.Collation != "" {
err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
} else {
err = mc.exec("SET NAMES " + cs)
}
if err == nil {
break
}
}
if err != nil {
mc.Close()
return nil, err
}
}
// Handle DSN Params
err = mc.handleParams()
if err != nil {

View file

@ -11,6 +11,8 @@ package mysql
import "runtime"
const (
debug = false // for debugging. Set true only in development.
defaultAuthPlugin = "mysql_native_password"
defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
minProtocolVersion = 10
@ -125,7 +127,10 @@ const (
fieldTypeBit
)
const (
fieldTypeJSON fieldType = iota + 0xf5
fieldTypeVector fieldType = iota + 0xf2
fieldTypeInvalid
fieldTypeBool
fieldTypeJSON
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet

View file

@ -44,7 +44,8 @@ type Config struct {
DBName string // Database name
Params map[string]string // Connection parameters
ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
Collation string // Connection collation
charsets []string // Connection charset. When set, this will be set in SET NAMES <charset> query
Collation string // Connection collation. When set, this will be set in SET NAMES <charset> COLLATE <collation> query
Loc *time.Location // Location for time.Time values
MaxAllowedPacket int // Max packet size allowed
ServerPubKey string // Server public key name
@ -54,6 +55,8 @@ type Config struct {
ReadTimeout time.Duration // I/O read timeout
WriteTimeout time.Duration // I/O write timeout
Logger Logger // Logger
// DialFunc specifies the dial function for creating connections
DialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
// boolean fields
@ -70,7 +73,10 @@ type Config struct {
ParseTime bool // Parse time values to time.Time
RejectReadOnly bool // Reject read-only connections
// unexported fields. new options should be come here
// unexported fields. new options should be come here.
// boolean first. alphabetical order.
compress bool // Enable zlib compression
beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
pubKey *rsa.PublicKey // Server public key
@ -90,7 +96,6 @@ func NewConfig() *Config {
AllowNativePasswords: true,
CheckConnLiveness: true,
}
return cfg
}
@ -122,6 +127,14 @@ func BeforeConnect(fn func(context.Context, *Config) error) Option {
}
}
// EnableCompress sets the compression mode.
func EnableCompression(yes bool) Option {
return func(cfg *Config) error {
cfg.compress = yes
return nil
}
}
func (cfg *Config) Clone() *Config {
cp := *cfg
if cp.TLS != nil {
@ -282,6 +295,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
}
if charsets := cfg.charsets; len(charsets) > 0 {
writeDSNParam(&buf, &hasParam, "charset", strings.Join(charsets, ","))
}
if col := cfg.Collation; col != "" {
writeDSNParam(&buf, &hasParam, "collation", col)
}
@ -290,6 +307,10 @@ func (cfg *Config) FormatDSN() string {
writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
}
if cfg.compress {
writeDSNParam(&buf, &hasParam, "compress", "true")
}
if cfg.InterpolateParams {
writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
}
@ -501,6 +522,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
// charset
case "charset":
cfg.charsets = strings.Split(value, ",")
// Collation
case "collation":
cfg.Collation = value
@ -514,7 +539,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// Compression
case "compress":
return errors.New("compression not implemented yet")
var isBool bool
cfg.compress, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
// Enable client side placeholder substitution
case "interpolateParams":

View file

@ -32,12 +32,12 @@ var (
// errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
// If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
// to trigger a resend.
// to trigger a resend. Use mc.markBadConn(err) to do this.
// See https://github.com/go-sql-driver/mysql/pull/302
errBadConnNoWrite = errors.New("bad connection")
)
var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime))
// Logger is used to log critical error messages.
type Logger interface {

View file

@ -112,6 +112,8 @@ func (mf *mysqlField) typeDatabaseName() string {
return "VARCHAR"
case fieldTypeYear:
return "YEAR"
case fieldTypeVector:
return "VECTOR"
default:
return ""
}
@ -198,7 +200,7 @@ func (mf *mysqlField) scanType() reflect.Type {
return scanTypeNullFloat
case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeVector:
if mf.charSet == binaryCollationID {
return scanTypeBytes
}

View file

@ -17,7 +17,7 @@ import (
)
var (
fileRegister map[string]bool
fileRegister map[string]struct{}
fileRegisterLock sync.RWMutex
readerRegister map[string]func() io.Reader
readerRegisterLock sync.RWMutex
@ -37,10 +37,10 @@ func RegisterLocalFile(filePath string) {
fileRegisterLock.Lock()
// lazy map init
if fileRegister == nil {
fileRegister = make(map[string]bool)
fileRegister = make(map[string]struct{})
}
fileRegister[strings.Trim(filePath, `"`)] = true
fileRegister[strings.Trim(filePath, `"`)] = struct{}{}
fileRegisterLock.Unlock()
}
@ -95,7 +95,6 @@ const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead a
func (mc *okHandler) handleInFileRequest(name string) (err error) {
var rdr io.Reader
var data []byte
packetSize := defaultPacketSize
if mc.maxWriteSize < packetSize {
packetSize = mc.maxWriteSize
@ -124,9 +123,9 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) {
} else { // File
name = strings.Trim(name, `"`)
fileRegisterLock.RLock()
fr := fileRegister[name]
_, exists := fileRegister[name]
fileRegisterLock.RUnlock()
if mc.cfg.AllowAllFiles || fr {
if mc.cfg.AllowAllFiles || exists {
var file *os.File
var fi os.FileInfo
@ -147,9 +146,11 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) {
}
// send content packets
var data []byte
// if packetSize == 0, the Reader contains no data
if err == nil && packetSize > 0 {
data := make([]byte, 4+packetSize)
data = make([]byte, 4+packetSize)
var n int
for err == nil {
n, err = rdr.Read(data[4:])
@ -171,6 +172,7 @@ func (mc *okHandler) handleInFileRequest(name string) (err error) {
if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
return ioErr
}
mc.conn().syncSequence()
// read OK packet
if err == nil {

View file

@ -21,36 +21,56 @@ import (
"time"
)
// Packets documentation:
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
// MySQL client/server protocol documentations.
// https://dev.mysql.com/doc/dev/mysql-server/latest/PAGE_PROTOCOL.html
// https://mariadb.com/kb/en/clientserver-protocol/
// Read packet to buffer 'data'
func (mc *mysqlConn) readPacket() ([]byte, error) {
var prevData []byte
invalidSequence := false
readNext := mc.buf.readNext
if mc.compress {
readNext = mc.compIO.readNext
}
for {
// read packet header
data, err := mc.buf.readNext(4)
data, err := readNext(4, mc.readWithTimeout)
if err != nil {
mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
// packet length [24 bit]
pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
pktLen := getUint24(data[:3])
seq := data[3]
// check packet sync [8 bit]
if data[3] != mc.sequence {
mc.Close()
if data[3] > mc.sequence {
return nil, ErrPktSyncMul
if mc.compress {
// MySQL and MariaDB doesn't check packet nr in compressed packet.
if debug && seq != mc.compressSequence {
fmt.Printf("[debug] mismatched compression sequence nr: expected: %v, got %v",
mc.compressSequence, seq)
}
return nil, ErrPktSync
mc.compressSequence = seq + 1
} else {
// check packet sync [8 bit]
if seq != mc.sequence {
mc.log(fmt.Sprintf("[warn] unexpected seq nr: expected %v, got %v", mc.sequence, seq))
// For large packets, we stop reading as soon as sync error.
if len(prevData) > 0 {
mc.close()
return nil, ErrPktSyncMul
}
invalidSequence = true
}
mc.sequence++
}
mc.sequence++
// packets with length 0 terminate a previous packet which is a
// multiple of (2^24)-1 bytes long
@ -58,32 +78,38 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// there was no previous packet
if prevData == nil {
mc.log(ErrMalformPkt)
mc.Close()
mc.close()
return nil, ErrInvalidConn
}
return prevData, nil
}
// read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
data, err = readNext(pktLen, mc.readWithTimeout)
if err != nil {
mc.close()
if cerr := mc.canceled.Value(); cerr != nil {
return nil, cerr
}
mc.log(err)
mc.Close()
return nil, ErrInvalidConn
}
// return data if this was the last packet
if pktLen < maxPacketSize {
// zero allocations for non-split packets
if prevData == nil {
return data, nil
if prevData != nil {
data = append(prevData, data...)
}
return append(prevData, data...), nil
if invalidSequence {
mc.close()
// return sync error only for regular packet.
// error packets may have wrong sequence number.
if data[0] != iERR {
return nil, ErrPktSync
}
}
return data, nil
}
prevData = append(prevData, data...)
@ -93,60 +119,52 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// Write packet buffer 'data'
func (mc *mysqlConn) writePacket(data []byte) error {
pktLen := len(data) - 4
if pktLen > mc.maxAllowedPacket {
return ErrPktTooLarge
}
writeFunc := mc.writeWithTimeout
if mc.compress {
writeFunc = mc.compIO.writePackets
}
for {
var size int
if pktLen >= maxPacketSize {
data[0] = 0xff
data[1] = 0xff
data[2] = 0xff
size = maxPacketSize
} else {
data[0] = byte(pktLen)
data[1] = byte(pktLen >> 8)
data[2] = byte(pktLen >> 16)
size = pktLen
}
size := min(maxPacketSize, pktLen)
putUint24(data[:3], size)
data[3] = mc.sequence
// Write packet
if mc.writeTimeout > 0 {
if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
return err
}
if debug {
fmt.Printf("writePacket: size=%v seq=%v", size, mc.sequence)
}
n, err := mc.netConn.Write(data[:4+size])
if err == nil && n == 4+size {
mc.sequence++
if size != maxPacketSize {
return nil
}
pktLen -= size
data = data[size:]
continue
}
// Handle error
if err == nil { // n != len(data)
n, err := writeFunc(data[:4+size])
if err != nil {
mc.cleanup()
mc.log(ErrMalformPkt)
} else {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
}
if n == 0 && pktLen == len(data)-4 {
// only for the first loop iteration when nothing was written yet
mc.log(err)
return errBadConnNoWrite
} else {
return err
}
mc.cleanup()
mc.log(err)
}
return ErrInvalidConn
if n != 4+size {
// io.Writer(b) must return a non-nil error if it cannot write len(b) bytes.
// The io.ErrShortWrite error is used to indicate that this rule has not been followed.
mc.cleanup()
return io.ErrShortWrite
}
mc.sequence++
if size != maxPacketSize {
return nil
}
pktLen -= size
data = data[size:]
}
}
@ -159,11 +177,6 @@ func (mc *mysqlConn) writePacket(data []byte) error {
func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
data, err = mc.readPacket()
if err != nil {
// for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
// in connection initialization we don't risk retrying non-idempotent actions.
if err == ErrInvalidConn {
return nil, "", driver.ErrBadConn
}
return
}
@ -207,10 +220,13 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro
if len(data) > pos {
// character set [1 byte]
// status flags [2 bytes]
pos += 3
// capability flags (upper 2 bytes) [2 bytes]
mc.flags |= clientFlag(binary.LittleEndian.Uint16(data[pos:pos+2])) << 16
pos += 2
// length of auth-plugin-data [1 byte]
// reserved (all [00]) [10 bytes]
pos += 1 + 2 + 2 + 1 + 10
pos += 11
// second part of the password cipher [minimum 13 bytes],
// where len=MAX(13, length of auth-plugin-data - 8)
@ -258,13 +274,17 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
clientLocalFiles |
clientPluginAuth |
clientMultiResults |
clientConnectAttrs |
mc.flags&clientConnectAttrs |
mc.flags&clientLongFlag
sendConnectAttrs := mc.flags&clientConnectAttrs != 0
if mc.cfg.ClientFoundRows {
clientFlags |= clientFoundRows
}
if mc.cfg.compress && mc.flags&clientCompress == clientCompress {
clientFlags |= clientCompress
}
// To enable TLS / SSL
if mc.cfg.TLS != nil {
clientFlags |= clientSSL
@ -293,43 +313,37 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
}
// encode length of the connection attributes
var connAttrsLEIBuf [9]byte
connAttrsLen := len(mc.connector.encodedAttributes)
connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
var connAttrsLEI []byte
if sendConnectAttrs {
var connAttrsLEIBuf [9]byte
connAttrsLen := len(mc.connector.encodedAttributes)
connAttrsLEI = appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
}
// Calculate packet length and get buffer with that size
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
mc.cleanup()
return err
}
// ClientFlags [32 bit]
data[4] = byte(clientFlags)
data[5] = byte(clientFlags >> 8)
data[6] = byte(clientFlags >> 16)
data[7] = byte(clientFlags >> 24)
binary.LittleEndian.PutUint32(data[4:], uint32(clientFlags))
// MaxPacketSize [32 bit] (none)
data[8] = 0x00
data[9] = 0x00
data[10] = 0x00
data[11] = 0x00
binary.LittleEndian.PutUint32(data[8:], 0)
// Collation ID [1 byte]
cname := mc.cfg.Collation
if cname == "" {
cname = defaultCollation
}
var found bool
data[12], found = collations[cname]
if !found {
// Note possibility for false negatives:
// could be triggered although the collation is valid if the
// collations map does not contain entries the server supports.
return fmt.Errorf("unknown collation: %q", cname)
data[12] = defaultCollationID
if cname := mc.cfg.Collation; cname != "" {
colID, ok := collations[cname]
if ok {
data[12] = colID
} else if len(mc.cfg.charsets) > 0 {
// When cfg.charset is set, the collation is set by `SET NAMES <charset> COLLATE <collation>`.
return fmt.Errorf("unknown collation: %q", cname)
}
}
// Filler [23 bytes] (all 0x00)
@ -349,10 +363,12 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// Switch to TLS
tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
if err := tlsConn.Handshake(); err != nil {
if cerr := mc.canceled.Value(); cerr != nil {
return cerr
}
return err
}
mc.netConn = tlsConn
mc.buf.nc = tlsConn
}
// User [null terminated string]
@ -378,8 +394,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
pos++
// Connection Attributes
pos += copy(data[pos:], connAttrsLEI)
pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
if sendConnectAttrs {
pos += copy(data[pos:], connAttrsLEI)
pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
}
// Send Auth packet
return mc.writePacket(data[:pos])
@ -388,11 +406,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
pktLen := 4 + len(authData)
data, err := mc.buf.takeSmallBuffer(pktLen)
data, err := mc.buf.takeBuffer(pktLen)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
mc.cleanup()
return err
}
// Add the auth data [EOF]
@ -406,13 +423,11 @@ func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
func (mc *mysqlConn) writeCommandPacket(command byte) error {
// Reset Packet Sequence
mc.sequence = 0
mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
return err
}
// Add command byte
@ -424,14 +439,12 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
// Reset Packet Sequence
mc.sequence = 0
mc.resetSequence()
pktLen := 1 + len(arg)
data, err := mc.buf.takeBuffer(pktLen + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
return err
}
// Add command byte
@ -441,28 +454,25 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
copy(data[5:], arg)
// Send CMD packet
return mc.writePacket(data)
err = mc.writePacket(data)
mc.syncSequence()
return err
}
func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
// Reset Packet Sequence
mc.sequence = 0
mc.resetSequence()
data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
return err
}
// Add command byte
data[4] = command
// Add arg [32 bit]
data[5] = byte(arg)
data[6] = byte(arg >> 8)
data[7] = byte(arg >> 16)
data[8] = byte(arg >> 24)
binary.LittleEndian.PutUint32(data[5:], arg)
// Send CMD packet
return mc.writePacket(data)
@ -500,6 +510,9 @@ func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
}
plugin := string(data[1:pluginEndIndex])
authData := data[pluginEndIndex+1:]
if len(authData) > 0 && authData[len(authData)-1] == 0 {
authData = authData[:len(authData)-1]
}
return authData, plugin, nil
default: // Error otherwise
@ -521,32 +534,33 @@ func (mc *okHandler) readResultOK() error {
}
// Result Set Header Packet
// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response.html
func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
// handleOkPacket replaces both values; other cases leave the values unchanged.
mc.result.affectedRows = append(mc.result.affectedRows, 0)
mc.result.insertIds = append(mc.result.insertIds, 0)
data, err := mc.conn().readPacket()
if err == nil {
switch data[0] {
case iOK:
return 0, mc.handleOkPacket(data)
case iERR:
return 0, mc.conn().handleErrorPacket(data)
case iLocalInFile:
return 0, mc.handleInFileRequest(string(data[1:]))
}
// column count
num, _, _ := readLengthEncodedInteger(data)
// ignore remaining data in the packet. see #1478.
return int(num), nil
if err != nil {
return 0, err
}
return 0, err
switch data[0] {
case iOK:
return 0, mc.handleOkPacket(data)
case iERR:
return 0, mc.conn().handleErrorPacket(data)
case iLocalInFile:
return 0, mc.handleInFileRequest(string(data[1:]))
}
// column count
// https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_text_resultset.html
num, _, _ := readLengthEncodedInteger(data)
// ignore remaining data in the packet. see #1478.
return int(num), nil
}
// Error Packet
@ -563,7 +577,8 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
// 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
// 1836: ER_READ_ONLY_MODE
if (errno == 1792 || errno == 1290 || errno == 1836) && mc.cfg.RejectReadOnly {
// Oops; we are connected to a read-only connection, and won't be able
// to issue any write statements. Since RejectReadOnly is configured,
// we throw away this connection hoping this one would have write
@ -930,19 +945,15 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
pktLen = dataOffset + argLen
}
stmt.mc.sequence = 0
stmt.mc.resetSequence()
// Add command byte [1 byte]
data[4] = comStmtSendLongData
// Add stmtID [32 bit]
data[5] = byte(stmt.id)
data[6] = byte(stmt.id >> 8)
data[7] = byte(stmt.id >> 16)
data[8] = byte(stmt.id >> 24)
binary.LittleEndian.PutUint32(data[5:], stmt.id)
// Add paramID [16 bit]
data[9] = byte(paramID)
data[10] = byte(paramID >> 8)
binary.LittleEndian.PutUint16(data[9:], uint16(paramID))
// Send CMD packet
err := stmt.mc.writePacket(data[:4+pktLen])
@ -951,11 +962,10 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
continue
}
return err
}
// Reset Packet Sequence
stmt.mc.sequence = 0
stmt.mc.resetSequence()
return nil
}
@ -980,7 +990,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
// Reset packet-sequence
mc.sequence = 0
mc.resetSequence()
var data []byte
var err error
@ -992,28 +1002,20 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In this case the len(data) == cap(data) which is used to optimise the flow below.
}
if err != nil {
// cannot take the buffer. Something must be wrong with the connection
mc.log(err)
return errBadConnNoWrite
return err
}
// command [1 byte]
data[4] = comStmtExecute
// statement_id [4 bytes]
data[5] = byte(stmt.id)
data[6] = byte(stmt.id >> 8)
data[7] = byte(stmt.id >> 16)
data[8] = byte(stmt.id >> 24)
binary.LittleEndian.PutUint32(data[5:], stmt.id)
// flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
data[9] = 0x00
// iteration_count (uint32(1)) [4 bytes]
data[10] = 0x01
data[11] = 0x00
data[12] = 0x00
data[13] = 0x00
binary.LittleEndian.PutUint32(data[10:], 1)
if len(args) > 0 {
pos := minPktLen
@ -1067,50 +1069,17 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case int64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
paramValues = paramValues[:len(paramValues)+8]
binary.LittleEndian.PutUint64(
paramValues[len(paramValues)-8:],
uint64(v),
)
} else {
paramValues = append(paramValues,
uint64ToBytes(uint64(v))...,
)
}
paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case uint64:
paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x80 // type is unsigned
if cap(paramValues)-len(paramValues)-8 >= 0 {
paramValues = paramValues[:len(paramValues)+8]
binary.LittleEndian.PutUint64(
paramValues[len(paramValues)-8:],
uint64(v),
)
} else {
paramValues = append(paramValues,
uint64ToBytes(uint64(v))...,
)
}
paramValues = binary.LittleEndian.AppendUint64(paramValues, uint64(v))
case float64:
paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
paramValues = paramValues[:len(paramValues)+8]
binary.LittleEndian.PutUint64(
paramValues[len(paramValues)-8:],
math.Float64bits(v),
)
} else {
paramValues = append(paramValues,
uint64ToBytes(math.Float64bits(v))...,
)
}
paramValues = binary.LittleEndian.AppendUint64(paramValues, math.Float64bits(v))
case bool:
paramTypes[i+i] = byte(fieldTypeTiny)
@ -1191,17 +1160,16 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// In that case we must build the data packet with the new values buffer
if valuesCap != cap(paramValues) {
data = append(data[:pos], paramValues...)
if err = mc.buf.store(data); err != nil {
mc.log(err)
return errBadConnNoWrite
}
mc.buf.store(data) // allow this buffer to be reused
}
pos += len(paramValues)
data = data[:pos]
}
return mc.writePacket(data)
err = mc.writePacket(data)
mc.syncSequence()
return err
}
// For each remaining resultset in the stream, discards its rows and updates
@ -1325,7 +1293,8 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
fieldTypeVector:
var isNull bool
var n int
dest[i], isNull, n, err = readLengthEncodedString(data[pos:])

View file

@ -111,13 +111,6 @@ func (rows *mysqlRows) Close() (err error) {
return err
}
// flip the buffer for this connection if we need to drain it.
// note that for a successful query (i.e. one where rows.next()
// has been called until it returns false), `rows.mc` will be nil
// by the time the user calls `(*Rows).Close`, so we won't reach this
// see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
mc.buf.flip()
// Remove unread packets from stream
if !rows.rs.done {
err = mc.readUntilEOF()

View file

@ -24,11 +24,12 @@ type mysqlStmt struct {
func (stmt *mysqlStmt) Close() error {
if stmt.mc == nil || stmt.mc.closed.Load() {
// driver.Stmt.Close can be called more than once, thus this function
// has to be idempotent.
// See also Issue #450 and golang/go#16019.
//errLog.Print(ErrInvalidConn)
return driver.ErrBadConn
// driver.Stmt.Close could be called more than once, thus this function
// had to be idempotent. See also Issue #450 and golang/go#16019.
// This bug has been fixed in Go 1.8.
// https://github.com/golang/go/commit/90b8a0ca2d0b565c7c7199ffcf77b15ea6b6db3a
// But we keep this function idempotent because it is safer.
return nil
}
err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
@ -51,7 +52,6 @@ func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
if stmt.mc.closed.Load() {
stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
@ -95,7 +95,6 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
if stmt.mc.closed.Load() {
stmt.mc.log(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command

View file

@ -490,17 +490,16 @@ func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
* Convert from and to bytes *
******************************************************************************/
func uint64ToBytes(n uint64) []byte {
return []byte{
byte(n),
byte(n >> 8),
byte(n >> 16),
byte(n >> 24),
byte(n >> 32),
byte(n >> 40),
byte(n >> 48),
byte(n >> 56),
}
// 24bit integer: used for packet headers.
func putUint24(data []byte, n int) {
data[2] = byte(n >> 16)
data[1] = byte(n >> 8)
data[0] = byte(n)
}
func getUint24(data []byte) int {
return int(data[2])<<16 | int(data[1])<<8 | int(data[0])
}
func uint64ToString(n uint64) []byte {
@ -525,16 +524,6 @@ func uint64ToString(n uint64) []byte {
return a[i:]
}
// treats string value as unsigned integer representation
func stringToInt(b []byte) int {
val := 0
for i := range b {
val *= 10
val += int(b[i] - 0x30)
}
return val
}
// returns the string read as a bytes slice, whether the value is NULL,
// the number of bytes read and an error, in case the string is longer than
// the input slice
@ -586,18 +575,15 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
// 252: value of following 2
case 0xfc:
return uint64(b[1]) | uint64(b[2])<<8, false, 3
return uint64(binary.LittleEndian.Uint16(b[1:])), false, 3
// 253: value of following 3
case 0xfd:
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
return uint64(getUint24(b[1:])), false, 4
// 254: value of following 8
case 0xfe:
return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
uint64(b[7])<<48 | uint64(b[8])<<56,
false, 9
return uint64(binary.LittleEndian.Uint64(b[1:])), false, 9
}
// 0-250: value of first byte
@ -611,13 +597,14 @@ func appendLengthEncodedInteger(b []byte, n uint64) []byte {
return append(b, byte(n))
case n <= 0xffff:
return append(b, 0xfc, byte(n), byte(n>>8))
b = append(b, 0xfc)
return binary.LittleEndian.AppendUint16(b, uint16(n))
case n <= 0xffffff:
return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
}
return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
b = append(b, 0xfe)
return binary.LittleEndian.AppendUint64(b, n)
}
func appendLengthEncodedString(b []byte, s string) []byte {

View file

@ -1,115 +0,0 @@
// Copyright 2023 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
)
// GetAllOrganizationRulesets gets all the rulesets for the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#get-all-organization-repository-rulesets
//
//meta:operation GET /orgs/{org}/rulesets
func (s *OrganizationsService) GetAllOrganizationRulesets(ctx context.Context, org string) ([]*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets", org)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var rulesets []*Ruleset
resp, err := s.client.Do(ctx, req, &rulesets)
if err != nil {
return nil, resp, err
}
return rulesets, resp, nil
}
// CreateOrganizationRuleset creates a ruleset for the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#create-an-organization-repository-ruleset
//
//meta:operation POST /orgs/{org}/rulesets
func (s *OrganizationsService) CreateOrganizationRuleset(ctx context.Context, org string, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets", org)
req, err := s.client.NewRequest("POST", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// GetOrganizationRuleset gets a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#get-an-organization-repository-ruleset
//
//meta:operation GET /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) GetOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// UpdateOrganizationRuleset updates a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#update-an-organization-repository-ruleset
//
//meta:operation PUT /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) UpdateOrganizationRuleset(ctx context.Context, org string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("PUT", u, rs)
if err != nil {
return nil, nil, err
}
var ruleset *Ruleset
resp, err := s.client.Do(ctx, req, &ruleset)
if err != nil {
return nil, resp, err
}
return ruleset, resp, nil
}
// DeleteOrganizationRuleset deletes a ruleset from the specified organization.
//
// GitHub API docs: https://docs.github.com/rest/orgs/rules#delete-an-organization-repository-ruleset
//
//meta:operation DELETE /orgs/{org}/rulesets/{ruleset_id}
func (s *OrganizationsService) DeleteOrganizationRuleset(ctx context.Context, org string, rulesetID int64) (*Response, error) {
u := fmt.Sprintf("orgs/%v/rulesets/%v", org, rulesetID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
return s.client.Do(ctx, req, nil)
}

View file

@ -1,995 +0,0 @@
// Copyright 2023 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"encoding/json"
"fmt"
)
// BypassActor represents the bypass actors from a ruleset.
type BypassActor struct {
ActorID *int64 `json:"actor_id,omitempty"`
// Possible values for ActorType are: RepositoryRole, Team, Integration, OrganizationAdmin
ActorType *string `json:"actor_type,omitempty"`
// Possible values for BypassMode are: always, pull_request
BypassMode *string `json:"bypass_mode,omitempty"`
}
// RulesetLink represents a single link object from GitHub ruleset request _links.
type RulesetLink struct {
HRef *string `json:"href,omitempty"`
}
// RulesetLinks represents the "_links" object in a Ruleset.
type RulesetLinks struct {
Self *RulesetLink `json:"self,omitempty"`
}
// RulesetRefConditionParameters represents the conditions object for ref_names.
type RulesetRefConditionParameters struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
}
// RulesetRepositoryNamesConditionParameters represents the conditions object for repository_names.
type RulesetRepositoryNamesConditionParameters struct {
Include []string `json:"include"`
Exclude []string `json:"exclude"`
Protected *bool `json:"protected,omitempty"`
}
// RulesetRepositoryIDsConditionParameters represents the conditions object for repository_ids.
type RulesetRepositoryIDsConditionParameters struct {
RepositoryIDs []int64 `json:"repository_ids,omitempty"`
}
// RulesetRepositoryPropertyTargetParameters represents a repository_property name and values to be used for targeting.
type RulesetRepositoryPropertyTargetParameters struct {
Name string `json:"name"`
Values []string `json:"property_values"`
Source *string `json:"source,omitempty"`
}
// RulesetRepositoryPropertyConditionParameters represents the conditions object for repository_property.
type RulesetRepositoryPropertyConditionParameters struct {
Include []RulesetRepositoryPropertyTargetParameters `json:"include"`
Exclude []RulesetRepositoryPropertyTargetParameters `json:"exclude"`
}
// RulesetConditions represents the conditions object in a ruleset.
// Set either RepositoryName or RepositoryID or RepositoryProperty, not more than one.
type RulesetConditions struct {
RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"`
RepositoryName *RulesetRepositoryNamesConditionParameters `json:"repository_name,omitempty"`
RepositoryID *RulesetRepositoryIDsConditionParameters `json:"repository_id,omitempty"`
RepositoryProperty *RulesetRepositoryPropertyConditionParameters `json:"repository_property,omitempty"`
}
// RulePatternParameters represents the rule pattern parameters.
type RulePatternParameters struct {
Name *string `json:"name,omitempty"`
// If Negate is true, the rule will fail if the pattern matches.
Negate *bool `json:"negate,omitempty"`
// Possible values for Operator are: starts_with, ends_with, contains, regex
Operator string `json:"operator"`
Pattern string `json:"pattern"`
}
// RuleFileParameters represents a list of file paths.
type RuleFileParameters struct {
RestrictedFilePaths *[]string `json:"restricted_file_paths"`
}
// RuleMaxFilePathLengthParameters represents the max_file_path_length rule parameters.
type RuleMaxFilePathLengthParameters struct {
MaxFilePathLength int `json:"max_file_path_length"`
}
// RuleFileExtensionRestrictionParameters represents the file_extension_restriction rule parameters.
type RuleFileExtensionRestrictionParameters struct {
RestrictedFileExtensions []string `json:"restricted_file_extensions"`
}
// RuleMaxFileSizeParameters represents the max_file_size rule parameters.
type RuleMaxFileSizeParameters struct {
MaxFileSize int64 `json:"max_file_size"`
}
// UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters.
type UpdateAllowsFetchAndMergeRuleParameters struct {
UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"`
}
// RequiredDeploymentEnvironmentsRuleParameters represents the required_deployments rule parameters.
type RequiredDeploymentEnvironmentsRuleParameters struct {
RequiredDeploymentEnvironments []string `json:"required_deployment_environments"`
}
// PullRequestRuleParameters represents the pull_request rule parameters.
type PullRequestRuleParameters struct {
DismissStaleReviewsOnPush bool `json:"dismiss_stale_reviews_on_push"`
RequireCodeOwnerReview bool `json:"require_code_owner_review"`
RequireLastPushApproval bool `json:"require_last_push_approval"`
RequiredApprovingReviewCount int `json:"required_approving_review_count"`
RequiredReviewThreadResolution bool `json:"required_review_thread_resolution"`
}
// RuleRequiredStatusChecks represents the RequiredStatusChecks for the RequiredStatusChecksRuleParameters object.
type RuleRequiredStatusChecks struct {
Context string `json:"context"`
IntegrationID *int64 `json:"integration_id,omitempty"`
}
// MergeQueueRuleParameters represents the merge_queue rule parameters.
type MergeQueueRuleParameters struct {
CheckResponseTimeoutMinutes int `json:"check_response_timeout_minutes"`
// Possible values for GroupingStrategy are: ALLGREEN, HEADGREEN
GroupingStrategy string `json:"grouping_strategy"`
MaxEntriesToBuild int `json:"max_entries_to_build"`
MaxEntriesToMerge int `json:"max_entries_to_merge"`
// Possible values for MergeMethod are: MERGE, SQUASH, REBASE
MergeMethod string `json:"merge_method"`
MinEntriesToMerge int `json:"min_entries_to_merge"`
MinEntriesToMergeWaitMinutes int `json:"min_entries_to_merge_wait_minutes"`
}
// RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters.
type RequiredStatusChecksRuleParameters struct {
DoNotEnforceOnCreate *bool `json:"do_not_enforce_on_create,omitempty"`
RequiredStatusChecks []RuleRequiredStatusChecks `json:"required_status_checks"`
StrictRequiredStatusChecksPolicy bool `json:"strict_required_status_checks_policy"`
}
// RuleRequiredWorkflow represents the Workflow for the RequireWorkflowsRuleParameters object.
type RuleRequiredWorkflow struct {
Path string `json:"path"`
Ref *string `json:"ref,omitempty"`
RepositoryID *int64 `json:"repository_id,omitempty"`
Sha *string `json:"sha,omitempty"`
}
// RequiredWorkflowsRuleParameters represents the workflows rule parameters.
type RequiredWorkflowsRuleParameters struct {
DoNotEnforceOnCreate bool `json:"do_not_enforce_on_create,omitempty"`
RequiredWorkflows []*RuleRequiredWorkflow `json:"workflows"`
}
// RuleRequiredCodeScanningTool represents a single required code-scanning tool for the RequiredCodeScanningParameters object.
type RuleRequiredCodeScanningTool struct {
AlertsThreshold string `json:"alerts_threshold"`
SecurityAlertsThreshold string `json:"security_alerts_threshold"`
Tool string `json:"tool"`
}
// RequiredCodeScanningRuleParameters represents the code_scanning rule parameters.
type RequiredCodeScanningRuleParameters struct {
RequiredCodeScanningTools []*RuleRequiredCodeScanningTool `json:"code_scanning_tools"`
}
// RepositoryRule represents a GitHub Rule.
type RepositoryRule struct {
Type string `json:"type"`
Parameters *json.RawMessage `json:"parameters,omitempty"`
RulesetSourceType string `json:"ruleset_source_type"`
RulesetSource string `json:"ruleset_source"`
RulesetID int64 `json:"ruleset_id"`
}
// RepositoryRulesetEditedChanges represents the changes made to a repository ruleset.
type RepositoryRulesetEditedChanges struct {
Name *RepositoryRulesetEditedSource `json:"name,omitempty"`
Enforcement *RepositoryRulesetEditedSource `json:"enforcement,omitempty"`
Conditions *RepositoryRulesetEditedConditions `json:"conditions,omitempty"`
Rules *RepositoryRulesetEditedRules `json:"rules,omitempty"`
}
// RepositoryRulesetEditedSource represents a source change for the ruleset.
type RepositoryRulesetEditedSource struct {
From *string `json:"from,omitempty"`
}
// RepositoryRulesetEditedSources represents multiple source changes for the ruleset.
type RepositoryRulesetEditedSources struct {
From []string `json:"from,omitempty"`
}
// RepositoryRulesetEditedConditions holds changes to conditions in a ruleset.
type RepositoryRulesetEditedConditions struct {
Added []*RepositoryRulesetRefCondition `json:"added,omitempty"`
Deleted []*RepositoryRulesetRefCondition `json:"deleted,omitempty"`
Updated []*RepositoryRulesetEditedUpdatedConditions `json:"updated,omitempty"`
}
// RepositoryRulesetEditedRules holds changes to rules in a ruleset.
type RepositoryRulesetEditedRules struct {
Added []*RepositoryRulesetRule `json:"added,omitempty"`
Deleted []*RepositoryRulesetRule `json:"deleted,omitempty"`
Updated []*RepositoryRulesetUpdatedRules `json:"updated,omitempty"`
}
// RepositoryRulesetRefCondition represents a reference condition for the ruleset.
type RepositoryRulesetRefCondition struct {
RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"`
}
// RepositoryRulesetEditedUpdatedConditions holds updates to conditions in a ruleset.
type RepositoryRulesetEditedUpdatedConditions struct {
Condition *RepositoryRulesetRefCondition `json:"condition,omitempty"`
Changes *RepositoryRulesetUpdatedConditionsEdited `json:"changes,omitempty"`
}
// RepositoryRulesetUpdatedConditionsEdited holds the edited updates to conditions in a ruleset.
type RepositoryRulesetUpdatedConditionsEdited struct {
ConditionType *RepositoryRulesetEditedSource `json:"condition_type,omitempty"`
Target *RepositoryRulesetEditedSource `json:"target,omitempty"`
Include *RepositoryRulesetEditedSources `json:"include,omitempty"`
Exclude *RepositoryRulesetEditedSources `json:"exclude,omitempty"`
}
// RepositoryRulesetUpdatedRules holds updates to rules in a ruleset.
type RepositoryRulesetUpdatedRules struct {
Rule *RepositoryRulesetRule `json:"rule,omitempty"`
Changes *RepositoryRulesetEditedRuleChanges `json:"changes,omitempty"`
}
// RepositoryRulesetEditedRuleChanges holds changes made to a rule in a ruleset.
type RepositoryRulesetEditedRuleChanges struct {
Configuration *RepositoryRulesetEditedSources `json:"configuration,omitempty"`
RuleType *RepositoryRulesetEditedSources `json:"rule_type,omitempty"`
Pattern *RepositoryRulesetEditedSources `json:"pattern,omitempty"`
}
// RepositoryRuleset represents the structure of a ruleset associated with a GitHub repository.
type RepositoryRuleset struct {
ID int64 `json:"id"`
Name string `json:"name"`
// Possible values for target: "branch", "tag", "push"
Target *string `json:"target,omitempty"`
// Possible values for source type: "Repository", "Organization"
SourceType *string `json:"source_type,omitempty"`
Source string `json:"source"`
// Possible values for enforcement: "disabled", "active", "evaluate"
Enforcement string `json:"enforcement"`
BypassActors []*BypassActor `json:"bypass_actors,omitempty"`
// Possible values for current user can bypass: "always", "pull_requests_only", "never"
CurrentUserCanBypass *string `json:"current_user_can_bypass,omitempty"`
NodeID *string `json:"node_id,omitempty"`
Links *RepositoryRulesetLink `json:"_links,omitempty"`
Conditions json.RawMessage `json:"conditions,omitempty"`
Rules []*RepositoryRulesetRule `json:"rules,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
}
// RepositoryRulesetRule represents individual rules which are present in a repository's ruleset.
type RepositoryRulesetRule struct {
Creation *RepositoryRulesetRuleType `json:"creation,omitempty"`
Update *RepositoryRulesetUpdateRule `json:"update,omitempty"`
Deletion *RepositoryRulesetRuleType `json:"deletion,omitempty"`
RequiredLinearHistory *RepositoryRulesetRuleType `json:"required_linear_history,omitempty"`
MergeQueue *RepositoryRulesetMergeQueueRule `json:"merge_queue,omitempty"`
RequiredDeployments *RepositoryRulesetRequiredDeploymentsRule `json:"required_deployments,omitempty"`
RequiredSignatures *RepositoryRulesetRuleType `json:"required_signatures,omitempty"`
PullRequest *RepositoryRulesetPullRequestRule `json:"pull_request,omitempty"`
RequiredStatusChecks *RepositoryRulesetRequiredStatusChecksRule `json:"required_status_checks,omitempty"`
NonFastForward *RepositoryRulesetRuleType `json:"non_fast_forward,omitempty"`
CommitMessagePattern *RepositoryRulesetPatternRule `json:"commit_message_pattern,omitempty"`
CommitAuthorEmailPattern *RepositoryRulesetPatternRule `json:"commit_author_email_pattern,omitempty"`
CommitterEmailPattern *RepositoryRulesetPatternRule `json:"committer_email_pattern,omitempty"`
BranchNamePattern *RepositoryRulesetPatternRule `json:"branch_name_pattern,omitempty"`
TagNamePattern *RepositoryRulesetPatternRule `json:"tag_name_pattern,omitempty"`
FilePathRestriction *RepositoryRulesetFilePathRestrictionRule `json:"file_path_restriction,omitempty"`
MaxFilePathLength *RepositoryRulesetMaxFilePathLengthRule `json:"max_file_path_length,omitempty"`
FileExtensionRestriction *RepositoryRulesetFileExtensionRestrictionRule `json:"file_extension_restriction,omitempty"`
MaxFileSize *RepositoryRulesetMaxFileSizeRule `json:"max_file_size,omitempty"`
Workflows *RepositoryRulesetWorkflowsRule `json:"workflows,omitempty"`
CodeScanning *RepositoryRulesetCodeScanningRule `json:"code_scanning,omitempty"`
}
// RepositoryRulesetLink represents Links associated with a repository's rulesets. These links are used to provide more information about the ruleset.
type RepositoryRulesetLink struct {
Self *RulesetLink `json:"self,omitempty"`
HTML *RulesetLink `json:"html,omitempty"`
}
// RepositoryRulesetRuleType represents the type of a ruleset rule.
type RepositoryRulesetRuleType struct {
Type string `json:"type"`
}
// RepositoryRulesetUpdateRule defines an update rule for the repository.
type RepositoryRulesetUpdateRule struct {
// Type can be one of: "update".
Type string `json:"type"`
Parameters *UpdateAllowsFetchAndMergeRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMergeQueueRule defines a merge queue rule for the repository.
type RepositoryRulesetMergeQueueRule struct {
// Type can be one of: "merge_queue".
Type string `json:"type"`
Parameters *MergeQueueRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetRequiredDeploymentsRule defines a rule for required deployments.
type RepositoryRulesetRequiredDeploymentsRule struct {
// Type can be one of: "required_deployments".
Type string `json:"type"`
Parameters *RequiredDeploymentEnvironmentsRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetPullRequestRule defines a rule for pull requests.
type RepositoryRulesetPullRequestRule struct {
// Type can be one of: "pull_request".
Type string `json:"type"`
Parameters *PullRequestRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetRequiredStatusChecksRule defines a rule for required status checks.
type RepositoryRulesetRequiredStatusChecksRule struct {
// Type can be one of: "required_status_checks".
Type string `json:"type"`
Parameters *RequiredStatusChecksRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetPatternRule defines a pattern rule for the repository.
type RepositoryRulesetPatternRule struct {
Type string `json:"type"`
Parameters *RulePatternParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetFilePathRestrictionRule defines a file path restriction rule for the repository.
type RepositoryRulesetFilePathRestrictionRule struct {
// Type can be one of: "file_path_restriction".
Type string `json:"type"`
Parameters *RuleFileParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMaxFilePathLengthRule defines a maximum file path length rule for the repository.
type RepositoryRulesetMaxFilePathLengthRule struct {
// Type can be one of: "max_file_path_length".
Type string `json:"type"`
Parameters *RuleMaxFilePathLengthParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetFileExtensionRestrictionRule defines a file extension restriction rule for the repository.
type RepositoryRulesetFileExtensionRestrictionRule struct {
// Type can be one of: "file_extension_restriction".
Type string `json:"type"`
Parameters *RuleFileExtensionRestrictionParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetMaxFileSizeRule defines a maximum file size rule for the repository.
type RepositoryRulesetMaxFileSizeRule struct {
// Type can be one of: "max_file_size".
Type string `json:"type"`
Parameters *RuleMaxFileSizeParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetWorkflowsRule defines a workflow rule for the repository.
type RepositoryRulesetWorkflowsRule struct {
// Type can be one of: "workflows".
Type string `json:"type"`
Parameters *RequiredWorkflowsRuleParameters `json:"parameters,omitempty"`
}
// RepositoryRulesetCodeScanningRule defines a code scanning rule for the repository.
type RepositoryRulesetCodeScanningRule struct {
// Type can be one of: "code_scanning".
Type string `json:"type"`
Parameters *RuleCodeScanningParameters `json:"parameters,omitempty"`
}
// RuleCodeScanningParameters defines parameters for code scanning rules.
type RuleCodeScanningParameters struct {
CodeScanningTools []*CodeScanningTool `json:"code_scanning_tools,omitempty"`
}
// CodeScanningTool defines a specific tool used for code scanning.
type CodeScanningTool struct {
AlertsThreshold string `json:"alerts_threshold"`
SecurityAlertsThreshold string `json:"security_alerts_threshold"`
Tool string `json:"tool"`
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// This helps us handle the fact that RepositoryRule parameter field can be of numerous types.
func (r *RepositoryRule) UnmarshalJSON(data []byte) error {
	type rule RepositoryRule
	var repositoryRule rule
	if err := json.Unmarshal(data, &repositoryRule); err != nil {
		return err
	}
	r.RulesetID = repositoryRule.RulesetID
	r.RulesetSourceType = repositoryRule.RulesetSourceType
	r.RulesetSource = repositoryRule.RulesetSource
	r.Type = repositoryRule.Type

	// Select the concrete parameter type for this rule type. Rule types that
	// carry no parameters leave params nil.
	var params any
	switch repositoryRule.Type {
	case "creation", "deletion", "non_fast_forward", "required_linear_history", "required_signatures":
		// These rule types have no parameters.
	case "update":
		params = &UpdateAllowsFetchAndMergeRuleParameters{}
	case "merge_queue":
		params = &MergeQueueRuleParameters{}
	case "required_deployments":
		params = &RequiredDeploymentEnvironmentsRuleParameters{}
	case "commit_message_pattern", "commit_author_email_pattern", "committer_email_pattern", "branch_name_pattern", "tag_name_pattern":
		params = &RulePatternParameters{}
	case "pull_request":
		params = &PullRequestRuleParameters{}
	case "required_status_checks":
		params = &RequiredStatusChecksRuleParameters{}
	case "workflows":
		params = &RequiredWorkflowsRuleParameters{}
	case "file_path_restriction":
		params = &RuleFileParameters{}
	case "code_scanning":
		params = &RequiredCodeScanningRuleParameters{}
	case "max_file_path_length":
		params = &RuleMaxFilePathLengthParameters{}
	case "file_extension_restriction":
		params = &RuleFileExtensionRestrictionParameters{}
	case "max_file_size":
		params = &RuleMaxFileSizeParameters{}
	default:
		r.Type = ""
		r.Parameters = nil
		return fmt.Errorf("RepositoryRule.Type %q is not yet implemented, unable to unmarshal (%#v)", repositoryRule.Type, repositoryRule)
	}

	// Parameter-less rule types, and rules whose "parameters" payload is
	// absent, normalize to a nil Parameters field. Guarding the nil payload
	// here (for every rule type, not just "update"/"merge_queue") avoids a
	// nil-pointer dereference on the unmarshal below.
	if params == nil || repositoryRule.Parameters == nil {
		r.Parameters = nil
		return nil
	}

	// Round-trip the payload through the concrete parameter type so that
	// unknown fields are dropped and the stored raw message is normalized.
	if err := json.Unmarshal(*repositoryRule.Parameters, params); err != nil {
		return err
	}
	bytes, err := json.Marshal(params)
	if err != nil {
		return err
	}
	rawParams := json.RawMessage(bytes)
	r.Parameters = &rawParams
	return nil
}
// NewMergeQueueRule creates a rule to only allow merges via a merge queue.
func NewMergeQueueRule(params *MergeQueueRuleParameters) (rule *RepositoryRule) {
	rule = &RepositoryRule{Type: "merge_queue"}
	if params == nil {
		return rule
	}
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	rule.Parameters = &raw
	return rule
}
// NewCreationRule creates a rule to only allow users with bypass permission to create matching refs.
func NewCreationRule() (rule *RepositoryRule) {
	return &RepositoryRule{Type: "creation"}
}
// NewUpdateRule creates a rule to only allow users with bypass permission to update matching refs.
func NewUpdateRule(params *UpdateAllowsFetchAndMergeRuleParameters) (rule *RepositoryRule) {
	rule = &RepositoryRule{Type: "update"}
	if params == nil {
		return rule
	}
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	rule.Parameters = &raw
	return rule
}
// NewDeletionRule creates a rule to only allow users with bypass permissions to delete matching refs.
func NewDeletionRule() (rule *RepositoryRule) {
	return &RepositoryRule{Type: "deletion"}
}
// NewRequiredLinearHistoryRule creates a rule to prevent merge commits from being pushed to matching branches.
func NewRequiredLinearHistoryRule() (rule *RepositoryRule) {
	return &RepositoryRule{Type: "required_linear_history"}
}
// NewRequiredDeploymentsRule creates a rule to require environments to be successfully deployed before they can be merged into the matching branches.
func NewRequiredDeploymentsRule(params *RequiredDeploymentEnvironmentsRuleParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params) // marshal error intentionally ignored, as in the sibling constructors
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "required_deployments", Parameters: &raw}
}
// NewRequiredSignaturesRule creates a rule a to require commits pushed to matching branches to have verified signatures.
func NewRequiredSignaturesRule() (rule *RepositoryRule) {
	return &RepositoryRule{Type: "required_signatures"}
}
// NewPullRequestRule creates a rule to require all commits be made to a non-target branch and submitted via a pull request before they can be merged.
func NewPullRequestRule(params *PullRequestRuleParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "pull_request", Parameters: &raw}
}
// NewRequiredStatusChecksRule creates a rule to require which status checks must pass before branches can be merged into a branch rule.
func NewRequiredStatusChecksRule(params *RequiredStatusChecksRuleParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "required_status_checks", Parameters: &raw}
}
// NewNonFastForwardRule creates a rule as part to prevent users with push access from force pushing to matching branches.
func NewNonFastForwardRule() (rule *RepositoryRule) {
	return &RepositoryRule{Type: "non_fast_forward"}
}
// NewCommitMessagePatternRule creates a rule to restrict commit message patterns being pushed to matching branches.
func NewCommitMessagePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "commit_message_pattern", Parameters: &raw}
}
// NewCommitAuthorEmailPatternRule creates a rule to restrict commits with author email patterns being merged into matching branches.
func NewCommitAuthorEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "commit_author_email_pattern", Parameters: &raw}
}
// NewCommitterEmailPatternRule creates a rule to restrict commits with committer email patterns being merged into matching branches.
func NewCommitterEmailPatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "committer_email_pattern", Parameters: &raw}
}
// NewBranchNamePatternRule creates a rule to restrict branch patterns from being merged into matching branches.
func NewBranchNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "branch_name_pattern", Parameters: &raw}
}
// NewTagNamePatternRule creates a rule to restrict tag patterns contained in non-target branches from being merged into matching branches.
func NewTagNamePatternRule(params *RulePatternParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "tag_name_pattern", Parameters: &raw}
}
// NewRequiredWorkflowsRule creates a rule to require which status checks must pass before branches can be merged into a branch rule.
func NewRequiredWorkflowsRule(params *RequiredWorkflowsRuleParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "workflows", Parameters: &raw}
}
// NewRequiredCodeScanningRule creates a rule to require which tools must provide code scanning results before the reference is updated.
func NewRequiredCodeScanningRule(params *RequiredCodeScanningRuleParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "code_scanning", Parameters: &raw}
}
// NewFilePathRestrictionRule creates a rule to restrict file paths from being pushed to.
func NewFilePathRestrictionRule(params *RuleFileParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "file_path_restriction", Parameters: &raw}
}
// NewMaxFilePathLengthRule creates a rule to restrict file paths longer than the limit from being pushed.
func NewMaxFilePathLengthRule(params *RuleMaxFilePathLengthParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "max_file_path_length", Parameters: &raw}
}
// NewFileExtensionRestrictionRule creates a rule to restrict file extensions from being pushed to a commit.
func NewFileExtensionRestrictionRule(params *RuleFileExtensionRestrictionParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "file_extension_restriction", Parameters: &raw}
}
// NewMaxFileSizeRule creates a rule to restrict file sizes from being pushed to a commit.
func NewMaxFileSizeRule(params *RuleMaxFileSizeParameters) (rule *RepositoryRule) {
	b, _ := json.Marshal(params)
	raw := json.RawMessage(b)
	return &RepositoryRule{Type: "max_file_size", Parameters: &raw}
}
// Ruleset represents a GitHub ruleset object.
type Ruleset struct {
	ID *int64 `json:"id,omitempty"`
	Name string `json:"name"`
	// Possible values for Target are branch, tag, push
	Target *string `json:"target,omitempty"`
	// Possible values for SourceType are: Repository, Organization
	SourceType *string `json:"source_type,omitempty"`
	Source string `json:"source"`
	// Possible values for Enforcement are: disabled, active, evaluate
	Enforcement string `json:"enforcement"`
	// BypassActors lists actors that may bypass the ruleset; omitted from JSON when empty.
	BypassActors []*BypassActor `json:"bypass_actors,omitempty"`
	NodeID *string `json:"node_id,omitempty"`
	Links *RulesetLinks `json:"_links,omitempty"`
	Conditions *RulesetConditions `json:"conditions,omitempty"`
	// Rules holds the individual rules that make up this ruleset.
	Rules []*RepositoryRule `json:"rules,omitempty"`
	UpdatedAt *Timestamp `json:"updated_at,omitempty"`
	CreatedAt *Timestamp `json:"created_at,omitempty"`
}
// rulesetNoOmitBypassActors represents a GitHub ruleset object. The struct does not omit bypassActors if the field is nil or an empty array is passed.
// It is used by UpdateRulesetNoBypassActor so that a nil/empty bypass actor list is sent explicitly.
type rulesetNoOmitBypassActors struct {
	ID *int64 `json:"id,omitempty"`
	Name string `json:"name"`
	// Possible values for Target are branch, tag
	Target *string `json:"target,omitempty"`
	// Possible values for SourceType are: Repository, Organization
	SourceType *string `json:"source_type,omitempty"`
	Source string `json:"source"`
	// Possible values for Enforcement are: disabled, active, evaluate
	Enforcement string `json:"enforcement"`
	// BypassActors deliberately has no omitempty tag: nil/empty serializes as "bypass_actors": null/[].
	BypassActors []*BypassActor `json:"bypass_actors"`
	NodeID *string `json:"node_id,omitempty"`
	Links *RulesetLinks `json:"_links,omitempty"`
	Conditions *RulesetConditions `json:"conditions,omitempty"`
	Rules []*RepositoryRule `json:"rules,omitempty"`
}
// GetRulesForBranch gets all the rules that apply to the specified branch.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-rules-for-a-branch
//
//meta:operation GET /repos/{owner}/{repo}/rules/branches/{branch}
func (s *RepositoriesService) GetRulesForBranch(ctx context.Context, owner, repo, branch string) ([]*RepositoryRule, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rules/branches/%v", owner, repo, branch)

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	var branchRules []*RepositoryRule
	resp, err := s.client.Do(ctx, req, &branchRules)
	if err != nil {
		return nil, resp, err
	}

	return branchRules, resp, nil
}
// GetAllRulesets gets all the rules that apply to the specified repository.
// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-all-repository-rulesets
//
//meta:operation GET /repos/{owner}/{repo}/rulesets
func (s *RepositoriesService) GetAllRulesets(ctx context.Context, owner, repo string, includesParents bool) ([]*Ruleset, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rulesets?includes_parents=%v", owner, repo, includesParents)

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	var rulesets []*Ruleset
	resp, err := s.client.Do(ctx, req, &rulesets)
	if err != nil {
		return nil, resp, err
	}

	return rulesets, resp, nil
}
// CreateRuleset creates a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#create-a-repository-ruleset
//
//meta:operation POST /repos/{owner}/{repo}/rulesets
func (s *RepositoriesService) CreateRuleset(ctx context.Context, owner, repo string, rs *Ruleset) (*Ruleset, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rulesets", owner, repo)

	req, err := s.client.NewRequest("POST", u, rs)
	if err != nil {
		return nil, nil, err
	}

	created := new(Ruleset)
	resp, err := s.client.Do(ctx, req, created)
	if err != nil {
		return nil, resp, err
	}

	return created, resp, nil
}
// GetRuleset gets a ruleset for the specified repository.
// If includesParents is true, rulesets configured at the organization level that apply to the repository will be returned.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#get-a-repository-ruleset
//
//meta:operation GET /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) GetRuleset(ctx context.Context, owner, repo string, rulesetID int64, includesParents bool) (*Ruleset, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rulesets/%v?includes_parents=%v", owner, repo, rulesetID, includesParents)

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	result := new(Ruleset)
	resp, err := s.client.Do(ctx, req, result)
	if err != nil {
		return nil, resp, err
	}

	return result, resp, nil
}
// UpdateRuleset updates a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#update-a-repository-ruleset
//
//meta:operation PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)

	req, err := s.client.NewRequest("PUT", u, rs)
	if err != nil {
		return nil, nil, err
	}

	updated := new(Ruleset)
	resp, err := s.client.Do(ctx, req, updated)
	if err != nil {
		return nil, resp, err
	}

	return updated, resp, nil
}
// UpdateRulesetNoBypassActor updates a ruleset for the specified repository.
//
// This function is necessary as the UpdateRuleset function does not marshal ByPassActor if passed as nil or an empty array.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#update-a-repository-ruleset
//
//meta:operation PUT /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) UpdateRulesetNoBypassActor(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) {
	u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID)

	// Re-wrap the ruleset in a type whose BypassActors field has no
	// omitempty tag, so a nil/empty list is serialized explicitly.
	payload := &rulesetNoOmitBypassActors{}
	if rs != nil {
		payload.ID = rs.ID
		payload.Name = rs.Name
		payload.Target = rs.Target
		payload.SourceType = rs.SourceType
		payload.Source = rs.Source
		payload.Enforcement = rs.Enforcement
		payload.BypassActors = rs.BypassActors
		payload.NodeID = rs.NodeID
		payload.Links = rs.Links
		payload.Conditions = rs.Conditions
		payload.Rules = rs.Rules
	}

	req, err := s.client.NewRequest("PUT", u, payload)
	if err != nil {
		return nil, nil, err
	}

	updated := new(Ruleset)
	resp, err := s.client.Do(ctx, req, updated)
	if err != nil {
		return nil, resp, err
	}

	return updated, resp, nil
}
// DeleteRuleset deletes a ruleset for the specified repository.
//
// GitHub API docs: https://docs.github.com/rest/repos/rules#delete-a-repository-ruleset
//
//meta:operation DELETE /repos/{owner}/{repo}/rulesets/{ruleset_id}
func (s *RepositoriesService) DeleteRuleset(ctx context.Context, owner, repo string, rulesetID int64) (*Response, error) {
	req, err := s.client.NewRequest("DELETE", fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID), nil)
	if err != nil {
		return nil, err
	}

	return s.client.Do(ctx, req, nil)
}

View file

@ -142,6 +142,14 @@ func (s *ActionsService) GetArtifact(ctx context.Context, owner, repo string, ar
func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo string, artifactID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v/zip", owner, repo, artifactID)
if s.client.RateLimitRedirectionalEndpoints {
return s.downloadArtifactWithRateLimit(ctx, u, maxRedirects)
}
return s.downloadArtifactWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) downloadArtifactWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -149,7 +157,7 @@ func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo strin
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
@ -160,6 +168,26 @@ func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo strin
return parsedURL, newResponse(resp), nil
}
func (s *ActionsService) downloadArtifactWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
// DeleteArtifact deletes a workflow run artifact.
//
// GitHub API docs: https://docs.github.com/rest/actions/artifacts#delete-an-artifact

View file

@ -150,6 +150,14 @@ func (s *ActionsService) GetWorkflowJobByID(ctx context.Context, owner, repo str
func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo string, jobID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/logs", owner, repo, jobID)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowJobLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowJobLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowJobLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -157,9 +165,29 @@ func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo str
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowJobLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}

View file

@ -204,6 +204,7 @@ func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner,
}
// GetWorkflowRunByID gets a specific workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-a-workflow-run
//
@ -226,6 +227,7 @@ func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo str
}
// GetWorkflowRunAttempt gets a specific workflow run attempt.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-a-workflow-run-attempt
//
@ -252,6 +254,7 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo
}
// GetWorkflowRunAttemptLogs gets a redirect URL to download a plain text file of logs for a workflow run for attempt number.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve a workflow run ID from the DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#download-workflow-run-attempt-logs
//
@ -259,6 +262,14 @@ func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo
func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, repo string, runID int64, attemptNumber int, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v/logs", owner, repo, runID, attemptNumber)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowRunAttemptLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowRunAttemptLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowRunAttemptLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -266,14 +277,35 @@ func (s *ActionsService) GetWorkflowRunAttemptLogs(ctx context.Context, owner, r
defer resp.Body.Close()
if resp.StatusCode != http.StatusFound {
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
return nil, newResponse(resp), fmt.Errorf("unexpected status code: %v", resp.Status)
}
parsedURL, err := url.Parse(resp.Header.Get("Location"))
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowRunAttemptLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
// RerunWorkflowByID re-runs a workflow by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID a the DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-a-workflow
//
@ -290,6 +322,7 @@ func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo stri
}
// RerunFailedJobsByID re-runs all of the failed jobs and their dependent jobs in a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-failed-jobs-from-a-workflow-run
//
@ -307,6 +340,8 @@ func (s *ActionsService) RerunFailedJobsByID(ctx context.Context, owner, repo st
// RerunJobByID re-runs a job and its dependent jobs in a workflow run by ID.
//
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#re-run-a-job-from-a-workflow-run
//
//meta:operation POST /repos/{owner}/{repo}/actions/jobs/{job_id}/rerun
@ -322,6 +357,7 @@ func (s *ActionsService) RerunJobByID(ctx context.Context, owner, repo string, j
}
// CancelWorkflowRunByID cancels a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#cancel-a-workflow-run
//
@ -338,6 +374,7 @@ func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo
}
// GetWorkflowRunLogs gets a redirect URL to download a plain text file of logs for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#download-workflow-run-logs
//
@ -345,6 +382,14 @@ func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo
func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64, maxRedirects int) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
if s.client.RateLimitRedirectionalEndpoints {
return s.getWorkflowRunLogsWithRateLimit(ctx, u, maxRedirects)
}
return s.getWorkflowRunLogsWithoutRateLimit(ctx, u, maxRedirects)
}
func (s *ActionsService) getWorkflowRunLogsWithoutRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, maxRedirects)
if err != nil {
return nil, nil, err
@ -359,7 +404,28 @@ func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo str
return parsedURL, newResponse(resp), err
}
func (s *ActionsService) getWorkflowRunLogsWithRateLimit(ctx context.Context, u string, maxRedirects int) (*url.URL, *Response, error) {
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
url, resp, err := s.client.bareDoUntilFound(ctx, req, maxRedirects)
if err != nil {
return nil, resp, err
}
defer resp.Body.Close()
// If we didn't receive a valid Location in a 302 response
if url == nil {
return nil, resp, fmt.Errorf("unexpected status code: %v", resp.Status)
}
return url, resp, nil
}
// DeleteWorkflowRun deletes a workflow run by ID.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#delete-a-workflow-run
//
@ -376,6 +442,7 @@ func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo stri
}
// DeleteWorkflowRunLogs deletes all logs for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#delete-workflow-run-logs
//
@ -392,6 +459,7 @@ func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo
}
// GetWorkflowRunUsageByID gets a specific workflow usage run by run ID in the unit of billable milliseconds.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-workflow-run-usage
//
@ -414,6 +482,7 @@ func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, rep
}
// GetPendingDeployments get all deployment environments for a workflow run that are waiting for protection rules to pass.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-pending-deployments-for-a-workflow-run
//
@ -436,6 +505,7 @@ func (s *ActionsService) GetPendingDeployments(ctx context.Context, owner, repo
}
// PendingDeployments approve or reject pending deployments that are waiting on approval by a required reviewer.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run
//
@ -458,6 +528,7 @@ func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo str
}
// ReviewCustomDeploymentProtectionRule approves or rejects custom deployment protection rules provided by a GitHub App for a workflow run.
// You can use the helper function *DeploymentProtectionRuleEvent.GetRunID() to easily retrieve the workflow run ID from a DeploymentProtectionRuleEvent.
//
// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-custom-deployment-protection-rules-for-a-workflow-run
//

View file

@ -118,13 +118,13 @@ func (s GistStats) String() string {
return Stringify(s)
}
// PullStats represents the number of total, merged, mergable and unmergeable
// PullStats represents the number of total, merged, mergeable and unmergeable
// pull-requests.
type PullStats struct {
TotalPulls *int `json:"total_pulls,omitempty"`
MergedPulls *int `json:"merged_pulls,omitempty"`
MergablePulls *int `json:"mergeable_pulls,omitempty"`
UnmergablePulls *int `json:"unmergeable_pulls,omitempty"`
TotalPulls *int `json:"total_pulls,omitempty"`
MergedPulls *int `json:"merged_pulls,omitempty"`
MergeablePulls *int `json:"mergeable_pulls,omitempty"`
UnmergeablePulls *int `json:"unmergeable_pulls,omitempty"`
}
func (s PullStats) String() string {

View file

@ -87,8 +87,8 @@ type CheckSuite struct {
// The following fields are only populated by Webhook events.
HeadCommit *Commit `json:"head_commit,omitempty"`
LatestCheckRunsCount *int64 `json:"latest_check_runs_count,omitempty"`
Rerequstable *bool `json:"rerequestable,omitempty"`
RunsRerequstable *bool `json:"runs_rerequestable,omitempty"`
Rerequestable *bool `json:"rerequestable,omitempty"`
RunsRerequestable *bool `json:"runs_rerequestable,omitempty"`
}
func (c CheckRun) String() string {

View file

@ -141,6 +141,15 @@ type AlertListOptions struct {
// The name of a code scanning tool. Only results by this tool will be listed.
ToolName string `url:"tool_name,omitempty"`
// The GUID of a code scanning tool. Only results by this tool will be listed.
ToolGUID string `url:"tool_guid,omitempty"`
// The direction to sort the results by. Possible values are: asc, desc. Default: desc.
Direction string `url:"direction,omitempty"`
// The property by which to sort the results. Possible values are: created, updated. Default: created.
Sort string `url:"sort,omitempty"`
ListCursorOptions
// Add ListOptions so offset pagination with integer type "page" query parameter is accepted
@ -391,7 +400,7 @@ func (s *CodeScanningService) UploadSarif(ctx context.Context, owner, repo strin
return nil, nil, err
}
// This will always return an error without unmarshalling the data
// This will always return an error without unmarshaling the data
resp, err := s.client.Do(ctx, req, nil)
// Even though there was an error, we still return the response
// in case the caller wants to inspect it further.

View file

@ -307,7 +307,7 @@ func (s *CopilotService) ListCopilotSeats(ctx context.Context, org string, opts
//
// To paginate through all seats, populate 'Page' with the number of the last page.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-user-management#list-all-copilot-seat-assignments-for-an-enterprise
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-user-management#list-all-copilot-seat-assignments-for-an-enterprise
//
//meta:operation GET /enterprises/{enterprise}/copilot/billing/seats
func (s *CopilotService) ListCopilotEnterpriseSeats(ctx context.Context, enterprise string, opts *ListOptions) (*ListCopilotSeatsResponse, *Response, error) {
@ -467,7 +467,7 @@ func (s *CopilotService) GetSeatDetails(ctx context.Context, org, user string) (
// GetEnterpriseMetrics gets Copilot usage metrics for an enterprise.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise
//
//meta:operation GET /enterprises/{enterprise}/copilot/metrics
func (s *CopilotService) GetEnterpriseMetrics(ctx context.Context, enterprise string, opts *CopilotMetricsListOptions) ([]*CopilotMetrics, *Response, error) {
@ -493,7 +493,7 @@ func (s *CopilotService) GetEnterpriseMetrics(ctx context.Context, enterprise st
// GetEnterpriseTeamMetrics gets Copilot usage metrics for an enterprise team.
//
// GitHub API docs: https://docs.github.com/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise-team
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/copilot/copilot-metrics#get-copilot-metrics-for-an-enterprise-team
//
//meta:operation GET /enterprises/{enterprise}/team/{team_slug}/copilot/metrics
func (s *CopilotService) GetEnterpriseTeamMetrics(ctx context.Context, enterprise, team string, opts *CopilotMetricsListOptions) ([]*CopilotMetrics, *Response, error) {

View file

@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API.
Usage:
import "github.com/google/go-github/v68/github" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/google/go-github/v69/github" // with go modules enabled (GO111MODULE=on or outside GOPATH)
import "github.com/google/go-github/github" // with go modules disabled
Construct a new GitHub client, then use the various services on the client to
@ -138,11 +138,17 @@ To detect this condition of error, you can check if its type is
# Conditional Requests
The GitHub API has good support for conditional requests which will help
prevent you from burning through your rate limit, as well as help speed up your
application. go-github does not handle conditional requests directly, but is
instead designed to work with a caching http.Transport. We recommend using
https://github.com/gregjones/httpcache for that.
The GitHub REST API has good support for conditional HTTP requests
via the ETag header which will help prevent you from burning through your
rate limit, as well as help speed up your application. go-github does not
handle conditional requests directly, but is instead designed to work with a
caching http.Transport.
Typically, an RFC 7234 compliant HTTP cache such as https://github.com/gregjones/httpcache
is recommended. Alternatively, the https://github.com/bored-engineer/github-conditional-http-transport
package relies on (undocumented) GitHub specific cache logic and is
recommended when making requests using short-lived credentials such as a
GitHub App installation token.
Learn more about GitHub conditional requests at
https://docs.github.com/rest/overview/resources-in-the-rest-api#conditional-requests.

View file

@ -0,0 +1,163 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// NodeQueryOptions specifies the optional parameters to the EnterpriseService
// Node management APIs.
type NodeQueryOptions struct {
	// UUID filters results to the node with this UUID.
	UUID *string `url:"uuid,omitempty"`
	// ClusterRoles filters results by the cluster roles from the cluster configuration file.
	ClusterRoles *string `url:"cluster_roles,omitempty"`
}
// ClusterStatus represents a response from the ClusterStatus and ReplicationStatus methods.
type ClusterStatus struct {
	// Status is the overall status across all nodes.
	Status *string `json:"status,omitempty"`
	// Nodes holds the per-node breakdown (no omitempty: a nil slice encodes as null).
	Nodes []*ClusterStatusNode `json:"nodes"`
}
// ClusterStatusNode represents the status of a cluster node.
type ClusterStatusNode struct {
	Hostname *string `json:"hostname,omitempty"`
	Status *string `json:"status,omitempty"`
	// Services lists the status of each service running on this node.
	Services []*ClusterStatusNodeServiceItem `json:"services"`
}
// ClusterStatusNodeServiceItem represents the status of a service running on a cluster node.
type ClusterStatusNodeServiceItem struct {
	Status *string `json:"status,omitempty"`
	Name *string `json:"name,omitempty"`
	Details *string `json:"details,omitempty"`
}
// SystemRequirements represents a response from the CheckSystemRequirements method.
type SystemRequirements struct {
	// Status is the overall requirement-check result across all nodes.
	Status *string `json:"status,omitempty"`
	Nodes []*SystemRequirementsNode `json:"nodes"`
}
// SystemRequirementsNode represents the status of a system node.
type SystemRequirementsNode struct {
	Hostname *string `json:"hostname,omitempty"`
	Status *string `json:"status,omitempty"`
	// RolesStatus reports the requirement-check result per configured role.
	RolesStatus []*SystemRequirementsNodeRoleStatus `json:"roles_status"`
}
// SystemRequirementsNodeRoleStatus represents the status of a role on a system node.
type SystemRequirementsNodeRoleStatus struct {
	Status *string `json:"status,omitempty"`
	Role *string `json:"role,omitempty"`
}
// NodeReleaseVersion represents a response from the GetNodeReleaseVersions method.
type NodeReleaseVersion struct {
	Hostname *string `json:"hostname,omitempty"`
	Version *ReleaseVersion `json:"version"`
}
// ReleaseVersion holds the release version information of the node.
type ReleaseVersion struct {
	Version *string `json:"version,omitempty"`
	Platform *string `json:"platform,omitempty"`
	BuildID *string `json:"build_id,omitempty"`
	BuildDate *string `json:"build_date,omitempty"`
}
// CheckSystemRequirements checks if GHES system nodes meet the system requirements.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-system-requirement-check-results-for-configured-cluster-nodes
//
//meta:operation GET /manage/v1/checks/system-requirements
func (s *EnterpriseService) CheckSystemRequirements(ctx context.Context) (*SystemRequirements, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/checks/system-requirements", nil)
	if err != nil {
		return nil, nil, err
	}

	// Decode the per-node requirement results into the response struct.
	result := &SystemRequirements{}
	resp, err := s.client.Do(ctx, req, result)
	if err != nil {
		return nil, resp, err
	}
	return result, resp, nil
}
// ClusterStatus gets the status of all services running on each cluster node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-services-running-on-all-cluster-nodes
//
//meta:operation GET /manage/v1/cluster/status
func (s *EnterpriseService) ClusterStatus(ctx context.Context) (*ClusterStatus, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/cluster/status", nil)
	if err != nil {
		return nil, nil, err
	}

	cs := &ClusterStatus{}
	resp, err := s.client.Do(ctx, req, cs)
	if err != nil {
		return nil, resp, err
	}
	return cs, resp, nil
}
// ReplicationStatus gets the status of all services running on each replica node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-services-running-on-all-replica-nodes
//
//meta:operation GET /manage/v1/replication/status
func (s *EnterpriseService) ReplicationStatus(ctx context.Context, opts *NodeQueryOptions) (*ClusterStatus, *Response, error) {
	// opts may be nil; addOptions handles that and returns the bare URL.
	u, err := addOptions("manage/v1/replication/status", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	out := &ClusterStatus{}
	resp, err := s.client.Do(ctx, req, out)
	if err != nil {
		return nil, resp, err
	}
	return out, resp, nil
}
// GetNodeReleaseVersions gets the version information deployed to each node.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-all-ghes-release-versions-for-all-nodes
//
//meta:operation GET /manage/v1/version
func (s *EnterpriseService) GetNodeReleaseVersions(ctx context.Context, opts *NodeQueryOptions) ([]*NodeReleaseVersion, *Response, error) {
	// opts may be nil; addOptions handles that and returns the bare URL.
	u, err := addOptions("manage/v1/version", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	var versions []*NodeReleaseVersion
	resp, err := s.client.Do(ctx, req, &versions)
	if err != nil {
		return nil, resp, err
	}
	return versions, resp, nil
}

View file

@ -0,0 +1,516 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"errors"
)
// ConfigApplyOptions is a struct to hold the options for the ConfigApply API and the response.
type ConfigApplyOptions struct {
	// RunID is the ID of the run to get the status of. If empty a random one will be generated.
	RunID *string `json:"run_id,omitempty"`
}
// ConfigApplyStatus is a struct to hold the response from the ConfigApply API.
type ConfigApplyStatus struct {
	// Running reports whether a config-apply run is currently in progress.
	Running *bool `json:"running,omitempty"`
	// Successful reports whether the last run completed successfully.
	Successful *bool `json:"successful,omitempty"`
	Nodes []*ConfigApplyStatusNode `json:"nodes"`
}
// ConfigApplyStatusNode is a struct to hold the response from the ConfigApply API.
type ConfigApplyStatusNode struct {
	Hostname *string `json:"hostname,omitempty"`
	Running *bool `json:"running,omitempty"`
	Successful *bool `json:"successful,omitempty"`
	RunID *string `json:"run_id,omitempty"`
}
// ConfigApplyEventsOptions is used to enable pagination.
type ConfigApplyEventsOptions struct {
	// LastRequestID resumes event listing after the given request ID.
	LastRequestID *string `url:"last_request_id,omitempty"`
}
// ConfigApplyEvents is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEvents struct {
	Nodes []*ConfigApplyEventsNode `json:"nodes"`
}
// ConfigApplyEventsNode is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEventsNode struct {
	Node *string `json:"node,omitempty"`
	LastRequestID *string `json:"last_request_id,omitempty"`
	Events []*ConfigApplyEventsNodeEvent `json:"events"`
}
// ConfigApplyEventsNodeEvent is a struct to hold the response from the ConfigApplyEvents API.
type ConfigApplyEventsNodeEvent struct {
	Timestamp *Timestamp `json:"timestamp,omitempty"`
	SeverityText *string `json:"severity_text,omitempty"`
	Body *string `json:"body,omitempty"`
	EventName *string `json:"event_name,omitempty"`
	Topology *string `json:"topology,omitempty"`
	Hostname *string `json:"hostname,omitempty"`
	ConfigRunID *string `json:"config_run_id,omitempty"`
	TraceID *string `json:"trace_id,omitempty"`
	SpanID *string `json:"span_id,omitempty"`
	SpanParentID *int64 `json:"span_parent_id,omitempty"`
	SpanDepth *int `json:"span_depth,omitempty"`
}
// InitialConfigOptions is a struct to hold the options for the InitialConfig API.
//
// NOTE(review): the fields carry `url:` tags but InitialConfig sends this struct
// as the POST request body (JSON-encoded with default field names) — confirm the
// endpoint accepts that encoding rather than query parameters.
type InitialConfigOptions struct {
	License string `url:"license"`
	Password string `url:"password"`
}
// LicenseStatus is a struct to hold the response from the License API.
type LicenseStatus struct {
	AdvancedSecurityEnabled *bool `json:"advancedSecurityEnabled,omitempty"`
	AdvancedSecuritySeats *int `json:"advancedSecuritySeats,omitempty"`
	ClusterSupport *bool `json:"clusterSupport,omitempty"`
	Company *string `json:"company,omitempty"`
	CroquetSupport *bool `json:"croquetSupport,omitempty"`
	CustomTerms *bool `json:"customTerms,omitempty"`
	Evaluation *bool `json:"evaluation,omitempty"`
	ExpireAt *Timestamp `json:"expireAt,omitempty"`
	InsightsEnabled *bool `json:"insightsEnabled,omitempty"`
	InsightsExpireAt *Timestamp `json:"insightsExpireAt,omitempty"`
	LearningLabEvaluationExpires *Timestamp `json:"learningLabEvaluationExpires,omitempty"`
	LearningLabSeats *int `json:"learningLabSeats,omitempty"`
	Perpetual *bool `json:"perpetual,omitempty"`
	ReferenceNumber *string `json:"referenceNumber,omitempty"`
	Seats *int `json:"seats,omitempty"`
	SSHAllowed *bool `json:"sshAllowed,omitempty"`
	SupportKey *string `json:"supportKey,omitempty"`
	UnlimitedSeating *bool `json:"unlimitedSeating,omitempty"`
}
// UploadLicenseOptions is a struct to hold the options for the UploadLicense API.
type UploadLicenseOptions struct {
	License string `url:"license"`
}
// LicenseCheck is a struct to hold the response from the LicenseStatus API.
type LicenseCheck struct {
	Status *string `json:"status,omitempty"`
}
// ConfigSettings is a struct to hold the response from the Settings API.
// There are many fields that link to other structs.
type ConfigSettings struct {
	PrivateMode *bool `json:"private_mode,omitempty"`
	PublicPages *bool `json:"public_pages,omitempty"`
	SubdomainIsolation *bool `json:"subdomain_isolation,omitempty"`
	SignupEnabled *bool `json:"signup_enabled,omitempty"`
	GithubHostname *string `json:"github_hostname,omitempty"`
	IdenticonsHost *string `json:"identicons_host,omitempty"`
	HTTPProxy *string `json:"http_proxy,omitempty"`
	AuthMode *string `json:"auth_mode,omitempty"`
	ExpireSessions *bool `json:"expire_sessions,omitempty"`
	AdminPassword *string `json:"admin_password,omitempty"`
	ConfigurationID *int64 `json:"configuration_id,omitempty"`
	ConfigurationRunCount *int `json:"configuration_run_count,omitempty"`
	Avatar *ConfigSettingsAvatar `json:"avatar,omitempty"`
	Customer *ConfigSettingsCustomer `json:"customer,omitempty"`
	License *ConfigSettingsLicenseSettings `json:"license,omitempty"`
	GithubSSL *ConfigSettingsGithubSSL `json:"github_ssl,omitempty"`
	LDAP *ConfigSettingsLDAP `json:"ldap,omitempty"`
	CAS *ConfigSettingsCAS `json:"cas,omitempty"`
	SAML *ConfigSettingsSAML `json:"saml,omitempty"`
	GithubOAuth *ConfigSettingsGithubOAuth `json:"github_oauth,omitempty"`
	SMTP *ConfigSettingsSMTP `json:"smtp,omitempty"`
	NTP *ConfigSettingsNTP `json:"ntp,omitempty"`
	Timezone *string `json:"timezone,omitempty"`
	SNMP *ConfigSettingsSNMP `json:"snmp,omitempty"`
	Syslog *ConfigSettingsSyslog `json:"syslog,omitempty"`
	Assets *string `json:"assets,omitempty"`
	Pages *ConfigSettingsPagesSettings `json:"pages,omitempty"`
	Collectd *ConfigSettingsCollectd `json:"collectd,omitempty"`
	Mapping *ConfigSettingsMapping `json:"mapping,omitempty"`
	LoadBalancer *string `json:"load_balancer,omitempty"`
}
// ConfigSettingsAvatar is a struct to hold the response from the Settings API.
type ConfigSettingsAvatar struct {
	Enabled *bool `json:"enabled,omitempty"`
	URI *string `json:"uri,omitempty"`
}
// ConfigSettingsCustomer is a struct to hold the response from the Settings API.
type ConfigSettingsCustomer struct {
	Name *string `json:"name,omitempty"`
	Email *string `json:"email,omitempty"`
	UUID *string `json:"uuid,omitempty"`
	Secret *string `json:"secret,omitempty"`
	PublicKeyData *string `json:"public_key_data,omitempty"`
}
// ConfigSettingsLicenseSettings is a struct to hold the response from the Settings API.
type ConfigSettingsLicenseSettings struct {
	Seats *int `json:"seats,omitempty"`
	Evaluation *bool `json:"evaluation,omitempty"`
	Perpetual *bool `json:"perpetual,omitempty"`
	UnlimitedSeating *bool `json:"unlimited_seating,omitempty"`
	SupportKey *string `json:"support_key,omitempty"`
	SSHAllowed *bool `json:"ssh_allowed,omitempty"`
	ClusterSupport *bool `json:"cluster_support,omitempty"`
	ExpireAt *Timestamp `json:"expire_at,omitempty"`
}
// ConfigSettingsGithubSSL is a struct to hold the response from the Settings API.
type ConfigSettingsGithubSSL struct {
	Enabled *bool `json:"enabled,omitempty"`
	Cert *string `json:"cert,omitempty"`
	Key *string `json:"key,omitempty"`
}
// ConfigSettingsLDAP is a struct to hold the response from the Settings API.
type ConfigSettingsLDAP struct {
	Host *string `json:"host,omitempty"`
	Port *int `json:"port,omitempty"`
	Base []string `json:"base,omitempty"`
	UID *string `json:"uid,omitempty"`
	BindDN *string `json:"bind_dn,omitempty"`
	Password *string `json:"password,omitempty"`
	Method *string `json:"method,omitempty"`
	SearchStrategy *string `json:"search_strategy,omitempty"`
	UserGroups []string `json:"user_groups,omitempty"`
	AdminGroup *string `json:"admin_group,omitempty"`
	VirtualAttributeEnabled *bool `json:"virtual_attribute_enabled,omitempty"`
	RecursiveGroupSearch *bool `json:"recursive_group_search,omitempty"`
	PosixSupport *bool `json:"posix_support,omitempty"`
	UserSyncEmails *bool `json:"user_sync_emails,omitempty"`
	UserSyncKeys *bool `json:"user_sync_keys,omitempty"`
	UserSyncInterval *int `json:"user_sync_interval,omitempty"`
	TeamSyncInterval *int `json:"team_sync_interval,omitempty"`
	SyncEnabled *bool `json:"sync_enabled,omitempty"`
	Reconciliation *ConfigSettingsLDAPReconciliation `json:"reconciliation,omitempty"`
	Profile *ConfigSettingsLDAPProfile `json:"profile,omitempty"`
}
// ConfigSettingsLDAPReconciliation is part of the ConfigSettingsLDAP struct.
type ConfigSettingsLDAPReconciliation struct {
	User *string `json:"user,omitempty"`
	Org *string `json:"org,omitempty"`
}
// ConfigSettingsLDAPProfile is part of the ConfigSettingsLDAP struct.
type ConfigSettingsLDAPProfile struct {
	UID *string `json:"uid,omitempty"`
	Name *string `json:"name,omitempty"`
	Mail *string `json:"mail,omitempty"`
	Key *string `json:"key,omitempty"`
}
// ConfigSettingsCAS is a struct to hold the response from the Settings API.
type ConfigSettingsCAS struct {
	URL *string `json:"url,omitempty"`
}
// ConfigSettingsSAML is a struct to hold the response from the Settings API.
type ConfigSettingsSAML struct {
	SSOURL *string `json:"sso_url,omitempty"`
	Certificate *string `json:"certificate,omitempty"`
	CertificatePath *string `json:"certificate_path,omitempty"`
	Issuer *string `json:"issuer,omitempty"`
	IDPInitiatedSSO *bool `json:"idp_initiated_sso,omitempty"`
	DisableAdminDemote *bool `json:"disable_admin_demote,omitempty"`
}
// ConfigSettingsGithubOAuth is a struct to hold the response from the Settings API.
type ConfigSettingsGithubOAuth struct {
	ClientID *string `json:"client_id,omitempty"`
	ClientSecret *string `json:"client_secret,omitempty"`
	OrganizationName *string `json:"organization_name,omitempty"`
	OrganizationTeam *string `json:"organization_team,omitempty"`
}
// ConfigSettingsSMTP is a struct to hold the response from the Settings API.
type ConfigSettingsSMTP struct {
	Enabled *bool `json:"enabled,omitempty"`
	Address *string `json:"address,omitempty"`
	Authentication *string `json:"authentication,omitempty"`
	Port *string `json:"port,omitempty"`
	Domain *string `json:"domain,omitempty"`
	// NOTE(review): both Username ("username") and UserName ("user_name") are
	// present — confirm which key(s) the GHES payload actually emits.
	Username *string `json:"username,omitempty"`
	UserName *string `json:"user_name,omitempty"`
	EnableStarttlsAuto *bool `json:"enable_starttls_auto,omitempty"`
	Password *string `json:"password,omitempty"`
	// NOTE(review): hyphenated JSON key is unusual — confirm against the GHES payload.
	DiscardToNoreplyAddress *bool `json:"discard-to-noreply-address,omitempty"`
	SupportAddress *string `json:"support_address,omitempty"`
	SupportAddressType *string `json:"support_address_type,omitempty"`
	NoreplyAddress *string `json:"noreply_address,omitempty"`
}
// ConfigSettingsNTP is a struct to hold the response from the Settings API.
type ConfigSettingsNTP struct {
	PrimaryServer *string `json:"primary_server,omitempty"`
	SecondaryServer *string `json:"secondary_server,omitempty"`
}
// ConfigSettingsSNMP is a struct to hold the response from the Settings API.
type ConfigSettingsSNMP struct {
	Enabled *bool `json:"enabled,omitempty"`
	Community *string `json:"community,omitempty"`
}
// ConfigSettingsSyslog is a struct to hold the response from the Settings API.
type ConfigSettingsSyslog struct {
	Enabled *bool `json:"enabled,omitempty"`
	Server *string `json:"server,omitempty"`
	ProtocolName *string `json:"protocol_name,omitempty"`
}
// ConfigSettingsPagesSettings is a struct to hold the response from the Settings API.
type ConfigSettingsPagesSettings struct {
	Enabled *bool `json:"enabled,omitempty"`
}
// ConfigSettingsCollectd is a struct to hold the response from the Settings API.
type ConfigSettingsCollectd struct {
	Enabled *bool `json:"enabled,omitempty"`
	Server *string `json:"server,omitempty"`
	Port *int `json:"port,omitempty"`
	Encryption *string `json:"encryption,omitempty"`
	Username *string `json:"username,omitempty"`
	Password *string `json:"password,omitempty"`
}
// ConfigSettingsMapping is a struct to hold the response from the Settings API.
type ConfigSettingsMapping struct {
	Enabled *bool `json:"enabled,omitempty"`
	Tileserver *string `json:"tileserver,omitempty"`
	Basemap *string `json:"basemap,omitempty"`
	Token *string `json:"token,omitempty"`
}
// NodeMetadataStatus is a struct to hold the response from the NodeMetadata API.
type NodeMetadataStatus struct {
	Topology *string `json:"topology,omitempty"`
	Nodes []*NodeDetails `json:"nodes"`
}
// NodeDetails is a struct to hold the response from the NodeMetadata API.
type NodeDetails struct {
	Hostname *string `json:"hostname,omitempty"`
	UUID *string `json:"uuid,omitempty"`
	ClusterRoles []string `json:"cluster_roles,omitempty"`
}
// ConfigApplyEvents gets events from the command ghe-config-apply.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#list-events-from-ghe-config-apply
//
//meta:operation GET /manage/v1/config/apply/events
func (s *EnterpriseService) ConfigApplyEvents(ctx context.Context, opts *ConfigApplyEventsOptions) (*ConfigApplyEvents, *Response, error) {
	// opts may be nil; addOptions handles that and returns the bare URL.
	u, err := addOptions("manage/v1/config/apply/events", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	events := &ConfigApplyEvents{}
	resp, err := s.client.Do(ctx, req, events)
	if err != nil {
		return nil, resp, err
	}
	return events, resp, nil
}
// InitialConfig initializes the GitHub Enterprise instance with a license and password.
// After initializing the instance, you need to run an apply to apply the configuration.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#initialize-instance-configuration-with-license-and-password
//
//meta:operation POST /manage/v1/config/init
func (s *EnterpriseService) InitialConfig(ctx context.Context, license, password string) (*Response, error) {
	body := &InitialConfigOptions{
		License:  license,
		Password: password,
	}

	req, err := s.client.NewRequest("POST", "manage/v1/config/init", body)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// License gets the current license information for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-enterprise-license-information
//
//meta:operation GET /manage/v1/config/license
func (s *EnterpriseService) License(ctx context.Context) ([]*LicenseStatus, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/config/license", nil)
	if err != nil {
		return nil, nil, err
	}

	// The endpoint returns a JSON array, one entry per license.
	var licenses []*LicenseStatus
	resp, err := s.client.Do(ctx, req, &licenses)
	if err != nil {
		return nil, resp, err
	}
	return licenses, resp, nil
}
// UploadLicense uploads a new license to the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#upload-an-enterprise-license
//
//meta:operation PUT /manage/v1/config/license
func (s *EnterpriseService) UploadLicense(ctx context.Context, license string) (*Response, error) {
	body := &UploadLicenseOptions{License: license}

	req, err := s.client.NewRequest("PUT", "manage/v1/config/license", body)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// LicenseStatus gets the current license status for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#check-a-license
//
//meta:operation GET /manage/v1/config/license/check
func (s *EnterpriseService) LicenseStatus(ctx context.Context) ([]*LicenseCheck, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/config/license/check", nil)
	if err != nil {
		return nil, nil, err
	}

	// The endpoint returns a JSON array of per-license check results.
	var results []*LicenseCheck
	resp, err := s.client.Do(ctx, req, &results)
	if err != nil {
		return nil, resp, err
	}
	return results, resp, nil
}
// NodeMetadata gets the metadata for all nodes in the GitHub Enterprise instance.
// This is required for clustered setups.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-ghes-node-metadata-for-all-nodes
//
//meta:operation GET /manage/v1/config/nodes
func (s *EnterpriseService) NodeMetadata(ctx context.Context, opts *NodeQueryOptions) (*NodeMetadataStatus, *Response, error) {
	// opts may be nil; addOptions handles that and returns the bare URL.
	u, err := addOptions("manage/v1/config/nodes", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	meta := &NodeMetadataStatus{}
	resp, err := s.client.Do(ctx, req, meta)
	if err != nil {
		return nil, resp, err
	}
	return meta, resp, nil
}
// Settings gets the current configuration settings for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-ghes-settings
//
//meta:operation GET /manage/v1/config/settings
func (s *EnterpriseService) Settings(ctx context.Context) (*ConfigSettings, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/config/settings", nil)
	if err != nil {
		return nil, nil, err
	}

	settings := &ConfigSettings{}
	resp, err := s.client.Do(ctx, req, settings)
	if err != nil {
		return nil, resp, err
	}
	return settings, resp, nil
}
// UpdateSettings updates the configuration settings for the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-settings
//
//meta:operation PUT /manage/v1/config/settings
func (s *EnterpriseService) UpdateSettings(ctx context.Context, opts *ConfigSettings) (*Response, error) {
	// The settings body is mandatory for this endpoint.
	if opts == nil {
		return nil, errors.New("opts should not be nil")
	}

	req, err := s.client.NewRequest("PUT", "manage/v1/config/settings", opts)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}
// ConfigApply triggers a configuration apply run on the GitHub Enterprise instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#trigger-a-ghe-config-apply-run
//
//meta:operation POST /manage/v1/config/apply
func (s *EnterpriseService) ConfigApply(ctx context.Context, opts *ConfigApplyOptions) (*ConfigApplyOptions, *Response, error) {
	req, err := s.client.NewRequest("POST", "manage/v1/config/apply", opts)
	if err != nil {
		return nil, nil, err
	}

	// The endpoint echoes back the run options (including the assigned run ID).
	applied := &ConfigApplyOptions{}
	resp, err := s.client.Do(ctx, req, applied)
	if err != nil {
		return nil, resp, err
	}
	return applied, resp, nil
}
// ConfigApplyStatus gets the status of a ghe-config-apply run on the GitHub Enterprise instance.
// You can request the status of the latest run, or of a specific run by setting its ID in opts.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-a-ghe-config-apply-run
//
//meta:operation GET /manage/v1/config/apply
func (s *EnterpriseService) ConfigApplyStatus(ctx context.Context, opts *ConfigApplyOptions) (*ConfigApplyStatus, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/config/apply", opts)
	if err != nil {
		return nil, nil, err
	}

	st := &ConfigApplyStatus{}
	resp, err := s.client.Do(ctx, req, st)
	if err != nil {
		return nil, resp, err
	}
	return st, resp, nil
}

View file

@ -0,0 +1,94 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// MaintenanceOperationStatus represents the message to be displayed when the instance gets a maintenance operation request.
type MaintenanceOperationStatus struct {
	Hostname *string `json:"hostname,omitempty"`
	UUID *string `json:"uuid,omitempty"`
	Message *string `json:"message,omitempty"`
}
// MaintenanceStatus represents the status of maintenance mode for all nodes.
type MaintenanceStatus struct {
	Hostname *string `json:"hostname,omitempty"`
	UUID *string `json:"uuid,omitempty"`
	Status *string `json:"status,omitempty"`
	// ScheduledTime is when a scheduled maintenance window begins, if one is set.
	ScheduledTime *Timestamp `json:"scheduled_time,omitempty"`
	ConnectionServices []*ConnectionServiceItem `json:"connection_services,omitempty"`
	CanUnsetMaintenance *bool `json:"can_unset_maintenance,omitempty"`
	IPExceptionList []string `json:"ip_exception_list,omitempty"`
	MaintenanceModeMessage *string `json:"maintenance_mode_message,omitempty"`
}
// ConnectionServiceItem represents the connection services for the maintenance status.
type ConnectionServiceItem struct {
	Name *string `json:"name,omitempty"`
	Number *int `json:"number,omitempty"`
}
// MaintenanceOptions represents the options for setting the maintenance mode for the instance.
// When can be a string, so we can't use a Timestamp type.
type MaintenanceOptions struct {
	// Enabled is set by CreateMaintenance from its enable argument.
	Enabled bool `json:"enabled"`
	UUID *string `json:"uuid,omitempty"`
	// When accepts humanish schedule strings as well as timestamps, hence *string.
	When *string `json:"when,omitempty"`
	IPExceptionList []string `json:"ip_exception_list,omitempty"`
	MaintenanceModeMessage *string `json:"maintenance_mode_message,omitempty"`
}
// GetMaintenanceStatus gets the status of maintenance mode for all nodes.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-status-of-maintenance-mode
//
//meta:operation GET /manage/v1/maintenance
func (s *EnterpriseService) GetMaintenanceStatus(ctx context.Context, opts *NodeQueryOptions) ([]*MaintenanceStatus, *Response, error) {
	// opts may be nil; addOptions handles that and returns the bare URL.
	u, err := addOptions("manage/v1/maintenance", opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := s.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}

	var statuses []*MaintenanceStatus
	resp, err := s.client.Do(ctx, req, &statuses)
	if err != nil {
		return nil, resp, err
	}
	return statuses, resp, nil
}
// CreateMaintenance sets the maintenance mode for the instance.
// The enable parameter controls whether maintenance mode is turned on (true)
// or off (false); the remaining settings are taken from opts, which may be nil.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-the-status-of-maintenance-mode
//
//meta:operation POST /manage/v1/maintenance
func (s *EnterpriseService) CreateMaintenance(ctx context.Context, enable bool, opts *MaintenanceOptions) ([]*MaintenanceOperationStatus, *Response, error) {
	u := "manage/v1/maintenance"

	// Guard against a nil options struct so callers that only want to toggle
	// maintenance mode do not trigger a nil-pointer dereference below.
	if opts == nil {
		opts = &MaintenanceOptions{}
	}
	opts.Enabled = enable

	req, err := s.client.NewRequest("POST", u, opts)
	if err != nil {
		return nil, nil, err
	}

	var i []*MaintenanceOperationStatus
	resp, err := s.client.Do(ctx, req, &i)
	if err != nil {
		return nil, resp, err
	}
	return i, resp, nil
}

View file

@ -0,0 +1,99 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
)
// SSHKeyStatus represents the status of a SSH key operation.
type SSHKeyStatus struct {
	// Hostname of the node the key operation was applied to.
	Hostname *string `json:"hostname,omitempty"`
	// UUID of the node the key operation was applied to.
	UUID *string `json:"uuid,omitempty"`
	// Message describes the result of the operation.
	Message *string `json:"message,omitempty"`
	// Modified reports whether the key set was actually changed.
	Modified *bool `json:"modified,omitempty"`
}
// SSHKeyOptions specifies the parameters to the SSH create and delete functions.
type SSHKeyOptions struct {
	// Key is the SSH key to add to the instance.
	Key string `json:"key"`
}
// ClusterSSHKey represents the SSH keys configured for the instance.
type ClusterSSHKey struct {
	// Key is the configured public SSH key.
	Key *string `json:"key,omitempty"`
	// Fingerprint of the configured key.
	Fingerprint *string `json:"fingerprint,omitempty"`
}
// DeleteSSHKey deletes the SSH key from the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#delete-a-ssh-key
//
//meta:operation DELETE /manage/v1/access/ssh
func (s *EnterpriseService) DeleteSSHKey(ctx context.Context, key string) ([]*SSHKeyStatus, *Response, error) {
	// The key to remove is carried in the request body.
	body := &SSHKeyOptions{Key: key}

	req, err := s.client.NewRequest("DELETE", "manage/v1/access/ssh", body)
	if err != nil {
		return nil, nil, err
	}

	var statuses []*SSHKeyStatus
	resp, err := s.client.Do(ctx, req, &statuses)
	if err != nil {
		return nil, resp, err
	}
	return statuses, resp, nil
}
// GetSSHKey gets the SSH keys configured for the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#get-the-configured-ssh-keys
//
//meta:operation GET /manage/v1/access/ssh
func (s *EnterpriseService) GetSSHKey(ctx context.Context) ([]*ClusterSSHKey, *Response, error) {
	req, err := s.client.NewRequest("GET", "manage/v1/access/ssh", nil)
	if err != nil {
		return nil, nil, err
	}

	var keys []*ClusterSSHKey
	resp, err := s.client.Do(ctx, req, &keys)
	if err != nil {
		return nil, resp, err
	}
	return keys, resp, nil
}
// CreateSSHKey adds a new SSH key to the instance.
//
// GitHub API docs: https://docs.github.com/enterprise-server@3.15/rest/enterprise-admin/manage-ghes#set-a-new-ssh-key
//
//meta:operation POST /manage/v1/access/ssh
func (s *EnterpriseService) CreateSSHKey(ctx context.Context, key string) ([]*SSHKeyStatus, *Response, error) {
	// The new key is carried in the request body.
	body := &SSHKeyOptions{Key: key}

	req, err := s.client.NewRequest("POST", "manage/v1/access/ssh", body)
	if err != nil {
		return nil, nil, err
	}

	var statuses []*SSHKeyStatus
	resp, err := s.client.Do(ctx, req, &statuses)
	if err != nil {
		return nil, resp, err
	}
	return statuses, resp, nil
}

View file

@ -0,0 +1,118 @@
// Copyright 2025 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
import (
"context"
"fmt"
)
// CreateRepositoryRuleset creates a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#create-an-enterprise-repository-ruleset
//
//meta:operation POST /enterprises/{enterprise}/rulesets
func (s *EnterpriseService) CreateRepositoryRuleset(ctx context.Context, enterprise string, ruleset RepositoryRuleset) (*RepositoryRuleset, *Response, error) {
	endpoint := fmt.Sprintf("enterprises/%v/rulesets", enterprise)

	req, err := s.client.NewRequest("POST", endpoint, ruleset)
	if err != nil {
		return nil, nil, err
	}

	var created *RepositoryRuleset
	resp, err := s.client.Do(ctx, req, &created)
	if err != nil {
		return nil, resp, err
	}
	return created, resp, nil
}
// GetRepositoryRuleset gets a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#get-an-enterprise-repository-ruleset
//
//meta:operation GET /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) GetRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64) (*RepositoryRuleset, *Response, error) {
	endpoint := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)

	req, err := s.client.NewRequest("GET", endpoint, nil)
	if err != nil {
		return nil, nil, err
	}

	var found *RepositoryRuleset
	resp, err := s.client.Do(ctx, req, &found)
	if err != nil {
		return nil, resp, err
	}
	return found, resp, nil
}
// UpdateRepositoryRuleset updates a repository ruleset for the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#update-an-enterprise-repository-ruleset
//
//meta:operation PUT /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) UpdateRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64, ruleset RepositoryRuleset) (*RepositoryRuleset, *Response, error) {
	endpoint := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)

	req, err := s.client.NewRequest("PUT", endpoint, ruleset)
	if err != nil {
		return nil, nil, err
	}

	var updated *RepositoryRuleset
	resp, err := s.client.Do(ctx, req, &updated)
	if err != nil {
		return nil, resp, err
	}
	return updated, resp, nil
}
// UpdateRepositoryRulesetClearBypassActor clears the bypass actors for a repository ruleset for the specified enterprise.
//
// This function is necessary as the UpdateRepositoryRuleset function does not marshal ByPassActor if passed as an empty array.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#update-an-enterprise-repository-ruleset
//
//meta:operation PUT /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) UpdateRepositoryRulesetClearBypassActor(ctx context.Context, enterprise string, rulesetID int64) (*Response, error) {
	endpoint := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)

	// The dedicated payload type forces an empty bypass_actors array onto the wire.
	req, err := s.client.NewRequest("PUT", endpoint, rulesetClearBypassActors{})
	if err != nil {
		return nil, err
	}

	return s.client.Do(ctx, req, nil)
}
// DeleteRepositoryRuleset deletes a repository ruleset from the specified enterprise.
//
// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/enterprise-admin/rules#delete-an-enterprise-repository-ruleset
//
//meta:operation DELETE /enterprises/{enterprise}/rulesets/{ruleset_id}
func (s *EnterpriseService) DeleteRepositoryRuleset(ctx context.Context, enterprise string, rulesetID int64) (*Response, error) {
	endpoint := fmt.Sprintf("enterprises/%v/rulesets/%v", enterprise, rulesetID)

	req, err := s.client.NewRequest("DELETE", endpoint, nil)
	if err != nil {
		return nil, err
	}
	return s.client.Do(ctx, req, nil)
}

View file

@ -1521,14 +1521,73 @@ type RepositoryImportEvent struct {
//
// GitHub API docs: https://docs.github.com/en/webhooks/webhook-events-and-payloads#repository_ruleset
type RepositoryRulesetEvent struct {
Action *string `json:"action,omitempty"`
Enterprise *Enterprise `json:"enterprise,omitempty"`
Installation *Installation `json:"installation,omitempty"`
Organization *Organization `json:"organization,omitempty"`
Repository *Repository `json:"repository,omitempty"`
RepositoryRuleset *RepositoryRuleset `json:"repository_ruleset"`
Changes *RepositoryRulesetEditedChanges `json:"changes,omitempty"`
Sender *User `json:"sender"`
Action *string `json:"action,omitempty"`
Enterprise *Enterprise `json:"enterprise,omitempty"`
Installation *Installation `json:"installation,omitempty"`
Organization *Organization `json:"organization,omitempty"`
Repository *Repository `json:"repository,omitempty"`
RepositoryRuleset *RepositoryRuleset `json:"repository_ruleset"`
Changes *RepositoryRulesetChanges `json:"changes,omitempty"`
Sender *User `json:"sender"`
}
// RepositoryRulesetChanges represents the changes made to a repository ruleset.
type RepositoryRulesetChanges struct {
	// Name carries the previous (`from`) ruleset name when it changed.
	Name *RepositoryRulesetChangeSource `json:"name,omitempty"`
	// Enforcement carries the previous (`from`) enforcement value when it changed.
	Enforcement *RepositoryRulesetChangeSource `json:"enforcement,omitempty"`
	// Conditions lists condition additions, deletions and updates.
	Conditions *RepositoryRulesetChangedConditions `json:"conditions,omitempty"`
	// Rules lists rule additions, deletions and updates.
	Rules *RepositoryRulesetChangedRules `json:"rules,omitempty"`
}
// RepositoryRulesetChangeSource represents a source change for the ruleset.
type RepositoryRulesetChangeSource struct {
	// From is the previous value before the change.
	From *string `json:"from,omitempty"`
}
// RepositoryRulesetChangeSources represents multiple source changes for the ruleset.
type RepositoryRulesetChangeSources struct {
	// From holds the previous values before the change.
	From []string `json:"from,omitempty"`
}
// RepositoryRulesetChangedConditions holds changes to conditions in a ruleset.
type RepositoryRulesetChangedConditions struct {
	// Added lists conditions added to the ruleset.
	Added []*RepositoryRulesetConditions `json:"added,omitempty"`
	// Deleted lists conditions removed from the ruleset.
	Deleted []*RepositoryRulesetConditions `json:"deleted,omitempty"`
	// Updated lists conditions modified in the ruleset, with their changes.
	Updated []*RepositoryRulesetUpdatedConditions `json:"updated,omitempty"`
}
// RepositoryRulesetUpdatedConditions represents the edited updates to conditions in a ruleset.
type RepositoryRulesetUpdatedConditions struct {
	// Condition is the condition after the update.
	Condition *RepositoryRulesetConditions `json:"condition,omitempty"`
	// Changes describes what was modified on the condition.
	Changes *RepositoryRulesetUpdatedCondition `json:"changes,omitempty"`
}
// RepositoryRulesetUpdatedCondition represents the changes to a condition in a ruleset.
type RepositoryRulesetUpdatedCondition struct {
	// ConditionType carries the previous condition type, when changed.
	ConditionType *RepositoryRulesetChangeSource `json:"condition_type,omitempty"`
	// Target carries the previous target, when changed.
	Target *RepositoryRulesetChangeSource `json:"target,omitempty"`
	// Include carries the previous include patterns, when changed.
	Include *RepositoryRulesetChangeSources `json:"include,omitempty"`
	// Exclude carries the previous exclude patterns, when changed.
	Exclude *RepositoryRulesetChangeSources `json:"exclude,omitempty"`
}
// RepositoryRulesetChangedRules holds changes to rules in a ruleset.
type RepositoryRulesetChangedRules struct {
	// Added lists rules added to the ruleset.
	Added []*RepositoryRule `json:"added,omitempty"`
	// Deleted lists rules removed from the ruleset.
	Deleted []*RepositoryRule `json:"deleted,omitempty"`
	// Updated lists rules modified in the ruleset, with their changes.
	Updated []*RepositoryRulesetUpdatedRules `json:"updated,omitempty"`
}
// RepositoryRulesetUpdatedRules holds updates to rules in a ruleset.
type RepositoryRulesetUpdatedRules struct {
	// Rule is the rule after the update.
	Rule *RepositoryRule `json:"rule,omitempty"`
	// Changes describes what was modified on the rule.
	Changes *RepositoryRulesetChangedRule `json:"changes,omitempty"`
}
// RepositoryRulesetChangedRule holds changes made to a rule in a ruleset.
type RepositoryRulesetChangedRule struct {
	// Configuration carries the previous rule configuration, when changed.
	Configuration *RepositoryRulesetChangeSource `json:"configuration,omitempty"`
	// RuleType carries the previous rule type, when changed.
	RuleType *RepositoryRulesetChangeSource `json:"rule_type,omitempty"`
	// Pattern carries the previous pattern, when changed.
	Pattern *RepositoryRulesetChangeSource `json:"pattern,omitempty"`
}
// RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved.

View file

@ -1,6 +1,6 @@
// Copyright 2013 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by BSD-style
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github

View file

@ -19,6 +19,7 @@ import (
"net/http"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
@ -28,7 +29,7 @@ import (
)
const (
Version = "v68.0.0"
Version = "v69.2.0"
defaultAPIVersion = "2022-11-28"
defaultBaseURL = "https://api.github.com/"
@ -38,7 +39,9 @@ const (
headerAPIVersion = "X-Github-Api-Version"
headerRateLimit = "X-Ratelimit-Limit"
headerRateRemaining = "X-Ratelimit-Remaining"
headerRateUsed = "X-Ratelimit-Used"
headerRateReset = "X-Ratelimit-Reset"
headerRateResource = "X-Ratelimit-Resource"
headerOTP = "X-Github-Otp"
headerRetryAfter = "Retry-After"
@ -155,8 +158,9 @@ var errNonNilContext = errors.New("context must be non-nil")
// A Client manages communication with the GitHub API.
type Client struct {
clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
client *http.Client // HTTP client used to communicate with the API.
clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
client *http.Client // HTTP client used to communicate with the API.
clientIgnoreRedirects *http.Client // HTTP client used to communicate with the API on endpoints where we don't want to follow redirects.
// Base URL for API requests. Defaults to the public GitHub API, but can be
// set to a domain endpoint to use with GitHub Enterprise. BaseURL should
@ -173,6 +177,13 @@ type Client struct {
rateLimits [Categories]Rate // Rate limits for the client as determined by the most recent API calls.
secondaryRateLimitReset time.Time // Secondary rate limit reset for the client as determined by the most recent API calls.
// If specified, Client will block requests for at most this duration in case of reaching a secondary
// rate limit
MaxSecondaryRateLimitRetryAfterDuration time.Duration
// Whether to respect rate limit headers on endpoints that return 302 redirections to artifacts
RateLimitRedirectionalEndpoints bool
common service // Reuse a single struct instead of allocating one for each service on the heap.
// Services used for talking to different parts of the GitHub API.
@ -394,6 +405,14 @@ func (c *Client) initialize() {
if c.client == nil {
c.client = &http.Client{}
}
// Copy the main http client into the IgnoreRedirects one, overriding the `CheckRedirect` func
c.clientIgnoreRedirects = &http.Client{}
c.clientIgnoreRedirects.Transport = c.client.Transport
c.clientIgnoreRedirects.Timeout = c.client.Timeout
c.clientIgnoreRedirects.Jar = c.client.Jar
c.clientIgnoreRedirects.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
if c.BaseURL == nil {
c.BaseURL, _ = url.Parse(defaultBaseURL)
}
@ -448,11 +467,12 @@ func (c *Client) copy() *Client {
c.clientMu.Lock()
// can't use *c here because that would copy mutexes by value.
clone := Client{
client: &http.Client{},
UserAgent: c.UserAgent,
BaseURL: c.BaseURL,
UploadURL: c.UploadURL,
secondaryRateLimitReset: c.secondaryRateLimitReset,
client: &http.Client{},
UserAgent: c.UserAgent,
BaseURL: c.BaseURL,
UploadURL: c.UploadURL,
RateLimitRedirectionalEndpoints: c.RateLimitRedirectionalEndpoints,
secondaryRateLimitReset: c.secondaryRateLimitReset,
}
c.clientMu.Unlock()
if c.client != nil {
@ -506,7 +526,7 @@ func WithVersion(version string) RequestOption {
// request body.
func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.BaseURL.Path, "/") {
return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL)
return nil, fmt.Errorf("baseURL must have a trailing slash, but %q does not", c.BaseURL)
}
u, err := c.BaseURL.Parse(urlStr)
@ -552,7 +572,7 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}, opts ...Req
// Body is sent with Content-Type: application/x-www-form-urlencoded.
func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.BaseURL.Path, "/") {
return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL)
return nil, fmt.Errorf("baseURL must have a trailing slash, but %q does not", c.BaseURL)
}
u, err := c.BaseURL.Parse(urlStr)
@ -584,7 +604,7 @@ func (c *Client) NewFormRequest(urlStr string, body io.Reader, opts ...RequestOp
// Relative URLs should always be specified without a preceding slash.
func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string, opts ...RequestOption) (*http.Request, error) {
if !strings.HasSuffix(c.UploadURL.Path, "/") {
return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL)
return nil, fmt.Errorf("uploadURL must have a trailing slash, but %q does not", c.UploadURL)
}
u, err := c.UploadURL.Parse(urlStr)
if err != nil {
@ -750,11 +770,17 @@ func parseRate(r *http.Response) Rate {
if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
rate.Remaining, _ = strconv.Atoi(remaining)
}
if used := r.Header.Get(headerRateUsed); used != "" {
rate.Used, _ = strconv.Atoi(used)
}
if reset := r.Header.Get(headerRateReset); reset != "" {
if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
rate.Reset = Timestamp{time.Unix(v, 0)}
}
}
if resource := r.Header.Get(headerRateResource); resource != "" {
rate.Resource = resource
}
return rate
}
@ -801,19 +827,23 @@ func parseTokenExpiration(r *http.Response) Timestamp {
type requestContext uint8
const (
bypassRateLimitCheck requestContext = iota
// BypassRateLimitCheck prevents a pre-emptive check for exceeded primary rate limits
// Specify this by providing a context with this key, e.g.
// context.WithValue(context.Background(), github.BypassRateLimitCheck, true)
BypassRateLimitCheck requestContext = iota
SleepUntilPrimaryRateLimitResetWhenRateLimited
)
// BareDo sends an API request and lets you handle the api response. If an error
// or API Error occurs, the error will contain more information. Otherwise you
// are supposed to read and close the response's Body. If rate limit is exceeded
// and reset time is in the future, BareDo returns *RateLimitError immediately
// without making a network API call.
// bareDo sends an API request using `caller` http.Client passed in the parameters
// and lets you handle the api response. If an error or API Error occurs, the error
// will contain more information. Otherwise you are supposed to read and close the
// response's Body. If rate limit is exceeded and reset time is in the future,
// bareDo returns *RateLimitError immediately without making a network API call.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, error) {
func (c *Client) bareDo(ctx context.Context, caller *http.Client, req *http.Request) (*Response, error) {
if ctx == nil {
return nil, errNonNilContext
}
@ -822,7 +852,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
rateLimitCategory := GetRateLimitCategory(req.Method, req.URL.Path)
if bypass := ctx.Value(bypassRateLimitCheck); bypass == nil {
if bypass := ctx.Value(BypassRateLimitCheck); bypass == nil {
// If we've hit rate limit, don't make further requests before Reset time.
if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil {
return &Response{
@ -838,7 +868,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
}
}
resp, err := c.client.Do(req)
resp, err := caller.Do(req)
var response *Response
if resp != nil {
response = newResponse(resp)
@ -897,12 +927,16 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
return response, err
}
// retry the request once when the rate limit has reset
return c.BareDo(context.WithValue(req.Context(), SleepUntilPrimaryRateLimitResetWhenRateLimited, nil), req)
return c.bareDo(context.WithValue(req.Context(), SleepUntilPrimaryRateLimitResetWhenRateLimited, nil), caller, req)
}
// Update the secondary rate limit if we hit it.
rerr, ok := err.(*AbuseRateLimitError)
if ok && rerr.RetryAfter != nil {
// if a max duration is specified, make sure that we are waiting at most this duration
if c.MaxSecondaryRateLimitRetryAfterDuration > 0 && rerr.GetRetryAfter() > c.MaxSecondaryRateLimitRetryAfterDuration {
rerr.RetryAfter = &c.MaxSecondaryRateLimitRetryAfterDuration
}
c.rateMu.Lock()
c.secondaryRateLimitReset = time.Now().Add(*rerr.RetryAfter)
c.rateMu.Unlock()
@ -911,6 +945,72 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro
return response, err
}
// BareDo sends an API request and lets you handle the api response. If an error
// or API Error occurs, the error will contain more information. Otherwise you
// are supposed to read and close the response's Body. If rate limit is exceeded
// and reset time is in the future, BareDo returns *RateLimitError immediately
// without making a network API call.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
//
// BareDo delegates to bareDo with the client's default HTTP client, which
// follows redirects; see bareDoIgnoreRedirects for the non-following variant.
func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, error) {
	return c.bareDo(ctx, c.client, req)
}
// bareDoIgnoreRedirects has the exact same behavior as BareDo but stops at the first
// redirection code returned by the API. If a redirection is returned by the api, bareDoIgnoreRedirects
// returns a *RedirectionError.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
//
// It uses clientIgnoreRedirects, whose CheckRedirect returns
// http.ErrUseLastResponse so redirect responses surface to the caller.
func (c *Client) bareDoIgnoreRedirects(ctx context.Context, req *http.Request) (*Response, error) {
	return c.bareDo(ctx, c.clientIgnoreRedirects, req)
}
var errInvalidLocation = errors.New("invalid or empty Location header in redirection response")

// bareDoUntilFound has the exact same behavior as BareDo but only follows 301s, up to maxRedirects times. If it receives
// a 302, it will parse the Location header into a *url.URL and return that.
// This is useful for endpoints that return a 302 in successful cases but still might return 301s for
// permanent redirections.
//
// The provided ctx must be non-nil, if it is nil an error is returned. If it is
// canceled or times out, ctx.Err() will be returned.
func (c *Client) bareDoUntilFound(ctx context.Context, req *http.Request, maxRedirects int) (*url.URL, *Response, error) {
	response, err := c.bareDoIgnoreRedirects(ctx, req)

	rerr, isRedirect := err.(*RedirectionError)
	if !isRedirect {
		// No redirection happened (success or some other failure); pass the
		// response and any error through unchanged.
		return nil, response, err
	}

	switch {
	case rerr.StatusCode == http.StatusFound:
		// 302 is the successful terminal case: resolve a possibly relative
		// Location against the base URL and hand it back to the caller.
		if rerr.Location == nil {
			return nil, nil, errInvalidLocation
		}
		return c.BaseURL.ResolveReference(rerr.Location), response, nil

	case rerr.StatusCode == http.StatusMovedPermanently && maxRedirects > 0:
		// Permanent redirect: retry against the new location, consuming one
		// redirect from the remaining budget.
		if rerr.Location == nil {
			return nil, nil, errInvalidLocation
		}
		redirected := req.Clone(ctx)
		redirected.URL = c.BaseURL.ResolveReference(rerr.Location)
		return c.bareDoUntilFound(ctx, redirected, maxRedirects-1)

	case rerr.StatusCode == http.StatusMovedPermanently:
		// Budget exhausted while still being redirected.
		return nil, response, fmt.Errorf("reached the maximum amount of redirections: %w", err)

	default:
		// Any other redirect status is unexpected for these endpoints.
		return nil, response, fmt.Errorf("unexpected redirection response: %w", err)
	}
}
// Do sends an API request and returns the API response. The API response is
// JSON decoded and stored in the value pointed to by v, or returned as an
// error if an API error has occurred. If v implements the io.Writer interface,
@ -1034,7 +1134,8 @@ GitHub API docs: https://docs.github.com/rest/#client-errors
type ErrorResponse struct {
Response *http.Response `json:"-"` // HTTP response that caused this error
Message string `json:"message"` // error message
Errors []Error `json:"errors"` // more detail on individual errors
//nolint:sliceofpointers
Errors []Error `json:"errors"` // more detail on individual errors
// Block is only populated on certain types of errors such as code 451.
Block *ErrorBlock `json:"block,omitempty"`
// Most errors will also include a documentation_url field pointing
@ -1196,6 +1297,40 @@ func (r *AbuseRateLimitError) Is(target error) bool {
compareHTTPResponse(r.Response, v.Response)
}
// RedirectionError represents a response that returned a redirect status code:
//
//	301 (Moved Permanently)
//	302 (Found)
//	303 (See Other)
//	307 (Temporary Redirect)
//	308 (Permanent Redirect)
//
// If there was a valid Location header included, it will be parsed to a URL. You should use
// `BaseURL.ResolveReference()` to enrich it with the correct hostname where needed.
type RedirectionError struct {
	Response *http.Response // HTTP response that caused this error
	// StatusCode is the redirect status code (301, 302, 303, 307 or 308).
	StatusCode int
	Location *url.URL // location header of the redirection if present
}
// Error implements the error interface, rendering the request method, the
// sanitized request URL, the redirect status code and the sanitized Location.
func (r *RedirectionError) Error() string {
	method := r.Response.Request.Method
	requested := sanitizeURL(r.Response.Request.URL)
	target := sanitizeURL(r.Location)
	return fmt.Sprintf("%v %v: %d location %v", method, requested, r.StatusCode, target)
}
// Is returns whether the provided error equals this error.
func (r *RedirectionError) Is(target error) bool {
	other, ok := target.(*RedirectionError)
	if !ok {
		return false
	}
	if r.StatusCode != other.StatusCode {
		return false
	}
	// Same pointer covers the both-nil case as well as identical *url.URL values.
	if r.Location == other.Location {
		return true
	}
	// Otherwise both must be non-nil and render to the same URL string.
	return r.Location != nil && other.Location != nil && r.Location.String() == other.Location.String()
}
// sanitizeURL redacts the client_secret parameter from the URL which may be
// exposed to the user.
func sanitizeURL(uri *url.URL) *url.URL {
@ -1260,7 +1395,8 @@ func (e *Error) UnmarshalJSON(data []byte) error {
//
// The error type will be *RateLimitError for rate limit exceeded errors,
// *AcceptedError for 202 Accepted status codes,
// and *TwoFactorAuthError for two-factor authentication errors.
// *TwoFactorAuthError for two-factor authentication errors,
// and *RedirectionError for redirect status codes (only happens when ignoring redirections).
func CheckResponse(r *http.Response) error {
if r.StatusCode == http.StatusAccepted {
return &AcceptedError{}
@ -1302,6 +1438,25 @@ func CheckResponse(r *http.Response) error {
abuseRateLimitError.RetryAfter = retryAfter
}
return abuseRateLimitError
// Check that the status code is a redirection and return a sentinel error that can be used to handle special cases
// where 302 is considered a successful result.
// This should never happen with the default `CheckRedirect`, because it would return a `url.Error` that should be handled upstream.
case r.StatusCode == http.StatusMovedPermanently ||
r.StatusCode == http.StatusFound ||
r.StatusCode == http.StatusSeeOther ||
r.StatusCode == http.StatusTemporaryRedirect ||
r.StatusCode == http.StatusPermanentRedirect:
locationStr := r.Header.Get("Location")
var location *url.URL
if locationStr != "" {
location, _ = url.Parse(locationStr)
}
return &RedirectionError{
Response: errorResponse.Response,
StatusCode: r.StatusCode,
Location: location,
}
default:
return errorResponse
}
@ -1616,3 +1771,18 @@ type roundTripperFunc func(*http.Request) (*http.Response, error)
func (fn roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return fn(r)
}
var runIDFromURLRE = regexp.MustCompile(`^repos/.*/actions/runs/(\d+)/deployment_protection_rule$`)

// GetRunID is a helper function used to extract the workflow RunID from the
// *DeploymentProtectionRuleEvent.DeploymentCallbackURL.
//
// It returns -1 and a non-nil error when the callback URL is absent or does
// not match the "repos/.../actions/runs/{run_id}/deployment_protection_rule" shape.
func (e *DeploymentProtectionRuleEvent) GetRunID() (int64, error) {
	// Guard against a missing callback URL so we don't dereference a nil pointer.
	if e == nil || e.DeploymentCallbackURL == nil {
		return -1, errors.New("no match")
	}
	match := runIDFromURLRE.FindStringSubmatch(*e.DeploymentCallbackURL)
	if len(match) != 2 {
		return -1, errors.New("no match")
	}
	runID, err := strconv.ParseInt(match[1], 10, 64)
	if err != nil {
		return -1, err
	}
	return runID, nil
}

Some files were not shown because too many files have changed in this diff Show more