Update dependencies

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
This commit is contained in:
Gabriel Adrian Samfira 2026-02-07 22:13:06 +02:00 committed by Gabriel Adrian Samfira
parent def4b4aaf1
commit 04c370e9f3
57 changed files with 18874 additions and 1758 deletions

28
go.mod
View file

@ -7,12 +7,12 @@ require (
github.com/bradleyfalzon/ghinstallation/v2 v2.17.0
github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c
github.com/felixge/httpsnoop v1.0.4
github.com/gdamore/tcell/v2 v2.13.5
github.com/gdamore/tcell/v2 v2.13.8
github.com/go-openapi/errors v0.22.6
github.com/go-openapi/runtime v0.29.2
github.com/go-openapi/strfmt v0.25.0
github.com/go-openapi/swag v0.25.4
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/golang-jwt/jwt/v5 v5.3.1
github.com/google/go-github/v72 v72.0.0
github.com/google/uuid v1.6.0
github.com/gorilla/handlers v1.5.2
@ -27,11 +27,11 @@ require (
github.com/rivo/tview v0.42.0
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.46.0
golang.org/x/mod v0.31.0
golang.org/x/crypto v0.47.0
golang.org/x/mod v0.32.0
golang.org/x/oauth2 v0.34.0
golang.org/x/sync v0.19.0
golang.org/x/term v0.38.0
golang.org/x/term v0.39.0
gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gorm.io/datatypes v1.2.7
@ -48,7 +48,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chzyer/readline v1.5.1 // indirect
github.com/clipperhouse/stringish v0.1.1 // indirect
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
github.com/clipperhouse/uax29/v2 v2.5.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gdamore/encoding v1.0.1 // indirect
github.com/go-logr/logr v1.4.3 // indirect
@ -71,7 +71,7 @@ require (
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/go-openapi/validate v0.25.1 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/go-github/v75 v75.0.0 // indirect
github.com/google/go-querystring v1.2.0 // indirect
@ -93,16 +93,16 @@ require (
github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.3 // indirect
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
go.mongodb.org/mongo-driver v1.17.6 // indirect
go.mongodb.org/mongo-driver v1.17.9 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.opentelemetry.io/otel v1.40.0 // indirect
go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/trace v1.40.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/sys v0.40.0 // indirect
golang.org/x/text v0.33.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

56
go.sum
View file

@ -19,8 +19,8 @@ github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs=
github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA=
github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4=
github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U=
github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g=
github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c h1:IaIJoyugbSAYRHkiVJaBpibFftsQAi/mle7k11Ze94g=
github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c/go.mod h1:2O51WbcfqRx5fDHyyJgIFq7KdTZZnefsM+aoOchyleU=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@ -33,8 +33,8 @@ github.com/gabriel-samfira/go-sqlite3 v0.0.0-20251005121134-bc61ecf9b4c7 h1:+r9O
github.com/gabriel-samfira/go-sqlite3 v0.0.0-20251005121134-bc61ecf9b4c7/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw=
github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo=
github.com/gdamore/tcell/v2 v2.13.5 h1:YvWYCSr6gr2Ovs84dXbZLjDuOfQchhj8buOEqY52rpA=
github.com/gdamore/tcell/v2 v2.13.5/go.mod h1:+Wfe208WDdB7INEtCsNrAN6O2m+wsTPk1RAovjaILlo=
github.com/gdamore/tcell/v2 v2.13.8 h1:Mys/Kl5wfC/GcC5Cx4C2BIQH9dbnhnkPgS9/wF3RlfU=
github.com/gdamore/tcell/v2 v2.13.8/go.mod h1:+Wfe208WDdB7INEtCsNrAN6O2m+wsTPk1RAovjaILlo=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@ -90,12 +90,12 @@ github.com/go-openapi/validate v0.25.1 h1:sSACUI6Jcnbo5IWqbYHgjibrhhmt3vR6lCzKZn
github.com/go-openapi/validate v0.25.1/go.mod h1:RMVyVFYte0gbSTaZ0N4KmTn6u/kClvAFp+mAVfS/DQc=
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.5.0 h1:vM5IJoUAy3d7zRSVtIwQgBj7BiWtMPfmPEgAXnvj1Ro=
github.com/go-viper/mapstructure/v2 v2.5.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA=
github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A=
@ -194,18 +194,18 @@ github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss=
go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU=
go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
@ -214,18 +214,18 @@ go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -242,20 +242,20 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=

View file

@ -1,4 +1,4 @@
An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode version 15.0.0.
An implementation of grapheme cluster boundaries from [Unicode text segmentation](https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries) (UAX 29), for Unicode 17.
[![Documentation](https://pkg.go.dev/badge/github.com/clipperhouse/uax29/v2/graphemes.svg)](https://pkg.go.dev/github.com/clipperhouse/uax29/v2/graphemes)
![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg)
@ -26,7 +26,7 @@ _A grapheme is a “single visible character”, which might be a simple as a si
## Conformance
We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-26.html#Tests29).
We use the Unicode [test suite](https://unicode.org/reports/tr41/tr41-36.html#Tests29).
![Tests](https://github.com/clipperhouse/uax29/actions/workflows/gotest.yml/badge.svg)
![Fuzz](https://github.com/clipperhouse/uax29/actions/workflows/gofuzz.yml/badge.svg)
@ -83,8 +83,8 @@ goos: darwin
goarch: arm64
pkg: github.com/clipperhouse/uax29/graphemes/comparative
cpu: Apple M2
BenchmarkGraphemes/clipperhouse/uax29-8 173805 ns/op 201.16 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemes/rivo/uniseg-8 2045128 ns/op 17.10 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemes/clipperhouse/uax29-8 171895 ns/op 203.39 MB/s 0 B/op 0 allocs/op
BenchmarkGraphemes/rivo/uniseg-8 1980475 ns/op 17.65 MB/s 0 B/op 0 allocs/op
```
### Invalid inputs

View file

@ -1,12 +1,30 @@
package graphemes
import (
"github.com/clipperhouse/stringish"
"github.com/clipperhouse/uax29/v2/internal/iterators"
)
// FromString returns an iterator for the grapheme clusters in the input string.
// Iterate while Next() is true, and access the grapheme via Value().
func FromString(s string) *Iterator[string] {
return &Iterator[string]{
split: splitFuncString,
data: s,
}
}
type Iterator[T stringish.Interface] struct {
*iterators.Iterator[T]
// FromBytes returns an iterator for the grapheme clusters in the input bytes.
// Iterate while Next() is true, and access the grapheme via Value().
func FromBytes(b []byte) *Iterator[[]byte] {
return &Iterator[[]byte]{
split: splitFuncBytes,
data: b,
}
}
// Iterator is a generic iterator for grapheme clusters in strings or byte slices,
// with an ASCII hot path optimization.
type Iterator[T ~string | ~[]byte] struct {
split func(T, bool) (int, T, error)
data T
pos int
start int
}
var (
@ -14,18 +32,79 @@ var (
splitFuncBytes = splitFunc[[]byte]
)
// FromString returns an iterator for the grapheme clusters in the input string.
// Iterate while Next() is true, and access the grapheme via Value().
func FromString(s string) Iterator[string] {
return Iterator[string]{
iterators.New(splitFuncString, s),
// Next advances the iterator to the next grapheme cluster.
// Returns false when there are no more grapheme clusters.
// It panics only if the split func violates its contract (returns an
// error, a non-positive advance, or advances past the end of the data).
func (iter *Iterator[T]) Next() bool {
if iter.pos >= len(iter.data) {
return false
}
iter.start = iter.pos
// ASCII hot path: if current byte is printable ASCII and
// next byte is also ASCII (or end of data), return single byte.
// The 0x20..0x7E range excludes control characters, so CR LF —
// a single cluster under the segmentation rules — never takes
// this shortcut.
b := iter.data[iter.pos]
if b >= 0x20 && b < 0x7F {
// If next byte is non-ASCII, it could be a combining mark
// (e.g. a combining accent or ZWJ), so defer to the split func.
if iter.pos+1 >= len(iter.data) || iter.data[iter.pos+1] < 0x80 {
iter.pos++
return true
}
}
// Fall back to actual grapheme parsing
remaining := iter.data[iter.pos:]
advance, _, err := iter.split(remaining, true)
if err != nil {
panic(err)
}
if advance <= 0 {
// Guard against an infinite loop: a zero advance would leave
// pos unchanged and Next would never terminate.
panic("splitFunc returned a zero or negative advance")
}
iter.pos += advance
if iter.pos > len(iter.data) {
panic("splitFunc advanced beyond end of data")
}
return true
}
// FromBytes returns an iterator for the grapheme clusters in the input bytes.
// Iterate while Next() is true, and access the grapheme via Value().
func FromBytes(b []byte) Iterator[[]byte] {
return Iterator[[]byte]{
iterators.New(splitFuncBytes, b),
}
// Value returns the current grapheme cluster, i.e. the span found by
// the most recent call to Next. The result is a view into the
// iterator's data, not a copy.
func (iter *Iterator[T]) Value() T {
	return iter.data[iter.start:iter.pos]
}

// Start reports the byte offset at which the current grapheme begins,
// relative to the original data.
func (iter *Iterator[T]) Start() int {
	return iter.start
}

// End reports the byte offset just past the current grapheme,
// relative to the original data.
func (iter *Iterator[T]) End() int {
	return iter.pos
}

// Reset rewinds the iterator to the beginning of its data.
func (iter *Iterator[T]) Reset() {
	iter.pos = 0
	iter.start = 0
}

// SetText replaces the data the iterator operates on and resets all
// iteration state.
func (iter *Iterator[T]) SetText(data T) {
	iter.data = data
	iter.pos = 0
	iter.start = 0
}

// First returns the first grapheme cluster in the data without
// advancing the iterator.
func (iter *Iterator[T]) First() T {
	if len(iter.data) == 0 {
		return iter.data
	}
	// Run Next on a scratch iterator so this iterator's position is
	// untouched, while still benefiting from Next's ASCII fast path.
	scratch := Iterator[T]{split: iter.split, data: iter.data}
	scratch.Next()
	return scratch.Value()
}

View file

@ -13,6 +13,16 @@ func (lookup property) is(properties property) bool {
const _Ignore = _Extend
// incbState tracks state for GB9c rule (Indic conjunct clusters)
// Pattern: Consonant (Extend|Linker)* Linker (Extend|Linker)* × Consonant
type incbState int
const (
incbNone incbState = iota // initial/reset
incbConsonant // seen Consonant, awaiting Linker
incbLinker // seen Consonant and Linker (conjunct ready)
)
// SplitFunc is a bufio.SplitFunc implementation of Unicode grapheme cluster segmentation, for use with bufio.Scanner.
//
// See https://unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries.
@ -30,6 +40,9 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
var lastLastExIgnore property = 0 // "last one before that"
var regionalIndicatorCount int
// GB9c state: tracking Indic conjunct clusters
var incb incbState
// Rules are usually of the form Cat1 × Cat2; "current" refers to the first property
// to the right of the ×, from which we look back or forward
@ -76,6 +89,23 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
lastExIgnore = last
}
// Update GB9c state based on what we just advanced past
if last.is(_InCBConsonant | _InCBLinker | _InCBExtend) {
switch {
case last.is(_InCBConsonant):
if incb != incbLinker {
incb = incbConsonant
}
case last.is(_InCBLinker):
if incb >= incbConsonant {
incb = incbLinker
}
// case last.is(_InCBExtend): stay in current state
}
} else {
incb = incbNone
}
current, w = lookup(data[pos:])
if w == 0 {
if atEOF {
@ -141,11 +171,14 @@ func splitFunc[T stringish.Interface](data T, atEOF bool) (advance int, token T,
}
// https://unicode.org/reports/tr29/#GB9c
// TODO(clipperhouse):
// It appears to be added in Unicode 15.1.0:
// https://unicode.org/versions/Unicode15.1.0/#Migration
// This package currently supports Unicode 15.0.0, so
// out of scope for now
// Do not break within certain combinations with Indic_Conjunct_Break (InCB)=Linker.
if incb == incbLinker && current.is(_InCBConsonant) {
// After matching the pattern, reset state to start tracking a new pattern
// The current Consonant becomes the start of the new pattern
incb = incbConsonant
pos += w
continue
}
// https://unicode.org/reports/tr29/#GB11
if current.is(_ExtendedPictographic) && last.is(_ZWJ) && lastLastExIgnore.is(_ExtendedPictographic) {

File diff suppressed because it is too large Load diff

View file

@ -1,100 +0,0 @@
// Package iterators provides a generic segmentation iterator shared by
// the uax29 sub-packages. It repeatedly applies a bufio.Scanner-style
// split function to a string or []byte.
package iterators
import "github.com/clipperhouse/stringish"
// SplitFunc is the signature of a bufio.SplitFunc-style segmentation
// function: given data and an at-EOF flag, it returns how many bytes
// to advance, the token itself, and any error.
type SplitFunc[T stringish.Interface] func(T, bool) (int, T, error)
// Iterator is a generic iterator for words that are either []byte or string.
// Iterate while Next() is true, and access the word via Value().
type Iterator[T stringish.Interface] struct {
split SplitFunc[T]
data T
start int
pos int
}
// New creates a new Iterator for the given data and SplitFunc.
func New[T stringish.Interface](split SplitFunc[T], data T) *Iterator[T] {
return &Iterator[T]{
split: split,
data: data,
}
}
// SetText sets the text for the iterator to operate on, and resets all state.
func (iter *Iterator[T]) SetText(data T) {
iter.data = data
iter.start = 0
iter.pos = 0
}
// Split sets the SplitFunc for the Iterator.
func (iter *Iterator[T]) Split(split SplitFunc[T]) {
iter.split = split
}
// Next advances the iterator to the next token. It returns false when there
// are no remaining tokens or an error occurred.
// It panics if the SplitFunc violates its contract (an error, a
// non-positive advance, or advancing past the end of the data).
func (iter *Iterator[T]) Next() bool {
if iter.pos == len(iter.data) {
return false
}
// Defensive check; the post-advance check below should make this
// unreachable in practice.
if iter.pos > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
iter.start = iter.pos
advance, _, err := iter.split(iter.data[iter.pos:], true)
if err != nil {
panic(err)
}
if advance <= 0 {
// A zero advance would loop forever on the same position.
panic("SplitFunc returned a zero or negative advance")
}
iter.pos += advance
if iter.pos > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
return true
}
// Value returns the current token.
// The result is a subslice (a view) of the original data, not a copy.
func (iter *Iterator[T]) Value() T {
return iter.data[iter.start:iter.pos]
}
// Start returns the byte position of the current token in the original data.
func (iter *Iterator[T]) Start() int {
return iter.start
}
// End returns the byte position after the current token in the original data.
func (iter *Iterator[T]) End() int {
return iter.pos
}
// Reset resets the iterator to the beginning of the data.
func (iter *Iterator[T]) Reset() {
iter.start = 0
iter.pos = 0
}
// First returns the first token in the data without advancing the
// iterator. It applies the same SplitFunc contract checks as Next.
func (iter *Iterator[T]) First() T {
if len(iter.data) == 0 {
return iter.data
}
advance, _, err := iter.split(iter.data, true)
if err != nil {
panic(err)
}
if advance <= 0 {
panic("SplitFunc returned a zero or negative advance")
}
if advance > len(iter.data) {
panic("SplitFunc advanced beyond the end of the data")
}
return iter.data[:advance]
}

View file

@ -84,6 +84,7 @@ type inputProcessor struct {
evch chan<- Event
rows int // used for clipping mouse coordinates
cols int // used for clipping mouse coordinates
surrogate rune
nested *inputProcessor
}
@ -135,8 +136,9 @@ type csiParamMode struct {
}
type keyMap struct {
Key Key
Mod ModMask
Key Key
Mod ModMask
Rune rune
}
var csiAllKeys = map[csiParamMode]keyMap{
@ -275,41 +277,70 @@ var csiAllKeys = map[csiParamMode]keyMap{
}
// keys reported using Kitty csi-u protocol
var csiUKeys = map[int]Key{
27: KeyESC,
9: KeyTAB,
13: KeyEnter,
127: KeyBS,
57358: KeyCapsLock,
57359: KeyScrollLock,
57360: KeyNumLock,
57361: KeyPrint,
57362: KeyPause,
57363: KeyMenu,
57376: KeyF13,
57377: KeyF14,
57378: KeyF15,
57379: KeyF16,
57380: KeyF17,
57381: KeyF18,
57382: KeyF19,
57383: KeyF20,
57384: KeyF21,
57385: KeyF22,
57386: KeyF23,
57387: KeyF24,
57388: KeyF25,
57389: KeyF26,
57390: KeyF27,
57391: KeyF28,
57392: KeyF29,
57393: KeyF30,
57394: KeyF31,
57395: KeyF32,
57396: KeyF33,
57397: KeyF34,
57398: KeyF35,
// TODO: KP keys
// csiUKeys maps CSI-u (Kitty keyboard protocol) unicode key codes to
// tcell keys; keypad entries also carry the equivalent rune so they
// can be reported as KeyRune events.
var csiUKeys = map[int]keyMap{
27: {Key: KeyESC},
9: {Key: KeyTAB},
13: {Key: KeyEnter},
127: {Key: KeyBS},
57358: {Key: KeyCapsLock},
57359: {Key: KeyScrollLock},
57360: {Key: KeyNumLock},
57361: {Key: KeyPrint},
57362: {Key: KeyPause},
57363: {Key: KeyMenu},
57376: {Key: KeyF13},
57377: {Key: KeyF14},
57378: {Key: KeyF15},
57379: {Key: KeyF16},
57380: {Key: KeyF17},
57381: {Key: KeyF18},
57382: {Key: KeyF19},
57383: {Key: KeyF20},
57384: {Key: KeyF21},
57385: {Key: KeyF22},
57386: {Key: KeyF23},
57387: {Key: KeyF24},
57388: {Key: KeyF25},
57389: {Key: KeyF26},
57390: {Key: KeyF27},
57391: {Key: KeyF28},
57392: {Key: KeyF29},
57393: {Key: KeyF30},
57394: {Key: KeyF31},
57395: {Key: KeyF32},
57396: {Key: KeyF33},
57397: {Key: KeyF34},
57398: {Key: KeyF35},
57399: {Key: KeyRune, Rune: '0'}, // KP 0
57400: {Key: KeyRune, Rune: '1'}, // KP 1
57401: {Key: KeyRune, Rune: '2'}, // KP 2
57402: {Key: KeyRune, Rune: '3'}, // KP 3
57403: {Key: KeyRune, Rune: '4'}, // KP 4
57404: {Key: KeyRune, Rune: '5'}, // KP 5
57405: {Key: KeyRune, Rune: '6'}, // KP 6
57406: {Key: KeyRune, Rune: '7'}, // KP 7
57407: {Key: KeyRune, Rune: '8'}, // KP 8
57408: {Key: KeyRune, Rune: '9'}, // KP 9
57409: {Key: KeyRune, Rune: '.'}, // KP_DECIMAL
57410: {Key: KeyRune, Rune: '/'}, // KP_DIVIDE
57411: {Key: KeyRune, Rune: '*'}, // KP_MULTIPLY
57412: {Key: KeyRune, Rune: '-'}, // KP_SUBTRACT
57413: {Key: KeyRune, Rune: '+'}, // KP_ADD
57414: {Key: KeyEnter}, // KP_ENTER
57415: {Key: KeyRune, Rune: '='}, // KP_EQUAL
57416: {Key: KeyClear}, // KP_SEPARATOR
57417: {Key: KeyLeft}, // KP_LEFT
57418: {Key: KeyRight}, // KP_RIGHT
57419: {Key: KeyUp}, // KP_UP
57420: {Key: KeyDown}, // KP_DOWN
57421: {Key: KeyPgUp}, // KP_PG_UP
57422: {Key: KeyPgDn}, // KP_PG_DN
57423: {Key: KeyHome}, // KP_HOME
57424: {Key: KeyEnd}, // KP_END
57425: {Key: KeyInsert}, // KP_INSERT
57426: {Key: KeyDelete}, // KP_DELETE
// 57427: {Key: KeyBegin}, // KP_BEGIN
// TODO: Media keys
}
@ -318,8 +349,8 @@ var winKeys = map[int]Key{
0x03: KeyCancel, // vkCancel
0x08: KeyBackspace, // vkBackspace
0x09: KeyTab, // vkTab
0x0c: KeyClear, // vClear
0x0d: KeyEnter, // vkReturn
0x12: KeyClear, // vClear
0x13: KeyPause, // vkPause
0x1b: KeyEscape, // vkEscape
0x21: KeyPgUp, // vkPrior
@ -701,11 +732,22 @@ func (ip *inputProcessor) handleWinKey(P []int) {
} else if chr < ' ' && P[0] >= 0x41 && P[0] <= 0x5a {
key = Key(P[0])
chr = 0
} else if key == 0x11 || key == 0x13 || key == 0x14 {
} else if chr >= 0xD800 && chr <= 0xDBFF {
// high surrogate pair
ip.surrogate = chr
return
} else if chr >= 0xDC00 && chr <= 0xDFFF {
// low surrogate pair
chr = utf16.DecodeRune(ip.surrogate, chr)
} else if P[0] == 0x10 || P[0] == 0x11 || P[0] == 0x12 || P[0] == 0x14 {
// lone modifiers
ip.surrogate = 0
return
}
ip.surrogate = 0
// Modifiers
if P[4]&0x010 != 0 {
mod |= ModShift
@ -791,8 +833,8 @@ func (ip *inputProcessor) handleCsi(mode rune, params []byte, intermediate []byt
key := KeyRune
chr := rune(0)
if k1, ok := csiUKeys[P0]; ok {
key = k1
chr = 0
key = k1.Key
chr = k1.Rune
} else {
chr = rune(P0)
}

View file

@ -622,6 +622,14 @@ func (t *tScreen) drawCell(x, y int) int {
width = 1
str = " "
}
if width > 1 && x+width < t.w {
// Clobber over any content in the next cell.
// This fixes a problem with some terminals where overwriting two
// adjacent single cells with a wide rune would leave an image
// of the second cell. This is a workaround for buggy terminals.
t.writeString(" \b\b")
}
t.writeString(str)
t.cx += width
t.cells.SetDirty(x, y, false)
@ -989,9 +997,14 @@ func (t *tScreen) buildAcsMap() {
}
func (t *tScreen) scanInput(buf *bytes.Buffer) {
// The end of the buffer isn't necessarily the end of the input, because
// large inputs are chunked. Set atEOF to false so the UTF-8 validating decoder
// returns ErrShortSrc instead of ErrInvalidUTF8 for incomplete multi-byte codepoints.
const atEOF = false
for buf.Len() > 0 {
utf := make([]byte, min(8, max(buf.Len()*2, 128)))
nOut, nIn, e := t.decoder.Transform(utf, buf.Bytes(), true)
nOut, nIn, e := t.decoder.Transform(utf, buf.Bytes(), atEOF)
_ = buf.Next(nIn)
t.input.ScanUTF8(utf[:nOut])
if e == transform.ErrShortSrc {

View file

@ -1,4 +1,4 @@
// Copyright 2025 The TCell Authors
// Copyright 2026 The TCell Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -72,6 +72,7 @@ type winTty struct {
oomode uint32 // original output mode
oscreen consoleInfo
wg sync.WaitGroup
surrogate rune
sync.Mutex
}
@ -126,7 +127,7 @@ func (w *winTty) Drain() error {
func (w *winTty) getConsoleInput() error {
// cancelFlag comes first as WaitForMultipleObjects returns the lowest index
// in the event that both events are signalled.
// in the event that both events are signaled.
waitObjects := []syscall.Handle{w.cancelFlag, w.in}
// As arrays are contiguous in memory, a pointer to the first object is the
@ -158,19 +159,30 @@ func (w *winTty) getConsoleInput() error {
if rv == 0 {
return er
}
loop:
for i := range nrec {
ir := rec[i]
switch ir.typ {
case keyEvent:
chr := ir.data[10] // we only see ASCII, key down events in VT mode
// because we use win32-input-mode, we will only
// see US-ASCII characters - (Q: will they be
// 16-bit values with possible surrogate pairs?)
select {
case w.buf <- chr:
case <-w.stopQ:
break
// we normally only expect to see ascii, but paste data may come in as UTF-16.
wc := rune(binary.LittleEndian.Uint16(ir.data[10:]))
if wc >= 0xD800 && wc <= 0xDBFF {
// if it was a high surrogate, which happens for pasted UTF-16,
// then save it until we get the low and can decode it.
w.surrogate = wc
continue
} else if wc >= 0xDC00 && wc <= 0xDFFF {
wc = utf16.DecodeRune(w.surrogate, wc)
}
w.surrogate = 0
for _, chr := range []byte(string(wc)) {
// We normally expect only to see ASCII (win32-input-mode),
// but apparently pasted data can arrive in UTF-16 here.
select {
case w.buf <- chr:
case <-w.stopQ:
break loop
}
}
case resizeEvent:

View file

@ -19,3 +19,6 @@ indent_size = 2
[.golangci.yaml]
indent_size = 2
[devenv.yaml]
indent_size = 2

View file

@ -1,4 +1,7 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake . --impure
#!/usr/bin/env bash
export DIRENV_WARN_TIMEOUT=20s
eval "$(devenv direnvrc)"
use devenv

View file

@ -1,6 +1,10 @@
/.devenv/
/.direnv/
/.pre-commit-config.yaml
/bin/
/build/
/var/
# Devenv
.devenv*
devenv.local.nix
devenv.local.yaml
.direnv
.pre-commit-config.yaml

103
vendor/github.com/go-viper/mapstructure/v2/devenv.lock generated vendored Normal file
View file

@ -0,0 +1,103 @@
{
"nodes": {
"devenv": {
"locked": {
"dir": "src/modules",
"lastModified": 1765288076,
"owner": "cachix",
"repo": "devenv",
"rev": "93c055af1e8fcac49251f1b2e1c57f78620ad351",
"type": "github"
},
"original": {
"dir": "src/modules",
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1765121682,
"owner": "edolstra",
"repo": "flake-compat",
"rev": "65f23138d8d09a92e30f1e5c87611b23ef451bf3",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": "flake-compat",
"gitignore": "gitignore",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1765016596,
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "548fc44fca28a5e81c5d6b846e555e6b9c2a5a3c",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1762808025,
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "cb5e3fdca1de58ccbc3ef53de65bd372b48f567c",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1764580874,
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "dcf61356c3ab25f1362b4a4428a6d871e84f1d1d",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs",
"pre-commit-hooks": [
"git-hooks"
]
}
}
},
"root": "root",
"version": 7
}

14
vendor/github.com/go-viper/mapstructure/v2/devenv.nix generated vendored Normal file
View file

@ -0,0 +1,14 @@
{
pkgs,
...
}:
{
languages = {
go.enable = true;
};
packages = with pkgs; [
golangci-lint
];
}

View file

@ -0,0 +1,4 @@
# yaml-language-server: $schema=https://devenv.sh/devenv.schema.json
inputs:
nixpkgs:
url: github:cachix/devenv-nixpkgs/rolling

View file

@ -1,294 +0,0 @@
{
"nodes": {
"cachix": {
"inputs": {
"devenv": [
"devenv"
],
"flake-compat": [
"devenv"
],
"git-hooks": [
"devenv"
],
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1742042642,
"narHash": "sha256-D0gP8srrX0qj+wNYNPdtVJsQuFzIng3q43thnHXQ/es=",
"owner": "cachix",
"repo": "cachix",
"rev": "a624d3eaf4b1d225f918de8543ed739f2f574203",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat",
"git-hooks": "git-hooks",
"nix": "nix",
"nixpkgs": "nixpkgs_3"
},
"locked": {
"lastModified": 1744876578,
"narHash": "sha256-8MTBj2REB8t29sIBLpxbR0+AEGJ7f+RkzZPAGsFd40c=",
"owner": "cachix",
"repo": "devenv",
"rev": "7ff7c351bba20d0615be25ecdcbcf79b57b85fe1",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1743550720,
"narHash": "sha256-hIshGgKZCgWh6AYJpJmRgFdR3WUbkY04o82X05xqQiY=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "c621e8422220273271f52058f618c94e405bb0f5",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": [
"devenv"
],
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1742649964,
"narHash": "sha256-DwOTp7nvfi8mRfuL1escHDXabVXFGT1VlPD1JHrtrco=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "dcf5072734cb576d2b0c59b2ac44f5050b5eac82",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
"devenv"
],
"flake-parts": "flake-parts",
"libgit2": "libgit2",
"nixpkgs": "nixpkgs_2",
"nixpkgs-23-11": [
"devenv"
],
"nixpkgs-regression": [
"devenv"
],
"pre-commit-hooks": [
"devenv"
]
},
"locked": {
"lastModified": 1741798497,
"narHash": "sha256-E3j+3MoY8Y96mG1dUIiLFm2tZmNbRvSiyN7CrSKuAVg=",
"owner": "domenkozar",
"repo": "nix",
"rev": "f3f44b2baaf6c4c6e179de8cbb1cc6db031083cd",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.24",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1743296961,
"narHash": "sha256-b1EdN3cULCqtorQ4QeWgLMrd5ZGOjLSLemfa00heasc=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "e4822aea2a6d1cdd36653c134cacfd64c97ff4fa",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1717432640,
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1733477122,
"narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1744536153,
"narHash": "sha256-awS2zRgF4uTwrOKwwiJcByDzDOdo3Q1rPZbiHQg/N38=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "18dd725c29603f582cf1900e0d25f9f1063dbf11",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts_2",
"nixpkgs": "nixpkgs_4"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,46 +0,0 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
devenv.url = "github:cachix/devenv";
};
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [
"x86_64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
perSystem =
{ pkgs, ... }:
rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
};
packages = with pkgs; [
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
ci = devenv.shells.default;
};
};
};
}

View file

@ -173,6 +173,25 @@
// Public: "I made it through!"
// }
//
// # Custom Decoding with Unmarshaler
//
// Types can implement the Unmarshaler interface to control their own decoding. The interface
// behaves similarly to how UnmarshalJSON does in the standard library. It can be used as an
// alternative or companion to a DecodeHook.
//
// type TrimmedString string
//
// func (t *TrimmedString) UnmarshalMapstructure(input any) error {
// str, ok := input.(string)
// if !ok {
// return fmt.Errorf("expected string, got %T", input)
// }
// *t = TrimmedString(strings.TrimSpace(str))
// return nil
// }
//
// See the Unmarshaler interface documentation for more details.
//
// # Other Configuration
//
// mapstructure is highly configurable. See the DecoderConfig struct
@ -218,6 +237,17 @@ type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, any) (any, error)
// values.
type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (any, error)
// Unmarshaler is the interface implemented by types that can unmarshal
// themselves. UnmarshalMapstructure receives the input data (potentially
// transformed by DecodeHook) and should populate the receiver with the
// decoded values.
//
// The Unmarshaler interface takes precedence over the default decoding
// logic for any type (structs, slices, maps, primitives, etc.).
type Unmarshaler interface {
UnmarshalMapstructure(any) error
}
// DecoderConfig is the configuration that is used to create a new decoder
// and allows customization of various aspects of decoding.
type DecoderConfig struct {
@ -281,6 +311,13 @@ type DecoderConfig struct {
// }
Squash bool
// Deep will map structures in slices instead of copying them
//
// type Parent struct {
// Children []Child `mapstructure:",deep"`
// }
Deep bool
// Metadata is the struct that will contain extra metadata about
// the decoding. If this is nil, then no metadata will be tracked.
Metadata *Metadata
@ -290,9 +327,15 @@ type DecoderConfig struct {
Result any
// The tag name that mapstructure reads for field names. This
// defaults to "mapstructure"
// defaults to "mapstructure". Multiple tag names can be specified
// as a comma-separated list (e.g., "yaml,json"), and the first
// matching non-empty tag will be used.
TagName string
// RootName specifies the name to use for the root element in error messages. For example:
// '<rootName>' has unset fields: <fieldName>
RootName string
// The option of the value in the tag that indicates a field should
// be squashed. This defaults to "squash".
SquashTagOption string
@ -304,11 +347,34 @@ type DecoderConfig struct {
// MatchName is the function used to match the map key to the struct
// field name or tag. Defaults to `strings.EqualFold`. This can be used
// to implement case-sensitive tag values, support snake casing, etc.
//
// MatchName is used as a fallback comparison when the direct key lookup fails.
// See also MapFieldName for transforming field names before lookup.
MatchName func(mapKey, fieldName string) bool
// DecodeNil, if set to true, will cause the DecodeHook (if present) to run
// even if the input is nil. This can be used to provide default values.
DecodeNil bool
// MapFieldName is the function used to convert the struct field name to the map's key name.
//
// This is useful for automatically converting between naming conventions without
// explicitly tagging each field. For example, to convert Go's PascalCase field names
// to snake_case map keys:
//
// MapFieldName: func(s string) string {
// return strcase.ToSnake(s)
// }
//
// When decoding from a map to a struct, the transformed field name is used for
// the initial lookup. If not found, MatchName is used as a fallback comparison.
// Explicit struct tags always take precedence over MapFieldName.
MapFieldName func(string) string
// DisableUnmarshaler, if set to true, disables the use of the Unmarshaler
// interface. Types implementing Unmarshaler will be decoded using the
// standard struct decoding logic instead.
DisableUnmarshaler bool
}
// A Decoder takes a raw interface value and turns it into structured
@ -445,6 +511,12 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
config.MatchName = strings.EqualFold
}
if config.MapFieldName == nil {
config.MapFieldName = func(s string) string {
return s
}
}
result := &Decoder{
config: config,
}
@ -458,7 +530,7 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
// Decode decodes the given raw interface to the target pointer specified
// by the configuration.
func (d *Decoder) Decode(input any) error {
err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
err := d.decode(d.config.RootName, input, reflect.ValueOf(d.config.Result).Elem())
// Retain some of the original behavior when multiple errors occur
var joinedErr interface{ Unwrap() []error }
@ -540,36 +612,50 @@ func (d *Decoder) decode(name string, input any, outVal reflect.Value) error {
var err error
addMetaKey := true
switch outputKind {
case reflect.Bool:
err = d.decodeBool(name, input, outVal)
case reflect.Interface:
err = d.decodeBasic(name, input, outVal)
case reflect.String:
err = d.decodeString(name, input, outVal)
case reflect.Int:
err = d.decodeInt(name, input, outVal)
case reflect.Uint:
err = d.decodeUint(name, input, outVal)
case reflect.Float32:
err = d.decodeFloat(name, input, outVal)
case reflect.Complex64:
err = d.decodeComplex(name, input, outVal)
case reflect.Struct:
err = d.decodeStruct(name, input, outVal)
case reflect.Map:
err = d.decodeMap(name, input, outVal)
case reflect.Ptr:
addMetaKey, err = d.decodePtr(name, input, outVal)
case reflect.Slice:
err = d.decodeSlice(name, input, outVal)
case reflect.Array:
err = d.decodeArray(name, input, outVal)
case reflect.Func:
err = d.decodeFunc(name, input, outVal)
default:
// If we reached this point then we weren't able to decode it
return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind))
// Check if the target implements Unmarshaler and use it if not disabled
unmarshaled := false
if !d.config.DisableUnmarshaler {
if unmarshaler, ok := getUnmarshaler(outVal); ok {
if err = unmarshaler.UnmarshalMapstructure(input); err != nil {
err = newDecodeError(name, err)
}
unmarshaled = true
}
}
if !unmarshaled {
switch outputKind {
case reflect.Bool:
err = d.decodeBool(name, input, outVal)
case reflect.Interface:
err = d.decodeBasic(name, input, outVal)
case reflect.String:
err = d.decodeString(name, input, outVal)
case reflect.Int:
err = d.decodeInt(name, input, outVal)
case reflect.Uint:
err = d.decodeUint(name, input, outVal)
case reflect.Float32:
err = d.decodeFloat(name, input, outVal)
case reflect.Complex64:
err = d.decodeComplex(name, input, outVal)
case reflect.Struct:
err = d.decodeStruct(name, input, outVal)
case reflect.Map:
err = d.decodeMap(name, input, outVal)
case reflect.Ptr:
addMetaKey, err = d.decodePtr(name, input, outVal)
case reflect.Slice:
err = d.decodeSlice(name, input, outVal)
case reflect.Array:
err = d.decodeArray(name, input, outVal)
case reflect.Func:
err = d.decodeFunc(name, input, outVal)
default:
// If we reached this point then we weren't able to decode it
return newDecodeError(name, fmt.Errorf("unsupported type: %s", outputKind))
}
}
// If we reached here, then we successfully decoded SOMETHING, so
@ -668,7 +754,7 @@ func (d *Decoder) decodeString(name string, data any, val reflect.Value) error {
case reflect.Uint8:
var uints []uint8
if dataKind == reflect.Array {
uints = make([]uint8, dataVal.Len(), dataVal.Len())
uints = make([]uint8, dataVal.Len())
for i := range uints {
uints[i] = dataVal.Index(i).Interface().(uint8)
}
@ -1060,8 +1146,8 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
)
}
tagValue := f.Tag.Get(d.config.TagName)
keyName := f.Name
tagValue, _ := getTagValue(f, d.config.TagName)
keyName := d.config.MapFieldName(f.Name)
if tagValue == "" && d.config.IgnoreUntaggedFields {
continue
@ -1070,6 +1156,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
// If Squash is set in the config, we squash the field down.
squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous
// If Deep is set in the config, set as default value.
deep := d.config.Deep
v = dereferencePtrToStructIfNeeded(v, d.config.TagName)
// Determine the name of the key in the map
@ -1078,12 +1167,12 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
continue
}
// If "omitempty" is specified in the tag, it ignores empty values.
if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) {
if strings.Contains(tagValue[index+1:], "omitempty") && isEmptyValue(v) {
continue
}
// If "omitzero" is specified in the tag, it ignores zero values.
if strings.Index(tagValue[index+1:], "omitzero") != -1 && v.IsZero() {
if strings.Contains(tagValue[index+1:], "omitzero") && v.IsZero() {
continue
}
@ -1103,7 +1192,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
)
}
} else {
if strings.Index(tagValue[index+1:], "remain") != -1 {
if strings.Contains(tagValue[index+1:], "remain") {
if v.Kind() != reflect.Map {
return newDecodeError(
name+"."+f.Name,
@ -1118,6 +1207,9 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
continue
}
}
deep = deep || strings.Contains(tagValue[index+1:], "deep")
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
keyName = keyNameTagValue
}
@ -1164,6 +1256,41 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
}
case reflect.Slice:
if deep {
var childType reflect.Type
switch v.Type().Elem().Kind() {
case reflect.Struct:
childType = reflect.TypeOf(map[string]any{})
default:
childType = v.Type().Elem()
}
sType := reflect.SliceOf(childType)
addrVal := reflect.New(sType)
vSlice := reflect.MakeSlice(sType, v.Len(), v.Cap())
if v.Len() > 0 {
reflect.Indirect(addrVal).Set(vSlice)
err := d.decode(keyName, v.Interface(), reflect.Indirect(addrVal))
if err != nil {
return err
}
}
vSlice = reflect.Indirect(addrVal)
valMap.SetMapIndex(reflect.ValueOf(keyName), vSlice)
break
}
// When deep mapping is not needed, fallthrough to normal copy
fallthrough
default:
valMap.SetMapIndex(reflect.ValueOf(keyName), v)
}
@ -1471,7 +1598,10 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
remain := false
// We always parse the tags cause we're looking for other tags too
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
tagParts := getTagParts(fieldType, d.config.TagName)
if len(tagParts) == 0 {
tagParts = []string{""}
}
for _, tag := range tagParts[1:] {
if tag == d.config.SquashTagOption {
squash = true
@ -1492,6 +1622,18 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
if !fieldVal.IsNil() {
structs = append(structs, fieldVal.Elem().Elem())
}
case reflect.Ptr:
if fieldVal.Type().Elem().Kind() == reflect.Struct {
if fieldVal.IsNil() {
fieldVal.Set(reflect.New(fieldVal.Type().Elem()))
}
structs = append(structs, fieldVal.Elem())
} else {
errs = append(errs, newDecodeError(
name+"."+fieldType.Name,
fmt.Errorf("unsupported type for squashed pointer: %s", fieldVal.Type().Elem().Kind()),
))
}
default:
errs = append(errs, newDecodeError(
name+"."+fieldType.Name,
@ -1516,13 +1658,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
field, fieldValue := f.field, f.val
fieldName := field.Name
tagValue := field.Tag.Get(d.config.TagName)
tagValue, _ := getTagValue(field, d.config.TagName)
if tagValue == "" && d.config.IgnoreUntaggedFields {
continue
}
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
} else {
fieldName = d.config.MapFieldName(fieldName)
}
rawMapKey := reflect.ValueOf(fieldName)
@ -1605,8 +1749,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
}
sort.Strings(keys)
// Improve error message when name is empty by showing the target struct type
// in the case where it is empty for embedded structs.
errorName := name
if errorName == "" {
errorName = val.Type().String()
}
errs = append(errs, newDecodeError(
name,
errorName,
fmt.Errorf("has invalid keys: %s", strings.Join(keys, ", ")),
))
}
@ -1692,7 +1842,7 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool,
if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields
return true
}
if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside
if checkMapstructureTags && hasAnyTag(f, tagName) { // check for mapstructure tags inside
return true
}
}
@ -1700,13 +1850,99 @@ func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool,
}
func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value {
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
if v.Kind() != reflect.Ptr {
return v
}
deref := v.Elem()
derefT := deref.Type()
if isStructTypeConvertibleToMap(derefT, true, tagName) {
return deref
switch v.Elem().Kind() {
case reflect.Slice:
return v.Elem()
case reflect.Struct:
deref := v.Elem()
derefT := deref.Type()
if isStructTypeConvertibleToMap(derefT, true, tagName) {
return deref
}
return v
default:
return v
}
return v
}
// hasAnyTag reports whether the field carries a non-empty value for any of
// the tag names in tagName (which may be a comma-separated list of names,
// expanded by getTagValue/splitTagNames).
func hasAnyTag(field reflect.StructField, tagName string) bool {
	_, ok := getTagValue(field, tagName)
	return ok
}
// getTagParts returns the comma-separated components of the first matching
// tag value on the field, or nil when no configured tag name is present.
func getTagParts(field reflect.StructField, tagName string) []string {
	if value, ok := getTagValue(field, tagName); ok {
		return strings.Split(value, ",")
	}
	return nil
}
// getTagValue looks up the field's struct tag under each configured tag name
// (tagName may be a comma-separated list) and returns the first non-empty
// value found; the boolean reports whether any match was found.
func getTagValue(field reflect.StructField, tagName string) (string, bool) {
	candidates := splitTagNames(tagName)
	for _, candidate := range candidates {
		value := field.Tag.Get(candidate)
		if value == "" {
			continue
		}
		return value, true
	}
	return "", false
}
// splitTagNames parses a comma-separated list of struct tag names, trimming
// surrounding whitespace and dropping empty entries. An empty input yields
// the package default, "mapstructure".
func splitTagNames(tagName string) []string {
	if tagName == "" {
		return []string{"mapstructure"}
	}
	parts := strings.Split(tagName, ",")
	names := make([]string, 0, len(parts))
	for _, raw := range parts {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			names = append(names, trimmed)
		}
	}
	return names
}
// unmarshalerType is the reflect.Type of the Unmarshaler interface, computed
// once at package init so repeated Implements checks on the decode hot path
// avoid calling reflect.TypeOf each time.
var unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()

// getUnmarshaler checks whether val (or its address) implements Unmarshaler
// and returns the implementation plus a boolean indicating whether it was
// found. Both pointer and value receivers are handled; the pointer receiver
// is tried first.
func getUnmarshaler(val reflect.Value) (Unmarshaler, bool) {
	// Skip invalid values: a zero reflect.Value supports no operations.
	if !val.IsValid() {
		return nil, false
	}
	// A nil pointer or nil interface cannot receive an unmarshal call.
	switch val.Kind() {
	case reflect.Pointer, reflect.Interface:
		if val.IsNil() {
			return nil, false
		}
	}
	// Check pointer receiver first (most common case).
	if val.CanAddr() {
		ptrVal := val.Addr()
		// Quick check: a type with no methods can't implement any interface.
		// NOTE(review): CanInterface is not checked on ptrVal before calling
		// Interface(); presumably val never originates from an unexported
		// field here — confirm against the callers of getUnmarshaler.
		if ptrVal.Type().NumMethod() > 0 && ptrVal.Type().Implements(unmarshalerType) {
			return ptrVal.Interface().(Unmarshaler), true
		}
	}
	// Fall back to a value receiver.
	// Quick check: if no methods, can't implement any interface
	if val.Type().NumMethod() > 0 && val.CanInterface() && val.Type().Implements(unmarshalerType) {
		return val.Interface().(Unmarshaler), true
	}
	return nil, false
}

View file

@ -140,11 +140,12 @@ A common use case would be integrating with different 3rd party signature
providers, like key management services from various cloud providers or Hardware
Security Modules (HSMs) or to implement additional standards.
| Extension | Purpose | Repo |
| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
| Extension | Purpose | Repo |
| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------------- |
| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
| TPM | Integrates with Trusted Platform Module (TPM) | https://github.com/salrashid123/golang-jwt-tpm |
*Disclaimer*: Unless otherwise specified, these integrations are maintained by
third parties and should not be considered as a primary offer by any of the

View file

@ -97,7 +97,7 @@ Backwards compatible API change that was missed in 2.0.0.
There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.

View file

@ -76,13 +76,6 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
}
}
// Decode signature
token.Signature, err = p.DecodeSegment(parts[2])
if err != nil {
return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
}
text := strings.Join(parts[0:2], ".")
// Lookup key(s)
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
@ -94,11 +87,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
}
// Join together header and claims in order to verify them with the signature
text := strings.Join(parts[0:2], ".")
switch have := got.(type) {
case VerificationKeySet:
if len(have.Keys) == 0 {
return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
}
// Iterate through keys and verify signature, skipping the rest when a match is found.
// Return the last error if no match is found.
for _, key := range have.Keys {
@ -131,7 +127,7 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, nil
}
// ParseUnverified parses the token but doesn't validate the signature.
// ParseUnverified parses the token but does not validate the signature.
//
// WARNING: Don't use this method unless you know what you're doing.
//
@ -146,7 +142,7 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
token = &Token{Raw: tokenString}
// parse Header
// Parse Header
var headerBytes []byte
if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
@ -155,7 +151,7 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
}
// parse Claims
// Parse Claims
token.Claims = claims
claimBytes, err := p.DecodeSegment(parts[1])
@ -196,6 +192,12 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke
return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
}
// Parse token signature
token.Signature, err = p.DecodeSegment(parts[2])
if err != nil {
return token, parts, newError("could not base64 decode signature", ErrTokenMalformed, err)
}
return token, parts, nil
}
@ -216,7 +218,7 @@ func splitToken(token string) ([]string, bool) {
parts[1] = claims
// One more cut to ensure the signature is the last part of the token and there are no more
// delimiters. This avoids an issue where malicious input could contain additional delimiters
// causing unecessary overhead parsing tokens.
// causing unnecessary overhead parsing tokens.
signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
if unexpected {
return nil, false

View file

@ -3,9 +3,7 @@ package jwt
import "time"
// ParserOption is used to implement functional-style options that modify the
// behavior of the parser. To add new options, just create a function (ideally
// beginning with With or Without) that returns an anonymous function that takes
// a *Parser type as input and manipulates its configuration accordingly.
// behavior of the parser.
type ParserOption func(*Parser)
// WithValidMethods is an option to supply algorithm methods that the parser
@ -66,6 +64,14 @@ func WithExpirationRequired() ParserOption {
}
}
// WithNotBeforeRequired returns the ParserOption to make the nbf claim required.
// By default, the nbf claim is optional.
func WithNotBeforeRequired() ParserOption {
return func(p *Parser) {
p.validator.requireNbf = true
}
}
// WithAudience configures the validator to require any of the specified
// audiences in the `aud` claim. Validation will fail if the audience is not
// listed in the token or the `aud` claim is missing.

View file

@ -32,8 +32,8 @@ type Token struct {
Method SigningMethod // Method is the signing method used or to be used
Header map[string]any // Header is the first segment of the token in decoded form
Claims Claims // Claims is the second segment of the token in decoded form
Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
Signature []byte // Signature is the third segment of the token in decoded form. Populated when you [Parse] or sign a token
Valid bool // Valid specifies if the token is valid. Populated when you [Parse] a token
}
// New creates a new [Token] with the specified signing method and an empty map
@ -71,6 +71,8 @@ func (t *Token) SignedString(key any) (string, error) {
return "", err
}
t.Signature = sig
return sstr + "." + t.EncodeSegment(sig), nil
}

View file

@ -44,6 +44,9 @@ type Validator struct {
// requireExp specifies whether the exp claim is required
requireExp bool
// requireNbf specifies whether the nbf claim is required
requireNbf bool
// verifyIat specifies whether the iat (Issued At) claim will be verified.
// According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
// only specifies the age of the token, but no validation check is
@ -111,8 +114,9 @@ func (v *Validator) Validate(claims Claims) error {
}
// We always need to check not-before, but usage of the claim itself is
// OPTIONAL.
if err = v.verifyNotBefore(claims, now, false); err != nil {
// OPTIONAL by default. requireNbf overrides this behavior and makes
// the nbf claim mandatory.
if err = v.verifyNotBefore(claims, now, v.requireNbf); err != nil {
errs = append(errs, err)
}

View file

@ -16,6 +16,7 @@ linters:
- govet
- ineffassign
- misspell
- modernize
- perfsprint
- revive
- staticcheck
@ -111,6 +112,9 @@ linters:
locale: US
ignore-rules:
- cancelled
modernize:
disable:
- omitzero
perfsprint:
int-conversion: true
err-error: true

View file

@ -11,6 +11,43 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section -->
<!-- Don't change this section unless doing release -->
## [1.40.0/0.62.0/0.16.0] 2026-02-02
### Added
- Add `AlwaysRecord` sampler in `go.opentelemetry.io/otel/sdk/trace`. (#7724)
- Add `Enabled` method to all synchronous instrument interfaces (`Float64Counter`, `Float64UpDownCounter`, `Float64Histogram`, `Float64Gauge`, `Int64Counter`, `Int64UpDownCounter`, `Int64Histogram`, `Int64Gauge`,) in `go.opentelemetry.io/otel/metric`.
This stabilizes the synchronous instrument enabled feature, allowing users to check if an instrument will process measurements before performing computationally expensive operations. (#7763)
- Add `go.opentelemetry.io/otel/semconv/v1.39.0` package.
The package contains semantic conventions from the `v1.39.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.39.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.38.0.` (#7783, #7789)
### Changed
- Improve the concurrent performance of `HistogramReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar` by 4x. (#7443)
- Improve the concurrent performance of `FixedSizeReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar`. (#7447)
- Improve performance of concurrent histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7474)
- Improve performance of concurrent synchronous gauge measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7478)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`. (#7492)
- `Exporter` in `go.opentelemetry.io/otel/exporters/prometheus` ignores metrics with the scope `go.opentelemetry.io/contrib/bridges/prometheus`.
This prevents scrape failures when the Prometheus exporter is misconfigured to get data from the Prometheus bridge. (#7688)
- Improve performance of concurrent exponential histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7702)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
### Fixed
- Fix bad log message when key-value pairs are dropped because of key duplication in `go.opentelemetry.io/otel/sdk/log`. (#7662)
- Fix `DroppedAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not count the non-attribute key-value pairs dropped because of key duplication. (#7662)
- Fix `SetAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not log that attributes are dropped when they are actually not dropped. (#7662)
- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to correctly handle HTTP/2 `GOAWAY` frame. (#7794)
- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `ioreg` command on Darwin (macOS). (#7818)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/exporters/zipkin`.
For more information, see the [OTel blog post deprecating the Zipkin exporter](https://opentelemetry.io/blog/2025/deprecating-zipkin-exporters/). (#7670)
## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
### Added
@ -3498,7 +3535,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.40.0...HEAD
[1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0
[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1

View file

@ -746,8 +746,8 @@ Encapsulate setup in constructor functions, ensuring clear ownership and scope:
import (
"errors"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
)
type SDKComponent struct {
@ -1039,7 +1039,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan)
All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go).
Use the metric semantic conventions convenience package [otelconv](./semconv/v1.39.0/otelconv/metric.go).
##### Component Identification

View file

@ -13,32 +13,28 @@ import (
// BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}
// StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface()
}

View file

@ -58,7 +58,7 @@ func isComparable[T comparable](t T) T { return t }
var (
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
keyValueType = reflect.TypeFor[KeyValue]()
// emptyHash is the hash of an empty set.
emptyHash = xxhash.New().Sum64()

View file

@ -66,8 +66,7 @@ func IntValue(v int) Value {
// IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value {
var int64Val int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]()))
for i, val := range v {
cp.Elem().Index(i).SetInt(int64(val))
}

View file

@ -317,7 +317,7 @@ func parseMember(member string) (Member, error) {
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
for _, pStr := range strings.Split(properties, propertyDelimiter) {
for pStr := range strings.SplitSeq(properties, propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
@ -480,7 +480,7 @@ func Parse(bStr string) (Baggage, error) {
}
b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) {
for memberStr := range strings.SplitSeq(bStr, listDelimiter) {
m, err := parseMember(memberStr)
if err != nil {
return Baggage{}, err

View file

@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver
FROM otel/weaver:v0.20.0@sha256:fa4f1c6954ecea78ab1a4e865bd6f5b4aaba80c1896f9f4a11e2c361d04e197e AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown

View file

@ -229,6 +229,13 @@ func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOpt
}
}
func (i *sfCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Counter).Enabled(ctx)
}
return false
}
type sfUpDownCounter struct {
embedded.Float64UpDownCounter
@ -255,6 +262,13 @@ func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.
}
}
func (i *sfUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64UpDownCounter).Enabled(ctx)
}
return false
}
type sfHistogram struct {
embedded.Float64Histogram
@ -281,6 +295,13 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
}
}
func (i *sfHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Histogram).Enabled(ctx)
}
return false
}
type sfGauge struct {
embedded.Float64Gauge
@ -307,6 +328,13 @@ func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOp
}
}
func (i *sfGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Gauge).Enabled(ctx)
}
return false
}
type siCounter struct {
embedded.Int64Counter
@ -333,6 +361,13 @@ func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption)
}
}
func (i *siCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Counter).Enabled(ctx)
}
return false
}
type siUpDownCounter struct {
embedded.Int64UpDownCounter
@ -359,6 +394,13 @@ func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOp
}
}
func (i *siUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64UpDownCounter).Enabled(ctx)
}
return false
}
type siHistogram struct {
embedded.Int64Histogram
@ -385,6 +427,13 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
}
}
func (i *siHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Histogram).Enabled(ctx)
}
return false
}
type siGauge struct {
embedded.Int64Gauge
@ -410,3 +459,10 @@ func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOpti
ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
}
}
func (i *siGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Gauge).Enabled(ctx)
}
return false
}

View file

@ -157,7 +157,7 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
cfg := metric.NewInt64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siCounter)(nil)),
kind: reflect.TypeFor[*siCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -183,7 +183,7 @@ func (m *meter) Int64UpDownCounter(
cfg := metric.NewInt64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siUpDownCounter)(nil)),
kind: reflect.TypeFor[*siUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -206,7 +206,7 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
cfg := metric.NewInt64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siHistogram)(nil)),
kind: reflect.TypeFor[*siHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -229,7 +229,7 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
cfg := metric.NewInt64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*siGauge)(nil)),
kind: reflect.TypeFor[*siGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -255,7 +255,7 @@ func (m *meter) Int64ObservableCounter(
cfg := metric.NewInt64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiCounter)(nil)),
kind: reflect.TypeFor[*aiCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -281,7 +281,7 @@ func (m *meter) Int64ObservableUpDownCounter(
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
kind: reflect.TypeFor[*aiUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -307,7 +307,7 @@ func (m *meter) Int64ObservableGauge(
cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*aiGauge)(nil)),
kind: reflect.TypeFor[*aiGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -330,7 +330,7 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
cfg := metric.NewFloat64CounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfCounter)(nil)),
kind: reflect.TypeFor[*sfCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -356,7 +356,7 @@ func (m *meter) Float64UpDownCounter(
cfg := metric.NewFloat64UpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
kind: reflect.TypeFor[*sfUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -382,7 +382,7 @@ func (m *meter) Float64Histogram(
cfg := metric.NewFloat64HistogramConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfHistogram)(nil)),
kind: reflect.TypeFor[*sfHistogram](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -405,7 +405,7 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
cfg := metric.NewFloat64GaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*sfGauge)(nil)),
kind: reflect.TypeFor[*sfGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -431,7 +431,7 @@ func (m *meter) Float64ObservableCounter(
cfg := metric.NewFloat64ObservableCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afCounter)(nil)),
kind: reflect.TypeFor[*afCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -457,7 +457,7 @@ func (m *meter) Float64ObservableUpDownCounter(
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afUpDownCounter)(nil)),
kind: reflect.TypeFor[*afUpDownCounter](),
description: cfg.Description(),
unit: cfg.Unit(),
}
@ -483,7 +483,7 @@ func (m *meter) Float64ObservableGauge(
cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := instID{
name: name,
kind: reflect.TypeOf((*afGauge)(nil)),
kind: reflect.TypeFor[*afGauge](),
description: cfg.Description(),
unit: cfg.Unit(),
}

View file

@ -227,7 +227,11 @@ type Float64Observer interface {
// attributes as another Float64Callbacks also registered for the same
// instrument.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Float64Callback func(context.Context, Float64Observer) error
// Float64ObservableOption applies options to float64 Observer instruments.

View file

@ -225,7 +225,11 @@ type Int64Observer interface {
// attributes as another Int64Callbacks also registered for the same
// instrument.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Int64Callback func(context.Context, Int64Observer) error
// Int64ObservableOption applies options to int64 Observer instruments.

View file

@ -244,7 +244,11 @@ type Meter interface {
// Callbacks. Meaning, it should not report measurements for an instrument with
// the same attributes as another Callback will report.
//
// The function needs to be concurrent safe.
// The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Callback func(context.Context, Observer) error
// Observer records measurements for multiple instruments in a Callback.

View file

@ -25,6 +25,12 @@ type Float64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Float64CounterConfig contains options for synchronous counter instruments that
@ -78,6 +84,12 @@ type Float64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Float64UpDownCounterConfig contains options for synchronous counter
@ -131,6 +143,12 @@ type Float64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Float64HistogramConfig contains options for synchronous histogram
@ -189,6 +207,12 @@ type Float64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Float64GaugeConfig contains options for synchronous gauge instruments that

View file

@ -25,6 +25,12 @@ type Int64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Int64CounterConfig contains options for synchronous counter instruments that
@ -78,6 +84,12 @@ type Int64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Int64UpDownCounterConfig contains options for synchronous counter
@ -131,6 +143,12 @@ type Int64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Int64HistogramConfig contains options for synchronous histogram instruments
@ -189,6 +207,12 @@ type Int64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
}
// Int64GaugeConfig contains options for synchronous gauge instruments that

View file

@ -0,0 +1,78 @@
<!-- Generated. DO NOT MODIFY. -->
# Migration from v1.38.0 to v1.39.0
The `go.opentelemetry.io/otel/semconv/v1.39.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.38.0` with the following exceptions.
## Removed
The following declarations have been removed.
Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
- `LinuxMemorySlabStateKey`
- `LinuxMemorySlabStateReclaimable`
- `LinuxMemorySlabStateUnreclaimable`
- `PeerService`
- `PeerServiceKey`
- `RPCConnectRPCErrorCodeAborted`
- `RPCConnectRPCErrorCodeAlreadyExists`
- `RPCConnectRPCErrorCodeCancelled`
- `RPCConnectRPCErrorCodeDataLoss`
- `RPCConnectRPCErrorCodeDeadlineExceeded`
- `RPCConnectRPCErrorCodeFailedPrecondition`
- `RPCConnectRPCErrorCodeInternal`
- `RPCConnectRPCErrorCodeInvalidArgument`
- `RPCConnectRPCErrorCodeKey`
- `RPCConnectRPCErrorCodeNotFound`
- `RPCConnectRPCErrorCodeOutOfRange`
- `RPCConnectRPCErrorCodePermissionDenied`
- `RPCConnectRPCErrorCodeResourceExhausted`
- `RPCConnectRPCErrorCodeUnauthenticated`
- `RPCConnectRPCErrorCodeUnavailable`
- `RPCConnectRPCErrorCodeUnimplemented`
- `RPCConnectRPCErrorCodeUnknown`
- `RPCConnectRPCRequestMetadata`
- `RPCConnectRPCResponseMetadata`
- `RPCGRPCRequestMetadata`
- `RPCGRPCResponseMetadata`
- `RPCGRPCStatusCodeAborted`
- `RPCGRPCStatusCodeAlreadyExists`
- `RPCGRPCStatusCodeCancelled`
- `RPCGRPCStatusCodeDataLoss`
- `RPCGRPCStatusCodeDeadlineExceeded`
- `RPCGRPCStatusCodeFailedPrecondition`
- `RPCGRPCStatusCodeInternal`
- `RPCGRPCStatusCodeInvalidArgument`
- `RPCGRPCStatusCodeKey`
- `RPCGRPCStatusCodeNotFound`
- `RPCGRPCStatusCodeOk`
- `RPCGRPCStatusCodeOutOfRange`
- `RPCGRPCStatusCodePermissionDenied`
- `RPCGRPCStatusCodeResourceExhausted`
- `RPCGRPCStatusCodeUnauthenticated`
- `RPCGRPCStatusCodeUnavailable`
- `RPCGRPCStatusCodeUnimplemented`
- `RPCGRPCStatusCodeUnknown`
- `RPCJSONRPCErrorCode`
- `RPCJSONRPCErrorCodeKey`
- `RPCJSONRPCErrorMessage`
- `RPCJSONRPCErrorMessageKey`
- `RPCJSONRPCRequestID`
- `RPCJSONRPCRequestIDKey`
- `RPCJSONRPCVersion`
- `RPCJSONRPCVersionKey`
- `RPCService`
- `RPCServiceKey`
- `RPCSystemApacheDubbo`
- `RPCSystemConnectRPC`
- `RPCSystemDotnetWcf`
- `RPCSystemGRPC`
- `RPCSystemJSONRPC`
- `RPCSystemJavaRmi`
- `RPCSystemKey`
- `RPCSystemOncRPC`
[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue

View file

@ -0,0 +1,3 @@
# Semconv v1.39.0
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.39.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.39.0)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.39.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"

View file

@ -0,0 +1,56 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
import (
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
//
// If err is nil, the returned attribute has the default value
// [ErrorTypeOther].
//
// If err's type has the method
//
// ErrorType() string
//
// then the returned attribute has the value of err.ErrorType(). Otherwise, the
// returned attribute has a value derived from the concrete type of err.
//
// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
return ErrorTypeKey.String(errorType(err))
}
func errorType(err error) string {
var s string
if et, ok := err.(interface{ ErrorType() string }); ok {
// Prioritize the ErrorType method if available.
s = et.ErrorType()
}
if s == "" {
// Fallback to reflection if the ErrorType method is not supported or
// returns an empty value.
t := reflect.TypeOf(err)
pkg, name := t.PkgPath(), t.Name()
if pkg != "" && name != "" {
s = pkg + "." + name
} else {
// The type has no package path or name (predeclared, not-defined,
// or alias for a not-defined type).
//
// This is not guaranteed to be unique, but is a best effort.
s = t.String()
}
}
return s
}

View file

@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
ExceptionEventName = "exception"
)

View file

@ -0,0 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package semconv // import "go.opentelemetry.io/otel/semconv/v1.39.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.39.0"

View file

@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)

View file

@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.39.0"
return "1.40.0"
}

View file

@ -3,7 +3,7 @@
module-sets:
stable-v1:
version: v1.39.0
version: v1.40.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.61.0
version: v0.62.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
version: v0.15.0
version: v0.16.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@ -46,6 +46,9 @@ modules:
go.opentelemetry.io/otel/exporters/stdout/stdouttrace:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric:
version-refs:
- ./internal/version.go
go.opentelemetry.io/otel/exporters/prometheus:
version-refs:
- ./internal/version.go

View file

@ -38,6 +38,9 @@ type chacha20poly1305 struct {
// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key.
func New(key []byte) (cipher.AEAD, error) {
if fips140Enforced() {
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
}
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}

View file

@ -0,0 +1,9 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.26
package chacha20poly1305
func fips140Enforced() bool { return false }

View file

@ -0,0 +1,11 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.26
package chacha20poly1305
import "crypto/fips140"
func fips140Enforced() bool { return fips140.Enforced() }

View file

@ -22,6 +22,9 @@ type xchacha20poly1305 struct {
// preferred when nonce uniqueness cannot be trivially ensured, or whenever
// nonces are randomly generated.
func NewX(key []byte) (cipher.AEAD, error) {
if fips140Enforced() {
return nil, errors.New("chacha20poly1305: use of ChaCha20Poly1305 is not allowed in FIPS 140-only mode")
}
if len(key) != KeySize {
return nil, errors.New("chacha20poly1305: bad key length")
}

View file

@ -64,6 +64,80 @@ func initOptions() {
func archInit() {
// From internal/cpu
const (
// eax bits
cpuid_AVXVNNI = 1 << 4
// ecx bits
cpuid_SSE3 = 1 << 0
cpuid_PCLMULQDQ = 1 << 1
cpuid_AVX512VBMI = 1 << 1
cpuid_AVX512VBMI2 = 1 << 6
cpuid_SSSE3 = 1 << 9
cpuid_AVX512GFNI = 1 << 8
cpuid_AVX512VAES = 1 << 9
cpuid_AVX512VNNI = 1 << 11
cpuid_AVX512BITALG = 1 << 12
cpuid_FMA = 1 << 12
cpuid_AVX512VPOPCNTDQ = 1 << 14
cpuid_SSE41 = 1 << 19
cpuid_SSE42 = 1 << 20
cpuid_POPCNT = 1 << 23
cpuid_AES = 1 << 25
cpuid_OSXSAVE = 1 << 27
cpuid_AVX = 1 << 28
// "Extended Feature Flag" bits returned in EBX for CPUID EAX=0x7 ECX=0x0
cpuid_BMI1 = 1 << 3
cpuid_AVX2 = 1 << 5
cpuid_BMI2 = 1 << 8
cpuid_ERMS = 1 << 9
cpuid_AVX512F = 1 << 16
cpuid_AVX512DQ = 1 << 17
cpuid_ADX = 1 << 19
cpuid_AVX512CD = 1 << 28
cpuid_SHA = 1 << 29
cpuid_AVX512BW = 1 << 30
cpuid_AVX512VL = 1 << 31
// "Extended Feature Flag" bits returned in ECX for CPUID EAX=0x7 ECX=0x0
cpuid_AVX512_VBMI = 1 << 1
cpuid_AVX512_VBMI2 = 1 << 6
cpuid_GFNI = 1 << 8
cpuid_AVX512VPCLMULQDQ = 1 << 10
cpuid_AVX512_BITALG = 1 << 12
// edx bits
cpuid_FSRM = 1 << 4
// edx bits for CPUID 0x80000001
cpuid_RDTSCP = 1 << 27
)
// Additional constants not in internal/cpu
const (
// eax=1: edx
cpuid_SSE2 = 1 << 26
// eax=1: ecx
cpuid_CX16 = 1 << 13
cpuid_RDRAND = 1 << 30
// eax=7,ecx=0: ebx
cpuid_RDSEED = 1 << 18
cpuid_AVX512IFMA = 1 << 21
cpuid_AVX512PF = 1 << 26
cpuid_AVX512ER = 1 << 27
// eax=7,ecx=0: edx
cpuid_AVX5124VNNIW = 1 << 2
cpuid_AVX5124FMAPS = 1 << 3
cpuid_AMXBF16 = 1 << 22
cpuid_AMXTile = 1 << 24
cpuid_AMXInt8 = 1 << 25
// eax=7,ecx=1: eax
cpuid_AVX512BF16 = 1 << 5
cpuid_AVXIFMA = 1 << 23
// eax=7,ecx=1: edx
cpuid_AVXVNNIInt8 = 1 << 4
)
Initialized = true
maxID, _, _, _ := cpuid(0, 0)
@ -73,90 +147,90 @@ func archInit() {
}
_, _, ecx1, edx1 := cpuid(1, 0)
X86.HasSSE2 = isSet(26, edx1)
X86.HasSSE2 = isSet(edx1, cpuid_SSE2)
X86.HasSSE3 = isSet(0, ecx1)
X86.HasPCLMULQDQ = isSet(1, ecx1)
X86.HasSSSE3 = isSet(9, ecx1)
X86.HasFMA = isSet(12, ecx1)
X86.HasCX16 = isSet(13, ecx1)
X86.HasSSE41 = isSet(19, ecx1)
X86.HasSSE42 = isSet(20, ecx1)
X86.HasPOPCNT = isSet(23, ecx1)
X86.HasAES = isSet(25, ecx1)
X86.HasOSXSAVE = isSet(27, ecx1)
X86.HasRDRAND = isSet(30, ecx1)
X86.HasSSE3 = isSet(ecx1, cpuid_SSE3)
X86.HasPCLMULQDQ = isSet(ecx1, cpuid_PCLMULQDQ)
X86.HasSSSE3 = isSet(ecx1, cpuid_SSSE3)
X86.HasFMA = isSet(ecx1, cpuid_FMA)
X86.HasCX16 = isSet(ecx1, cpuid_CX16)
X86.HasSSE41 = isSet(ecx1, cpuid_SSE41)
X86.HasSSE42 = isSet(ecx1, cpuid_SSE42)
X86.HasPOPCNT = isSet(ecx1, cpuid_POPCNT)
X86.HasAES = isSet(ecx1, cpuid_AES)
X86.HasOSXSAVE = isSet(ecx1, cpuid_OSXSAVE)
X86.HasRDRAND = isSet(ecx1, cpuid_RDRAND)
var osSupportsAVX, osSupportsAVX512 bool
// For XGETBV, OSXSAVE bit is required and sufficient.
if X86.HasOSXSAVE {
eax, _ := xgetbv()
// Check if XMM and YMM registers have OS support.
osSupportsAVX = isSet(1, eax) && isSet(2, eax)
osSupportsAVX = isSet(eax, 1<<1) && isSet(eax, 1<<2)
if runtime.GOOS == "darwin" {
// Darwin requires special AVX512 checks, see cpu_darwin_x86.go
osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
} else {
// Check if OPMASK and ZMM registers have OS support.
osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
osSupportsAVX512 = osSupportsAVX && isSet(eax, 1<<5) && isSet(eax, 1<<6) && isSet(eax, 1<<7)
}
}
X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
X86.HasAVX = isSet(ecx1, cpuid_AVX) && osSupportsAVX
if maxID < 7 {
return
}
eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
X86.HasERMS = isSet(9, ebx7)
X86.HasRDSEED = isSet(18, ebx7)
X86.HasADX = isSet(19, ebx7)
X86.HasBMI1 = isSet(ebx7, cpuid_BMI1)
X86.HasAVX2 = isSet(ebx7, cpuid_AVX2) && osSupportsAVX
X86.HasBMI2 = isSet(ebx7, cpuid_BMI2)
X86.HasERMS = isSet(ebx7, cpuid_ERMS)
X86.HasRDSEED = isSet(ebx7, cpuid_RDSEED)
X86.HasADX = isSet(ebx7, cpuid_ADX)
X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
X86.HasAVX512 = isSet(ebx7, cpuid_AVX512F) && osSupportsAVX512 // Because avx-512 foundation is the core required extension
if X86.HasAVX512 {
X86.HasAVX512F = true
X86.HasAVX512CD = isSet(28, ebx7)
X86.HasAVX512ER = isSet(27, ebx7)
X86.HasAVX512PF = isSet(26, ebx7)
X86.HasAVX512VL = isSet(31, ebx7)
X86.HasAVX512BW = isSet(30, ebx7)
X86.HasAVX512DQ = isSet(17, ebx7)
X86.HasAVX512IFMA = isSet(21, ebx7)
X86.HasAVX512VBMI = isSet(1, ecx7)
X86.HasAVX5124VNNIW = isSet(2, edx7)
X86.HasAVX5124FMAPS = isSet(3, edx7)
X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7)
X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7)
X86.HasAVX512VNNI = isSet(11, ecx7)
X86.HasAVX512GFNI = isSet(8, ecx7)
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
X86.HasAVX512CD = isSet(ebx7, cpuid_AVX512CD)
X86.HasAVX512ER = isSet(ebx7, cpuid_AVX512ER)
X86.HasAVX512PF = isSet(ebx7, cpuid_AVX512PF)
X86.HasAVX512VL = isSet(ebx7, cpuid_AVX512VL)
X86.HasAVX512BW = isSet(ebx7, cpuid_AVX512BW)
X86.HasAVX512DQ = isSet(ebx7, cpuid_AVX512DQ)
X86.HasAVX512IFMA = isSet(ebx7, cpuid_AVX512IFMA)
X86.HasAVX512VBMI = isSet(ecx7, cpuid_AVX512_VBMI)
X86.HasAVX5124VNNIW = isSet(edx7, cpuid_AVX5124VNNIW)
X86.HasAVX5124FMAPS = isSet(edx7, cpuid_AVX5124FMAPS)
X86.HasAVX512VPOPCNTDQ = isSet(ecx7, cpuid_AVX512VPOPCNTDQ)
X86.HasAVX512VPCLMULQDQ = isSet(ecx7, cpuid_AVX512VPCLMULQDQ)
X86.HasAVX512VNNI = isSet(ecx7, cpuid_AVX512VNNI)
X86.HasAVX512GFNI = isSet(ecx7, cpuid_AVX512GFNI)
X86.HasAVX512VAES = isSet(ecx7, cpuid_AVX512VAES)
X86.HasAVX512VBMI2 = isSet(ecx7, cpuid_AVX512VBMI2)
X86.HasAVX512BITALG = isSet(ecx7, cpuid_AVX512BITALG)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
X86.HasAMXTile = isSet(edx7, cpuid_AMXTile)
X86.HasAMXInt8 = isSet(edx7, cpuid_AMXInt8)
X86.HasAMXBF16 = isSet(edx7, cpuid_AMXBF16)
// These features depend on the second level of extended features.
if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 {
X86.HasAVX512BF16 = isSet(5, eax71)
X86.HasAVX512BF16 = isSet(eax71, cpuid_AVX512BF16)
}
if X86.HasAVX {
X86.HasAVXIFMA = isSet(23, eax71)
X86.HasAVXVNNI = isSet(4, eax71)
X86.HasAVXVNNIInt8 = isSet(4, edx71)
X86.HasAVXIFMA = isSet(eax71, cpuid_AVXIFMA)
X86.HasAVXVNNI = isSet(eax71, cpuid_AVXVNNI)
X86.HasAVXVNNIInt8 = isSet(edx71, cpuid_AVXVNNIInt8)
}
}
}
func isSet(bitpos uint, value uint32) bool {
return value&(1<<bitpos) != 0
// isSet reports whether hwc has any of the bits in value set.
func isSet(hwc uint32, value uint32) bool {
	return (value & hwc) != 0
}

28
vendor/golang.org/x/term/terminal.go generated vendored
View file

@ -160,7 +160,9 @@ const (
keyEnd
keyDeleteWord
keyDeleteLine
keyDelete
keyClearScreen
keyTranspose
keyPasteStart
keyPasteEnd
)
@ -194,6 +196,8 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
return keyDeleteLine, b[1:]
case 12: // ^L
return keyClearScreen, b[1:]
case 20: // ^T
return keyTranspose, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
case 14: // ^N
@ -228,6 +232,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
}
}
if !pasteActive && len(b) >= 4 && b[0] == keyEscape && b[1] == '[' && b[2] == '3' && b[3] == '~' {
return keyDelete, b[4:]
}
if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
switch b[5] {
case 'C':
@ -590,7 +598,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
t.line = t.line[:t.pos]
t.moveCursorToPos(t.pos)
case keyCtrlD:
case keyCtrlD, keyDelete:
// Erase the character under the current position.
// The EOF case when the line is empty is handled in
// readLine().
@ -600,6 +608,24 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) {
}
case keyCtrlU:
t.eraseNPreviousChars(t.pos)
case keyTranspose:
// Ctrl-T: transpose the two characters around the cursor and advance
// the cursor. Best-effort.
if len(t.line) < 2 || t.pos < 1 {
// Nothing to transpose with fewer than two characters, or with the
// cursor at the start of the line.
return
}
swap := t.pos
if swap == len(t.line) {
swap-- // special: at end of line, swap previous two chars
}
t.line[swap-1], t.line[swap] = t.line[swap], t.line[swap-1]
// Advance the cursor unless it is already past the last character.
if t.pos < len(t.line) {
t.pos++
}
// Redraw only when echoing: rewrite from the first changed cell, then
// restore the cursor position.
if t.echo {
t.moveCursorToPos(swap - 1)
t.writeLine(t.line[swap-1:])
t.moveCursorToPos(t.pos)
}
case keyClearScreen:
// Erases the screen and moves the cursor to the home position.
t.queue([]rune("\x1b[2J\x1b[H"))

30
vendor/modules.txt vendored
View file

@ -21,10 +21,9 @@ github.com/chzyer/readline
# github.com/clipperhouse/stringish v0.1.1
## explicit; go 1.18
github.com/clipperhouse/stringish
# github.com/clipperhouse/uax29/v2 v2.3.0
# github.com/clipperhouse/uax29/v2 v2.5.0
## explicit; go 1.18
github.com/clipperhouse/uax29/v2/graphemes
github.com/clipperhouse/uax29/v2/internal/iterators
# github.com/cloudbase/garm-provider-common v0.1.8-0.20251001105909-bbcacae60e7c
## explicit; go 1.23.0
github.com/cloudbase/garm-provider-common/cloudconfig
@ -44,7 +43,7 @@ github.com/felixge/httpsnoop
# github.com/gdamore/encoding v1.0.1
## explicit; go 1.9
github.com/gdamore/encoding
# github.com/gdamore/tcell/v2 v2.13.5
# github.com/gdamore/tcell/v2 v2.13.8
## explicit; go 1.24.0
github.com/gdamore/tcell/v2
github.com/gdamore/tcell/v2/terminfo
@ -169,14 +168,14 @@ github.com/go-openapi/validate
# github.com/go-sql-driver/mysql v1.9.3
## explicit; go 1.21.0
github.com/go-sql-driver/mysql
# github.com/go-viper/mapstructure/v2 v2.4.0
# github.com/go-viper/mapstructure/v2 v2.5.0
## explicit; go 1.18
github.com/go-viper/mapstructure/v2
github.com/go-viper/mapstructure/v2/internal/errors
# github.com/golang-jwt/jwt/v4 v4.5.2
## explicit; go 1.16
github.com/golang-jwt/jwt/v4
# github.com/golang-jwt/jwt/v5 v5.3.0
# github.com/golang-jwt/jwt/v5 v5.3.1
## explicit; go 1.21
github.com/golang-jwt/jwt/v5
# github.com/google/go-github/v72 v72.0.0
@ -307,7 +306,7 @@ github.com/stretchr/testify/suite
# github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569
## explicit; go 1.18
github.com/teris-io/shortid
# go.mongodb.org/mongo-driver v1.17.6
# go.mongodb.org/mongo-driver v1.17.9
## explicit; go 1.18
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec
@ -320,7 +319,7 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore
## explicit; go 1.24.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
# go.opentelemetry.io/otel v1.39.0
# go.opentelemetry.io/otel v1.40.0
## explicit; go 1.24.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@ -332,11 +331,12 @@ go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.37.0
# go.opentelemetry.io/otel/metric v1.39.0
go.opentelemetry.io/otel/semconv/v1.39.0
# go.opentelemetry.io/otel/metric v1.40.0
## explicit; go 1.24.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
# go.opentelemetry.io/otel/trace v1.39.0
# go.opentelemetry.io/otel/trace v1.40.0
## explicit; go 1.24.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@ -348,7 +348,7 @@ go.yaml.in/yaml/v2
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
# golang.org/x/crypto v0.46.0
# golang.org/x/crypto v0.47.0
## explicit; go 1.24.0
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
@ -357,10 +357,10 @@ golang.org/x/crypto/chacha20poly1305
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
# golang.org/x/mod v0.31.0
# golang.org/x/mod v0.32.0
## explicit; go 1.24.0
golang.org/x/mod/semver
# golang.org/x/net v0.48.0
# golang.org/x/net v0.49.0
## explicit; go 1.24.0
golang.org/x/net/idna
golang.org/x/net/internal/socks
@ -372,16 +372,16 @@ golang.org/x/oauth2/internal
# golang.org/x/sync v0.19.0
## explicit; go 1.24.0
golang.org/x/sync/errgroup
# golang.org/x/sys v0.39.0
# golang.org/x/sys v0.40.0
## explicit; go 1.24.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
# golang.org/x/term v0.38.0
# golang.org/x/term v0.39.0
## explicit; go 1.24.0
golang.org/x/term
# golang.org/x/text v0.32.0
# golang.org/x/text v0.33.0
## explicit; go 1.24.0
golang.org/x/text/cases
golang.org/x/text/encoding